diff --git a/.gitignore b/.gitignore index 85e17210..44995983 100644 --- a/.gitignore +++ b/.gitignore @@ -11,6 +11,7 @@ docs/source/opinf/literature.md __pycache__/ .ipynb_checkpoints/ .pytest_cache/ +.ruff_cache/ htmlcov/ .coverage* build/ @@ -52,3 +53,4 @@ html/ # Other *.swp .markdownlint.json +Notes/ diff --git a/docs/_config.yml b/docs/_config.yml index 83a192e9..2506b36f 100644 --- a/docs/_config.yml +++ b/docs/_config.yml @@ -68,6 +68,9 @@ sphinx: sklearn: - "https://scikit-learn.org/stable/" - null + pandas: + - "https://pandas.pydata.org/docs/" + - null mathjax3_config: tex: macros: diff --git a/docs/_toc.yml b/docs/_toc.yml index 7b09e30d..9365a3f8 100644 --- a/docs/_toc.yml +++ b/docs/_toc.yml @@ -21,7 +21,10 @@ parts: # numbered: 1 chapters: - file: source/tutorials/basics.ipynb - - file: source/tutorials/heat_equation.ipynb + - file: source/tutorials/inputs.ipynb + # - file: source/tutorials/lifting.ipynb + # - file: source/tutorials/regularization.ipynb + - file: source/tutorials/parametric.ipynb # API reference via sphinx-autodoc + limited handwritten documentation. - caption: API Reference diff --git a/docs/source/api/ddt.ipynb b/docs/source/api/ddt.ipynb index 41a4bda4..7cc04834 100644 --- a/docs/source/api/ddt.ipynb +++ b/docs/source/api/ddt.ipynb @@ -25,7 +25,7 @@ " DerivativeEstimatorTemplate\n", " UniformFiniteDifferencer\n", " NonuniformFiniteDifferencer\n", - " InterpolationDerivativeEstimator\n", + " InterpDerivativeEstimator\n", "\n", "**Finite Difference Schemes for Uniformly Spaced Data**\n", "\n", @@ -352,7 +352,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The {class}`InterpolationDerivativeEstimator` interpolates the state data using classes from {mod}`scipy.interpolate` and evaluates the derivative of the interpolant." + "The {class}`InterpDerivativeEstimator` interpolates the state data using classes from {mod}`scipy.interpolate` and evaluates the derivative of the interpolant." 
] }, { @@ -361,7 +361,7 @@ "metadata": {}, "outputs": [], "source": [ - "estimator = opinf.ddt.InterpolationDerivativeEstimator(t, \"cubic\")\n", + "estimator = opinf.ddt.InterpDerivativeEstimator(t, \"pchip\")\n", "print(estimator)" ] }, diff --git a/docs/source/api/missing.rst b/docs/source/api/missing.rst index fdb89aec..58dbbbf8 100644 --- a/docs/source/api/missing.rst +++ b/docs/source/api/missing.rst @@ -64,7 +64,7 @@ ddt.ipynb DerivativeEstimatorTemplate UniformFiniteDifferencer NonuniformFiniteDifferencer - InterpolationDerivativeEstimator + InterpDerivativeEstimator fwd1 fwd2 fwd3 @@ -107,16 +107,18 @@ operators.ipynb StateInputOperator ParametricOperatorTemplate ParametricOpInfOperator - InterpolatedConstantOperator - InterpolatedLinearOperator - InterpolatedQuadraticOperator - InterpolatedCubicOperator - InterpolatedInputOperator - InterpolatedStateInputOperator - has_inputs - is_nonparametric - is_parametric - is_uncalibrated + AffineConstantOperator + AffineLinearOperator + AffineQuadraticOperator + AffineCubicOperator + AffineInputOperator + AffineStateInputOperator + InterpConstantOperator + InterpLinearOperator + InterpQuadraticOperator + InterpCubicOperator + InterpInputOperator + InterpStateInputOperator lstsq.ipynb ----------- diff --git a/docs/source/api/models.md b/docs/source/api/models.md index 92d705ce..dc3d3c4f 100644 --- a/docs/source/api/models.md +++ b/docs/source/api/models.md @@ -2,6 +2,28 @@ ```{eval-rst} .. automodule:: opinf.models + +.. currentmodule:: opinf.models + +**Nonparametric Models** + +.. autosummary:: + :toctree: _autosummaries + :nosignatures: + + ContinuousModel + DiscreteModel + +**Parametric Models** + +.. autosummary:: + :toctree: _autosummaries + :nosignatures: + + ParametricContinuousModel + ParametricDiscreteModel + InterpContinuousModel + InterpDiscreteModel ``` :::{admonition} Overview @@ -55,7 +77,6 @@ A _nonparametric_ model is comprised exclusively of [nonparametric operators](se .. 
currentmodule:: opinf.models .. autosummary:: - :toctree: _autosummaries :nosignatures: ContinuousModel @@ -123,19 +144,28 @@ A _parametric model_ is a model with at least one [parametric operator](sec-oper Parametric models are similar to nonparametric models: they are initialized with a list of operators, use `fit()` to calibrate operator entries, and `predict()` to solve the model. In addition, parametric models have an `evaluate()` method that returns a nonparametric model at a fixed parameter value. -### Interpolated Models +```{eval-rst} +.. currentmodule:: opinf.models + +.. autosummary:: + :nosignatures: + + ParametricContinuousModel + ParametricDiscreteModel +``` + +### Interpolatory Models -Interpolated models consist exclusively of [interpolated operators](sec-operators-interpolated). +Interpolatory models consist exclusively of [interpolatory operators](sec-operators-interpolated). ```{eval-rst} .. currentmodule:: opinf.models .. autosummary:: - :toctree: _autosummaries :nosignatures: - InterpolatedContinuousModel - InterpolatedDiscreteModel + InterpContinuousModel + InterpDiscreteModel ``` :::{tip} @@ -143,26 +173,26 @@ The `operators` constructor argument for these classes can also be a string that | Character | {mod}`opinf.operators` class | | :-------- | :------------------------------- | -| `'c'` | {class}`opinf.operators.InterpolatedConstantOperator` | -| `'A'` | {class}`opinf.operators.InterpolatedLinearOperator` | -| `'H'` | {class}`opinf.operators.InterpolatedQuadraticOperator` | -| `'G'` | {class}`opinf.operators.InterpolatedCubicOperator` | -| `'B'` | {class}`opinf.operators.InterpolatedInputOperator` | -| `'N'` | {class}`opinf.operators.InterpolatedStateInputOperator` | +| `'c'` | {class}`opinf.operators.InterpConstantOperator` | +| `'A'` | {class}`opinf.operators.InterpLinearOperator` | +| `'H'` | {class}`opinf.operators.InterpQuadraticOperator` | +| `'G'` | {class}`opinf.operators.InterpCubicOperator` | +| `'B'` | 
{class}`opinf.operators.InterpInputOperator` | +| `'N'` | {class}`opinf.operators.InterpStateInputOperator` | ```python import opinf # Initialize the model with a list of operator objects. -model = opinf.models.InterpolatedContinuousModel( +model = opinf.models.InterpContinuousModel( operators=[ - opinf.operators.InterpolatedCubicOperator(), - opinf.operators.InterpolatedStateInputOperator(), + opinf.operators.InterpCubicOperator(), + opinf.operators.InterpStateInputOperator(), ] ) # Equivalently, initialize the model with a string. -model = opinf.models.InterpolatedContinuousModel(operators="GN") +model = opinf.models.InterpContinuousModel(operators="GN") ``` ::: diff --git a/docs/source/api/operators.ipynb b/docs/source/api/operators.ipynb index d4f3595b..5d649ce1 100644 --- a/docs/source/api/operators.ipynb +++ b/docs/source/api/operators.ipynb @@ -40,22 +40,18 @@ "\n", " ParametricOperatorTemplate\n", " ParametricOpInfOperator\n", - " InterpolatedConstantOperator\n", - " InterpolatedLinearOperator\n", - " InterpolatedQuadraticOperator\n", - " InterpolatedCubicOperator\n", - " InterpolatedInputOperator\n", - " InterpolatedStateInputOperator\n", - "\n", - "**Utilities**\n", - "\n", - ".. autosummary::\n", - " :toctree: _autosummaries\n", - "\n", - " has_inputs\n", - " is_nonparametric\n", - " is_parametric\n", - " is_uncalibrated\n", + " AffineConstantOperator\n", + " AffineLinearOperator\n", + " AffineQuadraticOperator\n", + " AffineCubicOperator\n", + " AffineInputOperator\n", + " AffineStateInputOperator\n", + " InterpConstantOperator\n", + " InterpLinearOperator\n", + " InterpQuadraticOperator\n", + " InterpCubicOperator\n", + " InterpInputOperator\n", + " InterpStateInputOperator\n", "```" ] }, @@ -191,7 +187,6 @@ "source": [ "```{eval-rst}\n", ".. 
autosummary::\n", - " :toctree: _autosummaries\n", " :nosignatures:\n", "\n", " ConstantOperator\n", @@ -303,12 +298,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Operator Inference requires state, input, and [derivative](opinf.ddt) data $\\{(\\qhat_j,\\u_j,\\dot{\\qhat}_j)\\}_{j=0}^{k-1}$ that approximately satisfy the desired model dynamics.\n", + "Operator Inference requires state, input, and \"left-hand side\" data $\\{(\\qhat_j,\\u_j,\\z_j)\\}_{j=0}^{k-1}$ that approximately satisfy the desired model dynamics.\n", + "For [time-continuous models](opinf.models.ContinuousModel), $\\z_j$ is the [time derivative](opinf.ddt) of the state data; for [fully discrete models](opinf.models.DiscreteModel), $\\z_j$ is the ``next state,'' usually $\\qhat_{j+1}$.\n", "For {eq}`eq:operators:model`, and assuming each operator is an OpInf operator, the data should approximately satisfy\n", "\n", "$$\n", "\\begin{aligned}\n", - " \\dot{\\qhat}_j\n", + " \\z_j\n", " \\approx \\Ophat(\\qhat_j, \\u_j)\n", " = \\sum_{\\ell=1}^{n_\\textrm{terms}} \\Ophat_{\\ell}(\\qhat_j, \\u_j)\n", " = \\sum_{\\ell=1}^{n_\\textrm{terms}} \\Ohat_{\\ell}\\d_{\\ell}(\\qhat_j, \\u_j),\n", @@ -333,7 +329,7 @@ "\n", "$$\n", "\\begin{aligned}\n", - " \\dot{\\qhat}_j\n", + " \\z_j\n", " \\approx \\sum_{\\ell=1}^{n_\\textrm{terms}} \\Ohat_{\\ell}\\d_{\\ell}(\\qhat_j, \\u_j)\n", " = [~\\Ohat_{1}~~\\cdots~~\\Ohat_{n_\\textrm{terms}}~]\n", " \\left[\\begin{array}{c}\n", @@ -350,7 +346,7 @@ "\\begin{aligned}\n", " \\left[\\begin{array}{c|c|c}\n", " & & \\\\\n", - " \\dot{\\qhat}_0 & \\cdots & \\dot{\\qhat}_{k-1}\n", + " \\z_0 & \\cdots & \\z_{k-1}\n", " \\\\ & &\n", " \\end{array}\\right]\n", " \\approx\n", @@ -368,7 +364,7 @@ "$$\n", "\\begin{aligned}\n", " \\Z\n", - " &= [~\\dot{\\qhat}_0~~\\cdots~~\\dot{\\qhat}_{k-1}~] \\in \\RR^{r\\times k},\n", + " &= [~\\z_0~~\\cdots~~\\z_{k-1}~] \\in \\RR^{r\\times k},\n", " \\\\ & \\\\\n", " \\Ohat\n", " &= 
[~\\Ohat_{1}~~\\cdots~~\\Ohat_{n_\\textrm{terms}}~] \\in \\RR^{r \\times d},\n", @@ -393,7 +389,7 @@ "Nonparametric OpInf operator classes have two static methods that facilitate constructing the operator regression problem.\n", "\n", "- [`operator_dimension()`](OpInfOperator.operator_dimension): given the state dimension $r$ and the input dimension $r$, return the data dimension $d_\\ell$.\n", - "- [`datablock()`](OpInfOperator.datablock): given the state-input data pairs $\\{(\\qhat_j,\\u_j)\\}_{j=0}^{k-1}$, forms the matrix\n", + "- [`datablock()`](OpInfOperator.datablock): given the state-input data pairs $\\{(\\qhat_j,\\u_j)\\}_{j=0}^{k-1}$, form the matrix\n", "\n", "$$\n", "\\begin{aligned}\n", @@ -442,7 +438,7 @@ "$$\n", "\\begin{aligned}\n", " \\Z\n", - " &= [~\\dot{\\qhat}_0~~\\cdots~~\\dot{\\qhat}_{k-1}~] \\in \\RR^{r\\times k},\n", + " &= [~\\z_0~~\\cdots~~\\z_{k-1}~] \\in \\RR^{r\\times k},\n", " \\\\ \\\\\n", " \\Ohat\n", " &= [~\\Ahat~~\\Bhat~] \\in \\RR^{r \\times (r + m)},\n", @@ -457,6 +453,7 @@ "\\end{aligned}\n", "$$\n", "\n", + "In this setting, $\\z_j = \\dot{\\qhat}_j$, the time derivative of $\\qhat_j$.\n", "Collecting the state snapshots in the matrix $\\Qhat = [~\\qhat_0~~\\cdots~~\\qhat_{k-1}~]\\in\\RR^{r\\times k}$ and the inputs in the matrix $\\U = [~\\u_0~~\\cdots~~\\u_{k-1}~]$, the full data matrix can be abbreviated as $\\D = [~\\Qhat\\trp~~\\U\\trp~]$.\n", "\n", "If the regression $\\Z \\approx \\Ohat\\D\\trp$ is treated as an [ordinary least-squares problem](opinf.lstsq.PlainSolver), the optimization problem to solve is given by\n", @@ -467,7 +464,7 @@ " \\D\\Ohat\\trp - \\Z\\trp\n", " \\right\\|_F^2\n", " = \\min_{\\Ahat,\\Bhat}\\sum_{j=0}^{k-1}\\left\\|\n", - " \\Ahat\\qhat_j + \\Bhat\\u_j - \\dot{\\qhat}_j\n", + " \\Ahat\\qhat_j + \\Bhat\\u_j - \\z_j\n", " \\right\\|_2^2.\n", "\\end{aligned}\n", "$$\n", @@ -480,7 +477,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - ":::{admonition} Operators With Entries Are Not 
Recalibrated\n", + ":::{admonition} Operators with Entries are _Not_ Recalibrated\n", ":class: important\n", "\n", "Only operators whose entries are _not initialized_ (set to `None`) when a model is constructed are learned with Operator Inference when [`fit()`](opinf.models.ContinuousModel.fit) is called.\n", @@ -501,7 +498,7 @@ "$$\n", "\\begin{aligned}\n", " \\Z\n", - " &= [~(\\dot{\\qhat}_0 - \\Bhat\\u_0)~~\\cdots~~(\\dot{\\qhat}_{k-1} - \\Bhat\\u_{k-1})~] \\in \\RR^{r\\times k},\n", + " &= [~(\\z_0 - \\Bhat\\u_0)~~\\cdots~~(\\z_{k-1} - \\Bhat\\u_{k-1})~] \\in \\RR^{r\\times k},\n", " \\\\\n", " \\Ohat\n", " &= \\Ahat \\in \\RR^{r \\times r},\n", @@ -515,8 +512,8 @@ "\n", "$$\n", "\\begin{aligned}\n", - " &\\min_{\\Ahat,}\\sum_{j=0}^{k-1}\\left\\|\n", - " \\Ahat\\qhat_j - (\\dot{\\qhat}_j - \\Bhat\\u_j)\n", + " &\\min_{\\Ahat}\\sum_{j=0}^{k-1}\\left\\|\n", + " \\Ahat\\qhat_j - (\\z_j - \\Bhat\\u_j)\n", " \\right\\|_2^2.\n", "\\end{aligned}\n", "$$\n", @@ -761,7 +758,7 @@ "\n", "$$\n", "\\begin{aligned}\n", - " \\frac{\\partial}{\\partial \\hat{\\q}_j}\\left[\\hat{q}_i\\hat{s}_i\\right]\n", + " \\frac{\\partial}{\\partial \\hat{q}_j}\\left[\\hat{q}_i\\hat{s}_i\\right]\n", " = \\begin{cases}\n", " \\hat{s}_i & \\textrm{if}~i = j,\n", " \\\\\n", @@ -1000,15 +997,42 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Operators are called _parametric_ if the operator entries depend on an independent parameter vector\n", - "$\\bfmu\\in\\RR^{p}$, i.e., $\\Ophat_{\\ell}(\\qhat,\\u;\\bfmu) = \\Ohat_{\\ell}(\\bfmu)\\d_{\\ell}(\\qhat,\\u)$ where now $\\Ohat:\\RR^{p}\\to\\RR^{r\\times d}$.\n", - "\n", + "An operator is called _parametric_ if it depends on an independent parameter vector\n", + "$\\bfmu\\in\\RR^{p}$, i.e., $\\Ophat_{\\ell} = \\Ophat_{\\ell}(\\qhat,\\u;\\bfmu)$\n", + "When the parameter vector is fixed, a parametric operator becomes nonparametric.\n", + "In particular, a parametric operator's [`evaluate()`](ParametricOperatorTemplate.evaluate) method 
accepts a parameter vector $\\bfmu$ and returns an instance of a nonparametric operator." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Parametric OpInf operators have the form\n", + "$\\Ophat_{\\ell}(\\qhat,\\u;\\bfmu) = \\Ohat_{\\ell}(\\bfmu)\\d_{\\ell}(\\qhat,\\u)$ defined by the matrix-valued function $\\Ohat_{\\ell}:\\RR^{p}\\to\\RR^{r\\times d_\\ell}$ and (as in the nonparametric case) the data vector $\\d_{\\ell}:\\RR^{r}\\times\\RR^{m}\\to\\RR^{d_\\ell}$.\n", + "This module provides two options for the parameterization of $\\Ohat_{\\ell}(\\bfmu)$: [affine expansion](sec-operators-affine) and [elementwise interpolation](sec-operators-interpolated).\n", + "In each case, Operator Inference begins with $s$ training parameter values $\\bfmu_{0},\\ldots,\\bfmu_{s-1}$ and corresponding state, input, and left-hand side data $\\{(\\qhat_{i,j},\\u_{i,j},\\z_{i,j})\\}_{j=0}^{k_{i}-1}$ for each training parameter value $\\bfmu_{i}$.\n", + "A regression of the form $\\Z \\approx \\Ohat\\D\\trp$ is formed as in the nonparametric case, with the structure of the matrices $\\Ohat$ and $\\D$ depending on the choice of parameterization for each $\\Ohat_{\\ell}(\\bfmu)$." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ ":::{admonition} Example\n", ":class: tip\n", - "Let $\\bfmu = [~\\mu_{1}~~\\mu_{2}~]\\trp$.\n", - "The linear operator\n", - "$\\Ophat_1(\\qhat,\\u;\\bfmu) = (\\mu_{1}\\Ahat_{1} + \\mu_{2}\\Ahat_{2})\\qhat$\n", - "is a parametric operator with parameter-dependent entries $\\Ohat_1(\\bfmu) = \\mu_{1}\\Ahat_{1} + \\mu_{2}\\Ahat_{2}$.\n", + "Let $\\bfmu = [~\\mu_{0}~~\\mu_{1}~]\\trp$.\n", + "The operator\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\Ophat_1(\\qhat,\\u;\\bfmu) = (\\mu_{0}\\Ahat_{0} + \\mu_{1}^{2}\\Ahat_{1})\\qhat\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "is a parametric OpInf operator because it can be written as $\\Ophat_1(\\qhat,\\u;\\bfmu) = \\Ohat_1(\\bfmu)\\d_1(\\qhat,\\u)$ with $\\Ohat_1(\\bfmu) = \\mu_{0}\\Ahat_{0} + \\mu_{1}^{2}\\Ahat_{1}$ and $\\d_1(\\qhat,\\u) = \\qhat$.\n", + "\n", + "This operator can be represented with an {class}`AffineLinearOperator`.\n", + "For a given parameter value, the [`evaluate()`](AffineLinearOperator.evaluate) method returns a {class}`LinearOperator` instance.\n", ":::" ] }, @@ -1016,53 +1040,454 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "(sec-operators-interpolated)=\n", - "### Interpolated Operators" + "(sec-operators-affine)=\n", + "### Affine Operators" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "These operators handle the parametric dependence on $\\bfmu$ by using elementwise interpolation:\n", + "Affine parametric OpInf operators $\\Ophat_{\\ell}(\\qhat,\\u;\\bfmu) = \\Ohat_{\\ell}(\\bfmu)\\d_{\\ell}(\\qhat,\\u)$ parameterize the operator matrix $\\Ohat_{\\ell}(\\bfmu)$ as a sum of constant matrices with parameter-dependent scalar coefficients,\n", "\n", "$$\n", "\\begin{aligned}\n", " \\Ohat_{\\ell}(\\bfmu)\n", - " = \\text{interpolate}(\n", - " (\\bfmu_{1},\\Ohat_{\\ell}^{(1)}),\\ldots,(\\bfmu_{s},\\Ohat_{\\ell}^{(s)}); \\bfmu),\n", + " &= 
\\sum_{a=0}^{A_{\\ell}-1}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,\\Ohat_{\\ell}^{(a)},\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "where each $\\theta_{\\ell}^{(a)}:\\RR^{p}\\to\\RR$ is a scalar-valued function and each $\\Ohat_{\\ell}^{(a)}\\in\\RR^{r\\times d_\\ell}$ is constant.\n", + "Affine expansions are grouped such that the coefficient functions $\\theta_{\\ell}^{(0)},\\ldots,\\theta_{\\ell}^{(A_{\\ell}-1)}$ are linearly independent." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Affine parametric operators arise in Operator Inference settings because linear projection preserves affine structure.\n", + "\n", + ":::{dropdown} Preservation of Affine Structure\n", + "\n", + "Consider a full-order affine parametric OpInf operator\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\Op_{\\ell}(\\q,\\u;\\bfmu)\n", + " = \\sum_{a=0}^{A_{\\ell}-1}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,\\Op_{\\ell}^{(a)}\\!(\\q, \\u).\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "Given a trial basis $\\Vr\\in\\RR^{n\\times r}$ and a test basis $\\Wr\\in\\RR^{n\\times r}$, the [intrusive projection](sec-operators-projection) of $\\Op_{\\ell}$ is the operator\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\Ophat_{\\ell}(\\qhat, \\u; \\bfmu)\n", + " &= (\\Wr\\trp\\Vr)^{-1}\\Wr\\trp\\Op_{\\ell}(\\Vr\\qhat, \\u; \\bfmu)\n", + " \\\\\n", + " &= (\\Wr\\trp\\Vr)^{-1}\\Wr\\trp \\sum_{a=0}^{A_{\\ell}-1}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,\\Op_{\\ell}^{(a)}\\!(\\V\\qhat, \\u)\n", + " \\\\\n", + " &= \\sum_{a=0}^{A_{\\ell}-1}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,(\\Wr\\trp\\Vr)^{-1}\\Wr\\trp\\Op_{\\ell}^{(a)}\\!(\\V\\qhat, \\u)\n", + " = \\sum_{a=0}^{A_{\\ell}-1}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,\\Ophat_{\\ell}^{(a)}\\!(\\qhat, \\u),\n", "\\end{aligned}\n", "$$\n", "\n", - "where $\\bfmu_1,\\ldots,\\bfmu_s$ are training parameter values and $\\Ohat_{\\ell}^{(i)} = \\Ohat_{\\ell}(\\bfmu_i)$ for $i=1,\\ldots,s$.\n", + "where $\\Ophat_{\\ell}^{(a)}\\!(\\qhat, \\u) = 
(\\Wr\\trp\\Vr)^{-1}\\Wr\\trp\\Op_{\\ell}^{(a)}\\!(\\V\\qhat, \\u)$ is the intrusive projection of $\\Op_{\\ell}^{(a)}$.\n", + "That is, the intrusive projection of an affine expansion is an affine expansion of intrusive projections, and both expansions feature the same coefficient functions.\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Available affine parametric operators are listed below.\n", + "\n", + "```{eval-rst}\n", + ".. currentmodule:: opinf.operators\n", + "\n", + ".. autosummary::\n", - " :toctree: _autosummaries\n", " :nosignatures:\n", "\n", - " InterpolatedConstantOperator\n", - " InterpolatedLinearOperator\n", - " InterpolatedQuadraticOperator\n", - " InterpolatedCubicOperator\n", - " InterpolatedInputOperator\n", - " InterpolatedStateInputOperator\n", + " AffineConstantOperator\n", + " AffineLinearOperator\n", + " AffineQuadraticOperator\n", + " AffineCubicOperator\n", + " AffineInputOperator\n", + " AffineStateInputOperator\n", "```" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Affine parametric operators are instantiated with a function $\\boldsymbol{\\theta}_{\\ell}(\\bfmu) = [~\\theta_{\\ell}^{(0)}(\\bfmu)~~\\cdots~~\\theta_{\\ell}^{(A_{\\ell}-1)}(\\bfmu)~]\\trp$ for the affine expansion coefficients, the number of terms $A_{\\ell}$ in the expansion, and with or without the operator matrices $\\Ohat_{\\ell}^{(0)},\\ldots,\\Ohat_{\\ell}^{(A_{\\ell}-1)}$."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "thetas = lambda mu: np.array([mu[0], mu[1] ** 2])\n", + "A = opinf.operators.AffineLinearOperator(thetas, nterms=2)\n", + "print(A)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Set the constant operator matrices in the affine expansion.\n", + "r = 5\n", + "Ahats = [np.ones((r, r)), np.eye(r)]\n", + "A.set_entries(Ahats, fromblock=False)\n", "\n", - "" + "For the moment we will neglect the $\\ell = 1$ subscript.\n", + "With data $\\{(\\qhat_{i,j},\\u_{i,j},\\z_{i,j})\\}_{j=0}^{k_{i}-1}$ corresponding to $s$ training parameter values $\\bfmu_0,\\ldots,\\bfmu_{s-1}$, we seek the $A$ operator matrices $\\Ohat^{(0)},\\ldots,\\Ohat^{(A-1)}$ such that for each parameter index $i$ and time index $j$, we have\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\z_{i,j}\n", + " \\approx \\Ophat(\\qhat_{i,j},\\u_{i,j};\\bfmu_{i})\n", + " &= \\left(\\sum_{a=0}^{A-1}\\theta^{(a)}\\!(\\bfmu_{i})\\,\\Ohat^{(a)}\\right)\\d(\\qhat_{i,j},\\u_{i,j})\n", + " \\\\\n", + " &= \\left[\\begin{array}{ccc}\n", + " \\Ohat^{(0)} & \\cdots & \\Ohat^{(A-1)}\n", + " \\end{array}\\right]\n", + " \\underbrace{\\left[\\begin{array}{c}\n", + " \\theta^{(0)}\\!(\\bfmu_{i})\\,\\d(\\qhat_{i,j},\\u_{i,j})\n", + " \\\\ \\vdots \\\\\n", + " \\theta^{(A-1)}\\!(\\bfmu_{i})\\,\\d(\\qhat_{i,j},\\u_{i,j})\n", + " \\end{array}\\right]}_{\\d_{i,j}\\in\\RR^{dA}},\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "where $d$ is the output dimension of $\\d$.\n", + "Collecting these expressions for each time index $j = 0, \\ldots, k_i - 1$ (but keeping the parameter index $i$ fixed for the moment) results in\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\underbrace{\\left[\\begin{array}{ccc}\n", + " \\z_{i,0} & \\cdots & \\z_{i,k_i-1}\n", + " \\end{array}\\right]}_{\\Z_i\\in\\RR^{r\\times k_i}}\n", + " \\approx 
\\left[\\begin{array}{ccc}\n", + " \\Ohat^{(0)} & \\cdots & \\Ohat^{(A-1)}\n", + " \\end{array}\\right]\n", + " \\underbrace{\\left[\\begin{array}{ccc}\n", + " \\d_{i,0} & \\cdots & \\d_{i,k_i-1}\n", + " \\end{array}\\right]}_{\\D_i\\trp\\in\\RR^{dA \\times k_i}}.\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "Finally, we concatenate each of these expressions for each parameter index $i = 0,\\ldots, s-1$ to arrive at\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\underbrace{\\left[\\begin{array}{ccc}\n", + " \\Z_{0} & \\cdots & \\Z_{s-1}\n", + " \\end{array}\\right]}_{\\Z\\in\\RR^{r\\times K}}\n", + " \\approx \\left[\\begin{array}{ccc}\n", + " \\Ohat^{(0)} & \\cdots & \\Ohat^{(A-1)}\n", + " \\end{array}\\right]\n", + " \\underbrace{\\left[\\begin{array}{ccc}\n", + " \\D_{0}\\trp & \\cdots & \\D_{s-1}\\trp\n", + " \\end{array}\\right]}_{\\D\\trp\\in\\RR^{dA \\times K}},\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "where $K = \\sum_{i=0}^{s-1}k_i$, the total number of available data instances.\n", + "This is the familiar $\\Z \\approx \\Ohat\\D\\trp$ where $\\Ohat = [~\\Ohat^{(0)}~~\\cdots~~\\Ohat^{(A-1)}~]$, which can be solved for $\\Ohat$ using {mod}`opinf.lstsq`.\n", + "\n", + "The construction of $\\D$ is taken care of through the [`datablock()`](AffineLinearOperator.datablock) method of the affine operator classes.\n", + "\n", + "For models with multiple affine operators, the operator matrix $\\Ohat$ is further concatenated horizontally to accommodate the operator matrices from each affine expansion, and the data matrix $\\D\\trp$ gains additional block rows.\n", + ":::" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "(sec-operators-interpolated)=\n", "### Interpolatory Operators" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Interpolatory parametric OpInf operators define the parametric dependence of the operator matrix on $\\bfmu$ through elementwise interpolation.\n", "That is,\n", "\n", "$$\n",
"\\begin{aligned}\n", + " \\Ophat_{\\ell}(\\qhat,\\u;\\bfmu)\n", + " = \\Ohat_{\\ell}(\\bfmu)\\d_{\\ell}(\\qhat,\\u),\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "where $\\Ohat_{\\ell}(\\bfmu)$ is determined by interpolating $s$ matrices $\\Ohat_{\\ell}^{(0)},\\ldots,\\Ohat_{\\ell}^{(s-1)}$.\n", + "In the context of Operator Inference, $s$ is the number of training parameter values." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Available interpolatory operators are listed below.\n", + "\n", + "```{eval-rst}\n", + ".. currentmodule:: opinf.operators\n", + "\n", + ".. autosummary::\n", + " :nosignatures:\n", + "\n", + " InterpConstantOperator\n", + " InterpLinearOperator\n", + " InterpQuadraticOperator\n", + " InterpCubicOperator\n", + " InterpInputOperator\n", + " InterpStateInputOperator\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Interpolatory operators can be instantiated with no arguments." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "B = opinf.operators.InterpInputOperator()\n", + "print(B)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "s = 9 # Number of training parameters\n", + "p = 1 # Dimension of the training parameters.\n", + "r = 4 # Dimension of the states.\n", + "m = 2 # Dimension of the inputs.\n", + "\n", + "training_parameters = np.random.standard_normal((s, p))\n", + "operator_matrices = [np.random.random((r, m)) for _ in range(s)]\n", + "\n", + "B.set_training_parameters(training_parameters)\n", + "print(B)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "B.set_entries(operator_matrices, fromblock=False)\n", + "print(B)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "B_nonparametric = 
B.evaluate(np.random.standard_normal(p))\n", + "print(B_nonparametric)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{dropdown} Operator Inference for Interpolatory Operators\n", + "\n", + "Consider a model with a single interpolatory operator,\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\z\n", + " = \\Ophat_{1}(\\qhat,\\u;\\bfmu)\n", + " = \\Ohat_{1}(\\bfmu)\\d_{1}(\\qhat,\\u).\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "For the moment we will neglect the $\\ell = 1$ subscript.\n", + "With data $\\{(\\qhat_{i,j},\\u_{i,j},\\z_{i,j})\\}_{j=0}^{k_{i}-1}$ corresponding to $s$ training parameter values $\\bfmu_0,\\ldots,\\bfmu_{s-1}$, we seek $s$ operator matrices $\\Ohat^{(0)},\\ldots,\\Ohat^{(s-1)}\\in\\RR^{r\\times d}$ such that for each parameter index $i$ and time index $j$, we have\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\z_{i,j}\n", + " \\approx \\Ophat(\\qhat_{i,j},\\u_{i,j};\\bfmu_{i})\n", + " = \\Ohat^{(i)}\\d(\\qhat_{i,j},\\u_{i,j}),\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "which comes from the interpolation condition $\\Ohat^{(i)} = \\Ohat_{1}(\\bfmu_{i})$ for $i = 0,\\ldots,s-1$.\n", + "Because only one operator matrix $\\Ohat^{(i)}$ defines the operator action at each parameter value for which we have data, we have $s$ independent nonparametric Operator Inference problems:\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\Z_i\n", + " &\\approx \\Ohat^{(i)}\\D_i\\trp,\n", + " \\\\\n", + " \\Z_i\n", + " &= \\left[\\begin{array}{ccc}\n", + " \\z_{i,0} & \\cdots & \\z_{i,k_{i}-1}\n", + " \\end{array}\\right]\\in\\RR^{r \\times k_{i}},\n", + " \\\\\n", + " \\D_i\\trp\n", + " &= \\left[\\begin{array}{ccc}\n", + " \\d(\\qhat_{i,0}, \\u_{i,0}) & \\cdots & \\d(\\qhat_{i,k_{i}-1}, \\u_{i,k_{i}-1})\n", + " \\end{array}\\right]\\in\\RR^{d\\times k_{i}}\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "The `InterpContinuousModel` and `InterpDiscreteModel` classes represent models comprised solely of interpolatory operators.\n", + "If interpolatory 
operators are mixed with other operators (nonparametric or affine parametric), the $\\Ohat\\D\\trp$ block of the problem for the interpolatory operator is included as follows:\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\left[\\begin{array}{ccc}\n", + " \\Ohat^{(0)} & \\cdots & \\Ohat^{(s-1)}\n", + " \\end{array}\\right]\n", + " \\left[\\begin{array}{cccc}\n", + " \\D_0\\trp & \\0 & \\cdots & \\0 \\\\\n", + " \\0 & \\D_1\\trp & \\cdots & \\0 \\\\\n", + " \\vdots & \\vdots & \\ddots & \\vdots \\\\\n", + " \\0 & \\0 & \\cdots & \\D_{s-1}\\trp\n", + " \\end{array}\\right]\n", + "\\end{aligned}\n", + "$$\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{dropdown} Mixing Nonparametric and Parametric Operators\n", + "\n", + "Consider a system of ODEs with a mix of parametric and nonparametric operators,\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\ddt\\qhat(t)\n", + " = \\left(\\mu_{0}\\Ahat^{(0)} + \\cos(\\mu_{1})\\Ahat^{(1)}\\right)\\qhat(t) + \\Hhat[\\qhat(t) \\otimes \\qhat(t)] + \\Bhat(\\bfmu)\\u(t).\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "This model can be written in the general form {eq}`eq:operators:model` with three operators:\n", + "\n", + "- $\\Ophat_1(\\qhat,\\u;\\bfmu) = \\left(\\theta^{(0)}\\!(\\bfmu)\\,\\Ahat^{(0)} + \\theta^{(1)}\\!(\\bfmu)\\,\\Ahat^{(1)}\\right)\\qhat(t)$, an affine-parametric linear operator where $\\theta^{(0)}\\!(\\bfmu) = \\mu_{0}$ and $\\theta^{(1)}\\!(\\bfmu) = \\cos(\\mu_{1})$;\n", + "- $\\Ophat_2(\\qhat,\\u) = \\Hhat[\\qhat(t) \\otimes \\qhat(t)]$, a nonparametric quadratic operator; and\n", + "- $\\Ophat_3(\\qhat,\\u;\\bfmu) = \\Bhat(\\bfmu)\\u(t)$, a parametric input operator without a specified parametric structure.\n", + "\n", + "If $\\Bhat(\\bfmu)$ is parameterized with interpolation, the Operator Inference problem to learn the operator matrices can be written as $\\Z \\approx \\Ohat\\D\\trp$ in the following way.\n", + "Let $\\Qhat_i\\in\\RR^{r\\times k_i}$ and 
$\\U_i\\in\\RR^{m \\times k_i}$ collect the state and input data for training parameter value $\\bfmu_i$, with corresponding state time derivative data $\\Z_i = \\dot{\\Qhat}_i\\in\\RR^{r\\times k_i}$ for $i = 0,\\ldots, s-1$.\n", + "We then have\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\Z\n", + " &= \\left[\\begin{array}{ccc}\n", + " \\Z_0 & \\cdots & \\Z_{s-1}\n", + " \\end{array}\\right]\\in\\RR^{r\\times K}\n", + " \\\\\n", + " \\Ohat\n", + " &= \\left[\\begin{array}{cc|c|ccc}\n", + " \\Ahat^{(0)} & \\Ahat^{(1)} & \\Hhat & \\Bhat^{(0)} & \\cdots & \\Bhat^{(s-1)}\n", + " \\end{array}\\right]\\in\\RR^{r \\times d}\n", + " \\\\\n", + " \\D\\trp\n", + " &= \\left[\\begin{array}{ccc}\n", + " \\theta^{(0)}\\!(\\bfmu_0)\\Qhat_{0} & \\cdots & \\theta^{(0)}\\!(\\bfmu_{s-1})\\Qhat_{s-1} \\\\\n", + " \\theta^{(1)}\\!(\\bfmu_0)\\Qhat_{0} & \\cdots & \\theta^{(1)}\\!(\\bfmu_{s-1})\\Qhat_{s-1} \\\\ \\hline\n", + " \\Qhat_{0}\\odot\\Qhat_{0} & \\cdots & \\Qhat_{s-1}\\odot\\Qhat_{s-1} \\\\ \\hline\n", + " \\U_{0} & \\cdots & \\0 \\\\\n", + " \\vdots & \\ddots & \\0 \\\\\n", + " \\0 & \\cdots & \\U_{s-1}\n", + " \\end{array}\\right]\\in\\RR^{d \\times K},\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "where $K = \\sum_{i=0}^{s-1}k_i$ is the total number of data snapshots and $d = 2r + r(r+1)/2 + sm$ is the total operator dimension.\n", + "Note that the operator and data matrices have blocks corresponding to each of the three operators in the model.\n", + ":::" ] } ], diff --git a/docs/source/api/utils.md b/docs/source/api/utils.md index b8125519..a6235ab2 100644 --- a/docs/source/api/utils.md +++ b/docs/source/api/utils.md @@ -4,6 +4,21 @@ .. automodule:: opinf.utils ``` +## Timing Code + +Model reduction is all about speeding up computational tasks. +The following class defines a context manager for timing blocks of code and logging errors. + +```{eval-rst} +.. currentmodule:: opinf.utils + +.. 
autosummary:: + :toctree: _autosummaries + :nosignatures: + + TimedBlock +``` + ## Load/Save HDF5 Utilities Many `opinf` classes have `save()` methods that export the object to an HDF5 file and a `load()` class method for importing an object from an HDF5 file. @@ -19,3 +34,17 @@ The following functions facilitate that data transfer. hdf5_loadhandle hdf5_savehandle ``` + +## Documentation + +The following function initializes the Matplotlib defaults used in the documentation notebooks. + +```{eval-rst} +.. currentmodule:: opinf.utils + +.. autosummary:: + :toctree: _autosummaries + :nosignatures: + + mpl_config +``` diff --git a/docs/source/opinf/changelog.md b/docs/source/opinf/changelog.md index 9021167e..c35ebabc 100644 --- a/docs/source/opinf/changelog.md +++ b/docs/source/opinf/changelog.md @@ -5,6 +5,28 @@ New versions may introduce substantial new features or API adjustments. ::: +## Version 0.5.8 + +Support for affine-parametric problems: + +- Affine-parametric operator classes `AffineConstantOperator`, `AffineLinearOperator`, etc. +- Parametric model classes `ParametricContinuousModel`, `ParametricDiscreteModel`. +- `ParametricROM` class. +- Updates to operator / model documentation. + +Renamed interpolatory operators / model classes from `Interpolated` to `Interp`. +Old names are deprecated but not yet removed. + +Miscellaneous: + +- Reorganized and expanded tutorials. +- Added and documented `opinf.utils.TimedBlock` context manager for quick timing of code blocks. +- Updated structure for some unit tests. +- Refactored interpolatory operators. +- Standardized string representations, added `[Parametric]ROM.__str__()`. +- Removed some public functions from `operators`, regrouped in `operators._utils`. +- Removed some public functions from `models`, regrouped in `models._utils`. 
+ ## Version 0.5.7 Updates to `opinf.lstsq`: diff --git a/docs/source/tutorials/basics.ipynb b/docs/source/tutorials/basics.ipynb index 0cc1b536..81c84847 100644 --- a/docs/source/tutorials/basics.ipynb +++ b/docs/source/tutorials/basics.ipynb @@ -37,11 +37,12 @@ "metadata": {}, "source": [ ":::{admonition} Governing Equations\n", - ":class: attention\n", + ":class: note\n", "\n", "For the spatial domain $\\Omega = [0,L]\\subset \\RR$ and the time domain $[t_0,t_f]\\subset\\RR$, consider the one-dimensional heat equation with homogeneous Dirichlet boundary conditions:\n", "\n", - "\\begin{align*}\n", + "$$\n", + "\\begin{aligned}\n", " &\\frac{\\partial}{\\partial t} q(x,t) = \\frac{\\partial^2}{\\partial x^2}q(x,t)\n", " & x &\\in\\Omega,\\quad t\\in(t_0,t_f],\n", " \\\\\n", @@ -50,14 +51,15 @@ " \\\\\n", " &q(x,t_0) = q_{0}(x)\n", " & x &\\in \\Omega.\n", - "\\end{align*}\n", + "\\end{aligned}\n", + "$$\n", "\n", "This is a model for a one-dimensional rod that conducts heat.\n", "The unknown state variable $q(x,t)$ represents the temperature of the rod at location $x$ and time $t$; the temperature at the ends of the rod are fixed at $0$ and heat is allowed to flow out of the rod at the ends.\n", ":::\n", "\n", ":::{admonition} Objective\n", - ":class: attention\n", + ":class: note\n", "\n", "Construct a low-dimensional system of ordinary differential equations, called the _reduced-order model_ (ROM), which can be solved rapidly to produce approximate solutions $q(x, t)$ to the partial differential equation given above. 
We will use OpInf to learn the ROM from high-fidelity data for one choice of initial condition $q_0(x)$ and test its performance on new initial conditions.\n", ":::" @@ -67,40 +69,61 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Training Data" + "We will make use of {mod}`numpy`, {mod}`scipy`, and {mod}`matplotlib` from the standard Python scientific stack, which are all automatically installed when `opinf` is [installed](../opinf/installation.md).\n", + "The {mod}`pandas` library is also used later to consolidate and report results." ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": 1, "metadata": {}, + "outputs": [], "source": [ - "We begin by generating training data through a traditional finite difference discretization of the PDE." + "import numpy as np\n", + "import pandas as pd\n", + "import scipy.sparse\n", + "import scipy.integrate\n", + "import scipy.linalg as la\n", + "import matplotlib.pyplot as plt\n", + "\n", + "import opinf\n", + "\n", + "opinf.utils.mpl_config()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - ":::{important}\n", - "One key advantage of OpInf is that, because it learns a ROM from data alone, direct access to a high-fidelity solver is not required.\n", - "In this tutorial, we explicitly construct the high-fidelity solver, but in practice, we only need the following:\n", - "1. Solution outputs of a high-fidelity solver to learn from, and\n", - "2. 
Some knowledge of the structure of the governing equations.\n", - ":::" + "## Training Data" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Define the Full-order Model" + "We begin by generating training data through a traditional numerical method.\n", + "A spatial discretization of the governing equations with $n$ degrees of freedom via finite differences or the finite element method leads to a linear semi-discrete system of $n$ ordinary differential equations,\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\ddt\\q(t) = \\A\\q(t),\n", + " \\qquad\n", + " \\q(0) = \\q_0,\n", + "\\end{aligned}\n", + "$$ (eq_basics_fom)\n", + "\n", + "where $\\q:\\RR\\to\\RR^n$, $\\A\\in\\RR^{n\\times n}$, and $\\q_0\\in\\RR^n$.\n", + "For this tutorial, we use central finite differences to construct this system." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "To solve the problem numerically, let $\\{x\\}_{i=0}^{n+1}$ be an equidistant grid of $n+2$ points on $\\Omega$, i.e.,\n", + ":::{dropdown} Discretization details\n", + "\n", + "For a given $n\\in\\NN$, let $\\{x\\}_{i=0}^{n+1}$ be an equidistant grid of $n+2$ points on $\\Omega$, i.e.,\n", "\n", "$$\n", "\\begin{aligned}\n", @@ -108,29 +131,22 @@ " &\n", " &\\text{and}\n", " &\n", - " \\delta x &= \\frac{L}{n+1} = x_{i+1} - x_{i},\\quad i=1,\\ldots,n-1.\n", + " x_{i+1} - x_{i} &= \\delta x := \\frac{L}{n+1},\\quad i=1,\\ldots,n-1.\n", "\\end{aligned}\n", "$$\n", "\n", "The boundary conditions prescribe $q(x_0,t) = q(x_{n+1},t) = 0$.\n", - "Our goal is to compute $q(x, t)$ at the interior spatial points $x_{1}, x_{2}, \\ldots, x_{n}$ for various $t = [0,T]$. 
we wish to compute the state vector\n", + "Our goal is to compute $q(x, t)$ at the interior spatial points $x_{1}, x_{2}, \\ldots, x_{n}$ for various $t \\in [t_0,t_f].$ That is, we wish to compute the state vector\n", "\n", "$$\n", "\\begin{aligned}\n", " \\q(t)\n", " = \\left[\\begin{array}{c}\n", " q(x_1,t) \\\\ \\vdots \\\\ q(x_n,t)\n", - " \\end{array}\\right]\\in\\RR^n\n", + " \\end{array}\\right]\\in\\RR^n.\n", "\\end{aligned}\n", "$$\n", "\n", - "for $t\\in[t_0,t_f]$." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ "Introducing a central finite difference approximation for the spatial derivative,\n", "\n", "$$\n", @@ -148,7 +164,7 @@ " \\qquad\n", " \\q(0) = \\q_0,\n", "\\end{aligned}\n", - "$$ (eq_basics_fom)\n", + "$$\n", "\n", "where\n", "\n", @@ -168,16 +184,28 @@ "\\end{aligned}\n", "$$\n", "\n", - "Equation {eq}`eq_basics_fom` is called the _full-order model_ (FOM) or the _high-fidelity model_. The computational complexity of solving {eq}`eq_basics_fom` depends on the dimension $n$, which must often be large in order for $\\q(t)$ to approximate $q(x,t)$ well over the spatial grid. Our goal is to construct a ROM that approximates the FOM, but whose computational complexity only depends on some smaller dimension $r \\ll n$." + ":::" ] }, { "cell_type": "markdown", - "metadata": { - "toc-hr-collapsed": true - }, + "metadata": {}, "source": [ - "### Solve the Full-order Model" + "The system {eq}`eq_basics_fom` is called the _full-order model_ (FOM) or the _high-fidelity model_. The computational complexity of solving {eq}`eq_basics_fom` depends on the dimension $n$, which must often be large in order for $\\q(t)$ to approximate $q(x,t)$ well over the spatial grid. Our goal is to construct a ROM that approximates the FOM, but whose computational complexity only depends on some smaller dimension $r \\ll n$." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{admonition} No FOM? 
No Problem.\n", + ":class: important\n", + "\n", + "One key advantage of OpInf is that, because it learns a ROM from data alone, direct access to a FOM is not required.\n", + "In this tutorial, we explicitly construct a FOM, but in practice, we only need the following:\n", + "1. Solution data to learn from, and\n", + "2. Some knowledge of the structure of the governing equations.\n", + ":::" ] }, { @@ -186,8 +214,8 @@ "toc-hr-collapsed": true }, "source": [ - "For this demo, we'll use $t_0 = 0$ and $L = t_f = 1$.\n", - "We begin by simulating the full-order system described above with the initial condition\n", + "For this demo, we set $L = 1$, $t_0 = 0$, $t_f = 1$, and use $n = 2^{10} - 1 = 1023$ spatial degrees of freedom.\n", + "We begin by solving the FOM with the initial condition\n", "\n", "$$\n", "\\begin{aligned}\n", @@ -195,8 +223,8 @@ "\\end{aligned}\n", "$$\n", "\n", - "using a maximal time step size $\\delta t = 10^{-3}$.\n", - "This results in $k = 10^3 + 1 = 1001$ state snapshots (1000 time steps after the initial condition), which are organized as the _snapshot matrix_ $\\Q\\in\\RR^{n\\times k}$, where the $j$th column is the solution trajectory at time $t_j$:\n", + "and record the solution every $\\delta t = 0.0025$ time units.\n", + "This results in $k = 401$ state snapshots ($400$ time steps after the initial condition), which are organized into the _snapshot matrix_ $\\Q\\in\\RR^{n\\times k}$, where the $j$-th column is the solution trajectory at time $t_j$:\n", "\n", "$$\n", "\\begin{aligned}\n", @@ -210,101 +238,64 @@ "\\end{aligned}\n", "$$\n", "\n", - "Note that the initial condition $\\q_{0}$ is included as a column in the snapshot matrix." + "Note that the initial condition $\\q_{0}$ is included as a column in the snapshot matrix.\n", + "\n", + "The following code constructs the spatial and time domains, the FOM matrix $\\A$, the initial condition vector $\\q_0$, and solves the FOM with {func}`scipy.integrate.solve_ivp()`." 
] }, { "cell_type": "code", "execution_count": null, "metadata": { - "tags": [] + "tags": [ + "hide-input" + ] }, "outputs": [], - "source": [ - "import numpy as np\n", - "import pandas as pd\n", - "import scipy.linalg as la\n", - "import scipy.sparse as sparse\n", - "import matplotlib.pyplot as plt\n", - "from scipy.integrate import solve_ivp\n", - "\n", - "import opinf\n", - "\n", - "opinf.utils.mpl_config()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], "source": [ "# Construct the spatial domain.\n", - "L = 1 # Spatial domain length.\n", - "n = 2**7 - 1 # Spatial grid size.\n", - "x_all = np.linspace(0, L, n + 2) # Full spatial grid.\n", - "x = x_all[1:-1] # Interior spatial grid (where q is unknown).\n", - "dx = x[1] - x[0] # Spatial resolution.\n", + "L = 1\n", + "n = 2**10 - 1\n", + "x_all = np.linspace(0, L, n + 2)\n", + "x = x_all[1:-1]\n", + "dx = x[1] - x[0]\n", "\n", "# Construct the temporal domain.\n", - "t0, tf = 0, 1 # Initial and final time.\n", - "k = tf * 1000 + 1 # Temporal grid size.\n", - "t = np.linspace(t0, tf, k) # Temporal grid.\n", - "dt = t[1] - t[0] # Temporal resolution.\n", + "t0, tf = 0, 1\n", + "k = 401\n", + "t = np.linspace(t0, tf, k)\n", + "dt = t[1] - t[0]\n", "\n", - "print(f\"Spatial step size:\\tdx = {dx}\")\n", - "print(f\"Temporal step size:\\tdt = {dt}\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ "# Construct the full-order state matrix A.\n", "diags = np.array([1, -2, 1]) / (dx**2)\n", - "A = sparse.diags(diags, [-1, 0, 1], (n, n))\n", + "A = scipy.sparse.diags(diags, [-1, 0, 1], (n, n))\n", "\n", + "# Construct the initial condition for the training data.\n", + "q0 = x * (1 - x)\n", "\n", - "# Define the full-order model dx/dt = f(t,x), x(0) = x0.\n", - "def fom(t, x):\n", - " return A @ x\n", "\n", + "def full_order_solve(initial_condition, time_domain):\n", + " \"\"\"Solve the full-order model with 
SciPy.\"\"\"\n", + " return scipy.integrate.solve_ivp(\n", + " fun=lambda t, q: A @ q,\n", + " t_span=[time_domain[0], time_domain[-1]],\n", + " y0=initial_condition,\n", + " t_eval=time_domain,\n", + " method=\"BDF\",\n", + " ).y\n", "\n", - "# Construct the initial condition for the training data.\n", - "q0 = x * (1 - x)\n", "\n", - "print(f\"{A.shape=}\\t{q0.shape=}\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Compute snapshots by solving the full-order model with SciPy.\n", - "Q = solve_ivp(fom, [t0, tf], q0, t_eval=t, method=\"BDF\").y\n", + "# Solve the full-order model to obtain training snapshots.\n", + "with opinf.utils.TimedBlock(\"Full-order solve\"):\n", + " Q = full_order_solve(q0, t)\n", "\n", - "print(f\"{Q.shape=}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ":::{caution}\n", - "It is often better to use your own ODE solver, tailored to the problem at hand, instead of integration packages such as [**scipy.integrate**](https://docs.scipy.org/doc/scipy/tutorial/integrate.html).\n", - "If the integration strategy of the FOM is known, try using that strategy with the ROM.\n", - ":::" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Visualize Training Data" + "print(f\"\\nSpatial domain size:\\t{x.shape=}\")\n", + "print(f\"Spatial step size:\\t{dx=:.10f}\")\n", + "print(f\"\\nTime domain size:\\t{t.shape=}\")\n", + "print(f\"Temporal step size:\\t{dt=:f}\")\n", + "print(f\"\\nFull-order matrix A:\\t{A.shape=}\")\n", + "print(f\"\\nInitial condition:\\t{q0.shape=}\")\n", + "print(f\"\\nTraining snapshots:\\t{Q.shape=}\")" ] }, { @@ -316,7 +307,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": { "tags": [ "hide-input" @@ -358,7 +349,8 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "This matches our intuition: initially there is more heat toward the center of the 
rod, which then diffuses out of the ends of the rod. In the figure, earlier times are lighter colors and later times are darker colors." + "In the figure, earlier times are lighter colors and later times are darker colors.\n", + "This matches our intuition: initially there is more heat toward the center of the rod, which then diffuses out of the ends of the rod." ] }, { @@ -373,13 +365,12 @@ "metadata": {}, "source": [ "At this point, we have gathered some training data by simulating the FOM.\n", - "We also have an initial condition and space and time domains.\n", + "We also have an initial condition and a time domain.\n", "\n", "| Name | Symbol | Code Variable |\n", "| :--- | :----: | :------------ |\n", "| State snapshots | $\\Q$ | `Q` |\n", "| Initial state | $\\q_0$ | `q0` |\n", - "| Spatial variable | $\\Omega$ | `x` |\n", "| Time domain | $[t_0,t_f]$ | `t` |\n", "\n", "Our task now is to construct a low-dimensional system whose solutions can be used as approximate solutions to the PDE.\n", @@ -394,13 +385,13 @@ "source": [ "import opinf\n", "\n", - "# Define the reduced-order model structure.\n", + "# Define the reduced-order model.\n", "rom = opinf.ROM(\n", - " basis=opinf.basis.PODBasis(cumulative_energy=0.999999),\n", + " basis=opinf.basis.PODBasis(cumulative_energy=0.9999),\n", " ddt_estimator=opinf.ddt.UniformFiniteDifferencer(t, \"ord6\"),\n", " model=opinf.models.ContinuousModel(\n", " operators=\"A\",\n", - " solver=opinf.lstsq.L2Solver(regularizer=1e-4),\n", + " solver=opinf.lstsq.L2Solver(regularizer=1e-8),\n", " ),\n", ")\n", "\n", @@ -408,7 +399,8 @@ "rom.fit(Q)\n", "\n", "# Solve the reduced-order model.\n", - "Q_ROM = rom.predict(q0, t, method=\"BDF\", max_step=dt)\n", + "with opinf.utils.TimedBlock(\"Reduced-order solve\"):\n", + " Q_ROM = rom.predict(q0, t, method=\"BDF\", max_step=dt)\n", "\n", "# Compute the relative error of the ROM solution.\n", "opinf.post.frobenius_error(Q, Q_ROM)[1]" @@ -448,8 +440,7 @@ "We choose $\\Vr$ using proper 
orthogonal decomposition (POD), which is based on the singular value decomposition (SVD) of samples of $\\q(t)$.\n", "The singular values give some guidance on choosing an appropriate ROM dimension $r$.\n", "Fast singular value decay is a good sign that a ROM may be successful with this kind of data; if the singular values do not decay quickly, then a large $r$ may be required to capture the behavior of the system.\n", - "POD is implemented in this package as {class}`opinf.basis.PODBasis`.\n", - "Below, we initialize a `PODBasis` with a criteria for selecting $r$: choose the smallest $r$ such that we capture over $99.9999\\%$ of the [cumulative energy](#sec:api-basis-dimselect) of the system.\n" + "Below, we initialize a {class}`opinf.basis.PODBasis` object with the following criteria for selecting $r$: choose the smallest $r$ such that we capture over $99.9999\\%$ of the [cumulative energy](#sec:api-basis-dimselect) of the system.\n" ] }, { @@ -459,7 +450,7 @@ "outputs": [], "source": [ "# Initialize a basis.\n", - "basis = opinf.basis.PODBasis(cumulative_energy=0.999999)\n", + "basis = opinf.basis.PODBasis(cumulative_energy=0.9999)\n", "\n", "# Fit the basis (compute Vr) using the snapshot data.\n", "basis.fit(Q)\n", @@ -476,7 +467,7 @@ "source": [ "Solutions of our eventual ROM are restricted to linear combinations of these two basis vectors.\n", "\n", - "After the `PODbasis` is initialized and calibrated, we can use it to compress the state snapshots to an $r$-dimensional representation.\n", + "After the basis is initialized and calibrated, we can use it to compress the state snapshots to an $r$-dimensional representation.\n", "In this case, we have $\\qhat_j = \\Vr\\trp\\q_j \\in \\RR^{r}$.\n", "These $\\qhat_j$ are data for the ROM state $\\qhat(t)$ at time $t_j$." 
] @@ -498,7 +489,7 @@ "metadata": {}, "source": [ "To see how well the state can be represented by a given basis matrix, it is helpful to examine the _projection_ of the state snapshots.\n", - "For linear state approximations, the projection of $\\q\\in\\RR^n$ is the vector $\\Vr\\Vr\\trp\\q\\in\\RR^n$." + "For linear state approximations like POD, the projection of $\\q\\in\\RR^n$ is the vector $\\Vr\\Vr\\trp\\q\\in\\RR^n$." ] }, { @@ -536,14 +527,14 @@ "1. If time derivatives of the original state snapshots are available, they can be compressed to the reduced state space.\n", "2. Otherwise, the time derivatives may be estimated from the compressed states.\n", "\n", - "The latter scenario (being given state data but not time derivative data) is common, so {mod}`opinf.ddt` defines tools for estimating time derivatives." + "The {mod}`opinf.ddt` module defines tools for estimating time derivatives from state data." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Recall that the FOM in this problem is given by $\\ddt\\q(t) = \\A\\q(t)$.\n", + "Recall that the FOM in this problem {eq}`eq_basics_fom` is given by $\\ddt\\q(t) = \\A\\q(t)$.\n", "In this case we have $\\A$, so we can compute $\\dot{\\q}_j = \\A\\q_j$, then set $\\dot{\\qhat}_j = \\Vr\\trp\\dot{\\q}_j$.\n", "Below, we show how this approach compares with using tools from {mod}`opinf.ddt`.\n", "Since the data $\\q_0,\\ldots,\\q_{k-1}$ are defined on a uniform time grid, we use {class}`opinf.ddt.UniformFiniteDifferencer`." 
@@ -587,9 +578,9 @@ "metadata": {}, "source": [ "We now have low-dimensional state and time derivative data.\n", - "To learn a ROM with OpInf, we must specify the structure of the ROM, which should be motivated by the FOM and the dimensionality reduction strategy.\n", + "To learn a ROM with OpInf, we must specify the structure of the ROM, which should be motivated by the structure of the FOM and the dimensionality reduction strategy.\n", "\n", - "The FOM is a linear system of ODEs,\n", + "The FOM {eq}`eq_basics_fom` is a linear system of ODEs,\n", "\n", "$$\n", "\\begin{aligned}\n", @@ -617,11 +608,11 @@ " \\qquad\n", " \\qhat(0) = \\Vr\\trp\\q_0,\n", "\\end{aligned}\n", - "$$\n", + "$$ (eq_basics_intrusiverom)\n", "\n", "where $\\tilde{\\A} = \\Vr\\trp\\A\\Vr \\in \\RR^{r\\times r}$.\n", - "This is called the _intrusive Galerkin ROM_ corresponding to the FOM and the choice of basis matrix $\\Vr$.\n", - "The intrusive ROM can only be constructed if $\\A$ is known; with OpInf, we construct a reduced system with the same linear structure as the intrusive ROM, but without using $\\A$ explicitly:\n", + "The system {eq}`eq_basics_intrusiverom` is called the _intrusive Galerkin ROM_ corresponding to the FOM and the choice of basis matrix $\\Vr$.\n", + "The intrusive ROM can only be constructed if $\\A$ is known; with OpInf, we aim to construct a reduced system with the same linear structure as the intrusive ROM, but without using $\\A$ explicitly:\n", "\n", "$$\n", "\\begin{aligned}\n", @@ -631,7 +622,7 @@ "\\end{aligned}\n", "$$\n", "\n", - "where $\\Ahat\\in\\RR^{r\\times r}$.\n", + "for some $\\Ahat\\in\\RR^{r\\times r}$ inferred from the training data.\n", "We specify this linear structure by initializing an {class}`opinf.models.ContinuousModel` with the string `\"A\"`.\n" ] }, @@ -649,8 +640,10 @@ "cell_type": "markdown", "metadata": {}, "source": [ - ":::{tip}\n", - "The `\"A\"` syntax is a shortcut for a slightly longer statement:\n", + ":::{admonition} Model 
Constructor Shortcut\n", + ":class: tip\n", + "\n", + "The `\"A\"` argument in the constructor is a shortcut for a slightly longer statement:\n", "\n", "```python\n", ">>> model = opinf.models.ContinuousModel([opinf.operators.LinearOperator()])\n", @@ -676,19 +669,32 @@ "OpInf does this through minimizing the residual of the model equation with respect to the data:\n", "\n", "$$\n", - " \\min_{\\Ahat\\in\\RR^{r\\times r}}\\sum_{j=0}^{k-1}\\left\\|\n", - " \\Ahat\\Vr\\trp\\q_{j} - \\Vr\\trp\\dot{\\q}_{j}\n", + "\\begin{aligned}\n", + " \\min_{\\Ahat\\in\\RR^{r\\times r}}\n", + " \\sum_{j=0}^{k-1}\\left\\|\n", + " \\Ahat\\qhat_{j} - \\dot{\\qhat}_{j}\n", " \\right\\|_{2}^2\n", " + \\mathcal{R}(\\Ahat),\n", + "\\end{aligned}\n", "$$ (eq_basics_opinf)\n", "\n", - "where $\\mathcal{R}(\\Ahat)$ is a regularization term (more on this soon).\n", - "The {mod}`opinf.lstsq` module defines tools for solving this problem (or variations on it).\n", + "where $\\mathcal{R}(\\Ahat)$ is a regularization term (more on this later).\n", "\n", + "The {mod}`opinf.lstsq` module defines tools for solving this problem (or variations on it).\n", "By default, the regression is solved without regularization, i.e., $\\mathcal{R}(\\Ahat) = 0$.\n", "The following code compares the OpInf ROM matrix $\\Ahat$ to the intrusive ROM matrix $\\tilde{\\A} = \\Vr\\trp\\A\\Vr$." 
] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "model.fit(states=Q_, ddts=Qdot_exact)\n", + "print(model)" + ] + }, { "cell_type": "code", "execution_count": null, @@ -699,11 +705,8 @@ "Vr = basis.entries\n", "A_intrusive = Vr.T @ A @ Vr\n", "\n", - "# Construct the OpInf ROM and extract the linear operator.\n", - "model.fit(states=Q_, ddts=Qdot_exact)\n", + "# Compare the OpInf ROM linear operator to the intrusive one.\n", "A_opinf = model.operators[0].entries\n", - "\n", - "# Compare the two linear operators.\n", "np.allclose(A_intrusive, A_opinf)" ] }, @@ -762,10 +765,13 @@ "outputs": [], "source": [ "# Define a solver for the Tikhonov-regularized least-squares problem.\n", - "model.solver = opinf.lstsq.L2Solver(regularizer=1e-2)\n", + "model = opinf.models.ContinuousModel(\n", + " \"A\",\n", + " solver=opinf.lstsq.L2Solver(regularizer=1e-2),\n", + ")\n", "\n", "# Construct the OpInf ROM through regularized least squares.\n", - "model.fit(states=Q_, ddts=Qdot_)\n", + "model.fit(states=Q_, ddts=Qdot_exact)\n", "A_opinf = model.operators[0].entries\n", "\n", "# Compare to the intrusive model.\n", @@ -786,19 +792,18 @@ "cell_type": "markdown", "metadata": {}, "source": [ - ":::{note}\n", "With inexact time derivatives or regularization, OpInf differs slightly from the intrusive operator $\\tilde{\\A}$.\n", - "However, we will see that the ROM produced by OpInf is highly accurate.\n", - "In fact, it is sometimes the case that OpInf outperforms intrusive projection.\n", - ":::" + "However, we will see that in this example the ROM produced by OpInf is highly accurate.\n", + "In fact, it is sometimes the case that OpInf outperforms intrusive Galerkin projection." 
] }, { "cell_type": "markdown", "metadata": {}, "source": [ + ":::{admonition} Regularization Matters\n", + ":class: important\n", "\n", - ":::{important}\n", "Regularization is important in all but the simplest OpInf problems.\n", "If OpInf produces an unstable ROM, try different values for the `regularizer`.\n", "See {cite}`mcquarrie2021combustion` for an example of a principled choice of regularization for a combustion problem.\n", @@ -816,8 +821,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Once the model is calibrated, we may solve the ROM with {meth}`opinf.models.ContinuousModel.predict`, which wraps [**scipy.integrate.solve_ivp()**](https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.solve_ivp.html).\n", - "This method takes an initial condition for the model $\\qhat_0 = \\Vr\\trp\\q_0$, the time domain over which to record the solution, and any additional arguments for the integrator." + "Once the model is calibrated, we may solve the ROM with {meth}`opinf.models.ContinuousModel.predict`, which wraps {func}`scipy.integrate.solve_ivp()`. This method takes an initial condition for the model $\\qhat_0 = \\Vr\\trp\\q_0$, the time domain over which to record the solution, and any additional arguments for the integrator." 
] }, { @@ -828,6 +832,11 @@ "source": [ "q0_ = basis.compress(q0) # Compress the initial conditions.\n", "\n", + "model = opinf.models.ContinuousModel(\n", + " \"A\",\n", + " solver=opinf.lstsq.L2Solver(regularizer=1e-8),\n", + ").fit(Q_, Qdot_)\n", + "\n", "Q_ROM_ = model.predict(q0_, t, method=\"BDF\")\n", "\n", "print(f\"{Q_ROM_.shape=}\")" @@ -855,15 +864,17 @@ "cell_type": "markdown", "metadata": {}, "source": [ - ":::{tip}\n", - "{meth}`opinf.models.ContinuousModel.predict` is convenient, but [**scipy.integrate.solve_ivp()**](https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.solve_ivp.html) implements relatively few time integration schemes.\n", - "However, the ROM can be simulated by **any** ODE solver scheme by extracting the inferred operator $\\Ahat$. \n", + ":::{admonition} Custom ODE Solvers\n", + ":class: tip\n", + "\n", + "{meth}`opinf.models.ContinuousModel.predict` is convenient, but {func}`scipy.integrate.solve_ivp()` implements a limited repertoire of time integration schemes.\n", + "However, the ROM can be simulated by any ODE solver scheme by extracting the inferred operator $\\Ahat$. 
\n", "If `timestepper(A, q0)` were a solver for systems of the form $\\ddt\\qhat = \\Ahat\\qhat(t),\\ \\qhat(0) = \\qhat_0$, we could simulate the ROM with the following code.\n", "\n", "```python\n", - "q0_ = Vr.T @ q0 # Compress the initial conditions.\n", + "q0_ = basis.compress(q0) # Compress the initial conditions.\n", "Q_ROM_ = timestepper(model.A_.entries, q0_) # Solve the ROM in the reduced space.\n", - "Q_ROM = Vr @ Q_ROM_ # Decompress the ROM solutions.\n", + "Q_ROM = basis.decompress(Q_ROM_) # Decompress the ROM solutions.\n", "```\n", "\n", "More generally, the method {meth}`opinf.models.ContinuousModel.rhs` represents the right-hand side of the model, the $\\hat{\\mathbf{f}}$ of $\\ddt\\qhat(t) = \\hat{\\mathbf{f}}(t, \\qhat(t))$.\n", @@ -901,16 +912,34 @@ "outputs": [], "source": [ "rom = opinf.ROM(\n", - " basis=opinf.basis.PODBasis(cumulative_energy=0.999999),\n", + " basis=opinf.basis.PODBasis(cumulative_energy=0.9999),\n", " ddt_estimator=opinf.ddt.UniformFiniteDifferencer(t, \"ord6\"),\n", " model=opinf.models.ContinuousModel(\n", " operators=\"A\",\n", - " solver=opinf.lstsq.L2Solver(regularizer=1e-2),\n", + " solver=opinf.lstsq.L2Solver(regularizer=1e-8),\n", " ),\n", ")\n", "\n", + "print(rom)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "rom.fit(Q)\n", "\n", + "print(rom)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "Q_ROM_2 = rom.predict(q0, t, method=\"BDF\")\n", "\n", "np.all(Q_ROM_2 == Q_ROM)" @@ -923,6 +952,20 @@ "### Evaluate ROM Performance" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The quality or usefulness of a ROM depends on its accuracy and its computational efficiency." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### ROM Accuracy" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -948,7 +991,31 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "For more detail, we evaluate the $\ell^2$ error of the ROM output in time, comparing it to the snapshot set via {func}`opinf.post.lp_error`." + "For more detail, we evaluate the $\\ell^2$ error of the ROM output, comparing it to the snapshot set via {func}`opinf.post.lp_error`.\n", + "This calculates the absolute and relative error as a function of time,\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + "    \\text{err}_\\text{absolute}(t)\n", + "    &= \\|\\q(t) - \\q_{\\text{ROM}}(t)\\|_{2},\n", + "    \\\\ ~ \\\\\n", + "    \\text{err}_\\text{relative}(t)\n", + "    &= \\frac{\\|\\q(t) - \\q_{\\text{ROM}}(t)\\|_{2}}{\\|\\q(t)\\|_{2}}.\n", + "\\end{aligned}\n", + "$$\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{admonition} Normalized Absolute Error\n", + ":class: tip\n", + "\n", + "In this problem, $\\q(t)\\to\\0$ as $t$ increases, so a relative error may not be appropriate since $\\|\\q(t)\\|_{2}$ appears in the denominator.\n", + "In situations like this, consider using the _normalized absolute error_ by replacing the denominator with $\\max_{\\tau\\in[t_0,t_f]}\\|\\q(\\tau)\\|_{2}.$\n", + "Set `normalize=True` in {func}`opinf.post.lp_error()` to use this error measure instead of the relative error.\n", + ":::" ] }, { @@ -957,9 +1024,13 @@ "metadata": {}, "outputs": [], "source": [ - "abs_l2err, rel_l2err = opinf.post.lp_error(Q, Q_ROM)\n", - "plt.semilogy(t, abs_l2err)\n", - "plt.title(r\"Absolute $\\ell^{2}$ error\")\n", + "abs_l2err, norm_l2err = opinf.post.lp_error(Q, Q_ROM, normalize=True)\n", + "fig, ax = plt.subplots(1, 1)\n", + "ax.semilogy(t, abs_l2err, \"-\", label=r\"Absolute $\\ell^2$ error\")\n", + "ax.semilogy(t, norm_l2err, \"--\", label=r\"Normalized absolute $\\ell^2$ error\")\n", + "ax.set_xlabel(r\"$t$\")\n", + 
"ax.set_ylabel(\"error\")\n", + "ax.legend(loc=\"lower left\")\n", "plt.show()" ] }, @@ -967,7 +1038,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "In this simple example, the error decreases with time (as solutions get quickly pushed to zero), but this is not the kind of error behavior that should be expected for less trivial systems.\n", + "In this simple example, the error decreases with time (as solutions get quickly pushed to zero), but this is not the kind of error behavior that should be expected when modeling more complicated phenomena.\n", "\n", "We can also get a scalar error measurement by calculating the relative Frobenius norm error with {func}`opinf.post.frobenius_error`." ] @@ -986,10 +1057,82 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "In other words, the ROM simulation is within 0.1% of the snapshot data.\n", + "In other words, the ROM simulation is within about 0.1% of the snapshot data.\n", "Note that this value is very close to the projection error that we calculated earlier." ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### ROM Computational Speedup" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "When a FOM is available, a ROM is only useful if it can be solved much faster than the FOM.\n", + "The solution speed can be quickly checked using {class}`opinf.utils.TimedBlock`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with opinf.utils.TimedBlock(\"Full-order solve\"):\n", + "    full_order_solve(q0, t)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with opinf.utils.TimedBlock(\"Reduced-order solve\"):\n", + "    rom.predict(q0, t, method=\"BDF\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "More precise measurements can be taken by aliasing the {class}`opinf.utils.TimedBlock` and accessing the `elapsed` attribute.\n", + "Below, we solve each model several times to get an average time." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "n_trials = 10\n", + "\n", + "with opinf.utils.TimedBlock(f\"{n_trials} FOM solves\") as fomtime:\n", + "    for _ in range(n_trials):\n", + "        full_order_solve(q0, t)\n", + "\n", + "with opinf.utils.TimedBlock(f\"{n_trials} ROM solves\") as romtime:\n", + "    for _ in range(n_trials):\n", + "        rom.predict(q0, t, method=\"BDF\")\n", + "\n", + "print(f\"Average FOM time: {fomtime.elapsed / n_trials :.6f} s\")\n", + "print(f\"Average ROM time: {romtime.elapsed / n_trials :.6f} s\")\n", + "print(f\"ROM speedup: {fomtime.elapsed / romtime.elapsed :.4f} times!\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this example, the FOM is efficient because it takes advantage of the sparsity of $\\A\\in\\RR^{n\\times n}$.\n", + "Even so, the ROM achieves a modest speedup due to the smaller size of $\\Ahat\\in\\RR^{r\\times r}$." 
+ ] + }, { "cell_type": "markdown", "metadata": {}, @@ -1006,24 +1149,28 @@ "source": [ "The ROM was trained using only data corresponding to the initial condition $q_0(x) = x(1 - x).$ We'll now test the ROM on the following new initial conditions and compare the results to the corresponding FOM solution:\n", "\n", - "\\begin{align*}\n", + "$$\n", + "\\begin{aligned}\n", " q_0(x) &= 10x (1 - x),\n", " &\n", - " q_0(x) &= x^{2}(1 - x)^{2},\n", + " q_0(x) &= 5x^{2}(1 - x)^{2},\n", " \\\\\n", - " q_0(x) &= x^{4}(1 - x)^{4},\n", + " q_0(x) &= 50x^{4}(1 - x)^{4},\n", " &\n", - " q_0(x) &= \\sqrt{x(1 - x)},\n", + " q_0(x) &= \\frac{1}{2}\\sqrt{x(1 - x)},\n", " \\\\\n", - " q_0(x) &= \\sqrt[4]{x(1 - x)},\n", + " q_0(x) &= \\frac{1}{4}\\sqrt[4]{x(1 - x)},\n", " &\n", - " q_0(x) &= \\sin(\\pi x) + \\tfrac{1}{5}\\sin(5\\pi x).\n", - "\\end{align*}\n", + " q_0(x) &= \\frac{1}{3}\\sin(\\pi x) + \\tfrac{1}{5}\\sin(5\\pi x).\n", + "\\end{aligned}\n", + "$$\n", "\n", "Before we compute the ROM error, we also compute the _projection error_ of the new initial condition,\n", "\n", "$$\n", + "\\begin{aligned}\n", " \\frac{||\\q_{0} - \\Vr \\Vr\\trp\\q_{0}||_{2}}{||\\q_{0}||_{2}}.\n", + "\\end{aligned}\n", "$$\n", "\n", "If this projection error is large, then the new initial condition cannot be represented well within the range of $\\Vr$. This will be apparent in the ROM solutions." 
@@ -1038,7 +1185,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 29, "metadata": {}, "outputs": [], "source": [ @@ -1053,13 +1200,13 @@ " rom : opinf.ROM\n", " Trained reduced-order model object.\n", " label : str\n", - " LaTeX description of the initial condition being tested.\n", + " Description of the initial condition being tested.\n", " \"\"\"\n", " # Calculate the projection error of the new initial condition.\n", " rel_projerr = rom.basis.projection_error(q0, relative=True)\n", "\n", " # Solve the full-order model (FOM) and the reduced-order model (ROM).\n", - " Q_FOM = solve_ivp(fom, [t0, tf], q0, t_eval=t, method=\"BDF\").y\n", + " Q_FOM = full_order_solve(q0, t)\n", " Q_ROM = rom.predict(q0, t, method=\"BDF\")\n", "\n", " # Plot the FOM and ROM solutions side by side.\n", @@ -1096,20 +1243,20 @@ "source": [ "q0_new = [\n", " 10 * x * (1 - x),\n", - " x**2 * (1 - x) ** 2,\n", - " x**4 * (1 - x) ** 4,\n", - " np.sqrt(x * (1 - x)),\n", - " np.sqrt(np.sqrt(x * (1 - x))),\n", - " np.sin(np.pi * x) + np.sin(5 * np.pi * x) / 5,\n", + " 5 * x**2 * (1 - x) ** 2,\n", + " 50 * x**4 * (1 - x) ** 4,\n", + " 0.5 * np.sqrt(x * (1 - x)),\n", + " 0.25 * np.sqrt(np.sqrt(x * (1 - x))),\n", + " np.sin(np.pi * x) / 3 + np.sin(5 * np.pi * x) / 5,\n", "]\n", "\n", "q0_titles = [\n", " r\"$q_{0}(x) = 10 x (1 - x)$\",\n", - " r\"$q_{0}(x) = x^{2} (1 - x)^{2}$\",\n", - " r\"$q_{0}(x) = x^{4} (1 - x)^{4}$\",\n", - " r\"$q_{0}(x) = \\sqrt{x (1 - x)}$\",\n", - " r\"$q_{0}(x) = \\sqrt[4]{x (1 - x)}$\",\n", - " r\"$q_{0}(x) = \\sin(\\pi x) + \\frac{1}{5}\\sin(5\\pi x)$\",\n", + " r\"$q_{0}(x) = 5 x^{2} (1 - x)^{2}$\",\n", + " r\"$q_{0}(x) = 50 x^{4} (1 - x)^{4}$\",\n", + " r\"$q_{0}(x) = \\frac{1}{2}\\sqrt{x (1 - x)}$\",\n", + " r\"$q_{0}(x) = \\frac{1}{4}\\sqrt[4]{x (1 - x)}$\",\n", + " r\"$q_{0}(x) = \\frac{1}{3}\\sin(\\pi x) + \\frac{1}{5}\\sin(5\\pi x)$\",\n", "]\n", "\n", "results = {}\n", @@ -1143,7 +1290,7 @@ }, { "cell_type": "code", - 
"execution_count": null, + "execution_count": 31, "metadata": { "tags": [ "hide-input" @@ -1232,12 +1379,14 @@ " ddt_estimator=opinf.ddt.UniformFiniteDifferencer(t, \"ord6\"),\n", " model=opinf.models.ContinuousModel(\n", " operators=\"A\",\n", - " solver=opinf.lstsq.L2Solver(regularizer=1e-5),\n", + " solver=opinf.lstsq.L2Solver(regularizer=1e-8),\n", " ),\n", ")\n", "\n", "# Use the same training data as before, but do not reset the basis.\n", - "_ = rom.fit(Q, fit_basis=False)" + "_ = rom.fit(Q, fit_basis=False)\n", + "\n", + "print(rom)" ] }, { @@ -1267,15 +1416,15 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "With a more expressive basis, we are now capturing the true solutions with the ROM to within 1% error in the Frobenius norm." + "With a more expressive basis, the ROM performance improves significantly." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - ":::{admonition} Takeaway\n", - ":class: attention\n", + ":::{admonition} No Better than the Basis\n", + ":class: tip\n", "This example illustrates a fundamental principle of model reduction: the accuracy of the ROM is limited by the accuracy of the underlying low-dimensional approximation, which in this case is $\\q(t) \\approx \\Vr\\qhat(t)$. In other words, a good $\\Vr$ is critical in order for the ROM to be accurate and predictive.\n", ":::" ] diff --git a/docs/source/tutorials/heat_equation.ipynb b/docs/source/tutorials/heat_equation.ipynb deleted file mode 100644 index 2dd6b52b..00000000 --- a/docs/source/tutorials/heat_equation.ipynb +++ /dev/null @@ -1,1622 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "toc-hr-collapsed": false - }, - "source": [ - "# Heat Equation" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "toc-hr-collapsed": false - }, - "source": [ - "The fundamental goal of model reduction is to efficiently make physics-based predictions. 
Given synthetic or experimental data that was generated or collected under a certain set of conditions, we aim to construct a cost-effective model that produces accurate solutions under new sets of conditions. This tutorial explores the following prediction problems for the heat equation example of {cite}`peherstorfer2016opinf`:\n", - "1. Predicting **forward in time**.\n", - "2. Using new time-dependent **boundary conditions**.\n", - "3. Changing the **system parameters** (e.g., coefficients in the governing equation)." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "toc-nb-collapsed": true - }, - "source": [ - "## Problem Statement" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ":::{admonition} Governing Equations\n", - ":class: attention\n", - "\n", - "Let $\\Omega = [0,L]\\subset \\mathbb{R}$ be the spatial domain indicated by the variable $x$, and let $[0,T]\\subset\\mathbb{R}$ be the time domain with variable $t$. We consider the one-dimensional heat equation with non-homogeneous Dirichlet boundary conditions,\n", - "\n", - "\\begin{align*}\n", - " &\\frac{\\partial}{\\partial t} q(x,t;\\mu) = \\mu\\frac{\\partial^2}{\\partial x^2}q(x,t;\\mu)\n", - " & x &\\in\\Omega,\\quad t\\in[0,T],\n", - " \\\\\n", - " &q(0,t;\\mu) = q(L,t;\\mu) = u(t)\n", - " & t &\\in[0,T],\n", - " \\\\\n", - " &q(x,0;\\mu) = \\big(e^{\\alpha(x - 1)} + e^{-\\alpha x} - e^{-\\alpha}\\big)u(0)\n", - " & x &\\in \\Omega,\n", - "\\end{align*}\n", - "\n", - "where the constant $\\mu > 0$ is the thermal diffusivity, $\\alpha>0$ is constant, and $q(x,t;\\mu)$ is the unknown state variable. This is a model for a one-dimensional rod conducting heat with a fixed initial heat profile. The temperature at the ends of the rod are governed by the input function $u(t)$, but heat is allowed to diffuse through the rod and flow out at the ends of the domain. 
We aim to numerically solve for $q(x,t;\\mu)$ efficiently for all $t \\in [0,T]$ and/or for various choices of $u(t)$ and $\\mu$.\n", - ":::" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ":::{note}\n", - "This problem can be solved with a straightforward discretization of the spatial domain $\\Omega$ with little computational effort, so using model reduction to speed up the computation is not highly beneficial. However, the way that the user interacts with the package for this problem is highly similar for more complex problems.\n", - ":::" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Prediction in Time" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Our first objective is to get solutions in time beyond a set of available training data.\n", - "\n", - ":::{image} ../../images/summary.svg\n", - ":align: center\n", - ":width: 80 %\n", - ":::" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ":::{admonition} Objective\n", - ":class: attention\n", - "\n", - "Construct a reduced-order model (ROM) of the heat equation that is **predictive in time**. 
In other words, we will observe data for $t \\in [0, T']$ with $T' < T$, use that data to construct the ROM, and use the ROM to predict the solution for the entire time domain $[0,T]$.\n", - ":::" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Full-order Model Definition" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As in the last tutorial, we use a centered finite difference approximation for the spatial derivative to arrive at a first-order system, this time of the form\n", - "\n", - "$$\n", - " \\frac{\\text{d}}{\\text{d}t}\\mathbf{q}(t;\\mu)\n", - " = \\mathbf{A}(\\mu)\\mathbf{q}(t;\\mu) + \\mathbf{B}(\\mu)u(t),\n", - " \\qquad\n", - " \\mathbf{q}(0;\\mu)\n", - " = \\mathbf{q}_0.\n", - "$$ (eq_heat_fom_parametric)\n", - "\n", - ":::{dropdown} Discretization details\n", - "\n", - "We take an equidistant grid $\\{x_i\\}_{i=0}^{n+1} \\subset \\Omega$,\n", - "\n", - "\\begin{align*}\n", - " 0 &= x_0 < x_1 < \\cdots < x_n < x_{n+1} = L\n", - " &\n", - " &\\text{and}\n", - " &\n", - " \\delta x &= \\frac{L}{n+1} = x_{i+1} - x_{i},\\quad i=1,\\ldots,n-1.\n", - "\\end{align*}\n", - "\n", - "The boundary conditions prescribe $q(x_0,t) = q(x_{n+1},t) = u(t)$. 
Our goal is to compute $q(x,t)$ at the interior spatial points $x_{1},x_{2},\\ldots,x_{n}$ for various $t\\in[0,T]$, so we consider the state vector $\\mathbf{q}(t) = [~q(x_{1}, t)~\\cdots~q(x_{n}, t)~]^{\\top}\\in\\mathbb{R}^n$ and derive a system governing the evolution of $\\mathbf{q}(t)$ in time.\n", - "\n", - "Approximating the spatial derivative with a central finite difference approximation,\n", - "\n", - "$$\n", - " \\frac{\\partial^2}{\\partial x^2}q(x,t)\n", - " \\approx \\frac{q(x-\\delta x,t) - 2q(x,t) + q(x+\\delta x,t)}{(\\delta x)^2},\n", - "$$\n", - "\n", - "we arrive at the following matrices for the full-order model.\n", - "\n", - "\\begin{align*}\n", - " \\mathbf{A}(\\mu) &= \\frac{\\mu}{(\\delta x)^2}\\left[\\begin{array}{ccccc}\n", - " -2 & 1 & & & \\\\\n", - " 1 & -2 & 1 & & \\\\\n", - " & \\ddots & \\ddots & \\ddots & \\\\\n", - " & & 1 & -2 & 1 \\\\\n", - " & & & 1 & -2 \\\\\n", - " \\end{array}\\right] \\in\\mathbb{R}^{n\\times n},\n", - " &\n", - " \\mathbf{B}(\\mu) &= \\frac{\\mu}{(\\delta x)^2}\\left[\\begin{array}{c}\n", - " 1 \\\\ 0 \\\\ \\vdots \\\\ 0 \\\\ 1\n", - " \\end{array}\\right]\\in\\mathbb{R}^{n}.\n", - "\\end{align*}\n", - ":::\n", - "\n", - "The state $\\mathbf{q}(t;\\mu)$ implicity depends on the parameter $\\mu$ because the operators $\\mathbf{A}(\\mu)$ and $\\mathbf{B}(\\mu)$ are parameterized by $\\mu$.\n", - "For now, we set $\\mu = 1$ and simply write\n", - "\n", - "$$\n", - " \\frac{\\text{d}}{\\text{d}t}\\mathbf{q}(t)\n", - " = \\mathbf{A}\\mathbf{q}(t) + \\mathbf{B}u(t),\n", - " \\qquad\n", - " \\mathbf{q}(0)\n", - " = \\mathbf{q}_0.\n", - "$$\n", - "\n", - "This is the _full-order model_ (FOM), which we will use to generate training data for the time domain $[0, T'] \\subset [0, T]$." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Training Data Generation" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let $L = T = \\mu = 1$, $\\alpha = 100$, and suppose for now that the boundary conditions are given by the constant input function $u(t) \\equiv 1$.\n", - "We begin by simulating the full-order system described above with a uniform time step $\\delta t = 10^{-3}$, yielding $10^3 + 1 = 1001$ total time steps (1000 steps past the initial condition).\n", - "We will assume that we can only observe the first $k = 100$ time steps and use the ROM to predict the remaining $901$ steps." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "import scipy.linalg as la\n", - "import scipy.sparse as sparse\n", - "import matplotlib.pyplot as plt\n", - "\n", - "import opinf\n", - "\n", - "opinf.utils.mpl_config()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Construct the spatial domain.\n", - "L = 1 # Spatial domain length.\n", - "n = 2**7 - 1 # Spatial grid size.\n", - "x_all = np.linspace(0, L, n + 2) # Full spatial grid.\n", - "x = x_all[1:-1] # Interior spatial grid (where q is unknown).\n", - "dx = x[1] - x[0] # Spatial resolution.\n", - "\n", - "# Construct the temporal domain.\n", - "T = 1 # Temporal domain length (final simulation time).\n", - "K = T * 10**3 + 1 # Temporal grid size.\n", - "t = np.linspace(0, T, K) # Temporal grid.\n", - "dt = t[1] - t[0] # Temporal resolution.\n", - "\n", - "print(f\"Spatial step size\\tdx = {dx}\")\n", - "print(f\"Temporal step size\\tdt = {dt}\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Construct the full-order state matrix A.\n", - "dx2inv = 1 / dx**2\n", - "diags = np.array([1, -2, 1]) * dx2inv\n", - "A = sparse.diags(diags, 
[-1, 0, 1], (n, n))\n", - "\n", - "# Construct the full-order input matrix B.\n", - "B = np.zeros_like(x)\n", - "B[0], B[-1] = dx2inv, dx2inv" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Define the inputs.\n", - "input_func = np.ones_like # Constant input function u(t) = 1.\n", - "U_all = input_func(t) # Inputs over the time domain.\n", - "\n", - "# Construct the initial condition.\n", - "alpha = 100\n", - "q0 = np.exp(alpha * (x - 1)) + np.exp(-alpha * x) - np.exp(-alpha)\n", - "\n", - "print(f\"shape of A:\\t{A.shape}\")\n", - "print(f\"shape of B:\\t{B.shape}\")\n", - "print(f\"shape of q0:\\t{q0.shape}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Since this is a diffusive problem, we will use the implicit (backward) Euler method for solving the ODEs.\n", - "For the problem $\\frac{\\text{d}}{\\text{d}t}\\mathbf{q}(t) = \\mathbf{f}(t, \\mathbf{q}(t), \\mathbf{u}(t))$, implicit Euler is defined by the rule\n", - "\n", - "$$\n", - " \\mathbf{q}_{j+1} = \\mathbf{q}_{j} + \\delta t\\,\\mathbf{f}(t_{j+1},\\mathbf{q}_{j+1},u_{j+1}),\n", - "$$\n", - "\n", - "where $\\mathbf{q}_{j} := \\mathbf{q}(t_{j})$ and $u_{j} := u(t_{j})$.\n", - "With the form $\\mathbf{f}(t,\\mathbf{q}(t),u(t)) = \\mathbf{A}\\mathbf{q}(t) + \\mathbf{B}u(t)$, this becomes\n", - "\n", - "$$\n", - " \\mathbf{q}_{j+1} = (\\mathbf{I} - \\delta t \\mathbf{A})^{-1}\\left(\\mathbf{q}_{j} + \\delta t \\mathbf{B} u_{j+1}\\right),\n", - "$$\n", - "\n", - "where $\\mathbf{I}$ is the identity matrix." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "hide-input" - ] - }, - "outputs": [], - "source": [ - "def implicit_euler(t, q0, A, B, U):\n", - " \"\"\"Solve the system\n", - "\n", - " dq / dt = Aq(t) + Bu(t), q(0) = q0,\n", - "\n", - " over a uniform time domain via the implicit Euler method.\n", - "\n", - " Parameters\n", - " ----------\n", - " t : (k,) ndarray\n", - " Uniform time array over which to solve the ODE.\n", - " q0 : (n,) ndarray\n", - " Initial condition.\n", - " A : (n, n) ndarray\n", - " State matrix.\n", - " B : (n,) or (n, 1) ndarray\n", - " Input matrix.\n", - " U : (k,) ndarray\n", - " Inputs over the time array.\n", - "\n", - " Returns\n", - " -------\n", - " q : (n, k) ndarray\n", - " Solution to the ODE at time t; that is, q[:,j] is the\n", - " computed solution corresponding to time t[j].\n", - " \"\"\"\n", - " # Check and store dimensions.\n", - " k = len(t)\n", - " n = len(q0)\n", - " B = np.ravel(B)\n", - " assert A.shape == (n, n)\n", - " assert B.shape == (n,)\n", - " assert U.shape == (k,)\n", - " I = np.eye(n)\n", - "\n", - " # Check that the time step is uniform.\n", - " dt = t[1] - t[0]\n", - " assert np.allclose(np.diff(t), dt)\n", - "\n", - " # Factor I - dt*A for quick solving at each time step.\n", - " factored = la.lu_factor(I - dt * A)\n", - "\n", - " # Solve the problem by stepping in time.\n", - " q = np.empty((n, k))\n", - " q[:, 0] = q0.copy()\n", - " for j in range(1, k):\n", - " q[:, j] = la.lu_solve(factored, q[:, j - 1] + dt * B * U[j])\n", - "\n", - " return q" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Compute snapshots by solving the equation with implicit_euler().\n", - "Q_all = implicit_euler(t, q0, A, B, U_all)\n", - "\n", - "# Retain only the first k snapshots/inputs for training the ROM.\n", - "k = 100 # Number of training snapshots.\n", - "t_train = t[:k] # Temporal domain for training 
snapshots.\n", - "Q = Q_all[:, :k] # Observed snapshots." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Finally, we visualize the snapshots to get a sense of how the solution looks qualitatively." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "hide-input" - ] - }, - "outputs": [], - "source": [ - "def plot_heat_data(Z, title, ax=None):\n", - " \"\"\"Visualize temperature data in space and time.\"\"\"\n", - " if ax is None:\n", - " _, ax = plt.subplots(1, 1)\n", - "\n", - " # Plot a few snapshots over the spatial domain.\n", - " sample_columns = [0, 10, 20, 40, 80, 160, 320, 640]\n", - " sample_columns = [0] + [2**d for d in range(10)]\n", - " color = iter(plt.cm.viridis_r(np.linspace(0.05, 1, len(sample_columns))))\n", - " while sample_columns[-1] > Z.shape[1]:\n", - " sample_columns.pop()\n", - " leftBC, rightBC = [input_func(x_all[0])], [input_func(x_all[-1])]\n", - " for j in sample_columns:\n", - " q_all = np.concatenate([leftBC, Z[:, j], rightBC])\n", - " ax.plot(x_all, q_all, color=next(color), label=rf\"$q(x,t_{{{j}}})$\")\n", - "\n", - " ax.set_xlim(x_all[0], x_all[-1])\n", - " ax.set_xlabel(r\"$x$\")\n", - " ax.set_ylabel(r\"$q(x,t)$\")\n", - " ax.legend(loc=(1.05, 0.05))\n", - " ax.set_title(title)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "fig, [ax1, ax2] = plt.subplots(1, 2)\n", - "plot_heat_data(Q, \"Snapshot data for training\", ax1)\n", - "plot_heat_data(Q_all, \"Full-order model solution\", ax2)\n", - "ax1.legend([])\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### ROM Construction" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now that we have snapshot data $\\mathbf{Q} \\in \\mathbb{R}^{n \\times k}$, we can construct [a basis matrix](opinf.basis) $\\mathbf{V}_r \\in \\mathbb{R}^{n \\times r}$. 
The basis matrix relates the high-dimensional and low-dimensional by $\\mathbf{q}(t) = \\mathbf{V}_{r}\\widehat{\\mathbf{q}}(t)$.\n", - "\n", - "For operator inference (OpInf), we often use the [proper orthogonal decomposition](opinf.basis.PODBasis) (POD) basis. The integer $r$, which defines the dimension of the reduced-order model to be constructed, is usually determined by how quickly the singular values of $\\mathbf{Q}$ decay. In this example, we choose the minimal $r$ such that the [residual energy](opinf.basis.residual_energy) is less than a given tolerance $\\varepsilon$, i.e.,\n", - "\n", - "$$\n", - "\\frac{\\sum_{j=r + 1}^{k}\\sigma_{j}^{2}}{\\sum_{j=1}^{k}\\sigma_{j}^{2}} = \\frac{||\\mathbf{Q} - \\mathbf{V}_r \\mathbf{V}_r^{\\top}\\mathbf{Q}||_{F}^{2}}{||\\mathbf{Q}||_{F}^{2}} < \\varepsilon.\n", - "$$" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Compute the POD basis, using the residual energy decay to select r.\n", - "basis = opinf.basis.PODBasis(residual_energy=1e-8).fit(Q)\n", - "print(basis)\n", - "\n", - "# Check the decay of the singular values and the associated residual energy.\n", - "basis.plot_energy(right=25)\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "::::{margin}\n", - ":::{note}\n", - "In this case, since $u(t) \\equiv 1$ is constant, we could equivalently set `modelform=\"cA\"` to learn a ROM of the form $\\frac{\\text{d}}{\\text{d}t}\\widehat{\\mathbf{q}}(t) = \\widehat{\\mathbf{c}} + \\widehat{\\mathbf{A}}\\widehat{\\mathbf{q}}(t)$, where $\\widehat{\\mathbf{c}}$ is a constant term.\n", - "There is no difference between the two models, i.e., $\\widehat{\\mathbf{c}} = \\widehat{\\mathbf{B}}u(t) = \\widehat{\\mathbf{B}}$, except that `modelform=\"AB\"` allows us to use different inputs for $u(t)$ later on.\n", - ":::\n", - "::::" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we can 
learn the reduced model with OpInf.\n", - "Because the full-order model is of the form $\\frac{\\text{d}}{\\text{d}t}\\mathbf{q}(t) = \\mathbf{A}\\mathbf{q}(t) + \\mathbf{B}u(t)$, we construct a reduced-order model of the form $\\frac{\\text{d}}{\\text{d}t}\\widehat{\\mathbf{q}}(t) = \\widehat{\\mathbf{A}}\\widehat{\\mathbf{q}}(t) + \\widehat{\\mathbf{B}}u(t)$." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Instantiate the model.\n", - "model = opinf.models.ContinuousModel(\n", - " operators=[\n", - " opinf.operators.LinearOperator(),\n", - " opinf.operators.InputOperator(),\n", - " ]\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To train the model, we need to compress the snapshot data to the low-dimensional subspace defined by the basis.\n", - "We also need the time derivatives $\\frac{d}{dt}\\mathbf{q}(t)$ of the training snapshots.\n", - "If $\\mathbf{A}$ and $\\mathbf{B}$ are known, we can set $\\dot{\\mathbf{q}}_{j} = \\mathbf{A}\\mathbf{q}_{j} + \\mathbf{B}u_{j}$. If we do not have access to $\\mathbf{A}$ and $\\mathbf{B}$, we can estimate the time derivatives using finite differences.\n", - "In this case, we use first-order backward differences." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Compress the snapshot data.\n", - "Q_compressed = basis.compress(Q)\n", - "\n", - "# Estimate time derivatives (dq/dt) for each training snapshot.\n", - "Q_train, Qdot_train, U_train = opinf.ddt.bwd1(Q_compressed, dt, U_all[:k])\n", - "\n", - "print(f\"shape of Q_train:\\t{Q_train.shape}\")\n", - "print(f\"shape of Qdot_train:\\t{Qdot_train.shape}\")\n", - "print(f\"shape of U_train:\\t{U_train.shape}\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Train the reduced-order model.\n", - "model.fit(states=Q_train, ddts=Qdot_train, inputs=U_train)\n", - "print(model)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Model Evaluation" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Like the FOM, we integrate the learned ROM using the implicit Euler method, using the reduced-order operators $\\widehat{\\mathbf{A}}$ and $\\widehat{\\mathbf{B}}$ and the initial condition $\\widehat{\\mathbf{q}}_{0} = \\mathbf{V}^{\\mathsf{T}}\\mathbf{q}_{0}$.\n", - "The resulting low-dimensional state vectors are reconstructed in the full-dimensional space via $\\mathbf{q}(t) = \\mathbf{V}_{r}\\widehat{\\mathbf{q}}(t)$." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Express the initial condition in the coordinates of the basis.\n", - "q0_ = basis.compress(q0)\n", - "\n", - "# Solve the reduced-order model using Implicit Euler.\n", - "Q_ROM = basis.decompress(\n", - " implicit_euler(t, q0_, model.A_.entries, model.B_.entries, U_all)\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "fig, [ax1, ax2] = plt.subplots(1, 2)\n", - "plot_heat_data(Q_ROM, \"Reduced-order model solution\", ax1)\n", - "plot_heat_data(Q_all, \"Full-order model solution\", ax2)\n", - "ax1.legend([])\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To quantify the accuracy of the ROM, we evaluate the ROM solution error in the Frobenius norm and compare it to the projection error,\n", - "\n", - "$$\n", - " \\text{err}_{\\text{ROM}}\n", - " = \\frac{||\\mathbf{Q}_{\\text{all}} - \\mathbf{Q}_{\\text{ROM}}||_F}{||\\mathbf{Q}_{\\text{all}}||_F},\n", - " \\qquad\n", - " \\text{err}_{\\text{proj}}\n", - " = \\frac{||\\mathbf{Q}_{\\text{all}} - \\mathbf{V}_{r}\\mathbf{V}_{r}^{\\top}\\mathbf{Q}_{\\text{all}}||_F}{||\\mathbf{Q}_{\\text{all}}||_F},\n", - "$$\n", - "\n", - "where $\\mathbf{Q}_{\\text{all}}$ is the full-order model solution over the entire time domain and $\\mathbf{Q}_{\\text{ROM}}$ is the reduced-order model solution." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "rel_froerr_projection = basis.projection_error(Q_all, relative=True)\n", - "rel_froerr_opinf = opinf.post.frobenius_error(Q_all, Q_ROM)[1]\n", - "\n", - "print(\n", - " \"Relative Frobenius-norm errors\",\n", - " \"-\" * 33,\n", - " f\"projection error:\\t{rel_froerr_projection:%}\",\n", - " f\"OpInf ROM error:\\t{rel_froerr_opinf:%}\",\n", - " sep=\"\\n\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The ROM error cannot be better than the projection error, but the two are pretty close. We also compare the ROM error with the projection error as a function of time, i.e.,\n", - "\n", - "$$\n", - " \\text{err}_{\\text{ROM}}(t)\n", - " = \\frac{\\|\\mathbf{q}(t) - \\mathbf{q}_{\\text{ROM}}(t)\\|_{2}}{\\|\\mathbf{q}(t)\\|_{2}},\n", - " \\qquad\n", - " \\text{err}_{\\text{proj}}(t)\n", - " = \\frac{\\|\\mathbf{q}(t) - \\mathbf{V}_{r}\\mathbf{V}_{r}^{\\mathsf{T}}\\mathbf{q}(t)\\|_{2}}{\\|\\mathbf{q}(t)\\|_{2}},\n", - "$$\n", - "\n", - "where $\\mathbf{q}(t)$ is the full-order solution and $\\mathbf{q}_{\\text{ROM}}(t)$ is the ROM solution at time $t$." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ":::{tip}\n", - "In this problem, $\\mathbf{q}(t) \\to \\mathbf{0}$ as $t$ increases, so a relative error may not be appropriate since $\\|\\mathbf{q}(t)\\|_{2}$ appears in the denominator.\n", - "In situations like this, consider using the _normalized absolute error_ by replacing the denominator with $\\max_{\\tau\\in[0,T]}\\|\\mathbf{q}(t)\\|,$ for example:\n", - "\n", - "$$\n", - "\\begin{aligned}\n", - " \\text{err}_{\\text{ROM}}(t)\n", - " = \\frac{\\|\\mathbf{q}(t) - \\mathbf{q}_{\\text{ROM}}(t)\\|_{2}}{\\max_{\\tau\\in[0,T]}\\|\\mathbf{q}(\\tau)\\|_{2}}.\n", - "\\end{aligned}\n", - "$$\n", - "\n", - "Use `normalize=True` in `opinf.post.lp_error()` to use this error measure instead of the relative error.\n", - ":::" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "projerr_in_time = opinf.post.lp_error(\n", - " Q_all,\n", - " basis.project(Q_all),\n", - " normalize=True,\n", - ")[1]\n", - "\n", - "\n", - "def plot_errors_over_time(Zlist, labels):\n", - " \"\"\"Plot normalized absolute projection error and ROM errors\n", - " as a function of time.\n", - "\n", - " Parameters\n", - " ----------\n", - " Zlist : list((n, k) ndarrays)\n", - " List of reduced-order model solutions.\n", - " labels : list(str)\n", - " Labels for each of the reduced-order models.\n", - " \"\"\"\n", - " fig, ax = plt.subplots(1, 1)\n", - "\n", - " ax.semilogy(t, projerr_in_time, \"C3\", label=\"Projection Error\")\n", - " colors = [\"C0\", \"C5\"]\n", - " for Z, label, c in zip(Zlist, labels, colors[: len(Zlist)]):\n", - " rel_err = opinf.post.lp_error(Q_all, Z, normalize=True)[1]\n", - " plt.semilogy(t, rel_err, c, label=label)\n", - "\n", - " ax.set_xlim(t[0], t[-1])\n", - " ax.set_xlabel(r\"$t$\")\n", - " ax.set_ylabel(\"Normalized absolute error\")\n", - " ax.legend(loc=\"lower right\")\n", - " plt.show()" - ] - }, - { - "cell_type": "code", - 
"execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plot_errors_over_time([Q_ROM], [\"ROM Error\"])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Comparison with Intrusive Projection" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In the limit as the amount of training data $k$ and the dimension $r$ increases, the reduced operators $\\widehat{\\mathbf{A}}$ and $\\widehat{\\mathbf{B}}$ learned through OpInf converge to the corresponding operators obtained through _intrusive projection_,\n", - "\n", - "\\begin{align*}\n", - " \\widetilde{\\mathbf{A}} &= \\mathbf{V}_{r}^{\\mathsf{T}} \\mathbf{A} \\mathbf{V}_{r},\n", - " &\n", - " \\widetilde{\\mathbf{B}} &= \\mathbf{V}_{r}^{\\mathsf{T}}\\mathbf{B}.\n", - "\\end{align*}\n", - "\n", - "Computing $\\widetilde{\\mathbf{A}}$ and $\\widetilde{\\mathbf{B}}$ is considered \"intrusive\" because it requires explicit access to the full-order operators $\\mathbf{A}$ and $\\mathbf{B}$." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "Vr = basis.entries\n", - "Atilde = Vr.T @ A @ Vr\n", - "Btilde = Vr.T @ B\n", - "q0_ = basis.compress(q0)\n", - "Q_ROM_intrusive = basis.decompress(\n", - " implicit_euler(t, q0_, Atilde, Btilde, U_all)\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "fig, [ax1, ax2] = plt.subplots(1, 2)\n", - "plot_heat_data(Q_ROM, \"OpInf ROM solution\", ax1)\n", - "plot_heat_data(Q_ROM_intrusive, \"Intrusive ROM solution\", ax2)\n", - "ax1.legend([])\n", - "plt.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "rel_froerr_intrusive = opinf.post.frobenius_error(Q_all, Q_ROM_intrusive)[1]\n", - "\n", - "print(\n", - " \"Relative Frobenius-norm errors\",\n", - " \"-\" * 33,\n", - " f\"projection error:\\t{rel_froerr_projection:%}\",\n", - " f\"OpInf ROM error:\\t{rel_froerr_opinf:%}\",\n", - " f\"intrusive ROM error:\\t{rel_froerr_intrusive:%}\",\n", - " sep=\"\\n\",\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plot_errors_over_time(\n", - " [Q_ROM, Q_ROM_intrusive],\n", - " [\"OpInf ROM Error\", \"Intrusive ROM Error\"],\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's repeat the experiment with different choices of $r$ to see how the size of the ROM affects its accuracy." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "full_order_operators = [\n", - " opinf.operators.LinearOperator(A.toarray()),\n", - " opinf.operators.InputOperator(B),\n", - "]\n", - "\n", - "\n", - "def run_trial(r):\n", - " \"\"\"Do OpInf / intrusive ROM prediction with r basis vectors.\"\"\"\n", - " basis.set_dimension(num_vectors=r)\n", - " q0_ = basis.compress(q0)\n", - " Q_compressed = basis.compress(Q)\n", - " Q_train, Qdot_train, U_train = opinf.ddt.bwd1(Q_compressed, dt, U_all[:k])\n", - "\n", - " intrusive_reduced_operators = [\n", - " op.galerkin(basis.entries) for op in full_order_operators\n", - " ]\n", - "\n", - " # Construct and simulate the intrusive ROM.\n", - " model_intrusive = opinf.models.ContinuousModel(intrusive_reduced_operators)\n", - " Q_ROM_intrusive = basis.decompress(\n", - " implicit_euler(\n", - " t,\n", - " q0_,\n", - " model_intrusive.A_.entries,\n", - " model_intrusive.B_.entries,\n", - " U_all,\n", - " )\n", - " )\n", - "\n", - " # Construct and simulate the operator inference ROM.\n", - " model_opinf = opinf.models.ContinuousModel(\"AB\").fit(\n", - " states=Q_train,\n", - " ddts=Qdot_train,\n", - " inputs=U_train,\n", - " )\n", - " Q_ROM_opinf = basis.decompress(\n", - " implicit_euler(\n", - " t,\n", - " q0_,\n", - " model_opinf.A_.entries,\n", - " model_opinf.B_.entries,\n", - " U_all,\n", - " )\n", - " )\n", - "\n", - " # Calculate errors.\n", - " projection_error = basis.projection_error(Q_all, relative=True)\n", - " intrusive_error = opinf.post.frobenius_error(Q_all, Q_ROM_intrusive)[1]\n", - " opinf_error = opinf.post.frobenius_error(Q_all, Q_ROM_opinf)[1]\n", - "\n", - " return projection_error, intrusive_error, opinf_error" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "hide-input" - ] - }, - "outputs": [], - "source": [ - "def plot_state_error(rmax, runner, ylabel):\n", - " \"\"\"Run the experiment for r = 
1, ..., rmax and plot results.\"\"\"\n", - " rs = np.arange(1, rmax + 1)\n", - " err_projection, err_intrusive, err_opinf = zip(*[runner(r) for r in rs])\n", - "\n", - " _, ax = plt.subplots(1, 1)\n", - " ax.semilogy(\n", - " rs,\n", - " err_projection,\n", - " \"C3-\",\n", - " label=\"projection error\",\n", - " lw=1,\n", - " )\n", - " ax.semilogy(\n", - " rs,\n", - " err_intrusive,\n", - " \"C5+-\",\n", - " label=\"intrusive ROM error\",\n", - " lw=1,\n", - " mew=2,\n", - " )\n", - " ax.semilogy(\n", - " rs,\n", - " err_opinf,\n", - " \"C0o-\",\n", - " label=\"OpInf ROM error\",\n", - " lw=1,\n", - " mfc=\"none\",\n", - " mec=\"C0\",\n", - " mew=1.5,\n", - " )\n", - "\n", - " ax.set_xlim(rs.min(), rs.max())\n", - " ax.set_xticks(rs, [str(int(r)) for r in rs])\n", - " ax.set_xlabel(r\"Reduced dimension $r$\")\n", - " ax.set_ylabel(ylabel)\n", - " ax.grid(ls=\":\")\n", - " ax.legend(loc=\"upper right\", fontsize=14, frameon=True, framealpha=1)\n", - " plt.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plot_state_error(15, run_trial, \"Relative Frobenius-norm error\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ":::{admonition} Takeaway\n", - ":class: attention\n", - "In this case, the operator inference and intrusive ROMs give essentially the same result.\n", - "However, the operator inference ROM successfully emulates the FOM **without explicit access to** $\\mathbf{A}$ **and** $\\mathbf{B}$.\n", - ":::" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ":::{admonition} On Convergence\n", - ":class: warning\n", - "The figure above conveys a sense of convergence: as the reduced dimension $r$ increases, the ROM error decreases. In more complex problems, **the error does not always decrease monotonically as $r$ increases**. 
In fact, at some point as $r$ increases performance often deteriorates significantly due to poor conditioning in the operator inference regression. In practice, choose a reduced dimension $r$ that balances solution accuracy with computational speed, not too small but also not too large.\n", - ":::" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## New Boundary Conditions" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Our heat equation has Dirichlet boundary conditions given by\n", - "\n", - "$$\n", - "q(0,t;\\mu) = q(L,t;\\mu) = u(t).\n", - "$$\n", - "\n", - "In this section we consider the role of $u(t)$, which governs the boundary equations." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ":::{admonition} Objective\n", - ":class: attention\n", - "\n", - "Construct a reduced-order model (ROM) of the heat equation that can be used for various sets of boundary conditions. We will observe data for some $u(t)$ and use the ROM to predict the solution for new choices of $u(t)$.\n", - ":::" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Note that the full-order model defined in the previous section is valid for arbitrary $u(t)$." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Training Data Generation" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Define several sets of boundary condition inputs.\n", - "Us_all = [\n", - " np.ones_like(t), # u(t) = 1.0\n", - " np.exp(-t), # u(t) = e^(-t)\n", - " 1 + t**2 / 2, # u(t) = 1 + .5 t^2\n", - " 1 - np.sin(np.pi * t) / 2, # u(t) = 1 - sin(πt)/2\n", - " 1 - np.sin(3 * np.pi * t) / 3, # u(t) = 1 - sin(3πt)/2\n", - " 1 + 25 * (t * (t - 1)) ** 3, # u(t) = 1 + 25(t(t - 1))^3\n", - " 1 + np.sin(np.pi * t) * np.exp(-2 * t), # u(t) = 1 + sin(πt)e^(-t)\n", - "]\n", - "\n", - "k = 300 # Number of training snapshots." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Note that $u(0) = 1$ for each of our boundary inputs, which is consistent with the initial condition `q0` used earlier.\n", - "We will gather data for the first few inputs, learn a ROM from the data, and test the ROM on the remaining inputs." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Split into training / testing sets.\n", - "Us_all_train = Us_all[:4]\n", - "Us_all_test = Us_all[4:]\n", - "\n", - "# Visualize the input functions.\n", - "fig, [ax1, ax2] = plt.subplots(1, 2)\n", - "c = 0\n", - "for U in Us_all_train:\n", - " ax1.plot(t, U, color=f\"C{c}\")\n", - " c += 1\n", - "for U in Us_all_test:\n", - " ax2.plot(t, U, color=f\"C{c}\")\n", - " c += 1\n", - "\n", - "ax1.set_title(\"Training inputs\")\n", - "ax2.set_title(\"Testing inputs\")\n", - "ax1.axvline(t[k], color=\"k\")\n", - "for ax in (ax1, ax2):\n", - " ax.set_xlim(0, 1)\n", - " ax.set_xlabel(r\"$t$\")\n", - " ax.set_ylabel(r\"$u(t)$\")\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We only record the first $k$ snapshots corresponding to each of the training inputs, so we are still predicting in time as in the previous section." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Compute snapshots by solving the equation with implicit_euler().\n", - "Qs_all = [implicit_euler(t, q0, A, B, U) for U in Us_all]\n", - "Qs_all_train = Qs_all[: len(Us_all_train)]\n", - "Qs_all_test = Qs_all[len(Us_all_train) :]\n", - "\n", - "# Retain only the first k snapshots/inputs for training the ROM.\n", - "t_train = t[:k] # Temporal domain for training snapshots.\n", - "Qs = [Q[:, :k] for Q in Qs_all_train] # Observed snapshots.\n", - "\n", - "# Compute time derivatives (dq/dt) for each snapshot and stack training data.\n", - "Qs_train, Qdots_train, Us_train = [\n", - " np.hstack(X)\n", - " for X in zip(\n", - " *[opinf.ddt.bwd1(Q, dt, U[:k]) for Q, U in zip(Qs, Us_all_train)]\n", - " )\n", - "]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### ROM Construction" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Compute a basis from all of the training snapshots.\n", - "basis = opinf.basis.PODBasis(residual_energy=1e-8).fit(Qs)\n", - "print(basis)\n", - "\n", - "# Express the initial condition in the coordinates of the new basis.\n", - "q0_ = basis.compress(q0)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Train a reduced-order model using the training data.\n", - "model = opinf.models.ContinuousModel(\"AB\")\n", - "model.fit(\n", - " states=basis.compress(Qs_train),\n", - " ddts=basis.compress(Qdots_train),\n", - " inputs=Us_train,\n", - ")\n", - "print(model)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### ROM Evaluation" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We now test the learned ROM on both the training and testing inputs." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def plot_errors_over_time_inputs(Q_ROMs, Q_trues, cidx=0):\n", - " \"\"\"Plot normalized absolute projection error and ROM errors\n", - " as a function of time.\n", - "\n", - " Parameters\n", - " ----------\n", - " Q_ROMs : list((n, k) ndarrays)\n", - " List of reduced-order model solutions.\n", - " Q_trues : list(str)\n", - " List of full-order model solutions.\n", - " \"\"\"\n", - " _, ax = plt.subplots(1, 1)\n", - "\n", - " for Q_ROM, Q_true in zip(Q_ROMs, Q_trues):\n", - " rel_err = opinf.post.lp_error(Q_true, Q_ROM, normalize=True)[1]\n", - " plt.semilogy(t, rel_err, color=f\"C{cidx}\")\n", - " cidx += 1\n", - "\n", - " ax.set_xlim(t[0], t[-1])\n", - " ax.set_xlabel(r\"$t$\")\n", - " ax.set_ylabel(\"Normalized absolute error\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Test ROM accuracy on the training inputs.\n", - "Qs_ROM_train = [\n", - " basis.decompress(\n", - " implicit_euler(t, q0_, model.A_.entries, model.B_.entries, U)\n", - " )\n", - " for U in Us_all_train\n", - "]\n", - "plot_errors_over_time_inputs(Qs_ROM_train, Qs_all_train)\n", - "plt.title(\"ROM error with training inputs\")\n", - "\n", - "# Test ROM accuracy on the testing inputs.\n", - "Qs_ROM_test = [\n", - " basis.decompress(\n", - " implicit_euler(t, q0_, model.A_.entries, model.B_.entries, U)\n", - " )\n", - " for U in Us_all_test\n", - "]\n", - "plot_errors_over_time_inputs(Qs_ROM_test, Qs_all_test, cidx=len(Qs_ROM_train))\n", - "plt.title(\"ROM error with testing inputs\")\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In this experiment, the training and testing error are similar and small (less than 0.1%) throughout the time domain. We conclude this section by checking the average ROM error on the test inputs as a function of basis size." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def run_trial_inputs(r):\n", - " \"\"\"Do OpInf / intrusive ROM prediction with r basis vectors.\"\"\"\n", - " basis.set_dimension(num_vectors=r)\n", - " q0_ = basis.compress(q0)\n", - "\n", - " # Construct the intrusive ROM.\n", - " model_intrusive = opinf.models.ContinuousModel(\n", - " [op.galerkin(basis.entries) for op in full_order_operators]\n", - " )\n", - "\n", - " # Construct the operator inference ROM from the training data.\n", - " model_opinf = opinf.models.ContinuousModel(operators=\"AB\")\n", - " model_opinf.fit(\n", - " states=basis.compress(Qs_train),\n", - " ddts=basis.compress(Qdots_train),\n", - " inputs=Us_train,\n", - " )\n", - "\n", - " # Test the ROMs at each testing input.\n", - " projection_error, intrusive_error, opinf_error = 0, 0, 0\n", - " for Q, U in zip(Qs_all_test, Us_all_test):\n", - " # Simulate the intrusive ROM for this testing input.\n", - " Q_ROM_intrusive = basis.decompress(\n", - " implicit_euler(\n", - " t,\n", - " q0_,\n", - " model_intrusive.A_.entries,\n", - " model_intrusive.B_.entries,\n", - " U,\n", - " )\n", - " )\n", - "\n", - " # Simulate the operator inference ROM for this testing input.\n", - " Q_ROM_opinf = basis.decompress(\n", - " implicit_euler(\n", - " t, q0_, model_opinf.A_.entries, model_opinf.B_.entries, U\n", - " )\n", - " )\n", - "\n", - " # Calculate errors.\n", - " projection_error += basis.projection_error(Q, relative=True)\n", - " intrusive_error += opinf.post.frobenius_error(Q, Q_ROM_intrusive)[1]\n", - " opinf_error += opinf.post.frobenius_error(Q, Q_ROM_opinf)[1]\n", - "\n", - " # Average the relative errors.\n", - " projection_error /= len(Us_all_test)\n", - " intrusive_error /= len(Us_all_test)\n", - " opinf_error /= len(Us_all_test)\n", - "\n", - " return projection_error, intrusive_error, opinf_error" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, 
- "outputs": [], - "source": [ - "plot_state_error(\n", - " 15,\n", - " run_trial_inputs,\n", - " \"Average relative\\nFrobenius-norm error\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This experiment shows that the operator inference ROMs is robust to new boundary conditions; in other words, the ROM learns an input operator $\\widehat{\\mathbf{B}}$ that performs well for multiple choices of the input $u(t)$." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Prediction in Parameter Space" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Recall the governing equation,\n", - "\n", - "$$\n", - " \\frac{\\partial}{\\partial t} q(x,t;{\\color{teal}\\mu})\n", - " = {\\color{teal}\\mu}\\frac{\\partial^2}{\\partial x^2}q(x,t;{\\color{teal}\\mu}).\n", - "$$\n", - "\n", - "In this section we examine the role of the constant $\\mu > 0$, the heat diffusivity parameter." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ":::{admonition} Objective\n", - ":class: attention\n", - "\n", - "Construct a ROM of the heat equation that can be solved for different choices of the diffusivity parameter $\\mu > 0$.\n", - "We will observe data for a few values of $\\mu$ and use the ROM to predict the solution for new values of $\\mu$. 
As before, we also aim to be predictive in time.\n", - ":::" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Full-order Model Definition" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We solved this problem earlier for fixed $\\mu = 1$.\n", - "For variable $\\mu$, {eq}`eq_heat_fom_parametric` defines the full-order model:\n", - "\n", - "$$\n", - " \\frac{\\text{d}}{\\text{d}t}\\mathbf{q}(t;\\mu)\n", - " = \\mathbf{A}(\\mu)\\mathbf{q}(t;\\mu) + \\mathbf{B}(\\mu)u(t),\n", - " \\qquad\n", - " \\mathbf{q}(0;\\mu)\n", - " = \\mathbf{q}_0.\n", - "$$\n", - "\n", - "Note that $\\mathbf{A}(\\mu) = \\mu \\mathbf{A}(1)$ and $\\mathbf{B}(\\mu) = \\mu \\mathbf{B}(1)$, and that $\\mathbf{A}(1)$ and $\\mathbf{B}(1)$ are the full-order operators we constructed previously." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Training Data Generation" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We consider the parameter domain $\\mathcal{D} = [.1, 10] \\subset \\mathbb{R}$.\n", - "Taking $s$ logarithmically spaced samples $\\{\\mu_i\\}_{i=1}^{s}\\subset\\mathcal{D}$, we solve the full-order model over $[0, T']$ for each parameter sample.\n", - "For each parameter $\\mu_{i}$, the resulting snapshots matrix is denoted as $\\mathbf{Q}(\\mu_{i})\\in \\mathbb{R}^{n \\times k}$.\n", - "We choose $s = 10$ training parameters in the following experiment." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Get s logarithmically spaced paraneter values from D = [.1, 10].\n", - "s = 10 # Number of parameter samples.\n", - "params = np.logspace(-1, 1, s)\n", - "params" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Retain only the first k snapshots/inputs for training the ROM.\n", - "k = 600 # Number of training snapshots.\n", - "t_train = t[:k] # Temporal domain for training snapshots.\n", - "U_train = U_all[:k]\n", - "\n", - "# Solve the full-order model at each of the parameter samples.\n", - "Qs = [implicit_euler(t_train, q0, p * A, p * B, U_train) for p in params]\n", - "Qs_train, Qdots_train, Us_train = zip(\n", - " *[opinf.ddt.bwd1(Q, dt, U_train) for Q in Qs]\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### ROM Construction" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "A (global) POD basis can be constructed from the concatenation of the individual snapshot matrices,\n", - "\n", - "$$\n", - " \\mathbf{Q}\n", - " = \\left[~\\mathbf{Q}(\\mu_1)~\\cdots~\\mathbf{Q}(\\mu_s)~\\right]\n", - " \\in\\mathbb{R}^{n \\times sk}.\n", - "$$\n", - "\n", - "We can select the reduced dimension $r$ as before by examining the residual energy of the singular values." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Compute the POD basis, using the residual energy decay to select r.\n", - "basis = opinf.basis.PODBasis(residual_energy=1e-8).fit(Qs)\n", - "print(basis)\n", - "\n", - "basis.plot_energy(right=30)\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Alternatively, we could choose $r$ so that the average relative projection error,\n", - "\n", - "$$\n", - " \\text{avgerr}_\\text{proj} = \\frac{1}{s}\\sum_{i=1}^{s}\\frac{||\\mathbf{Q}(\\mu_i) - \\mathbf{V}_r \\mathbf{V}_r^{\\top}\\mathbf{Q}(\\mu_i)||_F}{||\\mathbf{Q}(\\mu_i)||_F},\n", - "$$\n", - "\n", - "is below a certain threshold, say $10^{-5}$." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def average_relative_projection_error(r):\n", - " \"\"\"Compute the average relative projection error with r basis vectors.\"\"\"\n", - " oldr = basis.reduced_state_dimension\n", - " basis.set_dimension(num_vectors=r)\n", - " avgerr = np.mean([basis.projection_error(Q, relative=True) for Q in Qs])\n", - " basis.set_dimension(num_vectors=oldr)\n", - " return avgerr" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "hide-input" - ] - }, - "outputs": [], - "source": [ - "rs = np.arange(1, 21)\n", - "errors = [average_relative_projection_error(r) for r in rs]\n", - "\n", - "fig, ax = plt.subplots(1, 1)\n", - "ax.axhline(1e-5, color=\"gray\", lw=1)\n", - "ax.axvline(10, color=\"gray\", lw=1)\n", - "ax.semilogy(rs, errors, \"C3.-\", ms=10)\n", - "\n", - "ax.set_xticks(rs[::2])\n", - "ax.set_xlabel(r\"$r$\")\n", - "ax.set_ylabel(\"Average relative\\nprojection error\")\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Based on these criteria, we choose $r = 10$." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "basis.set_dimension(num_vectors=10)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Interpolatory Operator Inference" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "There are several strategies to account for the parameter $\\mu$. The reduced-order operators obtained through Galerkin projection are given by\n", - "\n", - "$$\n", - " \\widetilde{\\mathbf{A}}(\\mu)\n", - " = \\mathbf{V}_{r}^{\\mathsf{T}} \\mathbf{A}(\\mu) \\mathbf{V}_{r},\n", - " \\qquad\n", - " \\widetilde{\\mathbf{B}}(\\mu)\n", - " = \\mathbf{V}_{r}^{\\mathsf{T}} \\mathbf{B}(\\mu).\n", - "$$\n", - "\n", - "Here, we perform interpolation on the entries of the reduced-order operators learned for each parameter sample. This means we learn a separate ROM for each $\\mu_i$, $i=1, \\ldots, s$, obtaining reduced-order operators $\\widehat{\\mathbf{A}}(\\mu_{i})$ and $\\widehat{\\mathbf{B}}(\\mu_{i})$.\n", - "Then, for a new parameter value $\\bar{\\mu}\\in\\mathcal{D}$, we interpolate the entries of the learned reduced model operators to create a new reduced model corresponding to $\\bar{\\mu}\\in\\mathcal{D}$.\n", - "The {class}`opinf.models.InterpolatedContinuousModel` class encapsulates this process." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Learn reduced models for each parameter value.\n", - "model = opinf.models.InterpolatedContinuousModel(\"AB\")\n", - "model.fit(\n", - " parameters=params,\n", - " states=basis.compress(Qs_train),\n", - " ddts=basis.compress(Qdots_train),\n", - " inputs=Us_train,\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### ROM Evaluation" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To test the ROM, we take $s - 1$ parameter values that lie between the training parameter values and compute the average relative state error,\n", - "\n", - "$$\n", - " \\text{avgerr}_\\text{ROM} = \\frac{1}{s - 1}\\sum_{i=1}^{s - 1}\\frac{||\\mathbf{Q}(\\mu_i) - \\mathbf{Q}_{\\text{ROM}}(\\mu_i)||_F}{||\\mathbf{Q}(\\mu_i)||_F},\n", - "$$\n", - "\n", - "where $\\mathbf{Q}_{\\text{ROM}}(\\mu_{i})$ is the ROM solution at the $i$-th test parameter value $\\mu_{i}$." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params_test = np.sqrt(params[:-1] * params[1:])\n", - "params_test" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "full_order_model = opinf.models.InterpolatedContinuousModel(\n", - " [\n", - " opinf.operators.InterpolatedLinearOperator(\n", - " params, [p * A.toarray() for p in params]\n", - " ),\n", - " opinf.operators.InterpolatedInputOperator(\n", - " params, [p * B for p in params]\n", - " ),\n", - " ]\n", - ")\n", - "\n", - "\n", - "def run_trial_parametric(r):\n", - " \"\"\"Do OpInf / intrusive ROM prediction with r basis vectors.\"\"\"\n", - " basis.set_dimension(num_vectors=r)\n", - " q0_ = basis.compress(q0)\n", - "\n", - " # Compute the intrusive ROM.\n", - " model_intrusive = full_order_model.galerkin(basis.entries)\n", - "\n", - " # Learn an operator inference ROM from the training data.\n", - " model_opinf = opinf.models.InterpolatedContinuousModel(\n", - " operators=\"AB\",\n", - " solver=opinf.lstsq.L2Solver(1e-12),\n", - " ).fit(\n", - " parameters=params,\n", - " states=basis.compress(Qs_train),\n", - " ddts=basis.compress(Qdots_train),\n", - " inputs=Us_train,\n", - " )\n", - "\n", - " # Test the ROM at each parameter in the test set.\n", - " projc_error, intru_error, opinf_error = 0, 0, 0\n", - " for p in params_test:\n", - " # Solve the FOM at this parameter value.\n", - " Ap = p * A\n", - " Bp = p * B\n", - " Q_FOM = implicit_euler(t, q0, Ap, Bp, U_all)\n", - "\n", - " # Simulate the intrusive ROM at this parameter value.\n", - " model = model_intrusive.evaluate(p)\n", - " Q_ROM_intrusive = basis.decompress(\n", - " implicit_euler(t, q0_, model.A_.entries, model.B_.entries, U_all)\n", - " )\n", - "\n", - " # Simulate the interpolating OpInf ROM at this parameter value.\n", - " model = model_opinf.evaluate(p)\n", - " Q_ROM_opinf = basis.decompress(\n", - " 
implicit_euler(t, q0_, model.A_.entries, model.B_.entries, U_all)\n", - " )\n", - "\n", - " # Calculate errors.\n", - " projc_error += basis.projection_error(Q_FOM, relative=True)\n", - " intru_error += opinf.post.frobenius_error(Q_FOM, Q_ROM_intrusive)[1]\n", - " opinf_error += opinf.post.frobenius_error(Q_FOM, Q_ROM_opinf)[1]\n", - "\n", - " # Average the relative errors.\n", - " projc_error /= len(params_test)\n", - " intru_error /= len(params_test)\n", - " opinf_error /= len(params_test)\n", - "\n", - " return projc_error, intru_error, opinf_error" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plot_state_error(\n", - " 14,\n", - " run_trial_parametric,\n", - " \"Average relative\\nFrobenius-norm error\",\n", - ")" - ] - } - ], - "metadata": { - "celltoolbar": "Tags", - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.3" - }, - "toc-showmarkdowntxt": false, - "toc-showtags": true - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/docs/source/tutorials/inputs.ipynb b/docs/source/tutorials/inputs.ipynb new file mode 100644 index 00000000..8802b708 --- /dev/null +++ b/docs/source/tutorials/inputs.ipynb @@ -0,0 +1,997 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "toc-hr-collapsed": false + }, + "source": [ + "# External Inputs" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "toc-hr-collapsed": false + }, + "source": [ + "The fundamental goal of model reduction is to efficiently make physics-based predictions. 
Given synthetic or experimental data that was generated or collected under a certain set of conditions, we aim to construct a cost-effective model that produces accurate solutions under new sets of conditions. The first tutorial showed an example of evaluating a reduced-order model (ROM) for various initial conditions. This tutorial focuses on problems with external time-dependent inputs." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "toc-nb-collapsed": true + }, + "source": [ + "## Problem Statement" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We consider a problem with external inputs that are parameterized by a scalar-valued function $u:\\RR\\to\\RR.$" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{admonition} Governing Equations\n", + ":class: info\n", + "\n", + "Let $\\Omega = [0,L]\\subset \\mathbb{R}$ be the spatial domain indicated by the variable $x$, and let $[0,T]\\subset\\mathbb{R}$ be the time domain with variable $t$. We consider the one-dimensional heat equation with time-dependent Dirichlet boundary conditions,\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " &\\frac{\\partial}{\\partial t} q(x,t) = \\frac{\\partial^2}{\\partial x^2}q(x,t)\n", + " & x &\\in\\Omega,\\quad t\\in[0,T],\n", + " \\\\\n", + " &q(0,t) = q(L,t) = u(t)\n", + " & t &\\in[0,T],\n", + " \\\\\n", + " &q(x,0) = \\big(e^{\\alpha(x - 1)} + e^{-\\alpha x} - e^{-\\alpha}\\big)u(0)\n", + " & x &\\in \\Omega,\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "where $\\alpha>0$ is constant and $q(x,t)$ is the unknown state variable. This is a model for a one-dimensional rod conducting heat with a fixed initial heat profile. 
The temperature at the ends of the rod is governed by the input function $u(t)$, but heat is allowed to diffuse through the rod and flow out at the ends of the domain.\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{admonition} Objective\n", + ":class: info\n", + "\n", + "Construct a reduced-order model (ROM) which can be solved rapidly to produce approximate solutions $q(x, t)$ to the partial differential equation given above for various choices of the input function $u(t)$.\n", + "In addition, we will only observe data over a limited time interval $t \\in [0, T']$ with $T' < T$, then use the ROM to predict the solution for the entire time domain $[0, T]$.\n", + "Hence, the ROM will be **predictive in time** and **predictive in the inputs**.\n", + ":::" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import scipy.sparse\n", + "import matplotlib.pyplot as plt\n", + "\n", + "import opinf\n", + "\n", + "opinf.utils.mpl_config()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Single Training Trajectory" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this section a ROM is trained using data collected for a single choice of the input function $u(t).$" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Full-order Model Definition" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As in the last tutorial, we use a centered finite difference approximation for the spatial derivative to arrive at a system of $n$ ordinary differential equations.\n", + "This time, due to the nonzero boundary conditions, the system takes the form\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\ddt\\q(t) = \\A\\q(t) + \\B u(t),\n", + " \\qquad\n", + " \\q(0) = \\q_0,\n", + "\\end{aligned}\n", + "$$ (eq_inputs_fom)\n", + "\n", + "where
$\\q:\\RR\\to\\RR^n$, $\\A\\in\\RR^{n\\times n}$, and $\\B\\in\\RR^{n}$.\n", + "The system {eq}`eq_inputs_fom` is the _full-order model_ (FOM), which we will use to generate training data for the time domain $[0, T'] \\subset [0, T]$." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{dropdown} Discretization details\n", + "\n", + "We take an equidistant grid $\\{x_i\\}_{i=0}^{n+1} \\subset \\Omega$,\n", + "\n", + "\\begin{align*}\n", + " 0 &= x_0 < x_1 < \\cdots < x_n < x_{n+1} = L\n", + " &\n", + " &\\text{and}\n", + " &\n", + " \\delta x &= \\frac{L}{n+1} = x_{i+1} - x_{i},\\quad i=1,\\ldots,n-1.\n", + "\\end{align*}\n", + "\n", + "The boundary conditions prescribe $q(x_0,t) = q(x_{n+1},t) = u(t)$.\n", + "Our goal is to compute $q(x,t)$ at the interior spatial points $x_{1},x_{2},\\ldots,x_{n}$ for various $t\\in[0,T]$, so we consider the state vector $\\q(t) = [~q(x_{1}, t)~\\cdots~q(x_{n}, t)~]\\trp\\in\\RR^n$ and derive a system governing the evolution of $\\q(t)$ in time.\n", + "\n", + "Approximating the spatial derivative with a central finite difference approximation,\n", + "\n", + "$$\n", + " \\frac{\\partial^2}{\\partial x^2}q(x,t)\n", + " \\approx \\frac{q(x-\\delta x,t) - 2q(x,t) + q(x+\\delta x,t)}{(\\delta x)^2},\n", + "$$\n", + "\n", + "and using the boundary conditions $q(0,t) = q(L,t) = u(t)$, we arrive at the following matrices for the FOM.\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\A(\\mu) &= \\frac{1}{(\\delta x)^2}\\left[\\begin{array}{ccccc}\n", + " -2 & 1 & & & \\\\\n", + " 1 & -2 & 1 & & \\\\\n", + " & \\ddots & \\ddots & \\ddots & \\\\\n", + " & & 1 & -2 & 1 \\\\\n", + " & & & 1 & -2 \\\\\n", + " \\end{array}\\right] \\in\\RR^{n\\times n},\n", + " &\n", + " \\B(\\mu) &= \\frac{1}{(\\delta x)^2}\\left[\\begin{array}{c}\n", + " 1 \\\\ 0 \\\\ \\vdots \\\\ 0 \\\\ 1\n", + " \\end{array}\\right]\\in\\RR^{n}.\n", + "\\end{aligned}\n", + "$$\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, 
+ "source": [ + "### Training Data Generation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let $L = 1$, $T = 1$, and set $\\alpha = 100$.\n", + "We begin by solving the FOM described above, recording the solution every $\\delta t = 10^{-3}$ time units for a single choice of the input function $u(t)$, yielding $10^3 + 1 = 1001$ total time steps (1000 steps past the initial condition).\n", + "We will assume that we can only observe the first $k = 200$ time steps and use the ROM to predict the remaining $801$ steps.\n", + "Our training input function is\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " u_\\text{train}(t) = 1 + \\frac{1}{4}\\sin(4\\pi t).\n", + "\\end{aligned}\n", + "$$" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "def training_input(tt):\n", + " return np.ones_like(tt) + np.sin(4 * np.pi * tt) / 4" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-input" + ] + }, + "outputs": [], + "source": [ + "# Construct the spatial domain.\n", + "L = 1\n", + "n = 2**10 - 1\n", + "x_all = np.linspace(0, L, n + 2)\n", + "x = x_all[1:-1]\n", + "dx = x[1] - x[0]\n", + "\n", + "# Construct the temporal domain.\n", + "T = 1\n", + "K = 10**3 + 1\n", + "t_all = np.linspace(0, T, K)\n", + "dt = t_all[1] - t_all[0]\n", + "\n", + "# Construct the full-order state matrix A.\n", + "dx2inv = 1 / dx**2\n", + "diags = np.array([1, -2, 1]) * dx2inv\n", + "A = scipy.sparse.diags(diags, [-1, 0, 1], (n, n))\n", + "\n", + "# Construct the full-order input matrix B.\n", + "B = np.zeros_like(x)\n", + "B[0], B[-1] = dx2inv, dx2inv\n", + "\n", + "# Define the full-order model with an opinf.models class.\n", + "fom = opinf.models.ContinuousModel(\n", + " operators=[\n", + " opinf.operators.LinearOperator(A),\n", + " opinf.operators.InputOperator(B),\n", + " ]\n", + ")\n", + "\n", + "# Construct the part of the initial condition not dependent on 
u(t).\n", + "alpha = 100\n", + "q0 = np.exp(alpha * (x - 1)) + np.exp(-alpha * x) - np.exp(-alpha)\n", + "\n", + "\n", + "def full_order_solve(time_domain, u):\n", + " \"\"\"Solve the full-order model with SciPy.\n", + " Here, u is a callable function.\n", + " \"\"\"\n", + " return fom.predict(q0 * u(0), time_domain, u, method=\"BDF\")\n", + "\n", + "\n", + "# Solve the full-order model with the training input.\n", + "with opinf.utils.TimedBlock(\"Full-order solve\"):\n", + " Q_all = full_order_solve(t_all, training_input)\n", + "\n", + "# Retain only the first k snapshots/inputs for training the ROM.\n", + "k = 200\n", + "t = t_all[:k]\n", + "Q = Q_all[:, :k]\n", + "\n", + "print(f\"\\nSpatial domain:\\t\\t{x.shape=}\")\n", + "print(f\"Spatial step size:\\t{dx=:.10f}\")\n", + "print(f\"\\nFull time domain:\\t{t_all.shape=}\")\n", + "print(f\"Training time domain:\\t{t.shape=}\")\n", + "print(f\"Temporal step size:\\t{dt=:f}\")\n", + "print(f\"\\nFull-order matrix A:\\t{A.shape=}\")\n", + "print(f\"Full-order vector B:\\t{B.shape=}\")\n", + "print(f\"\\nInitial condition:\\t{q0.shape=}\")\n", + "print(f\"\\nAll FOM solutions:\\t{Q_all.shape=}\")\n", + "print(f\"Training snapshots:\\t{Q.shape=}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The following code visualizes the training data and the full FOM solution set by plotting a few snapshots over the spatial domain and the time evolution of the snapshots at a few spatial locations." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "tags": [ + "hide-input" + ] + }, + "outputs": [], + "source": [ + "def plot_data_space(Z, u, title, ax=None):\n", + " \"\"\"Plot state data over space at multiple instances in time.\"\"\"\n", + " if ax is None:\n", + " _, ax = plt.subplots(1, 1)\n", + "\n", + " # Plot a few snapshots over the spatial domain.\n", + " sample_columns = [0] + [2**d for d in range(10)]\n", + " color = iter(plt.cm.viridis_r(np.linspace(0.05, 1, len(sample_columns))))\n", + " while sample_columns[-1] > Z.shape[1] - 1:\n", + " sample_columns = sample_columns[:-1]\n", + " for j in sample_columns:\n", + " leftBC, rightBC = [u(t_all[j])], [u(t_all[j])]\n", + " q_all = np.concatenate([leftBC, Z[:, j], rightBC])\n", + " c = next(color)\n", + " ax.plot(x_all, q_all, lw=1, color=c, label=rf\"$q(x,t_{{{j}}})$\")\n", + "\n", + " ax.set_xlim(x_all[0], x_all[-1])\n", + " ax.set_xlabel(r\"$x$\")\n", + " ax.set_ylabel(r\"$q(x,t)$\")\n", + " ax.legend(loc=(1.05, 0.05))\n", + " ax.set_title(title)\n", + "\n", + "\n", + "def plot_data_time(Z, title, ax=None):\n", + " \"\"\"Plot state in time at multiple spatial locations.\"\"\"\n", + " if ax is None:\n", + " _, ax = plt.subplots(1, 1)\n", + "\n", + " # Plot a few snapshots over the spatial domain.\n", + " sample_rows = np.linspace(0, Z.shape[0] - 1, 11)\n", + " sample_rows = sample_rows[:-1] + (sample_rows[1] - sample_rows[0]) / 4\n", + " sample_rows = sample_rows.astype(int)\n", + " color = iter(plt.cm.inferno(np.linspace(0, 0.8, len(sample_rows))))\n", + " tt = t_all[: Z.shape[1]]\n", + " for i in sample_rows:\n", + " ax.plot(tt, Z[i], lw=1, color=next(color), label=rf\"$q(x_{{{i}}},t)$\")\n", + "\n", + " ax.set_xlim(t_all[0], t_all[-1])\n", + " ax.set_xlabel(r\"$t$\")\n", + " ax.set_ylabel(r\"$q(x,t)$\")\n", + " ax.legend(loc=(1.05, 0.05))\n", + " ax.set_title(title)\n", + "\n", + "\n", + "def plot_two_datasets(Z1, Z2, u, title1=\"\", title2=\"\", cutoff=None):\n", + " 
\"\"\"Plot two datasets side by side with space and time plots.\"\"\"\n", + " _, [ax1, ax2] = plt.subplots(1, 2, sharex=True, sharey=True)\n", + " plot_data_space(Z1, u, title1, ax1)\n", + " plot_data_space(Z2, u, title2, ax2)\n", + " ax1.legend([])\n", + "\n", + " fig, [ax1, ax2] = plt.subplots(2, 1, sharex=True, sharey=True)\n", + " plot_data_time(Z1, title1, ax1)\n", + " plot_data_time(Z2, title2, ax2)\n", + " ax1.legend([])\n", + " ax1.set_xlabel(\"\")\n", + " fig.subplots_adjust(hspace=0.3)\n", + " if cutoff is not None:\n", + " ax1.axvline(cutoff, color=\"gray\", linewidth=1, linestyle=\"--\")\n", + " ax1.text(cutoff - 10 * dt, 0, \"training\", ha=\"right\", color=\"gray\")\n", + " ax1.text(cutoff + 10 * dt, 0, \"prediction\", ha=\"left\", color=\"gray\")\n", + "\n", + " plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_two_datasets(\n", + " Q,\n", + " Q_all,\n", + " training_input,\n", + " \"Snapshot data for training\",\n", + " \"Full-order model solution\",\n", + " cutoff=t[-1],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### ROM Construction" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We now have snapshot data $\\Q \\in \\RR^{n \\times k}$, but to learn a model with external inputs, we need training data for the inputs as well as for the snapshots.\n", + "Define the vector\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\U = \\left[\\begin{array}{cccc}\n", + " u_\\text{train}(t_0) & u_\\text{train}(t_1) & \\cdots & u_\\text{train}(t_{k-1})\n", + " \\end{array}\\right]\n", + " \\in\\RR^{k},\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "which collects the values of the training input function at the same times as the training snapshots." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "U = training_input(t)\n", + "\n", + "print(f\"Training snapshots:\\t{Q.shape=}\")\n", + "print(f\"Training inputs:\\t{U.shape=}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will use a {class}`opinf.basis.PODBasis` to reduce the dimension of the snapshot training data, which approximates the discretized state vector as $\\q(t) \\approx \\Vr\\qhat(t)$ for some $\\Vr\\in\\RR^{n\\times r}$ with orthonormal columns and $\\qhat(t)\\in\\RR^{r}$, with $r\\ll n$.\n", + "Input training data are *not* typically compressed with dimensionality reduction or subjected to other pre-processing routines.\n", + "Because the FOM {eq}`eq_inputs_fom` has the linear time-invariant form $\\ddt\\q(t) = \\A\\q(t) + \\B u(t)$, we seek a ROM with the same structure, i.e.,\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\ddt\\qhat(t) = \\Ahat\\qhat(t) + \\Bhat u(t),\n", + " \\qquad\n", + " \\qhat(0) = \\Vr\\trp\\q_0.\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "Data for the time derivative $\\ddt\\qhat(t)$ are estimated in this example with sixth-order finite differences using {class}`opinf.ddt.UniformFiniteDifferencer`.\n", + "The underlying least-squares problem to determine $\\Ahat$ and $\\Bhat$ is given by\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\min_{\\Ahat,\\Bhat}\n", + " \\sum_{j=0}^{k-1}\\left\\|\n", + " \\Ahat\\qhat_{j} + \\Bhat u_j - \\dot{\\qhat}_j\n", + " \\right\\|_{2}^{2},\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "where $\\qhat_j = \\qhat(t_j)\\in\\RR^{r}$ and $u_j = u(t_j)\\in\\RR$ are the state snapshots and input data, respectively, and $\\dot{\\qhat}_j \\approx \\ddt\\qhat(t)|_{t=t_j}\\in\\RR^{r}$ are the estimated time derivatives."
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{dropdown} Why Use the Same Structure?\n", + "\n", + "An OpInf ROM should have the same structure as an intrusive Galerkin ROM.\n", + "The Galerkin ROM for {eq}`eq_inputs_fom` is derived by substituting in the approximation $\\q(t)\\approx\\Vr\\qhat(t)$, yielding\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\ddt\\Vr\\qhat(t) = \\A\\Vr\\qhat(t) + \\B u(t),\n", + " \\qquad\n", + " \\Vr\\qhat(0) = \\q_0.\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "Next, left multiply by $\\Vr\\trp$ and use the fact that $\\Vr\\trp\\Vr = \\I$ to get the following:\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\ddt\\qhat(t) = \\tilde{\\A}\\qhat(t) + \\tilde{\\B}u(t),\n", + " \\qquad\n", + " \\qhat(0) = \\Vr\\trp\\q_0,\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "where $\\tilde{\\A} = \\Vr\\trp\\A\\Vr \\in \\RR^{r\\times r}$ and $\\tilde{\\B} = \\Vr\\trp\\B\\in\\RR^{r}$.\n", + "Note that this ROM has the same input function $u(t)$ as the FOM.\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Training input data are passed to {meth}`opinf.rom.ROM.fit()` as the `inputs` argument." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rom = opinf.ROM(\n", + " basis=opinf.basis.PODBasis(residual_energy=1e-6),\n", + " ddt_estimator=opinf.ddt.UniformFiniteDifferencer(t, \"ord6\"),\n", + " model=opinf.models.ContinuousModel(\"AB\"),\n", + ")\n", + "\n", + "with opinf.utils.TimedBlock(\"Fitting OpInf ROM\"):\n", + " rom.fit(Q, inputs=U)\n", + "\n", + "with opinf.utils.TimedBlock(\"Reduced-order solve\"):\n", + " Q_ROM = rom.predict(q0, t_all, input_func=training_input, method=\"BDF\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_two_datasets(\n", + " Q_ROM,\n", + " Q_all,\n", + " training_input,\n", + " \"Reduced-order model solution\",\n", + " \"Full-order model solution\",\n", + " cutoff=t[-1],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For a closer look at the difference between the FOM and ROM solutions, we compute the relative $\\ell_2$-norm error of the ROM solution as a function of time using {func}`opinf.post.lp_error()` and the relative Frobenius-norm error using {func}`opinf.post.frobenius_error()`."
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "tags": [ + "hide-input" + ] + }, + "outputs": [], + "source": [ + "def plot_errors_over_time(\n", + " Ztrue, basis, Z1, label1, Z2=None, label2=None, cutoff=None\n", + "):\n", + " \"\"\"Plot normalized absolute projection error and ROM error(s)\n", + " as a function of time.\n", + " \"\"\"\n", + " _, ax = plt.subplots(1, 1)\n", + "\n", + " projection_err = opinf.post.lp_error(Ztrue, basis.project(Ztrue))[1]\n", + " ax.semilogy(t_all, projection_err, \"C3-\", lw=1, label=\"Projection Error\")\n", + "\n", + " relative_error = opinf.post.lp_error(Ztrue, Z1)[1]\n", + " ax.semilogy(t_all, relative_error, \"C0--\", lw=1, label=label1)\n", + "\n", + " if Z2 is not None:\n", + " relative_error = opinf.post.lp_error(Ztrue, Z2)[1]\n", + " ax.semilogy(t_all, relative_error, \"C5-.\", lw=1, label=label2)\n", + "\n", + " if cutoff is not None:\n", + " ax.axvline(cutoff, color=\"gray\", linewidth=1, linestyle=\"--\")\n", + " ymin = projection_err.min() / 4\n", + " ax.text(cutoff - 10 * dt, ymin, \"training\", ha=\"right\", color=\"gray\")\n", + " ax.text(cutoff + 10 * dt, ymin, \"prediction\", ha=\"left\", color=\"gray\")\n", + " ax.set_ylim(bottom=ymin / 2)\n", + "\n", + " ax.set_xlim(t_all[0], t_all[-1])\n", + " ax.set_xlabel(r\"$t$\")\n", + " ax.set_ylabel(\"Relative error\")\n", + " ax.legend(loc=\"lower right\")\n", + " plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_errors_over_time(Q_all, rom.basis, Q_ROM, \"OpInf ROM error\", cutoff=t[-1])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "error_opinf = opinf.post.frobenius_error(Q_all, Q_ROM)[1]\n", + "print(f\"OpInf ROM error:\\t{error_opinf:.4e}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Comparison to the Intrusive Galerkin ROM" + ] + }, + { + 
"cell_type": "markdown", + "metadata": {}, + "source": [ + "The classical intrusive Galerkin ROM for this problem is given by\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\ddt\\qhat(t) = \\tilde{\\A}\\qhat(t) + \\tilde{\\B}u(t),\n", + " \\qquad\n", + " \\qhat(0) = \\Vr\\trp\\q_0,\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "where $\\tilde{\\A} = \\Vr\\trp\\A\\Vr \\in \\RR^{r\\times r}$ and $\\tilde{\\B} = \\Vr\\trp\\B\\in\\RR^{r}$.\n", + "Here, we form this ROM explicitly (using the same basis matrix $\\Vr$ as before) and compare it to our existing OpInf ROM." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "rom_intrusive = opinf.ROM(\n", + " basis=rom.basis,\n", + " model=fom.galerkin(rom.basis.entries), # Explicitly project FOM operators.\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with opinf.utils.TimedBlock(\"Reduced-order model solve (intrusive)\"):\n", + " Q_ROM_intrusive = rom_intrusive.predict(\n", + " q0, t_all, input_func=training_input, method=\"BDF\"\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_errors_over_time(\n", + " Q_all,\n", + " rom.basis,\n", + " Q_ROM,\n", + " \"OpInf ROM error\",\n", + " Q_ROM_intrusive,\n", + " \"Intrusive ROM error\",\n", + " cutoff=t[-1],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "error_intrusive = opinf.post.frobenius_error(Q_all, Q_ROM_intrusive)[1]\n", + "error_projection = rom.basis.projection_error(Q_all, relative=True)\n", + "\n", + "print(\n", + " \"Relative Frobenius-norm errors\",\n", + " \"-\" * 33,\n", + " f\"Projection error:\\t{error_projection:%}\",\n", + " f\"OpInf ROM error:\\t{error_opinf:%}\",\n", + " f\"Intrusive ROM error:\\t{error_intrusive:%}\",\n", + " sep=\"\\n\",\n", + ")" + ] + }, + { 
+ "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this experiment, the OpInf ROM and the corresponding intrusive ROM have comparable error, even though the OpInf ROM is calibrated without intrusive access to the FOM." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Generalization to New Inputs" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The previous experiment uses a single choice of $u(t)$ for the training and for the prediction in time.\n", + "Now, we define a new choice of input function $u(t)$,\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " u_\\text{test}(t)\n", + " = 1 + t(1 - t),\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "and evaluate the FOM and ROM for this new input." + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "def test_input(t):\n", + " return 1 + t * (1 - t)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with opinf.utils.TimedBlock(\"Full-order solve\"):\n", + " Qtest_FOM = full_order_solve(t_all, test_input)\n", + "\n", + "with opinf.utils.TimedBlock(\"Reduced-order solve (OpInf)\"):\n", + " Qtest_ROM = rom.predict(q0, t_all, test_input, method=\"BDF\")\n", + "\n", + "with opinf.utils.TimedBlock(\"Reduced-order solve (intrusive)\"):\n", + " Qtest_ROM_intrusive = rom_intrusive.predict(\n", + " q0, t_all, test_input, method=\"BDF\"\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_two_datasets(\n", + " Qtest_ROM,\n", + " Qtest_FOM,\n", + " test_input,\n", + " \"OpInf Reduced-order model solution\",\n", + " \"Full-order model solution\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_errors_over_time(\n", + " Qtest_FOM,\n", + " rom.basis,\n", + " Qtest_ROM,\n", + " \"OpInf ROM 
error\",\n", + " Qtest_ROM_intrusive,\n", + " \"Intrusive ROM error\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Both ROMs perform well with a new input function, but the intrusive ROM performs slightly better than the OpInf ROM.\n", + "This is typical; intrusive ROMs are often more robust and generalizable than standard OpInf ROMs, but OpInf ROMs tend to reproduce training data better than intrusive ROMs." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Multiple Training Trajectories" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If data corresponding to several choices of the input function $u(t)$ are available for training, we collect a list of snapshot matrices and a list of corresponding inputs to pass to `fit()`." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Training Data Generation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Below, we solve the PDE using the three input functions for training data:\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " &u_\\text{train}^{(1)}(t) = e^{-t},\n", + " &&&\n", + " &u_\\text{train}^{(2)}(t) = 1 + \\frac{1}{2}t^2,\n", + " &&&\n", + " &u_\\text{train}^{(3)}(t) = 1 - \\frac{1}{2}\\sin(\\pi t).\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "The following input functions are used for testing.\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " &u_\\text{test}^{(1)}(t) = 1 - \\frac{1}{2}\\sin(3\\pi t),\n", + " &&&\n", + " &u_\\text{test}^{(2)}(t) = 1 + 25 (t (t - 1))^3,\n", + " &&&\n", + " &u_\\text{test}^{(3)}(t) = 1 + e^{-2t}\\sin(\\pi t).\n", + "\\end{aligned}\n", + "$$" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [], + "source": [ + "training_inputs = [\n", + " lambda t: np.exp(-t),\n", + " lambda t: 1 + t**2 / 2,\n", + " lambda t: 1 - np.sin(np.pi * t) / 2,\n", + "]\n", + "\n", + "testing_inputs = [\n", + " 
lambda t: 1 - np.sin(3 * np.pi * t) / 3,\n", + " lambda t: 1 + 25 * (t * (t - 1)) ** 3,\n", + " lambda t: 1 + np.exp(-2 * t) * np.sin(np.pi * t),\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-input" + ] + }, + "outputs": [], + "source": [ + "# Visualize the input functions.\n", + "fig, [ax1, ax2] = plt.subplots(1, 2, sharex=True)\n", + "c = 0\n", + "for input_func in training_inputs:\n", + " ax1.plot(t_all, input_func(t_all), color=f\"C{c}\", lw=1)\n", + " c += 1\n", + "for input_func in testing_inputs:\n", + " ax2.plot(t_all, input_func(t_all), color=f\"C{c}\", lw=1)\n", + " c += 1\n", + "\n", + "ax1.set_title(\"Training inputs\")\n", + "ax2.set_title(\"Testing inputs\")\n", + "# ax1.axvline(t[-1], color=\"k\", lw=1)\n", + "ax1.axvline(t[-1], color=\"gray\", linewidth=1, linestyle=\"--\")\n", + "ax1.text(t[-1] - 10 * dt, 1.4, \"training\", ha=\"right\", color=\"gray\")\n", + "ax1.text(t[-1] + 10 * dt, 1.4, \"prediction\", ha=\"left\", color=\"gray\")\n", + "for ax in (ax1, ax2):\n", + " ax.set_xlim(t_all[0], t_all[-1])\n", + " ax.set_xlabel(r\"$t$\")\n", + " ax.set_ylabel(r\"$u(t)$\")\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [], + "source": [ + "# Solve the full-order model for each training input and collect results.\n", + "Qs = [] # State snapshots.\n", + "Us = [] # Corresponding inputs.\n", + "\n", + "for u in training_inputs:\n", + " Qs.append(full_order_solve(t, u))\n", + " Us.append(u(t))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rom = opinf.ROM(\n", + " basis=opinf.basis.PODBasis(residual_energy=1e-6),\n", + " ddt_estimator=opinf.ddt.UniformFiniteDifferencer(t, \"ord6\"),\n", + " model=opinf.models.ContinuousModel(\"AB\"),\n", + ")\n", + "\n", + "with opinf.utils.TimedBlock(\"Fitting OpInf ROM\"):\n", + " rom.fit(Qs, inputs=Us)\n", + "\n", + 
"rom_intrusive = opinf.ROM(\n", + " basis=rom.basis,\n", + " model=fom.galerkin(rom.basis.entries),\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for i, u in enumerate(testing_inputs):\n", + " print(f\"Test input function {i+1:d}\")\n", + "\n", + " with opinf.utils.TimedBlock(\"Full-order solve\"):\n", + " Q_FOM = full_order_solve(t_all, u)\n", + "\n", + " with opinf.utils.TimedBlock(\"Reduced-order solve (OpInf)\"):\n", + " Q_ROM = rom.predict(q0, t_all, u, method=\"BDF\")\n", + "\n", + " with opinf.utils.TimedBlock(\"Reduced-order solve (intrusive)\"):\n", + " Q_ROM_intrusive = rom_intrusive.predict(q0, t_all, u, method=\"BDF\")\n", + "\n", + " plot_two_datasets(\n", + " Q_ROM,\n", + " Q_FOM,\n", + " u,\n", + " \"Reduced-order model solution (OpInf)\",\n", + " \"Full-order model solution\",\n", + " )\n", + "\n", + " plot_errors_over_time(\n", + " Q_FOM,\n", + " rom.basis,\n", + " Q_ROM,\n", + " \"OpInf ROM error\",\n", + " Q_ROM_intrusive,\n", + " \"Intrusive ROM error\",\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{admonition} Multi-dimensional Inputs\n", + ":class: tip\n", + "\n", + "The examples in this tutorial use a scalar-valued input function $u:\\RR\\to\\RR$.\n", + "For models with vector inputs $\\u:\\RR\\to\\RR^m$ with $m > 1$, training inputs are collected into a matrix with $m$ rows:\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\U = \\left[\\begin{array}{cccc}\n", + " \\u(t_0) & \\u(t_1) & \\cdots & \\u(t_{k-1})\n", + " \\end{array}\\right]\n", + " \\in \\RR^{m \\times k}.\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "This is the matrix used for the `inputs` argument of `fit()`.\n", + ":::" + ] + } + ], + "metadata": { + "celltoolbar": "Tags", + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + 
"version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + }, + "toc-showmarkdowntxt": false, + "toc-showtags": true + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/source/tutorials/parametric.ipynb b/docs/source/tutorials/parametric.ipynb new file mode 100644 index 00000000..00bca2b9 --- /dev/null +++ b/docs/source/tutorials/parametric.ipynb @@ -0,0 +1,559 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "toc-hr-collapsed": false + }, + "source": [ + "# Parametric Problems" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Many systems depend on independent parameters that describe material properties or other physical characteristics of the phenomenon being modeled.\n", + "In such cases, the operators of a reduced-order model (ROM) should be designed to vary with the system parameters. This tutorial demonstrates how to construct and evaluate a parametric ROM through an elementary example." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Problem Statement" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We consider a problem with a single scalar system parameter $\\mu > 0$." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{admonition} Governing Equations\n", + ":class: info\n", + "\n", + "Let $\\Omega = [0,L]\\subset \\RR$ be the spatial domain indicated by the variable $x$, and let $[0,T]\\subset\\RR$ be the time domain with variable $t$. 
We consider the one-dimensional heat equation with constant non-homogeneous Dirichlet boundary conditions,\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " &\\frac{\\partial}{\\partial t} q(x,t;\\mu) = \\mu\\frac{\\partial^2}{\\partial x^2}q(x,t;\\mu)\n", + " & x &\\in\\Omega,\\quad t\\in[0,T],\n", + " \\\\\n", + " &q(0,t;\\mu) = \\frac{1}{2}, \\quad q(L,t;\\mu) = 1\n", + " & t &\\in[0,T],\n", + " \\\\\n", + " &q(x,0;\\mu) = \\big(e^{\\alpha(x - 1)} + e^{-\\alpha x} - e^{-\\alpha}\\big)\n", + " & x &\\in \\Omega,\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "where the constant $\\mu > 0$ is a thermal diffusivity parameter, $\\alpha>0$ is constant, and $q(x,t;\\mu)$ is the unknown state variable. This is a model for a one-dimensional rod conducting heat with a fixed initial heat profile. The temperature at the ends of the rod are fixed, but heat is allowed to diffuse through the rod and flow out at the ends of the domain.\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{admonition} Objective\n", + ":class: info\n", + "\n", + "Construct a reduced-order model (ROM) which can be solved rapidly to produce approximate solutions $q(x, t; \\mu)$ to the partial differential equation given above for various choices of the diffusivity parameter $\\mu > 0$.\n", + "We will observe data for a few values of $\\mu$, then use the ROM to predict the solution for the entire time domain $[0, T]$ and for new values of $\\mu$. 
\n", + "Hence, the ROM will be **predictive in the parameter** $\\mu$.\n", + "\n", + "\n", + "\n", + ":::" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import scipy.sparse\n", + "import matplotlib.pyplot as plt\n", + "\n", + "import opinf\n", + "\n", + "opinf.utils.mpl_config()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Full-order Model Definition" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We consider the parameter domain $\\mathcal{P} = [.1,10]\\subset\\RR$.\n", + "A finite element or finite difference discretization leads to a system of differential equations,\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\ddt\\q(t;\\mu)\n", + " = \\c(\\mu) + \\A(\\mu)\\q(t;\\mu),\n", + " \\qquad\n", + " \\q(0) = \\q_0,\n", + "\\end{aligned}\n", + "$$ (eq_parametric_fom)\n", + "\n", + "where $\\q:\\RR\\times\\mathcal{P}\\to\\RR^n,$ $\\c:\\mathcal{P}\\to\\RR^n,$ and $\\A:\\mathcal{P}\\to\\RR^{n\\times n}.$\n", + "This is the full-order model (FOM).\n", + "The constant term $\\c(\\mu)$ arises due to the nonzero boundary conditions.\n", + "In this case, the parametric dependence on $\\mu$ is linear: there are $\\c^{(0)}\\in\\RR^{n}$ and $\\A^{(0)}\\in\\RR^{n\\times n}$ such that $\\c(\\mu) = \\mu\\c^{(0)}$ and $\\A(\\mu) = \\mu\\A^{(0)}.$" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{dropdown} Discretization details\n", + "\n", + "We take an equidistant grid $\\{x_i\\}_{i=0}^{n+1} \\subset \\Omega$,\n", + "\n", + "\\begin{align*}\n", + " 0 &= x_0 < x_1 < \\cdots < x_n < x_{n+1} = L\n", + " &\n", + " &\\text{and}\n", + " &\n", + " \\delta x &= \\frac{L}{n+1} = x_{i+1} - x_{i},\\quad i=1,\\ldots,n-1.\n", + "\\end{align*}\n", + "\n", + "The boundary conditions prescribe $q(x_0,t;\\mu) = q(x_{n+1},t;\\mu) = 1$.\n", + "Our goal is to compute $q(x,t)$ at the interior spatial points 
$x_{1},x_{2},\\ldots,x_{n}$ for various $t\\in[0,T]$, so we consider the state vector $\\q(t;\\mu) = [~q(x_{1}, t;\\mu)~\\cdots~q(x_{n}, t;\\mu)~]\\trp\\in\\RR^n$ and derive a system governing the evolution of $\\q(t;\\mu)$ in time.\n", + "\n", + "Approximating the spatial derivative with a central finite difference approximation,\n", + "\n", + "$$\n", + " \\frac{\\partial^2}{\\partial x^2}q(x,t)\n", + " \\approx \\frac{q(x-\\delta x,t) - 2q(x,t) + q(x+\\delta x,t)}{(\\delta x)^2},\n", + "$$\n", + "\n", + "and using the boundary conditions $q(0,t;\\mu) = q(L,t;\\mu) = 1$, we arrive at the following matrices for the FOM.\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\c^{(0)} &= \\frac{1}{(\\delta x)^2}\\left[\\begin{array}{c}\n", + " 1 \\\\ 0 \\\\ \\vdots \\\\ 0 \\\\ 1\n", + " \\end{array}\\right]\\in\\RR^{n},\n", + " &\n", + " \\A^{(0)} &= \\frac{\\mu}{(\\delta x)^2}\\left[\\begin{array}{ccccc}\n", + " -2 & 1 & & & \\\\\n", + " 1 & -2 & 1 & & \\\\\n", + " & \\ddots & \\ddots & \\ddots & \\\\\n", + " & & 1 & -2 & 1 \\\\\n", + " & & & 1 & -2 \\\\\n", + " \\end{array}\\right] \\in\\RR^{n\\times n}.\n", + "\\end{aligned}\n", + "$$\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Training Data Generation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let $L = 1$, $T = 1$, and set $\\alpha = 100$.\n", + "For this demo, we use $n = 2^{10} - 1 = 1023$ spatial degrees of freedom and record the FOM solution every $\\delta t = 0.0025$ time units.\n", + "For each training parameter $\\mu_i$, this results in $k = 401$ state snapshots, organized in snapshot matrices\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\Q_i = \\left[\\begin{array}{cccc}\n", + " \\q(t_0;\\mu_i) & \\q(t_1;\\mu_i) & \\cdots & \\q(t_{k-1};\\mu_i)\n", + " \\end{array}\\right]\n", + " \\in\\RR^{n\\times k},\n", + " \\quad\n", + " i = 0,\\ldots, s-1.\n", + "\\end{aligned}\n", + "$$\n" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Get s logarithmically spaced parameter values in D = [.1, 10].\n", + "s = 10\n", + "training_parameters = np.logspace(-1, 1, s)\n", + "print(training_parameters)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-input" + ] + }, + "outputs": [], + "source": [ + "# Construct the spatial domain.\n", + "L = 1\n", + "n = 2**10 - 1\n", + "x_all = np.linspace(0, L, n + 2)\n", + "x = x_all[1:-1]\n", + "dx = x[1] - x[0]\n", + "\n", + "# Construct the temporal domain.\n", + "T = 1\n", + "K = 401\n", + "t_all = np.linspace(0, T, K)\n", + "dt = t_all[1] - t_all[0]\n", + "\n", + "# Construct the full-order state matrix A0.\n", + "dx2inv = 1 / dx**2\n", + "diags = np.array([1, -2, 1]) * dx2inv\n", + "A0 = scipy.sparse.diags(diags, [-1, 0, 1], (n, n))\n", + "\n", + "# Construct the full-order constant vector c0.\n", + "c0 = np.zeros_like(x)\n", + "c0[0], c0[-1] = dx2inv, dx2inv\n", + "\n", + "# Construct the initial condition.\n", + "alpha = 100\n", + "q0 = np.exp(alpha * (x - 1)) + np.exp(-alpha * x) - np.exp(-alpha)\n", + "\n", + "\n", + "def full_order_solve(mu, time_domain):\n", + " \"\"\"Solve the full-order model with SciPy.\n", + " Here, mu is the scalar diffusivity parameter.\n", + " \"\"\"\n", + " return scipy.integrate.solve_ivp(\n", + " fun=lambda t, q: mu * (c0 + A0 @ q),\n", + " y0=q0,\n", + " t_span=[time_domain[0], time_domain[-1]],\n", + " t_eval=time_domain,\n", + " method=\"BDF\",\n", + " ).y\n", + "\n", + "\n", + "Qs = []\n", + "# Solve the full-order model at the training parameter values.\n", + "with opinf.utils.TimedBlock(\"Full-order solves\"):\n", + " for mu in training_parameters:\n", + " Qs.append(full_order_solve(mu, t_all))\n", + "\n", + "\n", + "print(f\"\\nSpatial domain:\\t\\t{x.shape=}\")\n", + "print(f\"Spatial step size:\\t{dx=:.10f}\")\n", + "print(f\"\\nFull time domain:\\t{t_all.shape=}\")\n", + "# 
print(f\"Training time domain:\\t{t.shape=}\")\n", + "print(f\"Temporal step size:\\t{dt=:f}\")\n", + "print(f\"\\nFull-order matrix A0:\\t{A0.shape=}\")\n", + "print(f\"Full-order vector c0:\\t{c0.shape=}\")\n", + "print(f\"\\nInitial condition:\\t{q0.shape=}\")\n", + "print(f\"Training snapshots:\\t{Qs[0].shape=}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "tags": [ + "hide-input" + ] + }, + "outputs": [], + "source": [ + "def plot_data_space(Z, title, ax=None):\n", + " \"\"\"Plot state data over space at multiple instances in time.\"\"\"\n", + " if ax is None:\n", + " _, ax = plt.subplots(1, 1)\n", + "\n", + " # Plot a few snapshots over the spatial domain.\n", + " sample_columns = [0] + [2**d for d in range(10)]\n", + " color = iter(plt.cm.viridis_r(np.linspace(0.05, 1, len(sample_columns))))\n", + " while sample_columns[-1] > Z.shape[1] - 1:\n", + " sample_columns = sample_columns[:-1]\n", + " for j in sample_columns:\n", + " q_all = np.concatenate([[0.5], Z[:, j], [1]])\n", + " c = next(color)\n", + " ax.plot(x_all, q_all, lw=1, color=c, label=rf\"$q(x,t_{{{j}}})$\")\n", + "\n", + " ax.set_xlim(x_all[0], x_all[-1])\n", + " ax.set_xlabel(r\"$x$\")\n", + " ax.set_ylabel(r\"$q(x,t)$\")\n", + " ax.legend(loc=(1.05, 0.05))\n", + " ax.set_title(title)\n", + "\n", + "\n", + "def plot_two_datasets(Z1, title1, Z2, title2):\n", + " \"\"\"Plot two datasets side by side.\"\"\"\n", + " _, [ax1, ax2] = plt.subplots(1, 2)\n", + " plot_data_space(Z1, title1, ax1)\n", + " plot_data_space(Z2, title2, ax2)\n", + " ax1.legend([])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for i in [0, s // 2, s - 1]:\n", + " plot_data_space(Qs[i], rf\"Full-order model solution at $\\mu = \\mu_{i}$\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Reduced-order Model Construction" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + 
"Now that we have parameter and snapshot data, we instantiate a `opinf.ParametricROM` and pass the training parameter values and the corresponding state snapshots to the `fit()` method.\n", + "\n", + "We will use a {class}`opinf.basis.PODBasis` to reduce the dimension of the snapshot training data, which approximates the discretized state vector as $\\q(t;\\mu) \\approx \\Vr\\qhat(t;\\mu)$ for some $\\Vr\\in\\RR^{n\\times r}$ with orthonormal columns and $\\qhat(t;\\mu)\\in\\RR^{r}$, with $r\\ll n$.\n", + "Based on the FOM {eq}`eq_parametric_fom`, we specify a ROM with the following structure:\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\ddt\\qhat(t;\\mu)\n", + " &= \\chat(\\mu) + \\Ahat(\\mu)\\qhat(t;\\mu)\n", + " = \\mu\\chat^{(0)} + \\mu\\Ahat^{(0)}\\qhat(t;\\mu),\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "where $\\chat^{(0)}\\in\\RR^{r}$ and $\\Ahat^{(0)}\\in\\RR^{r\\times r}.$\n", + "Data for the time derivative $\\ddt\\qhat(t)$ are estimated in this example with sixth-order finite differences using {class}`opinf.ddt.UniformFiniteDifferencer`.\n", + "The underlying least-squares problem to determine $\\chat^{(0)}$ and $\\Ahat^{(0)}$ is given by\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\min_{\\chat^{(0)},\\Ahat^{(0)}}\n", + " \\sum_{i=0}^{s-1}\\sum_{j=0}^{k-1}\\left\\|\n", + " \\mu_{i}\\chat^{(0)} + \\mu_{i}\\Ahat^{(0)}\\qhat_{i,j} - \\dot{\\qhat}_{i,j}\n", + " \\right\\|_{2}^{2},\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "where $\\qhat_{i,j} = \\qhat(t_j;\\mu_i)\\in\\RR^{r}$ are the state snapshots and $\\dot{\\qhat}_{i,j} \\approx \\ddt\\qhat(t;\\mu_{i})|_{t=t_j}\\in\\RR^{r}$ are the estimated time derivatives."
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{dropdown} Preserving Parametric Structure\n", + "\n", + "An OpInf ROM should have the same structure as an intrusive Galerkin ROM.\n", + "The Galerkin ROM for {eq}`eq_parametric_fom` is derived by substituting in the approximation $\\q(t;\\mu)\\approx\\Vr\\qhat(t;\\mu)$, yielding\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\ddt\\Vr\\qhat(t;\\mu)\n", + " = \\c(\\mu) + \\A(\\mu)\\Vr\\qhat(t;\\mu),\n", + " \\qquad\n", + " \\Vr\\qhat(0) = \\q_0.\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "Next, left multiply by $\\Vr\\trp$ and use the fact that $\\Vr\\trp\\Vr = \\I$ to get the following:\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\ddt\\qhat(t;\\mu)\n", + " = \\tilde{\\c}(\\mu) + \\tilde{\\A}(\\mu)\\qhat(t;\\mu),\n", + " \\qquad\n", + " \\qhat(0) = \\Vr\\trp\\q_0,\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "where $\\tilde{\\c}(\\mu) = \\Vr\\trp\\c(\\mu)\\in\\RR^{r}$ and $\\tilde{\\A}(\\mu) = \\Vr\\trp\\A(\\mu)\\Vr \\in \\RR^{r\\times r}.$\n", + "Finally, using the formulae $\\c(\\mu) = \\mu\\c^{(0)}$ and $\\A(\\mu) = \\mu\\A^{(0)}$, we can further simplify to\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\tilde{\\c}(\\mu)\n", + " &= \\Vr\\trp\\c(\\mu)\n", + " = \\mu\\Vr\\trp\\c^{(0)}\n", + " \\\\\n", + " \\tilde{\\A}(\\mu)\n", + " &= \\Vr\\trp\\A(\\mu)\\Vr\n", + " = \\mu\\Vr\\trp\\A^{(0)}\\Vr.\n", + "\\end{aligned}\n", + "$$\n", + "\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{admonition} Interpolatory and Affine Parameterizations\n", + ":class: tip\n", + "\n", + "In this problem, the dependence on $\\mu$ in the ROM operators $\\chat(\\mu)$ and $\\Ahat(\\mu)$ is known because the structure from the FOM is preserved by linear projection (see [affine operators](sec-operators-affine)).\n", + "If the dependence on $\\mu$ is not known a priori or cannot be written in an affine form, [interpolatory 
operators](sec-operators-interpolated) sometimes provide a feasible alternative.\n", + ":::" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rom = opinf.ParametricROM(\n", + " basis=opinf.basis.PODBasis(projection_error=1e-6),\n", + " ddt_estimator=opinf.ddt.UniformFiniteDifferencer(t_all, \"ord6\"),\n", + " model=opinf.models.ParametricContinuousModel(\n", + " operators=[\n", + " opinf.operators.AffineConstantOperator(1),\n", + " opinf.operators.AffineLinearOperator(1),\n", + " ],\n", + " solver=opinf.lstsq.L2Solver(1e-6),\n", + " ),\n", + ").fit(training_parameters, Qs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Reduced-order Model Evaluation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We start by comparing the solutions of the ROM at the training parameter values to the training snapshots." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for i, mu in enumerate(training_parameters):\n", + " with opinf.utils.TimedBlock(\"Reduced-order solve\"):\n", + " Q_ROM = rom.predict(mu, q0, t_all, method=\"BDF\")\n", + " fig, [ax1, ax2] = plt.subplots(1, 2)\n", + " plot_data_space(Qs[i], \"Snapshot data\", ax1)\n", + " plot_data_space(Q_ROM, \"ROM state output\", ax2)\n", + " ax1.legend([])\n", + " plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we solve the FOM and ROM at new parameter values not included in the training set."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "test_parameters = np.sqrt(training_parameters[:-1] * training_parameters[1:])\n", + "print(test_parameters)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "errors = []\n", + "\n", + "for mu in test_parameters:\n", + " with opinf.utils.TimedBlock(\"Full-order solve\"):\n", + " Q_FOM = full_order_solve(mu, t_all)\n", + "\n", + " with opinf.utils.TimedBlock(\"Reduced-order solve\"):\n", + " Q_ROM = rom.predict(mu, q0, t_all, method=\"BDF\")\n", + "\n", + " plot_two_datasets(\n", + " Q_FOM,\n", + " \"Full-order model solution\",\n", + " Q_ROM,\n", + " \"Reduced-order model solution\",\n", + " )\n", + " plt.show()\n", + " errors.append(opinf.post.frobenius_error(Q_FOM, Q_ROM)[1])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for mu, err in zip(test_parameters, errors):\n", + " print(f\"Test parameter mu = {mu:.6f}: error = {err:.4%}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{admonition} Stay Tuned\n", + ":class: note\n", + "\n", + "More examples are forthcoming.\n", + ":::" + ] + } + ], + "metadata": { + "celltoolbar": "Tags", + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + }, + "toc-showmarkdowntxt": false, + "toc-showtags": true + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/src/opinf/__init__.py b/src/opinf/__init__.py index 7885f472..dc9e2310 100644 --- a/src/opinf/__init__.py +++ b/src/opinf/__init__.py @@ -7,7 +7,7 @@ 
https://github.com/Willcox-Research-Group/rom-operator-inference-Python3 """ -__version__ = "0.5.7" +__version__ = "0.5.8" from . import ( basis, @@ -24,3 +24,19 @@ ) from .roms import * + +__all__ = [ + "basis", + "errors", + "ddt", + "lift", + "lstsq", + "models", + "operators", + "pre", + "post", + "roms", + "utils", +] + +__all__ += roms.__all__ diff --git a/src/opinf/basis/_base.py b/src/opinf/basis/_base.py index 2c2fac76..512a7870 100644 --- a/src/opinf/basis/_base.py +++ b/src/opinf/basis/_base.py @@ -65,13 +65,13 @@ def name(self, label: str): def __str__(self): """String representation: class and dimensions.""" - out = [self.__class__.__name__] + out = [ + self.__class__.__name__, + f"full_state_dimension: {self.full_state_dimension}", + f"reduced_state_dimension: {self.reduced_state_dimension}", + ] if (name := self.name) is not None: out[0] = f"{out[0]} for variable '{name}'" - if (n := self.full_state_dimension) is not None: - out.append(f"Full state dimension n = {n:d}") - if (r := self.reduced_state_dimension) is not None: - out.append(f"Reduced state dimension r = {r:d}") return "\n ".join(out) def __repr__(self): diff --git a/src/opinf/basis/_pod.py b/src/opinf/basis/_pod.py index 1d503f8f..458e662c 100644 --- a/src/opinf/basis/_pod.py +++ b/src/opinf/basis/_pod.py @@ -333,15 +333,15 @@ def __str__(self): if (ce := self.cumulative_energy) is not None: if self.__energy_is_being_estimated: - out.append(f"Approximate cumulative energy: {ce:%}") + out.append(f"approximate cumulative energy: {ce:%}") else: - out.append(f"Cumulative energy: {ce:%}") + out.append(f"cumulative energy: {ce:%}") if (re := self.residual_energy) is not None: if self.__energy_is_being_estimated: - out.append(f"Approximate residual energy: {re:.4e}") + out.append(f"approximate residual energy: {re:.4e}") else: - out.append(f"Residual energy: {re:.4e}") + out.append(f"residual energy: {re:.4e}") if (mv := self.max_vectors) is not None: out.append(f"{mv:d} basis vectors 
available") diff --git a/src/opinf/ddt/_finite_difference.py b/src/opinf/ddt/_finite_difference.py index d6e477b5..c7b9a9bf 100644 --- a/src/opinf/ddt/_finite_difference.py +++ b/src/opinf/ddt/_finite_difference.py @@ -863,11 +863,13 @@ def scheme(self): return self.__scheme def __str__(self): - """String representation: class name, time domain.""" - head = DerivativeEstimatorTemplate.__str__(self) - tail = [f"time step: {self.dt:.2e}"] - tail.append(f"finite difference scheme: {self.scheme.__name__}()") - return f"{head}\n " + "\n ".join(tail) + return "\n ".join( + [ + DerivativeEstimatorTemplate.__str__(self), + f"dt: {self.dt:.4e}", + f"scheme: {self.scheme.__name__}()", + ] + ) # Main routine ------------------------------------------------------------ def estimate(self, states, inputs=None): @@ -927,9 +929,12 @@ def __init__(self, time_domain): def __str__(self): """String representation: class name, time domain.""" - head = DerivativeEstimatorTemplate.__str__(self) - tail = "finite difference engine: np.gradient(edge_order=2)" - return f"{head}\n {tail}" + return "\n ".join( + [ + DerivativeEstimatorTemplate.__str__(self), + "scheme: np.gradient(edge_order=2)", + ] + ) # Main routine ------------------------------------------------------------ def estimate(self, states, inputs=None): diff --git a/src/opinf/ddt/_interpolation.py b/src/opinf/ddt/_interpolation.py index 75099e1c..85198e3d 100644 --- a/src/opinf/ddt/_interpolation.py +++ b/src/opinf/ddt/_interpolation.py @@ -2,7 +2,7 @@ """Time derivative estimators based on interpolation.""" __all__ = [ - "InterpolationDerivativeEstimator", + "InterpDerivativeEstimator", ] @@ -15,7 +15,7 @@ from ._base import DerivativeEstimatorTemplate -class InterpolationDerivativeEstimator(DerivativeEstimatorTemplate): +class InterpDerivativeEstimator(DerivativeEstimatorTemplate): r"""Time derivative estimator based on interpolation. 
For a set of (compressed) snapshots @@ -41,6 +41,9 @@ class InterpolationDerivativeEstimator(DerivativeEstimatorTemplate): This is a local interpolation method and is more resitant to outliers than :class:`scipy.interpolate.CubicSpline`. However, it is not recommended if the time points are not uniformly spaced. + * ``"pchip"``: use :class:`scipy.interpolate.PchipInterpolator`. + The interpolator preserves monotonicity in the interpolation data + and does not overshoot if the data is not smooth. new_time_domain : (k',) ndarray or None If given, evaluate the interpolator at these points to generate new state snapshots and corresponding time derivatives. If input snapshots @@ -54,8 +57,9 @@ class InterpolationDerivativeEstimator(DerivativeEstimatorTemplate): _interpolators = types.MappingProxyType( { - "cubic": interp.CubicSpline, "akima": interp.Akima1DInterpolator, + "cubic": interp.CubicSpline, + "pchip": interp.PchipInterpolator, } ) diff --git a/src/opinf/lstsq/_base.py b/src/opinf/lstsq/_base.py index 94aab551..2b4ac908 100644 --- a/src/opinf/lstsq/_base.py +++ b/src/opinf/lstsq/_base.py @@ -133,10 +133,10 @@ def __str__(self) -> str: """String representation: class name + dimensions.""" out = [self.__class__.__name__] if (self.data_matrix is not None) and (self.lhs_matrix is not None): - out.append(f" Data matrix: {self.data_matrix.shape}") - out.append(f" Condition number: {self.cond():.4e}") - out.append(f" LHS matrix: {self.lhs_matrix.shape}") - out.append(f" Operator matrix: {self.r, self.d}") + out.append(f" data_matrix: {self.data_matrix.shape}") + out.append(f" condition number: {self.cond():.4e}") + out.append(f" lhs_matrix: {self.lhs_matrix.shape}") + out.append(f" solve().shape: {self.r, self.d}") else: out[0] += " (not trained)" return "\n".join(out) diff --git a/src/opinf/lstsq/_tikhonov.py b/src/opinf/lstsq/_tikhonov.py index d0fc7872..415e941a 100644 --- a/src/opinf/lstsq/_tikhonov.py +++ b/src/opinf/lstsq/_tikhonov.py @@ -236,9 +236,18 @@ def 
options(self): def __str__(self): """String representation: dimensions + solver options.""" - start = SolverTemplate.__str__(self) kwargs = self._print_kwargs(self.options) - return start + f"\n SVD solver: scipy.linalg.svd({kwargs})" + if np.isscalar(self.regularizer): + regstr = f"{self.regularizer:.4e}" + else: + regstr = f"{self.regularizer.shape}" + return "\n ".join( + [ + SolverTemplate.__str__(self), + f"regularizer: {regstr}", + f"SVD solver: scipy.linalg.svd({kwargs})", + ] + ) # Main methods ------------------------------------------------------------ def fit(self, data_matrix: np.ndarray, lhs_matrix: np.ndarray): @@ -566,11 +575,22 @@ def options(self): def __str__(self): """String representation: dimensions + solver options.""" - s = SolverTemplate.__str__(self) + kwargs = self._print_kwargs(self.options) + if self.regularizer[0].ndim == 1: + regstr = f" {self.regularizer.shape}" + else: + regstr = ( + f" {len(self.regularizer)} " + f"{self.regularizer[0].shape} ndarrays" + ) if self.method == "lstsq": kwargs = self._print_kwargs(self.options) - return s + f"\n solver ('lstsq'): scipy.linalg.lstsq({kwargs})" - return s + "\n solver ('normal'): scipy.linalg.solve(assume_a='pos')" + spstr = f"solver ('lstsq'): scipy.linalg.lstsq({kwargs})" + else: + spstr = "solver ('normal'): scipy.linalg.solve(assume_a='pos')" + return "\n ".join( + [SolverTemplate.__str__(self), f"regularizer: {regstr}", spstr] + ) def _check_regularizer_shape(self): if (shape1 := self.regularizer.shape) != (shape2 := (self.d, self.d)): diff --git a/src/opinf/lstsq/_tsvd.py b/src/opinf/lstsq/_tsvd.py index a3e912c9..24328003 100644 --- a/src/opinf/lstsq/_tsvd.py +++ b/src/opinf/lstsq/_tsvd.py @@ -33,7 +33,7 @@ class TruncatedSVDSolver(SolverTemplate): \operatorname{rank}(\D') = d'. If :math:`\D = \bfPhi\bfSigma\bfPsi\trp` is the singular value - decomposition of :math:\D`, then defining + decomposition of :math:`\D`, then defining .. 
math:: \bfPhi' = \bfPhi_{:d', :} diff --git a/src/opinf/models/_utils.py b/src/opinf/models/_utils.py new file mode 100644 index 00000000..77828deb --- /dev/null +++ b/src/opinf/models/_utils.py @@ -0,0 +1,46 @@ +# models/_utils.py +"""Private utility functions for working with Model classes.""" + +__all__ = [ + "is_continuous", + "is_discrete", + "is_parametric", + "is_nonparametric", +] + +from .mono._nonparametric import ( + ContinuousModel, + DiscreteModel, + _NonparametricModel, +) +from .mono._parametric import ( + _ParametricContinuousMixin, + _ParametricDiscreteMixin, + _ParametricModel, +) + + +def is_continuous(model): + """``True`` if the model is time continuous (semi-discrete).""" + return isinstance( + model, + (ContinuousModel, _ParametricContinuousMixin), + ) + + +def is_discrete(model): + """``True`` if the model is time discrete (fully discrete).""" + return isinstance( + model, + (DiscreteModel, _ParametricDiscreteMixin), + ) + + +def is_nonparametric(model): + """``True`` if the model is nonparametric.""" + return isinstance(model, _NonparametricModel) + + +def is_parametric(model): + """``True`` if the model is parametric.""" + return isinstance(model, _ParametricModel) diff --git a/src/opinf/models/mono/_base.py b/src/opinf/models/mono/_base.py index 4df920d6..da45d234 100644 --- a/src/opinf/models/mono/_base.py +++ b/src/opinf/models/mono/_base.py @@ -8,7 +8,15 @@ import numpy as np from ... import errors, lstsq -from ... 
import operators as _operators +from ...operators import ( + ConstantOperator, + LinearOperator, + QuadraticOperator, + CubicOperator, + InputOperator, + StateInputOperator, + _utils as oputils, +) class _Model(abc.ABC): @@ -94,11 +102,11 @@ def operators(self, ops): raise TypeError( f"invalid operator of type '{op.__class__.__name__}'" ) - if _operators.is_uncalibrated(op): + if oputils.is_uncalibrated(op): toinfer.append(i) else: known.append(i) - if _operators.has_inputs(op): + if oputils.has_inputs(op): self._has_inputs = True self._check_operator_types_unique([ops[i] for i in toinfer]) @@ -127,32 +135,32 @@ def __iter__(self): @property def c_(self): """:class:`opinf.operators.ConstantOperator` (or ``None``).""" - return self._get_operator_of_type(_operators.ConstantOperator) + return self._get_operator_of_type(ConstantOperator) @property def A_(self): """:class:`opinf.operators.LinearOperator` (or ``None``).""" - return self._get_operator_of_type(_operators.LinearOperator) + return self._get_operator_of_type(LinearOperator) @property def H_(self): """:class:`opinf.operators.QuadraticOperator` (or ``None``).""" - return self._get_operator_of_type(_operators.QuadraticOperator) + return self._get_operator_of_type(QuadraticOperator) @property def G_(self): """:class:`opinf.operators.CubicOperator` (or ``None``).""" - return self._get_operator_of_type(_operators.CubicOperator) + return self._get_operator_of_type(CubicOperator) @property def B_(self): """:class:`opinf.operators.InputOperator` (or ``None``).""" - return self._get_operator_of_type(_operators.InputOperator) + return self._get_operator_of_type(InputOperator) @property def N_(self): """:class:`opinf.operators.StateInputOperator` (or ``None``).""" - return self._get_operator_of_type(_operators.StateInputOperator) + return self._get_operator_of_type(StateInputOperator) # Properties: dimensions -------------------------------------------------- @staticmethod @@ -161,9 +169,7 @@ def 
_check_state_dimension_consistency(ops): inferrable operators whose entries have not been set. """ rs = { - op.state_dimension - for op in ops - if not _operators.is_uncalibrated(op) + op.state_dimension for op in ops if not oputils.is_uncalibrated(op) } if len(rs) > 1: raise errors.DimensionalityError( @@ -196,13 +202,13 @@ def _check_input_dimension_consistency(ops): """Ensure all *input* operators with initialized entries have the same ``input dimension``. """ - inputops = [op for op in ops if _operators.has_inputs(op)] + inputops = [op for op in ops if oputils.has_inputs(op)] if len(inputops) == 0: return 0 ms = { op.input_dimension for op in inputops - if not _operators.is_uncalibrated(op) + if not oputils.is_uncalibrated(op) } if len(ms) > 1: raise errors.DimensionalityError( @@ -227,8 +233,8 @@ def input_dimension(self, m): if self.__operators is not None: for op in self.operators: if ( - _operators.has_inputs(op) - and not _operators.is_uncalibrated(op) + oputils.has_inputs(op) + and not oputils.is_uncalibrated(op) and op.input_dimension != m ): raise AttributeError( @@ -330,7 +336,7 @@ def galerkin(self, Vr, Wr=None): [ ( old_op.copy() - if _operators.is_uncalibrated(old_op) + if oputils.is_uncalibrated(old_op) else old_op.galerkin(Vr, Wr) ) for old_op in self.operators diff --git a/src/opinf/models/mono/_nonparametric.py b/src/opinf/models/mono/_nonparametric.py index 5fc6e8c2..40bdacce 100644 --- a/src/opinf/models/mono/_nonparametric.py +++ b/src/opinf/models/mono/_nonparametric.py @@ -2,7 +2,7 @@ """Nonparametric monolithic dynamical systems models.""" __all__ = [ - "SteadyModel", + # "SteadyModel", "DiscreteModel", "ContinuousModel", ] @@ -15,7 +15,28 @@ from ._base import _Model from ... import errors, utils -from ... 
import operators as _operators +from ...operators import ( + ConstantOperator, + LinearOperator, + QuadraticOperator, + CubicOperator, + InputOperator, + StateInputOperator, + _utils as oputils, +) + + +_operator_name2class = { + OpClass.__name__: OpClass + for OpClass in ( + ConstantOperator, + LinearOperator, + QuadraticOperator, + CubicOperator, + InputOperator, + StateInputOperator, + ) +} # Base class ================================================================== @@ -37,12 +58,12 @@ class _NonparametricModel(_Model): # Properties: operators --------------------------------------------------- _operator_abbreviations = { - "c": _operators.ConstantOperator, - "A": _operators.LinearOperator, - "H": _operators.QuadraticOperator, - "G": _operators.CubicOperator, - "B": _operators.InputOperator, - "N": _operators.StateInputOperator, + "c": ConstantOperator, + "A": LinearOperator, + "H": QuadraticOperator, + "G": CubicOperator, + "B": InputOperator, + "N": StateInputOperator, } @staticmethod @@ -50,7 +71,7 @@ def _isvalidoperator(op): """Return True if and only if ``op`` is a valid operator object for this class of model. """ - return _operators.is_nonparametric(op) + return oputils.is_nonparametric(op) @staticmethod def _check_operator_types_unique(ops): @@ -69,20 +90,23 @@ def _get_operator_of_type(self, OpClass): # String representation --------------------------------------------------- def __str__(self): """String representation: structure of the model, dimensions, etc.""" - # Build model structure. 
- out, terms = [], [] + terms = [ + op._str(self._STATE_LABEL, self._INPUT_LABEL) + for op in self.operators + ] + + out = [ + self.__class__.__name__, + f"structure: {self._LHS_LABEL} = " + " + ".join(terms), + f"state_dimension: {self.state_dimension}", + f"input_dimension: {self.input_dimension}", + "operators:", + ] for op in self.operators: - terms.append(op._str(self._STATE_LABEL, self._INPUT_LABEL)) - structure = " + ".join(terms) - out.append(f"Model structure: {self._LHS_LABEL} = {structure}") + out.append(" " + "\n ".join(str(op).split("\n"))) + out.append("solver: " + "\n ".join(str(self.solver).split("\n"))) - # Report dimensions. - if self.state_dimension: - out.append(f"State dimension r = {self.state_dimension:d}") - if self.input_dimension: - out.append(f"Input dimension m = {self.input_dimension:d}") - - return "\n".join(out) + return "\n ".join(out) def __repr__(self): """Unique ID + string representation.""" @@ -437,7 +461,7 @@ def load(cls, loadfile: str): for i in range(num_operators): gp = hf[f"operator_{i}"] OpClassName = gp["meta"].attrs["class"] - ops.append(getattr(_operators, OpClassName).load(gp)) + ops.append(_operator_name2class[OpClassName].load(gp)) # Construct the model. model = cls(ops) diff --git a/src/opinf/models/mono/_parametric.py b/src/opinf/models/mono/_parametric.py index ee3ac0b2..36207a44 100644 --- a/src/opinf/models/mono/_parametric.py +++ b/src/opinf/models/mono/_parametric.py @@ -2,8 +2,11 @@ """Parametric monolithic dynamical systems models.""" __all__ = [ - # "ParametricDiscreteModel", - # "ParametricContinuousModel", + "ParametricDiscreteModel", + "ParametricContinuousModel", + "InterpDiscreteModel", + "InterpContinuousModel", + # Deprecations: "InterpolatedDiscreteModel", "InterpolatedContinuousModel", ] @@ -18,7 +21,30 @@ _FrozenContinuousModel, ) from ... import errors, utils -from ... 
import operators as _operators +from ...operators import ( + OperatorTemplate, + ParametricOperatorTemplate, + InterpConstantOperator, + InterpLinearOperator, + InterpQuadraticOperator, + InterpCubicOperator, + InterpInputOperator, + InterpStateInputOperator, + _utils as oputils, +) + + +_operator_name2class = { + OpClass.__name__: OpClass + for OpClass in ( + InterpConstantOperator, + InterpLinearOperator, + InterpQuadraticOperator, + InterpCubicOperator, + InterpInputOperator, + InterpStateInputOperator, + ) +} # Base classes ================================================================ @@ -48,20 +74,6 @@ def _INPUT_LABEL(self): # pragma: no cover """String representation of input, e.g., "u(t)".""" return self._ModelClass._INPUT_LABEL - @property - def ModelClass(self): - """Nonparametric model class that represents this parametric model - when evaluated at a particular parameter value. - - Examples - -------- - >>> model = MyParametricModel(init_args).fit(fit_args) - >>> model_evaluated = model.evaluate(parameter_value) - >>> type(model_evaluated) is MyParametricModel.ModelClass - True - """ - return self._ModelClass - # Properties: operators --------------------------------------------------- _operator_abbreviations = dict() @@ -70,8 +82,8 @@ def _isvalidoperator(self, op): return isinstance( op, ( - _operators.OperatorTemplate, - _operators.ParametricOperatorTemplate, + OperatorTemplate, + ParametricOperatorTemplate, ), ) @@ -81,7 +93,7 @@ def _check_operator_types_unique(ops): of operation (e.g., two constant operators). """ OpClasses = { - (op.OperatorClass if _operators.is_parametric(op) else type(op)) + (op._OperatorClass if oputils.is_parametric(op) else type(op)) for op in ops } if len(OpClasses) != len(ops): @@ -92,9 +104,9 @@ def _get_operator_of_type(self, OpClass): operator class ``OpClass``. 
""" for op in self.operators: - if ( - _operators.is_parametric(op) and op.OperatorClass is OpClass - ) or (_operators.is_nonparametric(op) and isinstance(op, OpClass)): + if oputils.is_parametric(op) and op._OperatorClass is OpClass: + return op + if oputils.is_nonparametric(op) and isinstance(op, OpClass): return op @property @@ -109,7 +121,7 @@ def operators(self, ops): # Check at least one operator is parametric. parametric_operators = [ - op for op in self.operators if _operators.is_parametric(op) + op for op in self.operators if oputils.is_parametric(op) ] if len(parametric_operators) == 0: warnings.warn( @@ -119,80 +131,60 @@ def operators(self, ops): ) # Check that not every operator is interpolated. - if not isinstance(self, _InterpolatedModel): + if not isinstance(self, _InterpModel): interpolated_operators = [ - op - for op in self.operators - if _operators._interpolate.is_interpolated(op) + op for op in self.operators if oputils.is_interpolated(op) ] if len(interpolated_operators) == len(self.operators): warnings.warn( "all operators interpolatory, " - "consider using an InterpolatedModel class", + "consider using an InterpModel class", errors.OpInfWarning, ) - self.__p = self._check_parameter_dimension_consistency(self.operators) - - def _clear(self): - """Reset the entries of the non-intrusive operators and the - state, input, and parameter dimensions. 
- """ - _Model._clear(self) - self.__p = self._check_parameter_dimension_consistency(self.operators) + self._synchronize_parameter_dimensions() # Properties: dimensions -------------------------------------------------- - @staticmethod - def _check_parameter_dimension_consistency(ops): - """Ensure all operators have the same parameter dimension.""" + @property + def parameter_dimension(self): + r"""Dimension :math:`p` of a parameter vector :math:`\bfmu`.""" + return self.__p + + def _synchronize_parameter_dimensions(self, newdim=None): + """Synchronize the parameter_dimension attribute for each operator.""" + # Get any non-None parameter dimensions and check for uniqueness. ps = { op.parameter_dimension - for op in ops - if _operators.is_parametric(op) - and op.parameter_dimension is not None + for op in self.operators + if oputils.is_parametric(op) and op.parameter_dimension is not None } if len(ps) > 1: raise errors.DimensionalityError( "operators not aligned " "(parameter_dimension must be the same for all operators)" ) - return ps.pop() if len(ps) == 1 else None + p = ps.pop() if len(ps) == 1 else None - @property - def parameter_dimension(self): - """Dimension :math:`p` of the parameters.""" - return self.__p + # Check operator parameter_dimension matches new parameter_dimension. + if newdim is not None: + if p is None: + p = newdim + if p != newdim: + raise errors.DimensionalityError( + f"{p} = each operator.parameter_dimension != " + f"parameter dimension = {newdim}" + ) - @parameter_dimension.setter - def parameter_dimension(self, p): - """Set the parameter dimension. Not allowed if any - existing operators have ``parameter_dimension != p``. - """ - if self.operators is not None: + # Ensure all parametric operators have the same parameter_dimension. 
+ if p is not None: for op in self.operators: - if _operators.is_nonparametric(op): - continue - if (opp := op.parameter_dimension) is not None and opp != p: - raise AttributeError( - "can't set attribute " - f"(existing operators have p = {self.__p})" - ) - self.__p = p - - def _set_parameter_dimension_from_data(self, parameters): - """Extract and save the dimension of the parameter space from a set of - parameter values. + if ( + oputils.is_parametric(op) + and op.parameter_dimension is None + ): + op.parameter_dimension = p - Parameters - ---------- - parameters : (s, p) or (p,) ndarray - Parameter value(s). - """ - if (dim := len(shape := np.shape(parameters))) == 1: - self.parameter_dimension = 1 - elif dim == 2: - self.parameter_dimension = shape[1] - else: - raise ValueError("parameter values must be scalars or 1D arrays") + # Set the model's parameter_dimension to the same as the operators. + self.__p = p # Fitting ----------------------------------------------------------------- def _process_fit_arguments(self, parameters, states, lhs, inputs): @@ -204,8 +196,15 @@ def _process_fit_arguments(self, parameters, states, lhs, inputs): self._clear() # Process parameters. 
- parameters = np.array(parameters) - self._set_parameter_dimension_from_data(parameters) + if (dim := len(shape := np.shape(parameters))) == 1: + p = 1 + elif dim == 2: + p = shape[1] + else: + raise errors.DimensionalityError( + "'parameters' must be a sequence of scalars or 1D arrays" + ) + self._synchronize_parameter_dimensions(p) n_datasets = len(parameters) def _check_valid_dimension0(dataset, label): @@ -252,7 +251,7 @@ def _check_valid_dimension2(dataset, label): inputs = [np.atleast_2d(U) for U in inputs] if not self.input_dimension: self.input_dimension = inputs[0].shape[0] - _check_valid_dimension0(lhs, self._LHS_ARGNAME) + _check_valid_dimension0(inputs, "inputs") for i, subset in enumerate(inputs): if (dim := subset.shape[0]) != (m := self.input_dimension): raise errors.DimensionalityError( @@ -264,16 +263,29 @@ def _check_valid_dimension2(dataset, label): # Subtract known operator evaluations from the LHS. for ell in self._indices_of_known_operators: + op = self.operators[ell] + _isparametric = oputils.is_parametric(op) for i, lhsi in enumerate(lhs): - lhs[i] = lhsi - self.operators[ell].apply( - parameters[i], states[i], inputs[i] - ) + _args = [states[i], inputs[i]] + if _isparametric: + _args.insert(0, parameters[i]) + lhs[i] = lhsi - op.apply(*_args) return parameters, states, lhs, inputs def _assemble_data_matrix(self, parameters, states, inputs): """Assemble the data matrix for operator inference.""" - raise NotImplementedError("future release") + blocks = [] + for i in self._indices_of_operators_to_infer: + op = self.operators[i] + if not oputils.is_parametric(op): + block = np.hstack( + [op.datablock(Q, U) for Q, U in zip(states, inputs)] + ) + else: + block = op.datablock(parameters, states, inputs) + blocks.append(block.T) + return np.hstack(blocks) def _fit_solver(self, parameters, states, lhs, inputs=None): """Construct a solver for the operator inference least-squares @@ -285,13 +297,32 @@ def _fit_solver(self, parameters, states, lhs, 
inputs=None): inputs_, ) = self._process_fit_arguments(parameters, states, lhs, inputs) + # Set training_parameters for interpolatory operators. + for op in self.operators: + if oputils.is_interpolated(op): + op.set_training_parameters(parameters_) + # Set up non-intrusive learning. D = self._assemble_data_matrix(parameters_, states_, inputs_) self.solver.fit(D, np.hstack(lhs_)) + self.__s = len(parameters_) def _extract_operators(self, Ohat): """Unpack the operator matrix and populate operator entries.""" - raise NotImplementedError("future release") + index = 0 + for i in self._indices_of_operators_to_infer: + op = self.operators[i] + if oputils.is_parametric(op): + endex = index + op.operator_dimension( + self.__s, self.state_dimension, self.input_dimension + ) + op.set_entries(Ohat[:, index:endex], fromblock=True) + else: + endex = index + op.operator_dimension( + self.state_dimension, self.input_dimension + ) + op.set_entries(Ohat[:, index:endex]) + index = endex def refit(self): """Solve the Operator Inference regression using the data from the @@ -313,6 +344,7 @@ def refit(self): # Execute non-intrusive learning. self._extract_operators(self.solver.solve()) + return self def fit(self, parameters, states, lhs, inputs=None): r"""Learn the model operators from data. @@ -348,7 +380,7 @@ def fit(self, parameters, states, lhs, inputs=None): Parameters ---------- - parameters : list of s scalars or (p,) 1D ndarrays + parameters : list of s (floats or (p,) ndarrays) Parameter values for which training data are available. states : list of s (r, k) ndarrays Snapshot training data. Each array ``states[i]`` is the data @@ -366,7 +398,7 @@ def fit(self, parameters, states, lhs, inputs=None): Input training data. Each array ``inputs[i]`` is the data corresponding to parameter value ``parameters[i]``; each column ``inputs[i][:, j]`` corresponds to the snapshot ``states[:, j]``. - May be a two-dimensional array if `m=1` (scalar input). 
+ May be a two-dimensional array if :math:`m=1` (scalar input). Returns ------- @@ -380,8 +412,7 @@ def fit(self, parameters, states, lhs, inputs=None): return self self._fit_solver(parameters, states, lhs, inputs) - self.refit() - return self + return self.refit() # Parametric evaluation --------------------------------------------------- def evaluate(self, parameter): @@ -397,8 +428,11 @@ def evaluate(self, parameter): model : _NonparametricModel Nonparametric model of type ``ModelClass``. """ - return self.ModelClass( - [op.evaluate(parameter) for op in self.operators] + return self._ModelClass( + [ + op.evaluate(parameter) if oputils.is_parametric(op) else op + for op in self.operators + ] ) def rhs(self, parameter, *args, **kwargs): @@ -573,7 +607,7 @@ def fit(self, parameters, states, nextstates=None, inputs=None): Input training data. Each array ``inputs[i]`` is the data corresponding to parameter value ``parameters[i]``; each column ``inputs[i][:, j]`` corresponds to the snapshot ``states[:, j]``. - May be a two-dimensional array if `m=1` (scalar input). + May be a two-dimensional array if :math:`m=1` (scalar input). Returns ------- @@ -781,7 +815,8 @@ def fit(self, parameters, states, ddts, inputs=None): corresponding to parameter value ``parameters[i]``; each column ``inputs[i][:, j]`` corresponds to the snapshot ``states[i][:, j]``. - May be a two-dimensional array if `m=1` (scalar input). + May be a two-dimensional array if :math:`m=1` (scalar input). + Only required if one or more model operators depend on inputs. 
Returns ------- @@ -973,8 +1008,8 @@ class ParametricContinuousModel(_ParametricContinuousMixin, _ParametricModel): pass -# Special case: fully interpolation-based models ============================== -class _InterpolatedModel(_ParametricModel): +# Special case: completely interpolation-based models ========================= +class _InterpModel(_ParametricModel): """Base class for parametric monolithic models where all operators MUST be interpolation-based parametric operators. In this special case, the inference problems completely decouple by training parameter. @@ -1007,7 +1042,7 @@ class _InterpolatedModel(_ParametricModel): @property def _ModelFitClass(self): """Parent of ModelClass that has a callable ``fit()`` method.""" - return self.ModelClass.__bases__[-1] + return self._ModelClass.__bases__[-1] def __init__(self, operators, solver=None, InterpolatorClass=None): """Define the model structure and set the interpolator class.""" @@ -1037,15 +1072,9 @@ def _from_models(cls, parameters, models, InterpolatorClass: type = None): for one-dimensional parameters and :class:`scipy.interpolate.LinearNDInterpolator` otherwise. """ - # Check for consistency in the models. + # Check for consistency in the model operators. opclasses = [type(op) for op in models[0].operators] - ModelFitClass = cls._ModelClass.__bases__[-1] for mdl in models: - # Model class. - if not isinstance(mdl, ModelFitClass): - raise TypeError( - f"expected models of type '{ModelFitClass.__name__}'" - ) # Operator count and type. if len(mdl.operators) != len(opclasses): raise ValueError( @@ -1062,9 +1091,7 @@ def _from_models(cls, parameters, models, InterpolatorClass: type = None): # Extract the operators from the individual models. 
return cls( operators=[ - _operators._interpolate.nonparametric_to_interpolated( - OpClass - )._from_operators( + oputils.nonparametric_to_interpolated(OpClass)._from_operators( training_parameters=parameters, operators=[mdl.operators[ell] for mdl in models], InterpolatorClass=InterpolatorClass, @@ -1097,17 +1124,17 @@ def set_interpolator(self, InterpolatorClass): # Properties: operators --------------------------------------------------- _operator_abbreviations = { - "c": _operators.InterpolatedConstantOperator, - "A": _operators.InterpolatedLinearOperator, - "H": _operators.InterpolatedQuadraticOperator, - "G": _operators.InterpolatedCubicOperator, - "B": _operators.InterpolatedInputOperator, - "N": _operators.InterpolatedStateInputOperator, + "c": InterpConstantOperator, + "A": InterpLinearOperator, + "H": InterpQuadraticOperator, + "G": InterpCubicOperator, + "B": InterpInputOperator, + "N": InterpStateInputOperator, } def _isvalidoperator(self, op): """Only interpolated parametric operators are allowed.""" - return _operators._interpolate.is_interpolated(op) + return oputils.is_interpolated(op) # Fitting ----------------------------------------------------------------- def _assemble_data_matrix(self, *args, **kwargs): # pragma: no cover @@ -1139,7 +1166,7 @@ def _fit_solver(self, parameters, states, lhs, inputs=None): for i in range(n_datasets): model_i = self._ModelFitClass( operators=[ - op.OperatorClass( + op._OperatorClass( op.entries[i] if op.entries is not None else None ) for op in self.operators @@ -1164,8 +1191,8 @@ def refit(self): # Solve each independent subproblem. # TODO: parallelize? - for model_i in self._submodels: - model_i.refit() + for submodel in self._submodels: + submodel.refit() # Interpolate the resulting operators. 
for ell, op in enumerate(self.operators): @@ -1175,8 +1202,6 @@ def refit(self): [mdl.operators[ell].entries for mdl in self._submodels] ) - # self.__InterpolatorClass = type(self.operators[0].interpolator) - return self # Model persistence ------------------------------------------------------- @@ -1293,7 +1318,7 @@ def load(cls, loadfile: str, InterpolatorClass: type = None): gp = hf[f"operator_{i}"] OpClassName = gp["meta"].attrs["class"] ops.append( - getattr(_operators, OpClassName).load( + _operator_name2class[OpClassName].load( gp, InterpolatorClass ) ) @@ -1317,7 +1342,7 @@ def copy(self): ) -class InterpolatedDiscreteModel(_ParametricDiscreteMixin, _InterpolatedModel): +class InterpDiscreteModel(_ParametricDiscreteMixin, _InterpModel): r"""Parametric discrete dynamical system model :math:`\qhat(\bfmu)_{j+1} = \fhat(\qhat(\bfmu)_{j}, \u_{j}; \bfmu)` where the parametric dependence is handled by elementwise interpolation. @@ -1353,9 +1378,9 @@ class InterpolatedDiscreteModel(_ParametricDiscreteMixin, _InterpolatedModel): pass -class InterpolatedContinuousModel( +class InterpContinuousModel( _ParametricContinuousMixin, - _InterpolatedModel, + _InterpModel, ): r"""Parametric system of ordinary differential equations :math:`\ddt\qhat(t; \bfmu) = \fhat(\qhat(t; \bfmu), \u(t); \bfmu)` where @@ -1388,3 +1413,36 @@ class InterpolatedContinuousModel( """ pass + + +# Deprecations ================================================================ +class InterpolatedDiscreteModel(InterpDiscreteModel): + def __init__(self, operators, solver=None, InterpolatorClass=None): + warnings.warn( + "InterpolatedDiscreteModel has been renamed " + "and will be removed in an upcoming release, use " + "InterpDiscreteModel", + DeprecationWarning, + ) + InterpDiscreteModel.__init__( + self, + operators=operators, + solver=solver, + InterpolatorClass=InterpolatorClass, + ) + + +class InterpolatedContinuousModel(InterpContinuousModel): + def __init__(self, operators, solver=None, 
InterpolatorClass=None): + warnings.warn( + "InterpolatedContinuousModel has been renamed " + "and will be removed in an upcoming release, use " + "InterpContinuousModel", + DeprecationWarning, + ) + InterpContinuousModel.__init__( + self, + operators=operators, + solver=solver, + InterpolatorClass=InterpolatorClass, + ) diff --git a/src/opinf/operators/__init__.py b/src/opinf/operators/__init__.py index 8db86f97..17901a97 100644 --- a/src/opinf/operators/__init__.py +++ b/src/opinf/operators/__init__.py @@ -3,4 +3,5 @@ from ._base import * from ._nonparametric import * +from ._affine import * from ._interpolate import * diff --git a/src/opinf/operators/_affine.py b/src/opinf/operators/_affine.py new file mode 100644 index 00000000..eb8ee04b --- /dev/null +++ b/src/opinf/operators/_affine.py @@ -0,0 +1,787 @@ +# operators/_affine.py +"""Classes for parametric OpInf operators where the parametric dependence is +expressed as an affine expansion. +""" + +__all__ = [ + "AffineConstantOperator", + "AffineLinearOperator", + "AffineQuadraticOperator", + "AffineCubicOperator", + "AffineInputOperator", + "AffineStateInputOperator", +] + +import h5py +import warnings +import numpy as np +import scipy.sparse as sparse + +from .. 
import errors, utils
+from ._base import ParametricOpInfOperator, InputMixin
+from ._nonparametric import (
+    ConstantOperator,
+    LinearOperator,
+    QuadraticOperator,
+    CubicOperator,
+    InputOperator,
+    StateInputOperator,
+)
+
+
+# Helper functions ============================================================
+def _identity(x):
+    """Identity function."""
+    return x
+
+
+def _is_iterable(obj):
+    """Return True if obj is iterable, False otherwise."""
+    try:
+        iter(obj)
+        return True
+    except TypeError:
+        return False
+
+
+def _vectorizer(functions):
+    """Translate a tuple of functions into an ndarray-valued function."""
+    if any(not callable(func) for func in functions):
+        raise TypeError("if 'coeffs' is iterable each entry must be callable")
+
+    def _vectorized(parameter):
+        return np.array([func(parameter) for func in functions])
+
+    return _vectorized
+
+
+# Base class ==================================================================
+class _AffineOperator(ParametricOpInfOperator):
+    r"""Base class for parametric operators where the parameter dependence
+    can be written as an affine expansion with known scalar coefficients
+    which are a function of the parameter vector.
+
+    This type of operator can be written as
+
+    .. math::
+       \Ophat_{\ell}(\qhat,\u;\bfmu) = \left(\sum_{a=0}^{A_{\ell}-1}
+       \theta_{\ell}^{(a)}\!(\bfmu)\Ohat_{\ell}^{(a)}
+       \right)\d_{\ell}(\qhat, \u)
+
+    where each :math:`\theta_{\ell}^{(a)}:\RR^{p}\to\RR` is a scalar-valued
+    function of the parameter vector, each
+    :math:`\Ohat_{\ell}^{(a)}\in\RR^{r\times d}` is a constant matrix, and
+    :math:`\d:\RR^{r}\times\RR^{m}\to\RR^{d}.`
+
+    Parent class: :class:`opinf.operators.ParametricOpInfOperator`
+
+    Parameters
+    ----------
+    coeffs : callable, (iterable of callables), or int
+        Coefficient functions for the terms of the affine expansion.
+ + * If callable, it should receive a parameter vector + :math:`\bfmu` and return the vector of affine coefficients, + :math:`[~\theta_{\ell}^{(0)}(\bfmu) + ~~\cdots~~\theta_{\ell}^{(A_{\ell}-1)}(\bfmu)~]\trp`. + In this case, ``nterms`` is a required argument. + * If an iterable, each entry should be a callable representing a + single affine coefficient function :math:`\theta_{\ell}^{(a)}`. + * If an integer :math:`p`, set :math:`A_{\ell} = p` and define + :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. This is equivalent to + using ``coeffs=lambda mu: mu``, except the parameter dimension is + also captured and ``nterms`` is not required. + nterms : int or None + Number of terms :math:`A_{\ell}` in the affine expansion. + Only required if ``coeffs`` is provided as a callable. + entries : (list of ndarrays), ndarray, or None + Operator matrices for each term of the affine expansion, i.e., + :math:`\Ohat_{\ell}^{(0)},\ldots,\Ohat_{\ell}^{(A_{\ell}-1)}.` + If not provided in the constructor, use :meth:`set_entries()` later. + fromblock : bool + If ``True``, interpret ``entries`` as a horizontal concatenation + of arrays; if ``False`` (default), interpret ``entries`` as a list + of arrays. + """ + + # Initialization ---------------------------------------------------------- + def __init__( + self, + coeffs, + nterms: int = None, + entries=None, + fromblock: bool = False, + ): + """Set coefficient functions and (if given) operator matrices.""" + ParametricOpInfOperator.__init__(self) + if nterms is not None and (not isinstance(nterms, int) or nterms < 1): + raise TypeError( + "when provided, argument 'nterms' must be a positive integer" + ) + self.__nterms = nterms + + # Parse the coefficient functions. 
+ if isinstance(coeffs, int) and coeffs > 0: + if nterms is not None and nterms != coeffs: + warnings.warn( + f"{coeffs} = coeffs != nterms = {nterms}, ignoring " + f"argument 'nterms' and setting nterms = {coeffs}", + errors.OpInfWarning, + ) + self.__nterms = coeffs + self.parameter_dimension = coeffs + coeffs = _identity + if not callable(coeffs): + if not _is_iterable(coeffs): + raise TypeError( + "argument 'coeffs' must be " + "callable, iterable, or a positive int" + ) + A_ell = len(coeffs) + if nterms is not None and nterms != A_ell: + warnings.warn( + f"{A_ell} = len(coeffs) != nterms = {nterms}, ignoring " + f"argument 'nterms' and setting nterms = {A_ell}", + errors.OpInfWarning, + ) + self.__nterms = A_ell + coeffs = _vectorizer(coeffs) + if self.__nterms is None: + raise ValueError( + "argument 'nterms' required when argument 'coeffs' is callable" + ) + self.__thetas = coeffs + + if entries is not None: + self.set_entries(entries, fromblock=fromblock) + + # Properties -------------------------------------------------------------- + def coeffs(self, parameter): + r"""Evaluate the coefficient functions for each term of the affine + expansion for a given parameter vector. + + This method represents the vector-valued function + :math:`\boldsymbol{\theta}_{\ell} : \RR^{p} \to \RR^{A_{\ell}}` + given by :math:`\boldsymbol{\theta}_{\ell}(\bfmu) = [~ + \theta_{\ell}^{(0)}~~\cdots~~\theta_{\ell}^{(A_{\ell}-1)}~]\trp.` + + Parameters + ---------- + parameter : (p,) ndarray + Parameter vector to evaluate. + + Returns + ------- + coefficients : (nterms,) ndarray + Coefficients of the affine expansion at the given ``parameter``. 
+ """ + return self.__thetas(parameter) + + @property + def entries(self) -> list: + r"""Operator matrices for each term of the affine expansion, i.e., + :math:`\Ohat_{\ell}^{(0)},\ldots,\Ohat_{\ell}^{(A_{\ell}-1)}.` + """ + return ParametricOpInfOperator.entries.fget(self) + + @property + def nterms(self) -> int: + r"""Number of terms :math:`A_{\ell}` in the affine expansion.""" + return self.__nterms + + def set_entries(self, entries, fromblock: bool = False) -> None: + r"""Set the operator matrices for each term of the affine expansion. + + Parameters + ---------- + entries : list of s (r, d) ndarrays, or (r, sd) ndarray + Operator matrices, either as a list of arrays + (``fromblock=False``, default) + or as a horizontal concatenatation of arrays (``fromblock=True``). + fromblock : bool + If ``True``, interpret ``entries`` as a horizontal concatenation + of arrays; if ``False`` (default), interpret ``entries`` as a list + of arrays. + """ + # Extract / verify the entries. + nterms = self.nterms + if fromblock: + if not isinstance(entries, np.ndarray) or ( + entries.ndim not in (1, 2) + ): + raise ValueError( + "entries must be a 1- or 2-dimensional ndarray " + "when fromblock=True" + ) + entries = np.split(entries, nterms, axis=-1) + if np.ndim(entries) > 1: + self._check_shape_consistency(entries, "entries") + if (n_arrays := len(entries)) != nterms: + raise ValueError( + f"{nterms} = number of affine expansion terms " + f"!= len(entries) = {n_arrays}" + ) + + ParametricOpInfOperator.set_entries( + self, + [self._OperatorClass(A).entries for A in entries], + ) + + def __str__(self) -> str: + lines = ParametricOpInfOperator.__str__(self).split("\n") + lines.insert(-1, f" expansion terms: {self.nterms}") + return "\n".join(lines) + + # Evaluation -------------------------------------------------------------- + @utils.requires("entries") + def evaluate(self, parameter): + r"""Evaluate the operator at the given parameter value. 
+
+        Parameters
+        ----------
+        parameter : (p,) ndarray or float
+            Parameter value :math:`\bfmu` at which to evaluate the operator.
+
+        Returns
+        -------
+        op : nonparametric :mod:`opinf.operators` operator
+            Nonparametric operator corresponding to the parameter value.
+        """
+        if self.parameter_dimension is None:
+            self._set_parameter_dimension_from_values([parameter])
+        self._check_parametervalue_dimension(parameter)
+        theta_mus = self.coeffs(parameter)
+        if self.nterms == 1 and np.isscalar(theta_mus):
+            theta_mus = [theta_mus]
+        entries = sum([tm * A for tm, A in zip(theta_mus, self.entries)])
+        return self._OperatorClass(entries)
+
+    # Dimensionality reduction ------------------------------------------------
+    @utils.requires("entries")
+    def galerkin(self, Vr, Wr=None):
+        r"""Project this operator to a low-dimensional linear space.
+
+        Consider an affine operator
+
+        .. math::
+           \Op_{\ell}(\q,\u;\bfmu)
+           = \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\,
+           \Op_{\ell}^{(a)}\!(\q, \u)
+
+        where
+
+        * :math:`\q\in\RR^n` is the full-order state,
+        * :math:`\u\in\RR^m` is the input,
+        * :math:`\bfmu\in\RR^p` is the parameter vector, and
+        * each :math:`\Op_{\ell}^{(a)}\!(\q,\u)` is a nonparametric operator.
+
+        Given a *trial basis* :math:`\Vr\in\RR^{n\times r}` and a *test basis*
+        :math:`\Wr\in\RR^{n\times r}`, the corresponding *intrusive projection*
+        of :math:`\Op_{\ell}` is the affine operator
+
+        .. math::
+           \fhat_{\ell}(\qhat,\u;\bfmu)
+           = \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\,
+           (\Wr\trp\Vr)^{-1}\Wr\trp\Op_{\ell}^{(a)}\!(\V\qhat, \u)
+           = \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\,
+           \Ophat_{\ell}^{(a)}\!(\qhat, \u),
+
+        where :math:`\Ophat_{\ell}^{(a)}\!(\qhat, \u)
+        = (\Wr\trp\Vr)^{-1}\Wr\trp\Op_{\ell}^{(a)}\!(\V\qhat, \u)`
+        is the intrusive projection of :math:`\Op_{\ell}^{(a)}.`
+        Here, :math:`\qhat\in\RR^r` is the reduced-order state, which enables
+        the low-dimensional state approximation :math:`\q = \Vr\qhat.`
+        If :math:`\Wr = \Vr`, the result is called a *Galerkin projection*.
+        If :math:`\Wr \neq \Vr`, it is called a *Petrov-Galerkin projection*.
+
+        Parameters
+        ----------
+        Vr : (n, r) ndarray
+            Basis for the trial space.
+        Wr : (n, r) ndarray or None
+            Basis for the test space. If ``None``, defaults to ``Vr``.
+
+        Returns
+        -------
+        op : operator
+            New object of the same class as ``self``.
+        """
+        return self.__class__(
+            coeffs=self.coeffs,
+            nterms=self.nterms,
+            entries=[
+                self._OperatorClass(A).galerkin(Vr, Wr).entries
+                for A in self.entries
+            ],
+            fromblock=False,
+        )
+
+    # Operator inference ------------------------------------------------------
+    def operator_dimension(self, s: int, r: int, m: int) -> int:
+        r"""Number of columns in the concatenated operator matrix.
+
+        For affine operators, this is :math:`A_{\ell}\cdot d(r,m)`,
+        where :math:`A_{\ell}` is the number of terms in the affine expansion
+        and :math:`d(r,m)` is the dimension of the function
+        :math:`\d(\qhat,\u)`.
+
+        Parameters
+        ----------
+        s : int
+            Number of training parameter values.
+        r : int
+            State dimension.
+        m : int or None
+            Input dimension.
+
+        Returns
+        -------
+        d : int
+            Number of columns in the concatenated operator matrix.
+        """
+        return self.nterms * self._OperatorClass.operator_dimension(r, m)
+
+    def datablock(self, parameters, states, inputs=None) -> np.ndarray:
+        r"""Return the data matrix block corresponding to the operator.
+
+        For affine operators :math:`\Ophat_{\ell}(\qhat,\u;\bfmu)
+        = \Ohat_{\ell}(\bfmu)\d_{\ell}(\qhat,\u)` with
+        :math:`\Ohat_{\ell}(\bfmu)\in\RR^{r\times d}` and
+        :math:`\d_{\ell}(\qhat,\u)\in\RR^{d}`, this is the block matrix
+
+        .. math::
+           \D_{\ell}\trp
+           = \left[\begin{array}{ccc}
+           \theta_{\ell}^{(0)}\!(\bfmu_{0})\,
+           \d_{\ell}(\Qhat_{0},\U_{0})
+           & \cdots &
+           \theta_{\ell}^{(0)}\!(\bfmu_{s-1})\,
+           \d_{\ell}(\Qhat_{s-1},\U_{s-1})
+           \\ \vdots & & \vdots \\
+           \theta_{\ell}^{(A_{\ell}-1)}\!(\bfmu_{0})\,
+           \d_{\ell}(\Qhat_{0},\U_{0})
+           & \cdots &
+           \theta_{\ell}^{(A_{\ell}-1)}\!(\bfmu_{s-1})\,
+           \d_{\ell}(\Qhat_{s-1},\U_{s-1})
+           \end{array}\right]
+           \in \RR^{A_{\ell}d \times \sum_{i=0}^{s-1}k_i}
+
+        where :math:`\Qhat_{i} =
+        [~\qhat_{i,0}~~\cdots~~\qhat_{i,k_i-1}] \in \RR^{r \times k_i}`
+        and :math:`\U_{i} =
+        [~\u_{i,0}~~\cdots~~\u_{i,k_i-1}] \in \RR^{m\times k_i}`
+        are the state snapshots and inputs corresponding to training parameter
+        value :math:`\bfmu_i\in\RR^{p}`, :math:`i = 0, \ldots, s-1`, where
+        :math:`s` is the number of training parameter values. The notation
+        :math:`\d_{\ell}(\Qhat_{i},\U_{i})` is shorthand for the matrix
+
+        .. math::
+           \d(\Qhat_{i},\U_{i})
+           = \left[\begin{array}{ccc}
+           \d_{\ell}(\qhat_{i,0},\u_{i,0})
+           & \cdots &
+           \d_{\ell}(\qhat_{i,k_i-1},\u_{i,k_i-1})
+           \end{array}\right]
+           \in \RR^{d \times k_i}.
+
+        Parameters
+        ----------
+        parameters : (s, p) ndarray
+            Training parameter values :math:`\bfmu_{0},\ldots,\bfmu_{s-1}.`
+        states : list of s (r, k) ndarrays
+            State snapshots for each of the :math:`s` training parameter
+            values, i.e., :math:`\Qhat_{0},\ldots,\Qhat_{s-1}.`
+        inputs : list of s (m, k)-or-(k,) ndarrays or None
+            Inputs corresponding to the state snapshots, i.e.,
+            :math:`\U_{0},\ldots,\U_{s-1}.`
+            If each input matrix is 1D, it is assumed that :math:`m = 1.`
+
+        Returns
+        -------
+        block : (D, K) ndarray
+            Data block for the affine operator. Here,
+            :math:`D = A_{\ell}d(r,m)` and :math:`K = \sum_{i=0}^{s-1}k_i`
+            is the total number of snapshots.
+        """
+        if not isinstance(self, InputMixin):
+            inputs = [None] * len(parameters)
+        blockcolumns = []
+        for mu, Q, U in zip(parameters, states, inputs):
+            Di = self._OperatorClass.datablock(Q, U)
+            theta_mus = self.coeffs(mu)
+            if self.nterms == 1 and np.isscalar(theta_mus):
+                theta_mus = [theta_mus]
+            blockcolumns.append(np.vstack([theta * Di for theta in theta_mus]))
+        return np.hstack(blockcolumns)
+
+    # Model persistence -------------------------------------------------------
+    def copy(self):
+        """Return a copy of the operator. Only the operator matrices are
+        copied, not the coefficient functions.
+        """
+        As = None
+        if self.entries is not None:
+            As = [A.copy() for A in self.entries]
+        op = self.__class__(
+            coeffs=self.__thetas,
+            nterms=self.nterms,
+            entries=As,
+            fromblock=False,
+        )
+        if self.parameter_dimension is not None:
+            op.parameter_dimension = self.parameter_dimension
+        return op
+
+    def save(self, savefile: str, overwrite: bool = False) -> None:
+        """Save the operator to an HDF5 file.
+
+        Since the :attr:`coeffs` are callables, they cannot be
+        serialized, and are therefore an argument to :meth:`load()`.
+
+        Parameters
+        ----------
+        savefile : str
+            Path of the file to save the operator in.
+        overwrite : bool
+            If ``True``, overwrite the file if it already exists. If ``False``
+            (default), raise a ``FileExistsError`` if the file already exists.
+ """ + with utils.hdf5_savehandle(savefile, overwrite) as hf: + meta = hf.create_dataset("meta", shape=(0,)) + meta.attrs["class"] = self.__class__.__name__ + if (p := self.parameter_dimension) is not None: + meta.attrs["parameter_dimension"] = p + meta.attrs["nterms"] = self.nterms + if self.entries is not None: + group = hf.create_group("entries") + for i, Ai in enumerate(self.entries): + name = f"A{i:d}" + if sparse.issparse(Ai): + utils.save_sparray(group.create_group(name), Ai) + else: + group.create_dataset(name, data=Ai) + + @classmethod + def load(cls, loadfile: str, coeffs): + """Load an affine parametric operator from an HDF5 file. + + Parameters + ---------- + loadfile : str + Path to the file where the operator was stored via :meth:`save()`. + coeffs : iterable of callables + Scalar-valued coefficient functions for each term of the affine + expansion. + Returns + ------- + op : _AffineOperator + Initialized operator object. + """ + with utils.hdf5_loadhandle(loadfile) as hf: + ClassName = hf["meta"].attrs["class"] + if ClassName != cls.__name__: + raise TypeError( + f"file '{loadfile}' contains '{ClassName}' " + f"object, use '{ClassName}.load()'" + ) + nterms = int(hf["meta"].attrs["nterms"]) + + entries = None + if "entries" in hf: + entries = [] + group = hf["entries"] + for i in range(len(group)): + obj = group[f"A{i:d}"] + if isinstance(obj, h5py.Dataset): + entries.append(obj[:]) + else: + entries.append(utils.load_sparray(obj)) + + op = cls(coeffs, nterms=nterms, entries=entries, fromblock=False) + + if (key := "parameter_dimension") in hf["meta"].attrs: + op.parameter_dimension = int(hf["meta"].attrs[key]) + return op + + +# Public affine operator classes ============================================== +class AffineConstantOperator(_AffineOperator): + r"""Affine-parametric constant operator + :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) + = \chat_{\ell}(\bfmu) + = \sum_{a=0}^{A_{\ell}-1}\theta_\ell^{(a)}\!(\bfmu)\,\chat_{\ell}^{(a)}.` + + Here, each 
:math:`\theta_\ell^{(a)}:\RR^{p}\to\RR` is a scalar-valued + function of the parameter vector + and each :math:`\chat_{\ell}^{(a)} \in \RR^r` is a constant vector, + see :class:`opinf.operators.ConstantOperator`. + + Parameters + ---------- + coeffs : callable, (iterable of callables), or int + Coefficient functions for the terms of the affine expansion. + + * If callable, it should receive a parameter vector + :math:`\bfmu` and return the vector of affine coefficients, + :math:`[~\theta_{\ell}^{(0)}(\bfmu) + ~~\cdots~~\theta_{\ell}^{(A_{\ell}-1)}(\bfmu)~]\trp`. + In this case, ``nterms`` is a required argument. + * If an iterable, each entry should be a callable representing a + single affine coefficient function :math:`\theta_{\ell}^{(a)}`. + * If an integer :math:`p`, set :math:`A_{\ell} = p` and define + :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. This is equivalent to + using ``coeffs=lambda mu: mu``, except the parameter dimension is + also captured and ``nterms`` is not required. + entries : list of ndarrays, or None + Operator vectors for each term of the affine expansion, i.e., + :math:`\chat_{\ell}^{(0)},\ldots,\chat_{\ell}^{(A_{\ell}-1)}.` + If not provided in the constructor, use :meth:`set_entries` later. + fromblock : bool + If ``True``, interpret ``entries`` as a horizontal concatenation + of arrays; if ``False`` (default), interpret ``entries`` as a list + of arrays. + """ + + _OperatorClass = ConstantOperator + + +class AffineLinearOperator(_AffineOperator): + r"""Affine-parametric linear operator + :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) + = \Ahat_{\ell}(\bfmu)\qhat = \left( + \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\,\Ahat_{\ell}^{(a)} + \right)\qhat.` + + Here, each :math:`\theta_\ell^{(a)}:\RR^{p}\to\RR` is a scalar-valued + function of the parameter vector + and each :math:`\Ahat_{\ell}^{(a)} \in \RR^{r\times r}` is a constant + matrix, see :class:`opinf.operators.LinearOperator`. 
+ + Parameters + ---------- + coeffs : callable, (iterable of callables), or int + Coefficient functions for the terms of the affine expansion. + + * If callable, it should receive a parameter vector + :math:`\bfmu` and return the vector of affine coefficients, + :math:`[~\theta_{\ell}^{(0)}(\bfmu) + ~~\cdots~~\theta_{\ell}^{(A_{\ell}-1)}(\bfmu)~]\trp`. + In this case, ``nterms`` is a required argument. + * If an iterable, each entry should be a callable representing a + single affine coefficient function :math:`\theta_{\ell}^{(a)}`. + * If an integer :math:`p`, set :math:`A_{\ell} = p` and define + :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. This is equivalent to + using ``coeffs=lambda mu: mu``, except the parameter dimension is + also captured and ``nterms`` is not required. + entries : list of ndarrays, or None + Operator matrices for each term of the affine expansion, i.e., + :math:`\Ahat_{\ell}^{(0)},\ldots,\Ahat_{\ell}^{(A_{\ell}-1)}.` + If not provided in the constructor, use :meth:`set_entries` later. + fromblock : bool + If ``True``, interpret ``entries`` as a horizontal concatenation + of arrays; if ``False`` (default), interpret ``entries`` as a list + of arrays. + """ + + _OperatorClass = LinearOperator + + +class AffineQuadraticOperator(_AffineOperator): + r"""Affine-parametric quadratic operator + :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) + = \Hhat_{\ell}(\bfmu)[\qhat\otimes\qhat] = \left( + \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\,\Hhat_{\ell}^{(a)} + \right)[\qhat\otimes\qhat].` + + Here, each :math:`\theta_\ell^{(a)}:\RR^{p}\to\RR` is a scalar-valued + function of the parameter vector + and each :math:`\Hhat_{\ell}^{(a)} \in \RR^{r\times r^2}` is a constant + matrix, see :class:`opinf.operators.QuadraticOperator`. + + Parameters + ---------- + coeffs : callable, (iterable of callables), or int + Coefficient functions for the terms of the affine expansion. 
+ + * If callable, it should receive a parameter vector + :math:`\bfmu` and return the vector of affine coefficients, + :math:`[~\theta_{\ell}^{(0)}(\bfmu) + ~~\cdots~~\theta_{\ell}^{(A_{\ell}-1)}(\bfmu)~]\trp`. + In this case, ``nterms`` is a required argument. + * If an iterable, each entry should be a callable representing a + single affine coefficient function :math:`\theta_{\ell}^{(a)}`. + * If an integer :math:`p`, set :math:`A_{\ell} = p` and define + :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. This is equivalent to + using ``coeffs=lambda mu: mu``, except the parameter dimension is + also captured and ``nterms`` is not required. + entries : list of ndarrays, or None + Operator matrices for each term of the affine expansion, i.e., + :math:`\Hhat_{\ell}^{(0)},\ldots,\Hhat_{\ell}^{(A_{\ell}-1)}.` + If not provided in the constructor, use :meth:`set_entries` later. + fromblock : bool + If ``True``, interpret ``entries`` as a horizontal concatenation + of arrays; if ``False`` (default), interpret ``entries`` as a list + of arrays. + """ + + _OperatorClass = QuadraticOperator + + +class AffineCubicOperator(_AffineOperator): + r"""Affine-parametric cubic operator + :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) + = \Ghat_{\ell}(\bfmu)[\qhat\otimes\qhat\otimes\qhat] = \left( + \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\,\Ghat_{\ell}^{(a)} + \right)[\qhat\otimes\qhat\otimes\qhat].` + + Here, each :math:`\theta_\ell^{(a)}:\RR^{p}\to\RR` is a scalar-valued + function of the parameter vector + and each :math:`\Ghat_{\ell}^{(a)} \in \RR^{r\times r^3}` is a constant + matrix, see :class:`opinf.operators.CubicOperator`. + + Parameters + ---------- + coeffs : callable, (iterable of callables), or int + Coefficient functions for the terms of the affine expansion. + + * If callable, it should receive a parameter vector + :math:`\bfmu` and return the vector of affine coefficients, + :math:`[~\theta_{\ell}^{(0)}(\bfmu) + ~~\cdots~~\theta_{\ell}^{(A_{\ell}-1)}(\bfmu)~]\trp`. 
+ In this case, ``nterms`` is a required argument. + * If an iterable, each entry should be a callable representing a + single affine coefficient function :math:`\theta_{\ell}^{(a)}`. + * If an integer :math:`p`, set :math:`A_{\ell} = p` and define + :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. This is equivalent to + using ``coeffs=lambda mu: mu``, except the parameter dimension is + also captured and ``nterms`` is not required. + entries : list of ndarrays, or None + Operator matrices for each term of the affine expansion, i.e., + :math:`\Ghat_{\ell}^{(0)},\ldots,\Ghat_{\ell}^{(A_{\ell}-1)}.` + If not provided in the constructor, use :meth:`set_entries` later. + fromblock : bool + If ``True``, interpret ``entries`` as a horizontal concatenation + of arrays; if ``False`` (default), interpret ``entries`` as a list + of arrays. + """ + + _OperatorClass = CubicOperator + + +class AffineInputOperator(_AffineOperator, InputMixin): + r"""Affine-parametric input operator + :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) + = \Bhat_{\ell}(\bfmu)\u = \left( + \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\,\Bhat_{\ell}^{(a)} + \right)\u.` + + Here, each :math:`\theta_\ell^{(a)}:\RR^{p}\to\RR` is a scalar-valued + function of the parameter vector + and each :math:`\Bhat_{\ell}^{(a)} \in \RR^{r\times m}` is a constant + matrix, see :class:`opinf.operators.InputOperator`. + + Parameters + ---------- + coeffs : callable, (iterable of callables), or int + Coefficient functions for the terms of the affine expansion. + + * If callable, it should receive a parameter vector + :math:`\bfmu` and return the vector of affine coefficients, + :math:`[~\theta_{\ell}^{(0)}(\bfmu) + ~~\cdots~~\theta_{\ell}^{(A_{\ell}-1)}(\bfmu)~]\trp`. + In this case, ``nterms`` is a required argument. + * If an iterable, each entry should be a callable representing a + single affine coefficient function :math:`\theta_{\ell}^{(a)}`. 
+        * If an integer :math:`p`, set :math:`A_{\ell} = p` and define
+          :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. This is equivalent to
+          using ``coeffs=lambda mu: mu``, except the parameter dimension is
+          also captured and ``nterms`` is not required.
+    entries : list of ndarrays, or None
+        Operator matrices for each term of the affine expansion, i.e.,
+        :math:`\Bhat_{\ell}^{(0)},\ldots,\Bhat_{\ell}^{(A_{\ell}-1)}.`
+        If not provided in the constructor, use :meth:`set_entries` later.
+    fromblock : bool
+        If ``True``, interpret ``entries`` as a horizontal concatenation
+        of arrays; if ``False`` (default), interpret ``entries`` as a list
+        of arrays.
+    """
+
+    _OperatorClass = InputOperator
+
+    @property
+    def input_dimension(self):
+        r"""Dimension of the input :math:`\u` that the operator acts on."""
+        return None if self.entries is None else self.shape[1]
+
+
+class AffineStateInputOperator(_AffineOperator, InputMixin):
+    r"""Affine-parametric state-input operator
+    :math:`\Ophat_{\ell}(\qhat,\u;\bfmu)
+    = \Nhat_{\ell}(\bfmu)[\u\otimes\qhat] = \left(
+    \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\,\Nhat_{\ell}^{(a)}
+    \right)[\u\otimes\qhat].`
+
+    Here, each :math:`\theta_\ell^{(a)}:\RR^{p}\to\RR` is a scalar-valued
+    function of the parameter vector
+    and each :math:`\Nhat_{\ell}^{(a)} \in \RR^{r\times rm}` is a constant
+    matrix, see :class:`opinf.operators.StateInputOperator`.
+
+    Parameters
+    ----------
+    coeffs : callable, (iterable of callables), or int
+        Coefficient functions for the terms of the affine expansion.
+
+        * If callable, it should receive a parameter vector
+          :math:`\bfmu` and return the vector of affine coefficients,
+          :math:`[~\theta_{\ell}^{(0)}(\bfmu)
+          ~~\cdots~~\theta_{\ell}^{(A_{\ell}-1)}(\bfmu)~]\trp`.
+          In this case, ``nterms`` is a required argument.
+        * If an iterable, each entry should be a callable representing a
+          single affine coefficient function :math:`\theta_{\ell}^{(a)}`.
+        * If an integer :math:`p`, set :math:`A_{\ell} = p` and define
+          :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. This is equivalent to
+          using ``coeffs=lambda mu: mu``, except the parameter dimension is
+          also captured and ``nterms`` is not required.
+    entries : list of ndarrays, or None
+        Operator matrices for each term of the affine expansion, i.e.,
+        :math:`\Nhat_{\ell}^{(0)},\ldots,\Nhat_{\ell}^{(A_{\ell}-1)}.`
+        If not provided in the constructor, use :meth:`set_entries` later.
+    fromblock : bool
+        If ``True``, interpret ``entries`` as a horizontal concatenation
+        of arrays; if ``False`` (default), interpret ``entries`` as a list
+        of arrays.
+    """
+
+    _OperatorClass = StateInputOperator
+
+    @property
+    def input_dimension(self):
+        r"""Dimension of the input :math:`\u` that the operator acts on."""
+        if self.entries is None:
+            return None
+        r, rm = self.shape
+        return rm // r
+
+
+# Utilities ===================================================================
+def is_affine(obj) -> bool:
+    """Return ``True`` if ``obj`` is an affine operator object."""
+    return isinstance(obj, _AffineOperator)
+
+
+def nonparametric_to_affine(OpClass: type) -> type:
+    """Get the affine operator class corresponding to a nonparametric
+    operator class.
+ + """ + for AffineClassName in __all__: + AffineClass = eval(AffineClassName) + if not isinstance(AffineClass, type) or not issubclass( + AffineClass, _AffineOperator + ): # pragma: no cover + continue + if AffineClass._OperatorClass is OpClass: + return AffineClass + raise TypeError( + f"_AffineOperator for class '{OpClass.__name__}' not found" + ) diff --git a/src/opinf/operators/_base.py b/src/opinf/operators/_base.py index a51c5694..37ea6592 100644 --- a/src/opinf/operators/_base.py +++ b/src/opinf/operators/_base.py @@ -3,14 +3,10 @@ __all__ = [ "InputMixin", - "has_inputs", "OperatorTemplate", - "is_nonparametric", "OpInfOperator", "ParametricOperatorTemplate", - "is_parametric", "ParametricOpInfOperator", - "is_uncalibrated", ] import os @@ -35,9 +31,11 @@ class InputMixin(abc.ABC): @property @abc.abstractmethod - def input_dimension(self) -> int: # pragma: no cover - r"""Dimension of the input :math:`\u` that the operator acts on.""" - raise NotImplementedError + def input_dimension(self) -> int: + r"""Dimension :math:`m` of the input :math:`\u` that the operator + acts on. + """ + raise NotImplementedError # pragma: no cover def has_inputs(obj) -> bool: @@ -77,9 +75,11 @@ class OperatorTemplate(abc.ABC): # Properties -------------------------------------------------------------- @property @abc.abstractmethod - def state_dimension(self) -> int: # pragma: no cover - r"""Dimension of the state :math:`\qhat` that the operator acts on.""" - raise NotImplementedError + def state_dimension(self) -> int: + r"""Dimension :math:`r` of the state :math:`\qhat` that the operator + acts on. + """ + raise NotImplementedError # pragma: no cover def __str__(self) -> str: """String representation: class name + dimensions.""" @@ -153,7 +153,7 @@ def jacobian(self, state: np.ndarray, input_=None) -> np.ndarray: jac : (r, r) ndarray State Jacobian. 
""" - raise NotImplementedError + raise NotImplementedError # pragma: no cover # Dimensionality reduction ------------------------------------------------ def galerkin(self, Vr: np.ndarray, Wr=None): @@ -175,9 +175,10 @@ def galerkin(self, Vr: np.ndarray, Wr=None): Parameters ---------- Vr : (n, r) ndarray - Basis for the trial space. + Basis for the trial space :math:`\Vr`. Wr : (n, r) ndarray or None - Basis for the test space. If ``None``, defaults to ``Vr``. + Basis for the test space :math:`\Wr`. + If ``None`` (default), use ``Vr`` as the test basis. Returns ------- @@ -187,7 +188,7 @@ def galerkin(self, Vr: np.ndarray, Wr=None): ``input_dimension`` attribute of the new operator should be ``self.input_dimension``. """ - raise NotImplementedError + raise NotImplementedError # pragma: no cover # Model persistence ------------------------------------------------------- def copy(self): @@ -205,10 +206,10 @@ def save(self, savefile: str, overwrite: bool = False) -> None: If ``True``, overwrite the file if it already exists. If ``False`` (default), raise a ``FileExistsError`` if the file already exists. """ - raise NotImplementedError + raise NotImplementedError # pragma: no cover @classmethod - def load(cls, loadfile: str): # pragma: no cover + def load(cls, loadfile: str): """Load an operator from an HDF5 file. Parameters @@ -216,7 +217,7 @@ def load(cls, loadfile: str): # pragma: no cover loadfile : str Path to the file where the operator was stored via :meth:`save()`. 
""" - raise NotImplementedError + raise NotImplementedError # pragma: no cover # Verification ------------------------------------------------------------ def verify( @@ -529,7 +530,9 @@ def _clear(self): @staticmethod def _validate_entries(entries): """Ensure argument is a NumPy array and screen for NaN, Inf entries.""" - if not (isinstance(entries, np.ndarray) or sparse.issparse(entries)): + if sparse.issparse(entries): + return + if not isinstance(entries, np.ndarray): raise TypeError( "operator entries must be NumPy or scipy.sparse array" ) @@ -538,13 +541,13 @@ def _validate_entries(entries): elif np.any(np.isinf(entries)): raise ValueError("operator entries must not be Inf") - def set_entries(self, entries): + def set_entries(self, entries) -> None: """Set the :attr:`entries` attribute.""" self.__entries = entries # Properties -------------------------------------------------------------- @property - def entries(self): + def entries(self) -> np.ndarray: r"""Discrete representation of the operator, the matrix :math:`\Ohat`. """ @@ -561,13 +564,15 @@ def entries(self): self._clear() @property - def shape(self): - """Shape of the operator entries array.""" + def shape(self) -> tuple: + """Shape of the operator matrix.""" return None if self.entries is None else self.entries.shape @property - def state_dimension(self): - r"""Dimension of the state :math:`\qhat` that the operator acts on.""" + def state_dimension(self) -> int: + r"""Dimension :math:`r` of the state :math:`\qhat` that the operator + acts on. 
+ """ return None if self.entries is None else self.entries.shape[0] # Magic methods ----------------------------------------------------------- @@ -600,9 +605,13 @@ def __add__(self, other): ) return scls(self.entries + other.entries) + def __str__(self): + out = OperatorTemplate.__str__(self) + return out + f"\n entries.shape: {self.shape}" + # Evaluation -------------------------------------------------------------- @utils.requires("entries") - def jacobian(self, state, input_=None): # pragma: no cover + def jacobian(self, state, input_=None) -> np.ndarray: # pragma: no cover r"""Construct the state Jacobian of the operator. If :math:`[\![\q]\!]_{i}` denotes the :math:`i`-th entry of a vector @@ -680,7 +689,7 @@ def galerkin(self, Vr, Wr=None): # Operator inference ------------------------------------------------------ @staticmethod @abc.abstractmethod - def operator_dimension(r: int, m: int = None) -> int: # pragma: no cover + def operator_dimension(r: int, m: int = None) -> int: r"""Column dimension of the operator entries. Child classes should decorate this method with ``@staticmethod``. @@ -698,7 +707,7 @@ def operator_dimension(r: int, m: int = None) -> int: # pragma: no cover Number of columns in the operator entries matrix. This is also the number of rows in the data matrix block. 
""" - raise NotImplementedError + raise NotImplementedError # pragma: no cover @staticmethod @abc.abstractmethod @@ -855,21 +864,21 @@ def verify( # Parametric operators ======================================================== class ParametricOperatorTemplate(abc.ABC): - r"""Template for general operators that depend on external parameters, + r"""Template for operators that depend on external parameters, :math:`\Ophat_{\ell}(\qhat,\u;\bfmu).` In this package, a parametric "operator" is a function - :math:`\Ophat_{\ell}: \RR^n \times \RR^m \times \RR^p \to \RR^n` that acts - on a state vector :math:`\qhat\in\RR^n`, an (optional) input vector + :math:`\Ophat_{\ell}: \RR^r \times \RR^m \times \RR^p \to \RR^r` that acts + on a state vector :math:`\qhat\in\RR^r`, an (optional) input vector :math:`\u\in\RR^m`, and a parameter vector :math:`\bfmu\in\RR^p`. - Models are defined as the sum of several operators, - for example, an :class:`opinf.models.ContinuousModel` object represents a - system of ordinary differential equations: + Parametric models are defined as the sum of several operators, at least + one of which is parametric. + For example, a system of ODEs: .. math:: - \ddt\qhat(t) - = \sum_{\ell=1}^{n_\textrm{terms}}\Ophat_{\ell}(\qhat(t),\u(t)). + \ddt\qhat(t;\bfmu) + = \sum_{\ell=1}^{n_\textrm{terms}}\Ophat_{\ell}(\qhat(t),\u(t);\bfmu). Notes ----- @@ -878,54 +887,53 @@ class ParametricOperatorTemplate(abc.ABC): For nonparametric model terms, see :class:`OperatorTemplate`. For model terms that can be learned with Operator Inference, see :class:`OpInfOperator` or :class:`ParametricOpInfOperator`. - """ - # Meta properties --------------------------------------------------------- + # Nonparametric operator class that this parametric operator evaluates to. _OperatorClass = NotImplemented - @property - def OperatorClass(self): - """Nonparametric :mod:`opinf.operators` class that represents - this parametric operator evaluated at a particular parameter value. 
- - Examples - -------- - >>> Op = MyParametricOperator(init_args).evaluate(parameter_value) - >>> isinstance(Op, MyParametricOperator.OperatorClass) - True - """ - return self._OperatorClass - # Properties -------------------------------------------------------------- @property @abc.abstractmethod - def state_dimension(self) -> int: # pragma: no cover - r"""Dimension of the state :math:`\qhat` that the operator acts on.""" - raise NotImplementedError + def state_dimension(self) -> int: + r"""Dimension :math:`r` of the state :math:`\qhat` that the operator + acts on. + """ + raise NotImplementedError # pragma: no cover @property @abc.abstractmethod - def parameter_dimension(self) -> int: # pragma: no cover - r"""Dimension of the parameters :math:`\bfmu` that the operator acts - on. + def parameter_dimension(self) -> int: + r"""Dimension :math:`p` of the parameter vector :math:`\bfmu` that the + operator matrix depends on. """ - raise NotImplementedError + raise NotImplementedError # pragma: no cover def __str__(self) -> str: - """String representation: class name + dimensions.""" + """String representation: class name, dimensions, evaluation type.""" out = [self.__class__.__name__] out.append(f"state_dimension: {self.state_dimension}") if has_inputs(self): out.append(f"input_dimension: {self.input_dimension}") out.append(f"parameter_dimension: {self.parameter_dimension}") + out.append(f"evaluate(parameter) -> {self._OperatorClass.__name__}") return "\n ".join(out) + def __repr__(self) -> str: + return utils.str2repr(self) + # Evaluation -------------------------------------------------------------- + def _check_parametervalue_dimension(self, parameter): + """Ensure a new parameter value has the expected shape.""" + if (pdim := self.parameter_dimension) is None: + raise RuntimeError("parameter_dimension not set") + if np.atleast_1d(parameter).shape[0] != pdim: + raise ValueError(f"expected parameter of shape ({pdim:d},)") + @abc.abstractmethod - def evaluate(self, 
parameter): # pragma: no cover + def evaluate(self, parameter): r"""Evaluate the operator at the given parameter value, - resulting in a nonparametric operator of type ``OperatorClass``. + resulting in a nonparametric operator. Parameters ---------- @@ -934,14 +942,14 @@ def evaluate(self, parameter): # pragma: no cover Returns ------- - evaluated_operator : nonparametric operator. + op : nonparametric :mod:`opinf.operators` operator Nonparametric operator corresponding to the parameter value. This should be an instance of :class:`OperatorTemplate` (or a class that inherits from it). """ - raise NotImplementedError + raise NotImplementedError # pragma: no cover - def apply(self, parameter, state, input_): + def apply(self, parameter, state, input_=None): r"""Apply the operator to the given state and input at the specified parameter value, :math:`\Ophat_\ell(\qhat,\u;\bfmu)`. @@ -1021,7 +1029,7 @@ def jacobian(self, parameter, state, input_=None): return self.evaluate(parameter).jacobian(state, input_) # Dimensionality reduction ------------------------------------------------ - def galerkin(self, Vr, Wr=None): # pragma: no cover + def galerkin(self, Vr, Wr=None): r"""Get the (Petrov-)Galerkin projection of this operator. Consider an operator :math:`\Op(\q,\u)`, where :math:`\q\in\RR^n` @@ -1052,12 +1060,12 @@ def galerkin(self, Vr, Wr=None): # pragma: no cover ``input_dimension`` attribute of the new operator should be ``self.input_dimension``. """ - raise NotImplementedError + raise NotImplementedError # pragma: no cover # Model persistence ------------------------------------------------------- def copy(self): """Return a copy of the operator.""" - return copy.deepcopy(self) + return copy.deepcopy(self) # pragma: no cover def save(self, savefile: str, overwrite: bool = False) -> None: """Save the operator to an HDF5 file. @@ -1070,10 +1078,10 @@ def save(self, savefile: str, overwrite: bool = False) -> None: If ``True``, overwrite the file if it already exists. 
If ``False`` (default), raise a ``FileExistsError`` if the file already exists. """ - raise NotImplementedError + raise NotImplementedError # pragma: no cover @classmethod - def load(cls, loadfile: str): # pragma: no cover + def load(cls, loadfile: str): """Load an operator from an HDF5 file. Parameters @@ -1081,7 +1089,76 @@ loadfile : str Path to the file where the operator was stored via :meth:`save()`. """ - raise NotImplementedError + raise NotImplementedError # pragma: no cover + + # Verification ------------------------------------------------------------ + def verify(self, testparam=None): + """Verify dimension attributes and :meth:`evaluate()`. + + Parameters + ---------- + testparam : (p,) ndarray or None + Test parameter at which to evaluate the operator. + If ``None`` (default), draw test parameter entries from the + standard Normal distribution. + """ + # Check the _OperatorClass. + if not issubclass(self._OperatorClass, OperatorTemplate): + raise errors.VerificationError( + "_OperatorClass must be a nonparametric operator type" + ) + + # Verify dimensions exist and are valid. + if not isinstance((r := self.state_dimension), int) or r <= 0: + raise errors.VerificationError( + "state_dimension must be a positive integer " + f"(current value: {repr(r)}, of type '{type(r).__name__}')" + ) + + if hasinputs := has_inputs(self): + if not isinstance((m := self.input_dimension), int) or m <= 0: + raise errors.VerificationError( + "input_dimension must be a positive integer " + f"(current value: {repr(m)}, of type '{type(m).__name__}')" + ) + else: + m = 0 + + # Get a test parameter. + if testparam is None: + testparam = np.random.standard_normal(self.parameter_dimension) + if np.shape(testparam) != (self.parameter_dimension,): + raise ValueError("testparam.shape != (parameter_dimension,)") + + # Evaluate the operator at the test parameter. 
+ op_evaluated = self.evaluate(testparam) + if not isinstance(op_evaluated, self._OperatorClass): + raise errors.VerificationError( + "evaluate() must return instance of type _OperatorClass" + ) + if not is_nonparametric(op_evaluated): + raise errors.VerificationError( + "_OperatorClass must be a nonparametric operator type" + ) + + if op_evaluated.state_dimension != self.state_dimension: + raise errors.VerificationError( + "result of evaluate() does not retain the state_dimension" + ) + if hasinputs: + if not has_inputs(op_evaluated): + raise errors.VerificationError( + "result of evaluate() should depend on inputs" + ) + if op_evaluated.input_dimension != m: + raise errors.VerificationError( + "result of evaluate() does not retain the input_dimension" + ) + else: + if has_inputs(op_evaluated): + raise errors.VerificationError( + "result of evaluate() should not depend on inputs" + ) def is_parametric(obj) -> bool: @@ -1090,36 +1167,24 @@ def is_parametric(obj) -> bool: class ParametricOpInfOperator(ParametricOperatorTemplate): - r"""Base class for operators that depend on external parameters, i.e., + r"""Template for operators that depend on external parameters, and which + can be calibrated through operator inference, i.e., :math:`\Ophat_\ell(\qhat,\u;\bfmu) = \Ohat_\ell(\bfmu)\d_\ell(\qhat,\u)`. - - Evaluating a ``_ParametricOpertor`` at a specific parameter value - results in an object that inherits from - :class:`opinf.operators.OpInfOperator`. - - Examples - -------- - >>> parametric_operator = MyParametricOperator(init_args) - >>> nonparametric_operator = parametric_operator.evaluate(parameter_value) - >>> isinstance(nonparametric_operator, OpInfOperator) - True """ - # TODO: pull entries property back into this class as in OpInfOperator. 
- # Initialization ---------------------------------------------------------- def __init__(self): """Initialize the parameter_dimension.""" - self.__p = None + self._clear() - @abc.abstractmethod - def _clear(self) -> None: # pragma: no cover + def _clear(self) -> None: """Reset the operator to its post-constructor state.""" - raise NotImplementedError + self.__p = None + self.__entries = None - def _set_parameter_dimension_from_data(self, parameters) -> None: + def _set_parameter_dimension_from_values(self, parameters) -> None: """Extract and save the dimension of the parameter space from a set of - parameter values. + one or more parameter values. Parameters ---------- @@ -1142,52 +1207,110 @@ def _check_shape_consistency(iterable, prefix: str) -> None: raise ValueError(f"{prefix} shapes inconsistent") # Properties -------------------------------------------------------------- + @property + def state_dimension(self) -> int: + r"""Dimension :math:`r` of the state :math:`\qhat` that the operator + acts on. + """ + return None if self.entries is None else self.entries[0].shape[0] + @property def parameter_dimension(self) -> int: - r"""Dimension of the parameters :math:`\bfmu` that the operator acts - on. + r"""Dimension :math:`p` of the parameter vector :math:`\bfmu` that the + operator matrix depends on. """ return self.__p + @parameter_dimension.setter + def parameter_dimension(self, p): + """Set :attr:`parameter_dimension`. + Only allowed if :attr:`parameter_dimension` is currently ``None``. + """ + if self.__p is not None: + raise AttributeError( + "can't set property 'parameter_dimension' twice" + ) + if not isinstance(p, int) or p < 1: + raise ValueError("parameter_dimension must be a positive integer") + self.__p = p + @property - @abc.abstractmethod - def shape(self) -> tuple: # pragma: no cover - """Shape of the operator entries matrix when evaluated + def shape(self) -> tuple: + """Shape of the operator matrix when evaluated at a parameter value. 
""" - raise NotImplementedError + return None if self.entries is None else self.entries[0].shape - # Evaluation -------------------------------------------------------------- - def _check_parametervalue_dimension(self, parameter): - """Ensure a new parameter value has the expected shape.""" - if (pdim := self.parameter_dimension) is None: - raise RuntimeError("parameter_dimension not set") - if np.atleast_1d(parameter).shape[0] != pdim: - raise ValueError(f"expected parameter of shape ({pdim:d},)") + @property + def entries(self): + r"""Arrays that define the operator matrix as a function of the + parameter vector. + """ + return self.__entries + + @abc.abstractmethod + def set_entries(self, entries, fromblock: bool = False) -> None: + r"""Set the arrays that define the operator matrix as a function of + the parameter vector. + + Parameters + ---------- + entries : list of s (r, d) ndarrays, or (r, sd) ndarray + Operator entries, either as a list of arrays + (``fromblock=False``, default) + or as a horizontal concatenatation of arrays (``fromblock=True``). + fromblock : bool + If ``True``, interpret ``entries`` as a horizontal concatenation + of arrays; if ``False`` (default), interpret ``entries`` as a list + of arrays. + """ + self.__entries = entries # Operator inference ------------------------------------------------------ @abc.abstractmethod - def datablock(self, states, inputs=None): # pragma: no cover + def operator_dimension(self, s: int, r: int, m: int = None) -> int: + r"""Number of columns in the total operator matrix. + + Parameters + ---------- + s : int + Number of training parameter values. + r : int + State dimension. + m : int or None + Input dimension. + + Returns + ------- + d : int + Number of columns in the total operator entries matrix. + This is also the number of rows in the data matrix block. 
+ """ + raise NotImplementedError # pragma: no cover + + @abc.abstractmethod + def datablock(self, parameters, states, inputs=None): r"""Return the data matrix block corresponding to the operator. Parameters ---------- - states : list of s (r, k) ndarrays - State snapshots for each of the `s` training parameter values. - inputs : list of s (m, k) ndarrays + parameters : (s, p) ndarray + Traning parameter values :math:`\bfmu_{0},\ldots,\bfmu_{s-1}`. + states : list of s (r, k_i) ndarrays + State snapshots for each of the :math:`s` training parameter + values. + inputs : list of s (m, k_i) ndarrays Inputs corresponding to the state snapshots. Returns ------- - block : ndarray + block : (D, K) ndarray Data block for the parametric operator. + Here, :math:`D` is the total operator matrix dimension and + :math:`K = \sum_{i=0}^{s-1}k_i`, the total number of state + snapshots. """ - raise NotImplementedError - - @abc.abstractmethod - def operator_dimension(self, r, m): # pragma: no cover - """Number of columns in the operator matrix.""" - raise NotImplementedError + raise NotImplementedError # pragma: no cover def is_uncalibrated(obj) -> bool: diff --git a/src/opinf/operators/_interpolate.py b/src/opinf/operators/_interpolate.py index a3719b54..629d1994 100644 --- a/src/opinf/operators/_interpolate.py +++ b/src/opinf/operators/_interpolate.py @@ -4,6 +4,13 @@ """ __all__ = [ + "InterpConstantOperator", + "InterpLinearOperator", + "InterpQuadraticOperator", + "InterpCubicOperator", + "InterpInputOperator", + "InterpStateInputOperator", + # Deprecations: "InterpolatedConstantOperator", "InterpolatedLinearOperator", "InterpolatedQuadraticOperator", @@ -30,43 +37,35 @@ # Base class ================================================================== -class _InterpolatedOperator(ParametricOpInfOperator): +class _InterpOperator(ParametricOpInfOperator): r"""Base class for parametric operators where the parameter dependence is handled with element-wise interpolation. 
- For a set of training parameter values :math:`\{\bfmu_i\}_{i=1}^{s}`, + For a set of training parameter values :math:`\{\bfmu_i\}_{i=0}^{s-1}`, this type of operator is given by :math:`\Ophat_\ell(\qhat, \u, \bfmu) = \Ohat_\ell(\bfmu)\d(\qhat, \u)` - where :math:`\Ohat_{\ell}(\bfmu)` is calculated by interpolating - operator entries that correspond to each parameter value: + where :math:`\Ohat_{\ell}(\bfmu)` is calculated by interpolating the + operator matrix entries that correspond to each parameter value: .. math:: \Ohat_{\ell}(\bfmu) = \textrm{interpolate}( - (\bfmu_1,\Ohat_{\ell}^{(1)}),\ldots,(\Ohat_{\ell}^{(s)}\bfmu_s);\bfmu), + (\bfmu_0,\Ohat_{\ell}^{(0)}), + \ldots,(\bfmu_{s-1},\Ohat_{\ell}^{(s-1)});\bfmu), where :math:`\Ohat_\ell^{(i)} = \Ohat_\ell(\bfmu_i)` for each - :math:`i=1,\ldots,s`. + :math:`i=0,\ldots,s-1`. Parent class: :class:`opinf.operators.ParametricOpInfOperator` - Child classes: - - * :class:`opinf.operators.InterpolatedConstantOperator` - * :class:`opinf.operators.InterpolatedLinearOperator` - * :class:`opinf.operators.InterpolatedQuadraticOperator` - * :class:`opinf.operators.InterpolatedCubicOperator` - * :class:`opinf.operators.InterpolatedInputOperator` - * :class:`opinf.operators.InterpolatedStateInputOperator` - Parameters ---------- training_parameters : list of s scalars or (p,) 1D ndarrays - Parameter values for which the operator entries are known + Parameter values for which the operator matrix is known or will be inferred from data. If not provided in the constructor, use :meth:`set_training_parameters` later. entries : list of s ndarrays, or None - Operator entries corresponding to the ``training_parameters``. + Operator matrices corresponding to the ``training_parameters``. If not provided in the constructor, use :meth:`set_entries` later. InterpolatorClass : type or None Class for the elementwise interpolation. 
Must obey the syntax @@ -94,12 +93,11 @@ def __init__( fromblock=False, ): """Set attributes and, if training parameters and entries are given, - construct the elementwise operator interpolator. + construct the elementwise operator matrix interpolator. """ ParametricOpInfOperator.__init__(self) self.__parameters = None - self.__entries = None self.__interpolator = None self.__InterpolatorClass = InterpolatorClass @@ -120,9 +118,8 @@ def _from_operators( Parameters ---------- - operators : list of :mod:`opinf.operators` objects - Operators to interpolate. Must be of class ``OperatorClass`` - and have ``entries`` set. + operators : list of nonparametric :mod:`opinf.operators` operators + Operators to interpolate with ``entries`` already set. """ # Check everything is initialized. for op in operators: @@ -146,13 +143,15 @@ def _from_operators( def _clear(self) -> None: """Reset the operator to its post-constructor state without entries.""" - self.__entries = None + ParametricOpInfOperator._clear(self) self.__interpolator = None # Properties -------------------------------------------------------------- @property def training_parameters(self): - """Parameter values for which the operator entries are known.""" + """Parameter values where the operator matrix is known + or will be inferred from data. + """ return self.__parameters @training_parameters.setter @@ -166,7 +165,7 @@ def set_training_parameters(self, training_parameters): Parameters ---------- training_parameters : list of s scalars or (p,) 1D ndarrays - Parameter values for which the operator entries are known + Parameter values for which the operator matrix is known or will be inferred from data. 
""" if self.__interpolator is not None: @@ -183,35 +182,26 @@ def set_training_parameters(self, training_parameters): parameters = np.array(training_parameters) if parameters.ndim not in (1, 2): raise ValueError("parameter values must be scalars or 1D arrays") - self._set_parameter_dimension_from_data(parameters) + self._set_parameter_dimension_from_values(parameters) + if parameters.ndim == 2 and parameters.shape[-1] == 1: + parameters = parameters.ravel() self.__parameters = parameters @property - def entries(self): - """Operator entries corresponding to the training parameters values, - i.e., ``entries[i]`` are the operator entries corresponding to the + def entries(self) -> np.ndarray: + """Operator matrices corresponding to the training parameters values, + i.e., ``entries[i]`` is the operator matrix corresponding to the parameter value ``training_parameters[i]``. """ - return self.__entries - - @entries.setter - def entries(self, entries): - """Set the operator entries.""" - self.set_entries(entries) - - @entries.deleter - def entries(self): - """Reset the ``entries`` attribute.""" - self._clear() + return ParametricOpInfOperator.entries.fget(self) def set_entries(self, entries, fromblock: bool = False) -> None: - r"""Set the operator entries, the matrices - :math:`\Ohat_{\ell}^{(1)},\ldots,\Ohat_{\ell}^{(s)}`. + r"""Set the operator matrices at the training parameter values. Parameters ---------- entries : list of s (r, d) ndarrays, or (r, sd) ndarray - Operator entries, either as a list of arrays + Operator matrices, either as a list of arrays (``fromblock=False``, default) or as a horizontal concatenatation of arrays (``fromblock=True``). 
fromblock : bool @@ -242,23 +232,12 @@ def set_entries(self, entries, fromblock: bool = False) -> None: f"!= len(entries) = {n_arrays}" ) - self.__entries = np.array( - [self.OperatorClass(A).entries for A in entries] + ParametricOpInfOperator.set_entries( + self, + np.array([self._OperatorClass(A).entries for A in entries]), ) self.set_interpolator(self.__InterpolatorClass) - @property - def state_dimension(self) -> int: - r"""Dimension of the state :math:`\qhat` that the operator acts on.""" - return None if self.entries is None else self.entries[0].shape[0] - - @property - def shape(self) -> tuple: - """Shape of the operator entries matrix when evaluated - at a parameter value. - """ - return None if self.entries is None else self.entries[0].shape - # Interpolation ----------------------------------------------------------- @property def interpolator(self): @@ -268,7 +247,7 @@ def interpolator(self): return self.__interpolator def set_interpolator(self, InterpolatorClass): - """Construct the interpolator for the operator entries. + """Construct the interpolator for the operator matrix. Parameters ---------- @@ -281,30 +260,28 @@ def set_interpolator(self, InterpolatorClass): This can be, e.g., a class from :mod:`scipy.interpolate`. """ if self.entries is not None: + params = self.training_parameters + entries = self.entries + # Default interpolator classes. if InterpolatorClass is None: - if (dim := self.training_parameters.ndim) == 1: + if (dim := params.ndim) == 1: InterpolatorClass = spinterp.CubicSpline + paramsort = np.argsort(params) + params = params[paramsort] + entries = self.entries[paramsort] elif dim == 2: InterpolatorClass = spinterp.LinearNDInterpolator - self.__interpolator = InterpolatorClass( - self.training_parameters, - self.entries, - ) + # Do the interpolation. 
+ self.__interpolator = InterpolatorClass(params, entries) self.__InterpolatorClass = InterpolatorClass # Magic methods ----------------------------------------------------------- - def __len__(self) -> int: - """Length: number of training data points for the interpolation.""" - if self.training_parameters is None: - return 0 - return len(self.training_parameters) - def __eq__(self, other) -> bool: - """Test whether the training parameters and operator entries of two - _InterpolatedOperator objects are the same. + """Test whether the training parameters and operator matrices of two + _InterpOperator objects are the same. """ if not isinstance(other, self.__class__): return False @@ -338,11 +315,25 @@ def __eq__(self, other) -> bool: return np.allclose(self.entries, other.entries) return True + def __str__(self): + lines = ParametricOpInfOperator.__str__(self).split("\n") + + nparams = "None" + if (params := self.training_parameters) is not None: + nparams = len(params) + lines.insert(-1, f" training parameters: {nparams}") + + ICname = "None" + if (IC := self.__InterpolatorClass) is not None: + ICname = IC.__name__ + lines.insert(-1, f" type(interpolator): {ICname}") + + return "\n".join(lines) + # Evaluation -------------------------------------------------------------- @utils.requires("entries") def evaluate(self, parameter): - r"""Evaluate the operator at the given parameter value, - :math:`\Ophat_{\ell}(\cdot,\cdot;\bfmu)`. + r"""Evaluate the operator at the given parameter value. Parameters ---------- @@ -351,11 +342,16 @@ def evaluate(self, parameter): Returns ------- - op : :mod:`opinf.operators` operator of type ``OperatorClass``. + op : nonparametric :mod:`opinf.operators` operator Nonparametric operator corresponding to the parameter value. 
""" self._check_parametervalue_dimension(parameter) - return self.OperatorClass(self.interpolator(parameter)) + if self.parameter_dimension == 1 and not np.isscalar(parameter): + parameter = parameter[0] + entries = self.interpolator(parameter) + if entries.ndim == 3: + entries = entries[0] + return self._OperatorClass(entries) # Dimensionality reduction ------------------------------------------------ @utils.requires("entries") @@ -367,14 +363,14 @@ def galerkin(self, Vr, Wr=None): .. math:: \f_\ell(\q,\u;\bfmu) = \textrm{interpolate}( - (\bfmu_1,\f_{\ell}^{(1)}(\q,\u)),\ldots, - (\bfmu_s,\f_{\ell}^{(s)}(\q,\u)); \bfmu), + (\bfmu_0,\f_{\ell}^{(0)}(\q,\u)),\ldots, + (\bfmu_{s-1},\f_{\ell}^{(s-1)}(\q,\u)); \bfmu), where * :math:`\q\in\RR^n` is the full-order state, * :math:`\u\in\RR^m` is the input, - * :math:`\bfmu_1,\ldots,\bfmu_s\in\RR^p` + * :math:`\bfmu_0,\ldots,\bfmu_{s-1}\in\RR^p` are the (fixed) training parameter values, * :math:`\f_{\ell}^{(i)}(\q,\u) = \f_{\ell}(\q,\u;\bfmu_i)` is the operators evaluated at the :math:`i`-th training parameter @@ -389,8 +385,8 @@ def galerkin(self, Vr, Wr=None): .. math:: \fhat_{\ell}(\qhat,\u;\bfmu) = \textrm{interpolate}( - (\bfmu_1,\Wr\trp\f_{\ell}^{(1)}(\Vr\qhat,\u)),\ldots, - (\bfmu_s,\Wr\trp\f_{\ell}^{(s)}(\Vr\qhat,\u)); \bfmu), + (\bfmu_0,\Wr\trp\f_{\ell}^{(0)}(\Vr\qhat,\u)),\ldots, + (\bfmu_{s-1},\Wr\trp\f_{\ell}^{(s-1)}(\Vr\qhat,\u)); \bfmu), Here, :math:`\qhat\in\RR^r` is the reduced-order state, which enables the low-dimensional state approximation :math:`\q = \Vr\qhat`. 
@@ -412,7 +408,7 @@ def galerkin(self, Vr, Wr=None): return self.__class__( training_parameters=self.training_parameters, entries=[ - self.OperatorClass(A).galerkin(Vr, Wr).entries + self._OperatorClass(A).galerkin(Vr, Wr).entries for A in self.entries ], InterpolatorClass=self.__InterpolatorClass, @@ -421,7 +417,7 @@ # Operator inference ------------------------------------------------------ @classmethod - def datablock(cls, states, inputs=None): + def datablock(cls, parameters, states, inputs=None) -> np.ndarray: r"""Return the data matrix block corresponding to the operator. For interpolated operators, this is a block diagonal matrix where the @@ -431,6 +427,8 @@ Parameters ---------- + parameters : (s, p) ndarray + Training parameter values :math:`\bfmu_{0},\ldots,\bfmu_{s-1}`. states : list of s (r, k) or (k,) ndarrays State snapshots for each of the `s` training parameter values. If each snapshot matrix is 1D, it is assumed that :math:`r = 1`. @@ -445,6 +443,8 @@ of rows in the data block corresponding to a single training parameter value. """ + if not issubclass(cls, InputMixin): + inputs = [None] * len(parameters) return la.block_diag( *[ cls._OperatorClass.datablock(Q, U) @@ -454,8 +454,7 @@ @classmethod def operator_dimension(cls, s: int, r: int, m: int) -> int: - r"""Number of columns `sd` in the concatenated operator matrix - :math:`[~\Ohat_{\ell}^{(1)}~~\cdots~~\Ohat_{\ell}^{(s)}~]`. + r"""Number of columns in the concatenated operator matrix. Parameters ---------- @@ -481,6 +480,9 @@ def copy(self): def save(self, savefile: str, overwrite: bool = False) -> None: """Save the operator to an HDF5 file. + If the :attr:`interpolator` is not from :mod:`scipy.interpolate`, + it must be passed to :meth:`load()` when recovering the operator.
+ Parameters ---------- savefile : str @@ -524,7 +526,7 @@ def load(cls, loadfile: str, InterpolatorClass: type = None): ---------- loadfile : str Path to the file where the operator was stored via :meth:`save()`. - InterpolatorClass : type + InterpolatorClass : type or None Class for the elementwise interpolation. Must obey the syntax >>> interpolator = InterpolatorClass(data_points, data_values) @@ -535,7 +537,7 @@ def load(cls, loadfile: str, InterpolatorClass: type = None): Returns ------- - op : _Operator + op : _InterpOperator Initialized operator object. """ with utils.hdf5_loadhandle(loadfile) as hf: @@ -581,32 +583,32 @@ def load(cls, loadfile: str, InterpolatorClass: type = None): # Public interpolated operator classes ======================================== -class InterpolatedConstantOperator(_InterpolatedOperator): +class InterpConstantOperator(_InterpOperator): r"""Parametric constant operator :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) = \chat(\bfmu) \in \RR^r` where the parametric dependence is handled with elementwise interpolation. .. math:: \chat(\bfmu) = \textrm{interpolate}( - (\bfmu_1,\chat^{(1)}),\ldots,(\bfmu_s,\chat^{(s)}); \bfmu) + (\bfmu_0,\chat^{(0)}),\ldots,(\bfmu_{s-1},\chat^{(s-1)}); \bfmu) Here, - * :math:`\bfmu_1,\ldots,\bfmu_s\in\RR^p` + * :math:`\bfmu_0,\ldots,\bfmu_{s-1}\in\RR^p` are the (fixed) training parameter values, and * :math:`\chat^{(i)} = \chat(\bfmu_i) \in \RR^r` - are the operator entries evaluated at the training parameter values. + is the operator vector evaluated at the training parameter values. See :class:`opinf.operators.ConstantOperator`. Parameters ---------- training_parameters : list of s scalars or (p,) 1D ndarrays - Parameter values for which the operator entries are known - or will be inferred from data. If not provided in the constructor, + Parameter values for which the operator vector is known or + will be inferred from data. If not provided in the constructor, use :meth:`set_training_parameters` later. 
entries : list of s ndarrays, or None - Operator entries corresponding to the ``training_parameters``. + Operator vectors corresponding to the ``training_parameters``. If not provided in the constructor, use :meth:`set_entries` later. InterpolatorClass : type or None Class for the elementwise interpolation. Must obey the syntax @@ -628,7 +630,7 @@ class InterpolatedConstantOperator(_InterpolatedOperator): _OperatorClass = ConstantOperator -class InterpolatedLinearOperator(_InterpolatedOperator): +class InterpLinearOperator(_InterpOperator): r"""Parametric linear operator :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) = \Ahat(\bfmu)\qhat` where :math:`\Ahat(\bfmu) \in \RR^{r \times r}` and @@ -636,14 +638,14 @@ class InterpolatedLinearOperator(_InterpolatedOperator): .. math:: \Ahat(\bfmu) = \textrm{interpolate}( - (\bfmu_1,\Ahat^{(1)}),\ldots,(\bfmu_s,\Ahat^{(s)}); \bfmu) + (\bfmu_0,\Ahat^{(0)}),\ldots,(\bfmu_{s-1},\Ahat^{(s-1)}); \bfmu) Here, - * :math:`\bfmu_1,\ldots,\bfmu_s\in\RR^p` + * :math:`\bfmu_0,\ldots,\bfmu_{s-1}\in\RR^p` are the (fixed) training parameter values, and * :math:`\Ahat^{(i)} = \Ahat(\bfmu_i) \in \RR^{r \times r}` - are the operator entries evaluated at the training parameter values. + is the operator matrix for training parameter value :math:`\bfmu_i`. See :class:`opinf.operators.LinearOperator` @@ -676,7 +678,7 @@ class InterpolatedLinearOperator(_InterpolatedOperator): _OperatorClass = LinearOperator -class InterpolatedQuadraticOperator(_InterpolatedOperator): +class InterpQuadraticOperator(_InterpOperator): r"""Parametric quadratic operator :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) = \Hhat(\bfmu)[\qhat\otimes\qhat]` where :math:`\Ahat(\bfmu) \in \RR^{r \times r^2}` and @@ -684,11 +686,11 @@ class InterpolatedQuadraticOperator(_InterpolatedOperator): .. 
math:: \Hhat(\bfmu) = \textrm{interpolate}( - (\bfmu_1,\Hhat^{(1)}),\ldots,(\bfmu_s,\Hhat^{(s)}); \bfmu) + (\bfmu_0,\Hhat^{(0)}),\ldots,(\bfmu_{s-1},\Hhat^{(s-1)}); \bfmu) Here, - * :math:`\bfmu_1,\ldots,\bfmu_s\in\RR^p` + * :math:`\bfmu_0,\ldots,\bfmu_{s-1}\in\RR^p` are the (fixed) training parameter values, and * :math:`\Hhat^{(i)} = \Hhat(\bfmu_i) \in \RR^{r \times r^2}` are the operator entries evaluated at the training parameter values. @@ -724,7 +726,7 @@ class InterpolatedQuadraticOperator(_InterpolatedOperator): _OperatorClass = QuadraticOperator -class InterpolatedCubicOperator(_InterpolatedOperator): +class InterpCubicOperator(_InterpOperator): r"""Parametric cubic operator :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) = \Ghat(\bfmu)[\qhat\otimes\qhat\otimes\qhat]` @@ -733,11 +735,11 @@ class InterpolatedCubicOperator(_InterpolatedOperator): .. math:: \Ghat(\bfmu) = \textrm{interpolate}( - (\bfmu_1,\Ghat^{(1)}),\ldots,(\bfmu_s,\Ghat^{(s)}); \bfmu) + (\bfmu_0,\Ghat^{(0)}),\ldots,(\bfmu_{s-1},\Ghat^{(s-1)}); \bfmu) Here, - * :math:`\bfmu_1,\ldots,\bfmu_s\in\RR^p` + * :math:`\bfmu_0,\ldots,\bfmu_{s-1}\in\RR^p` are the (fixed) training parameter values, and * :math:`\Ghat^{(i)} = \Ghat(\bfmu_i) \in \RR^{r \times r^3}` are the operator entries evaluated at the training parameter values. @@ -773,7 +775,7 @@ class InterpolatedCubicOperator(_InterpolatedOperator): _OperatorClass = CubicOperator -class InterpolatedInputOperator(_InterpolatedOperator, InputMixin): +class InterpInputOperator(_InterpOperator, InputMixin): r"""Parametric input operator :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) = \Bhat(\bfmu)\u` where :math:`\Bhat(\bfmu) \in \RR^{r \times m}` and @@ -781,11 +783,11 @@ class InterpolatedInputOperator(_InterpolatedOperator, InputMixin): .. 
math:: \Bhat(\bfmu) = \textrm{interpolate}( - (\bfmu_1,\Bhat^{(1)}),\ldots,(\bfmu_s,\Bhat^{(s)}); \bfmu) + (\bfmu_0,\Bhat^{(0)}),\ldots,(\bfmu_{s-1},\Bhat^{(s-1)}); \bfmu) Here, - * :math:`\bfmu_1,\ldots,\bfmu_s\in\RR^p` + * :math:`\bfmu_0,\ldots,\bfmu_{s-1}\in\RR^p` are the (fixed) training parameter values, and * :math:`\Bhat^{(i)} = \Bhat(\bfmu_i) \in \RR^{r \times m}` are the operator entries evaluated at the training parameter values. @@ -827,7 +829,7 @@ def input_dimension(self): return None if self.entries is None else self.shape[1] -class InterpolatedStateInputOperator(_InterpolatedOperator, InputMixin): +class InterpStateInputOperator(_InterpOperator, InputMixin): r"""Parametric state-input operator :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) = \Nhat(\bfmu)[\u\otimes\qhat]` where :math:`\Nhat(\bfmu) \in \RR^{r \times rm}` and @@ -835,11 +837,11 @@ class InterpolatedStateInputOperator(_InterpolatedOperator, InputMixin): .. math:: \Nhat(\bfmu) = \textrm{interpolate}( - (\bfmu_1,\Nhat^{(1)}),\ldots,(\bfmu_s,\Nhat^{(s)}); \bfmu) + (\bfmu_0,\Nhat^{(0)}),\ldots,(\bfmu_{s-1},\Nhat^{(s-1)}); \bfmu) Here, - * :math:`\bfmu_1,\ldots,\bfmu_s\in\RR^p` + * :math:`\bfmu_0,\ldots,\bfmu_{s-1}\in\RR^p` are the (fixed) training parameter values, and * :math:`\Nhat^{(i)} = \Nhat(\bfmu_i) \in \RR^{r \times rm}` are the operator entries evaluated at the training parameter values. @@ -886,7 +888,7 @@ def input_dimension(self): # Utilities =================================================================== def is_interpolated(obj) -> bool: """Return ``True`` if ``obj`` is a interpolated operator object.""" - return isinstance(obj, _InterpolatedOperator) + return isinstance(obj, _InterpOperator) def nonparametric_to_interpolated(OpClass: type) -> type: @@ -894,14 +896,153 @@ def nonparametric_to_interpolated(OpClass: type) -> type: operator class. 
""" - for InterpolatedClassName in __all__: - InterpolatedClass = eval(InterpolatedClassName) - if not isinstance(InterpolatedClass, type) or not issubclass( - InterpolatedClass, _InterpolatedOperator + for InterpClassName in __all__: + InterpClass = eval(InterpClassName) + if not isinstance(InterpClass, type) or not issubclass( + InterpClass, _InterpOperator ): # pragma: no cover continue - if InterpolatedClass._OperatorClass is OpClass: - return InterpolatedClass + if InterpClass._OperatorClass is OpClass: + return InterpClass raise TypeError( - f"_InterpolatedOperator for class '{OpClass.__name__}' not found" + f"_InterpOperator for class '{OpClass.__name__}' not found" ) + + +# Deprecations ================================================================ +class InterpolatedConstantOperator(InterpConstantOperator): + def __init__( + self, + training_parameters=None, + entries=None, + InterpolatorClass: type = None, + fromblock=False, + ): + warnings.warn( + "InterpolatedConstantOperator has been renamed " + "and will be removed in an upcoming release, use " + "InterpConstantOperator", + DeprecationWarning, + ) + InterpConstantOperator.__init__( + self, + training_parameters=training_parameters, + entries=entries, + InterpolatorClass=InterpolatorClass, + fromblock=fromblock, + ) + + +class InterpolatedLinearOperator(InterpLinearOperator): + def __init__( + self, + training_parameters=None, + entries=None, + InterpolatorClass: type = None, + fromblock=False, + ): + warnings.warn( + "InterpolatedLinearOperator has been renamed " + "and will be removed in an upcoming release, use " + "InterpLinearOperator", + DeprecationWarning, + ) + InterpLinearOperator.__init__( + self, + training_parameters=training_parameters, + entries=entries, + InterpolatorClass=InterpolatorClass, + fromblock=fromblock, + ) + + +class InterpolatedQuadraticOperator(InterpQuadraticOperator): + def __init__( + self, + training_parameters=None, + entries=None, + InterpolatorClass: type = None, + 
fromblock=False, + ): + warnings.warn( + "InterpolatedQuadraticOperator has been renamed " + "and will be removed in an upcoming release, use " + "InterpQuadraticOperator", + DeprecationWarning, + ) + InterpQuadraticOperator.__init__( + self, + training_parameters=training_parameters, + entries=entries, + InterpolatorClass=InterpolatorClass, + fromblock=fromblock, + ) + + +class InterpolatedCubicOperator(InterpCubicOperator): + def __init__( + self, + training_parameters=None, + entries=None, + InterpolatorClass: type = None, + fromblock=False, + ): + warnings.warn( + "InterpolatedCubicOperator has been renamed " + "and will be removed in an upcoming release, use " + "InterpCubicOperator", + DeprecationWarning, + ) + InterpCubicOperator.__init__( + self, + training_parameters=training_parameters, + entries=entries, + InterpolatorClass=InterpolatorClass, + fromblock=fromblock, + ) + + +class InterpolatedInputOperator(InterpInputOperator): + def __init__( + self, + training_parameters=None, + entries=None, + InterpolatorClass: type = None, + fromblock=False, + ): + warnings.warn( + "InterpolatedInputOperator has been renamed " + "and will be removed in an upcoming release, use " + "InterpInputOperator", + DeprecationWarning, + ) + InterpInputOperator.__init__( + self, + training_parameters=training_parameters, + entries=entries, + InterpolatorClass=InterpolatorClass, + fromblock=fromblock, + ) + + +class InterpolatedStateInputOperator(InterpStateInputOperator): + def __init__( + self, + training_parameters=None, + entries=None, + InterpolatorClass: type = None, + fromblock=False, + ): + warnings.warn( + "InterpolatedStateInputOperator has been renamed " + "and will be removed in an upcoming release, use " + "InterpStateInputOperator", + DeprecationWarning, + ) + InterpStateInputOperator.__init__( + self, + training_parameters=training_parameters, + entries=entries, + InterpolatorClass=InterpolatorClass, + fromblock=fromblock, + ) diff --git 
a/src/opinf/operators/_nonparametric.py b/src/opinf/operators/_nonparametric.py index 0fa7ac17..c9a4fd70 100644 --- a/src/opinf/operators/_nonparametric.py +++ b/src/opinf/operators/_nonparametric.py @@ -13,6 +13,7 @@ import itertools import numpy as np import scipy.linalg as la +import scipy.sparse as sparse import scipy.special as special from .. import utils @@ -26,13 +27,13 @@ class ConstantOperator(OpInfOperator): Parameters ---------- entries : (r,) ndarray or None - Operator entries :math:`\chat`. + Operator vector :math:`\chat`. Examples -------- >>> import numpy as np >>> c = opinf.operators.ConstantOperator() - >>> entries = np.random.random(10) # Operator entries. + >>> entries = np.random.random(10) # Operator vector. >>> c.set_entries(np.random.random(10)) >>> c.shape (10,) @@ -45,15 +46,37 @@ class ConstantOperator(OpInfOperator): def _str(statestr=None, inputstr=None): return "c" + @property + def entries(self): + r"""Operator vector :math:`\chat`.""" + return OpInfOperator.entries.fget(self) + + @entries.setter + def entries(self, entries): + """Set the ``entries`` attribute.""" + OpInfOperator.entries.fset(self, entries) + + @entries.deleter + def entries(self): + """Reset the ``entries`` attribute.""" + OpInfOperator.entries.fdel(self) + + @property + def shape(self): + r"""Shape :math:`(r,)` of the operator vector :math:`\chat`.""" + return OpInfOperator.shape.fget(self) + def set_entries(self, entries): - r"""Set the ``entries`` attribute. + r"""Set the operator vector :math:`\chat`. Parameters ---------- entries : (r,) ndarray - Operator entries :math:`\chat`. + Operator vector :math:`\chat`. """ - if np.isscalar(entries): + if sparse.issparse(entries): + entries = entries.toarray() + elif np.isscalar(entries): entries = np.atleast_1d(entries) self._validate_entries(entries) @@ -152,7 +175,7 @@ def datablock(states, inputs=None): @staticmethod def operator_dimension(r=None, m=None): - r"""Column dimension of the operator entries (always 1). 
+ r"""Column dimension of the operator vector (always 1). Parameters ---------- @@ -172,13 +195,13 @@ class LinearOperator(OpInfOperator): Parameters ---------- entries : (r, r) ndarray or None - Operator entries :math:`\Ahat`. + Operator matrix :math:`\Ahat`. Examples -------- >>> import numpy as np >>> A = opinf.operators.LinearOperator() - >>> entries = np.random.random((10, 10)) # Operator entries. + >>> entries = np.random.random((10, 10)) # Operator matrix. >>> A.set_entries(entries) >>> A.shape (10, 10) @@ -192,15 +215,38 @@ class LinearOperator(OpInfOperator): def _str(statestr, inputstr=None): return f"A{statestr}" + @property + def entries(self): + r"""Operator matrix :math:`\Ahat`.""" + return OpInfOperator.entries.fget(self) + + @entries.setter + def entries(self, entries): + """Set the ``entries`` attribute.""" + OpInfOperator.entries.fset(self, entries) + + @entries.deleter + def entries(self): + """Reset the ``entries`` attribute.""" + OpInfOperator.entries.fdel(self) + + @property + def shape(self): + r"""Shape :math:`(r, r)` of the operator matrix :math:`\Ahat`.""" + return OpInfOperator.shape.fget(self) + def set_entries(self, entries): - r"""Set the ``entries`` attribute. + r"""Set the operator matrix :math:`\Ahat`. Parameters ---------- entries : (r, r) ndarray - Operator entries :math:`\Ahat`. + Operator matrix :math:`\Ahat`. """ - if np.isscalar(entries) or np.shape(entries) == (1,): + if sparse.issparse(entries): + if not isinstance(entries, sparse.csr_array): + entries = entries.tocsr() + elif np.isscalar(entries) or np.shape(entries) == (1,): entries = np.atleast_2d(entries) self._validate_entries(entries) @@ -309,7 +355,7 @@ def datablock(states, inputs=None): @staticmethod def operator_dimension(r, m=None): - """Column dimension :math:`r` of the operator entries. + r"""Column dimension :math:`r` of the operator matrix :math:`\Ahat`. 
Parameters ---------- @@ -326,20 +372,21 @@ class QuadraticOperator(OpInfOperator): :math:`\Ophat_{\ell}(\qhat,\u) = \Hhat[\qhat\otimes\qhat]` where :math:`\Hhat\in\RR^{r \times r^{2}}`. - Internally, the action of the operator is computed as the product of a - :math:`r \times r(r+1)/2` matrix and a compressed version of the Kronecker - product :math:`\qhat \otimes \qhat`. + Internally, the action of the operator is computed as the product of an + :math:`r \times r(r+1)/2` matrix :math:`\tilde{\H}` and a + compressed version of the Kronecker product :math:`\qhat \otimes \qhat`. Parameters ---------- entries : (r, r^2) or (r, r(r+1)/2) or (r, r, r) ndarray or None - Operator entries :math:`\Hhat`. + Operator matrix :math:`\Hhat`, its compressed representation + :math:`\tilde{\H}`, or the equivalent symmetric tensor. Examples -------- >>> import numpy as np >>> H = opinf.operators.QuadraticOperator() - >>> entries = np.random.random((10, 100)) # Operator entries. + >>> entries = np.random.random((10, 100)) # Operator matrix. >>> H.set_entries(entries) >>> H.shape # Compressed shape. (10, 55) @@ -367,13 +414,39 @@ def _precompute_jacobian_jit(self): Ht = self.expand_entries(self.entries).reshape((r, r, r)) self._prejac = Ht + Ht.transpose(0, 2, 1) + @property + def entries(self): + r"""Internal representation :math:`\tilde{\H}` of the operator + matrix :math:`\Hhat`. + """ + return OpInfOperator.entries.fget(self) + + @entries.setter + def entries(self, entries): + """Set the ``entries`` attribute.""" + OpInfOperator.entries.fset(self, entries) + + @entries.deleter + def entries(self): + """Reset the ``entries`` attribute.""" + OpInfOperator.entries.fdel(self) + + @property + def shape(self): + r"""Shape :math:`(r, r(r+1)/2)` of the internal representation + :math:`\tilde{\H}` of the operator matrix :math:`\Hhat`. + """ + return OpInfOperator.shape.fget(self) + def set_entries(self, entries): - r"""Set the ``entries`` attribute. 
+ r"""Set the internal representation :math:`\tilde{\H}` of the operator + matrix :math:`\Hhat`. Parameters ---------- entries : (r, r^2) or (r, r(r+1)/2) or (r, r, r) ndarray - Operator entries :math:`\Hhat`. + Operator matrix :math:`\Hhat`, its compressed representation + :math:`\tilde{\H}`, or the equivalent symmetric tensor. """ if np.isscalar(entries) or np.shape(entries) == (1,): entries = np.atleast_2d(entries) @@ -489,16 +562,16 @@ def datablock(states, inputs=None): \end{array}\right] \in\RR^{r^2 \times k}. - Internally, a compressed Kronecker product :math:`\tilde{\otimes}` with + Internally, a compressed Kronecker product :math:`\hat{\otimes}` with :math:`r(r+1)/2 < r^{2}` degrees of freedom is used for efficiency, hence the data block is actually .. math:: \D\trp = \left[\begin{array}{ccc} - \qhat_0\tilde{\otimes}\qhat_0 + \qhat_0\,\hat{\otimes}\,\qhat_0 & \cdots & - \qhat_{k-1}\tilde{\otimes}\qhat_{k-1} + \qhat_{k-1}\,\hat{\otimes}\,\qhat_{k-1} \end{array}\right] \in\RR^{r(r+1)/2 \times k}. @@ -519,7 +592,8 @@ def datablock(states, inputs=None): @staticmethod def operator_dimension(r, m=None): - """Column dimension :math:`r(r+1)/2` of the operator entries. + r"""Column dimension :math:`r(r+1)/2` of the internal representation + :math:`\tilde{\H}` of the operator matrix :math:`\Hhat`. Parameters ---------- @@ -561,11 +635,11 @@ def ckron(state, checkdim=False): Cross terms :math:`\hat{q}_i \hat{q}_j` for :math:`i \neq j` appear twice in :math:`\qhat\otimes\qhat`. - The *compressed Kronecker product* :math:`\qhat\hat{\otimes}\qhat` + The *compressed Kronecker product* :math:`\qhat\,\hat{\otimes}\,\qhat` consists of the unique terms of :math:`\qhat\otimes\qhat`: .. 
math:: - \qhat\hat{\otimes}\qhat + \qhat\,\hat{\otimes}\,\qhat = \left[\begin{array}{c} \hat{q}_{1}^2 \\ @@ -604,9 +678,9 @@ def ckron(state, checkdim=False): \end{array}\right] = \left[\begin{array}{ccc} & & \\ - \qhat_0\hat{\otimes}\qhat_0 + \qhat_0\,\hat{\otimes}\,\qhat_0 & \cdots & - \qhat_{k-1}\hat{\otimes}\qhat_{k-1} + \qhat_{k-1}\,\hat{\otimes}\,\qhat_{k-1} \\ & & \end{array}\right] \in \RR^{r(r+1)/2 \times k}. @@ -665,7 +739,8 @@ def ckron_indices(r): def compress_entries(H): r"""Given :math:`\Hhat\in\RR^{a\times r^2}`, construct the matrix :math:`\tilde{\H}\in\RR^{a \times r(r+1)/2}` such that - :math:`\Hhat[\qhat\otimes\qhat] = \tilde{\H}[\qhat\hat{\otimes}\qhat]` + :math:`\Hhat[\qhat\otimes\qhat] + = \tilde{\H}[\qhat\,\hat{\otimes}\,\qhat]` for all :math:`\qhat\in\RR^{r}` where :math:`\hat{\otimes}` is the compressed Kronecker product (see :meth:`ckron`). @@ -719,7 +794,8 @@ def compress_entries(H): def expand_entries(Hc): r"""Given :math:`\tilde{\H}\in\RR^{a \times r(r+1)/2}`, construct the matrix :math:`\Hhat\in\RR^{a\times r^2}` such that - :math:`\Hhat[\qhat\otimes\qhat] = \tilde{\H}[\qhat\hat{\otimes}\qhat]` + :math:`\Hhat[\qhat\otimes\qhat] + = \tilde{\H}[\qhat\,\hat{\otimes}\,\qhat]` for all :math:`\qhat\in\RR^{r}` where :math:`\hat{\otimes}` is the compressed Kronecker product (see :meth:`ckron`). @@ -782,20 +858,22 @@ class CubicOperator(OpInfOperator): :math:`\Ophat_{\ell}(\qhat,\u) = \Ghat[\qhat\otimes\qhat\otimes\qhat]` where :math:`\Ghat\in\RR^{r \times r^{3}}`. - Internally, the action of the operator is computed as the product of a - :math:`r \times r(r+1)(r+2)/6` matrix and a compressed version of the - triple Kronecker product :math:`\qhat \otimes \qhat \otimes \qhat`. + Internally, the action of the operator is computed as the product of an + :math:`r \times r(r+1)(r+2)/6` matrix :math:`\tilde{\G}` and a compressed + version of the triple Kronecker product + :math:`\qhat \otimes \qhat \otimes \qhat`. 
Parameters ---------- entries : (r, r^3) or (r, r(r+1)(r+2)/6) or (r, r, r, r) ndarray or None - Operator entries :math:`\Ghat`. + Operator matrix :math:`\Ghat`, its compressed representation + :math:`\tilde{\G}`, or the equivalent symmetric 4-tensor. Examples -------- >>> import numpy as np >>> G = opinf.operators.CubicOperator() - >>> entries = np.random.random((10, 1000)) # Operator entries. + >>> entries = np.random.random((10, 1000)) # Operator matrix. >>> G.set_entries(entries) >>> G.shape # Compressed shape. (10, 220) @@ -823,13 +901,39 @@ def _precompute_jacobian_jit(self): Gt = self.expand_entries(self.entries).reshape((r, r, r, r)) self._prejac = Gt + Gt.transpose(0, 2, 1, 3) + Gt.transpose(0, 3, 1, 2) + @property + def entries(self): + r"""Internal representation :math:`\tilde{\G}` of the operator + matrix :math:`\Ghat`. + """ + return OpInfOperator.entries.fget(self) + + @entries.setter + def entries(self, entries): + """Set the ``entries`` attribute.""" + OpInfOperator.entries.fset(self, entries) + + @entries.deleter + def entries(self): + """Reset the ``entries`` attribute.""" + OpInfOperator.entries.fdel(self) + + @property + def shape(self): + r"""Shape :math:`(r, r(r+1)(r+2)/6)` of the internal representation + :math:`\tilde{\G}` of the operator matrix :math:`\Ghat`. + """ + return OpInfOperator.shape.fget(self) + def set_entries(self, entries): - r"""Set the ``entries`` attribute. + r"""Set the internal representation :math:`\tilde{\G}` of the operator + matrix :math:`\Ghat`. Parameters ---------- entries : (r, r^3) or (r, r(r+1)(r+2)/6) or (r, r, r, r) ndarray - Operator entries :math:`\Ghat`. + Operator matrix :math:`\Ghat`, its compressed representation + :math:`\tilde{\G}`, or the equivalent symmetric 4-tensor. 
""" if np.isscalar(entries) or np.shape(entries) == (1,): entries = np.atleast_2d(entries) @@ -905,8 +1009,7 @@ def jacobian(self, state, input_=None): @utils.requires("entries") def galerkin(self, Vr, Wr=None): r"""Return the Galerkin projection of the operator, - :math:`\widehat{\mathbf{G}} = - (\Wr\trp\Vr)^{-1}\Wr\trp\mathbf{G}[\Vr\otimes\Vr\otimes\Vr]`. + :math:`\Ghat = (\Wr\trp\Vr)^{-1}\Wr\trp\G[\Vr\otimes\Vr\otimes\Vr]`. Parameters ---------- @@ -958,9 +1061,9 @@ def datablock(states, inputs=None): .. math:: \D\trp = \left[\begin{array}{ccc} - \qhat_0\tilde{\otimes}\qhat_0\tilde{\otimes}\qhat_0 + \qhat_0\,\hat{\otimes}\,\qhat_0\,\hat{\otimes}\,\qhat_0 & \cdots & - \qhat_{k-1}\tilde{\otimes}\qhat_{k-1}\tilde{\otimes}\qhat_{k-1} + \qhat_{k-1}\,\hat{\otimes}\,\qhat_{k-1}\,\hat{\otimes}\,\qhat_{k-1} \end{array}\right] \in\RR^{r(r+1)(r+2)/6 \times k}. @@ -981,7 +1084,8 @@ def datablock(states, inputs=None): @staticmethod def operator_dimension(r, m=None): - """Column dimension :math:`r(r+1)(r+2)/6` of the operator entries. + r"""Column dimension :math:`r(r+1)(r+2)/6` of the internal + representation :math:`\tilde{\G}` of the operator matrix :math:`\Ghat`. Parameters ---------- @@ -1014,17 +1118,17 @@ def ckron(state): not all equal appear multiple times in :math:`\qhat\otimes\qhat\otimes\qhat`. The *compressed cubic Kronecker product* - :math:`\qhat\hat{\otimes}\qhat\hat{\otimes}\qhat` + :math:`\qhat\,\hat{\otimes}\,\qhat\,\hat{\otimes}\,\qhat` consists of the unique terms of :math:`\qhat\otimes\qhat\otimes\qhat`: .. math:: - \qhat\hat{\otimes}\qhat\hat{\otimes}\qhat + \qhat\,\hat{\otimes}\,\qhat\,\hat{\otimes}\,\qhat = \left[\begin{array}{c} \hat{q}_{1}^3 \\ - \hat{q}_{2}[\![\qhat\hat{\otimes}\qhat]\!]_{1:2} + \hat{q}_{2}[\![\qhat\,\hat{\otimes}\,\qhat]\!]_{1:2} \\ \vdots \\ - \hat{q}_{r}[\![\qhat\hat{\otimes}\qhat]\!]_{1:r} + \hat{q}_{r}[\![\qhat\,\hat{\otimes}\,\qhat]\!]_{1:r} \end{array}\right] \in \RR^{r(r+1)(r+2)/6}. 
@@ -1089,7 +1193,7 @@ def compress_entries(G): r"""Given :math:`\Ghat\in\RR^{a\times r^2}`, construct the matrix :math:`\tilde{\G}\in\RR^{a \times r(r+1)(r+2)/6}` such that :math:`\Ghat[\qhat\otimes\qhat\otimes\qhat] - = \tilde{\G}[\qhat\hat{\otimes}\qhat\hat{\otimes}\qhat]` + = \tilde{\G}[\qhat\,\hat{\otimes}\,\qhat\,\hat{\otimes}\,\qhat]` for all :math:`\qhat\in\RR^{r}` where :math:`\cdot\hat{\otimes}\cdot\hat{\otimes}\cdot` is the compressed cubic Kronecker product (see :meth:`ckron`). @@ -1147,7 +1251,7 @@ def expand_entries(Gc): r"""Given :math:`\tilde{\G}\in\RR^{a \times r(r+1)(r+2)/6}`, construct the matrix :math:`\Ghat\in\RR^{a\times r^3}` such that :math:`\Ghat[\qhat\otimes\qhat\otimes\qhat] - = \tilde{\G}[\qhat\hat{\otimes}\qhat\hat{\otimes}\qhat]` + = \tilde{\G}[\qhat\,\hat{\otimes}\,\qhat\,\hat{\otimes}\,\qhat]` for all :math:`\qhat\in\RR^{r}` where :math:`\cdot\hat{\otimes}\cdot\hat{\otimes}\cdot` is the compressed cubic Kronecker product (see :meth:`ckron`). @@ -1226,13 +1330,13 @@ class InputOperator(OpInfOperator, InputMixin): Parameters ---------- entries : (r, m) ndarray or None - Operator entries :math:`\Bhat`. + Operator matrix :math:`\Bhat`. Examples -------- >>> import numpy as np >>> B = opinf.operators.LinearOperator() - >>> entries = np.random.random((10, 3)) # Operator entries. + >>> entries = np.random.random((10, 3)) # Operator matrix. >>> B.set_entries(entries) >>> B.shape (10, 3) @@ -1244,20 +1348,42 @@ class InputOperator(OpInfOperator, InputMixin): @property def input_dimension(self): - r"""Dimension of the input :math:`\u` that the operator acts on.""" + r"""Dimension :math:`m` of the input :math:`\u` that the operator + acts on. 
+ """ return None if self.entries is None else self.entries.shape[1] @staticmethod def _str(statestr, inputstr): return f"B{inputstr}" + @property + def entries(self): + r"""Operator matrix :math:`\Bhat`.""" + return OpInfOperator.entries.fget(self) + + @entries.setter + def entries(self, entries): + """Set the ``entries`` attribute.""" + OpInfOperator.entries.fset(self, entries) + + @entries.deleter + def entries(self): + """Reset the ``entries`` attribute.""" + OpInfOperator.entries.fdel(self) + + @property + def shape(self): + r"""Shape :math:`(r, m)` of the operator matrix :math:`\Bhat`.""" + return OpInfOperator.shape.fget(self) + def set_entries(self, entries): - r"""Set the ``entries`` attribute. + r"""Set the operator matrix :math:`\Bhat`. Parameters ---------- entries : (r, m) ndarray - Operator entries :math:`\Bhat`. + Operator matrix :math:`\Bhat`. """ if np.isscalar(entries) or np.shape(entries) == (1,): entries = np.atleast_2d(entries) @@ -1354,7 +1480,7 @@ def datablock(states, inputs): @staticmethod def operator_dimension(r, m): - """Column dimension :math:`m` of the operator entries. + r"""Column dimension :math:`m` of the operator matrix :math:`\Bhat`. Parameters ---------- @@ -1375,7 +1501,7 @@ class StateInputOperator(OpInfOperator, InputMixin): Parameters ---------- entries : (r, rm) ndarray or None - Operator entries :math:`\Nhat`. + Operator matrix :math:`\Nhat`. Examples -------- @@ -1394,7 +1520,9 @@ class StateInputOperator(OpInfOperator, InputMixin): @property def input_dimension(self): - r"""Dimension of the input :math:`\u` that the operator acts on.""" + r"""Dimension :math:`m` of the input :math:`\u` that the operator + acts on. 
+ """ if self.entries is None: return None return self.entries.shape[1] // self.entries.shape[0] @@ -1403,13 +1531,33 @@ def input_dimension(self): def _str(statestr, inputstr): return f"N[{inputstr} ⊗ {statestr}]" + @property + def entries(self): + r"""Operator matrix :math:`\Nhat`.""" + return OpInfOperator.entries.fget(self) + + @entries.setter + def entries(self, entries): + """Set the ``entries`` attribute.""" + OpInfOperator.entries.fset(self, entries) + + @entries.deleter + def entries(self): + """Reset the ``entries`` attribute.""" + OpInfOperator.entries.fdel(self) + + @property + def shape(self): + r"""Shape :math:`(r, rm)` of the operator matrix :math:`\Nhat`.""" + return OpInfOperator.shape.fget(self) + def set_entries(self, entries): - r"""Set the ``entries`` attribute. + r"""Set the operator matrix :math:`\Nhat`. Parameters ---------- entries : (r, rm) ndarray - Operator entries :math:`\Nhat`. + Operator matrix :math:`\Nhat`. """ if np.isscalar(entries) or np.shape(entries) == (1,): entries = np.atleast_2d(entries) @@ -1491,8 +1639,7 @@ def jacobian(self, state, input_): @utils.requires("entries") def galerkin(self, Vr, Wr=None): r"""Return the Galerkin projection of the operator, - :math:`\widehat{\mathbf{N}} = - (\Wr\trp\Vr)^{-1}\Wr\trp\mathbf{N}[\I_{m}\otimes\Vr]`. + :math:`\Nhat = (\Wr\trp\Vr)^{-1}\Wr\trp\N[\I_{m}\otimes\Vr]`. Parameters ---------- @@ -1554,7 +1701,7 @@ def datablock(states, inputs): @staticmethod def operator_dimension(r, m): - """Column dimension :math:`rm` of the operator entries. + r"""Column dimension :math:`rm` of the operator matrix :math:`\Nhat`. 
Parameters ---------- diff --git a/src/opinf/operators/_utils.py b/src/opinf/operators/_utils.py new file mode 100644 index 00000000..ebb50b2c --- /dev/null +++ b/src/opinf/operators/_utils.py @@ -0,0 +1,17 @@ +# operators/_utils.py +"""Private utility functions for working with Operator classes.""" + +__all__ = [ + "has_inputs", + "is_nonparametric", + "is_parametric", + "is_uncalibrated", + "is_affine", + "is_interpolated", + "nonparametric_to_affine", + "nonparametric_to_interpolated", +] + +from ._base import has_inputs, is_nonparametric, is_parametric, is_uncalibrated +from ._affine import is_affine, nonparametric_to_affine +from ._interpolate import is_interpolated, nonparametric_to_interpolated diff --git a/src/opinf/roms/_base.py b/src/opinf/roms/_base.py new file mode 100644 index 00000000..9d04fe2c --- /dev/null +++ b/src/opinf/roms/_base.py @@ -0,0 +1,416 @@ +# roms/_base.py +"""Base for ROM classes.""" + +__all__ = [] + +import abc +import warnings +import numpy as np + +from .. import errors, utils +from .. import lift, pre, basis as _basis, ddt +from ..models import _utils as modutils + + +class _BaseROM(abc.ABC): + """Reduced-order model. + + This class connects classes from the various submodules to form a complete + reduced-order modeling workflow. + + High-dimensional data + -> transformed / preprocessed data + -> compressed data + -> low-dimensional model. + + Parameters + ---------- + model : :mod:`opinf.models` object + System model. + lifter : :mod:`opinf.lift` object or None + Lifting transformation. + transformer : :mod:`opinf.pre` object or None + Preprocesser. + basis : :mod:`opinf.basis` object or None + Dimensionality reducer. + ddt_estimator : :mod:`opinf.ddt` object or None + Time derivative estimator. + Ignored if ``model`` is not time continuous. + """ + + def __init__(self, model, lifter, transformer, basis, ddt_estimator): + """Store attributes. Child classes should verify model type.""" + self.__model = model + + # Verify lifter. 
+ if not (lifter is None or isinstance(lifter, lift.LifterTemplate)): + warnings.warn( + "lifter not derived from LifterTemplate, " + "unexpected behavior may occur", + errors.OpInfWarning, + ) + self.__lifter = lifter + + # Verify transformer. + if not ( + transformer is None + or isinstance( + transformer, + (pre.TransformerTemplate, pre.TransformerMulti), + ) + ): + warnings.warn( + "transformer not derived from TransformerTemplate " + "or TransformerMulti, unexpected behavior may occur", + errors.OpInfWarning, + ) + self.__transformer = transformer + + # Verify basis. + if not ( + basis is None + or isinstance(basis, (_basis.BasisTemplate, _basis.BasisMulti)) + ): + warnings.warn( + "basis not derived from BasisTemplate or BasisMulti, " + "unexpected behavior may occur", + errors.OpInfWarning, + ) + self.__basis = basis + + # Verify ddt_estimator. + if (ddt_estimator is not None) and not self._iscontinuous: + warnings.warn( + "ddt_estimator ignored for discrete models", + errors.OpInfWarning, + ) + ddt_estimator = None + if not ( + ddt_estimator is None + or isinstance(ddt_estimator, ddt.DerivativeEstimatorTemplate) + ): + warnings.warn( + "ddt_estimator not derived from DerivativeEstimatorTemplate, " + "unexpected behavior may occur", + errors.OpInfWarning, + ) + self.__ddter = ddt_estimator + + # Properties -------------------------------------------------------------- + @property + def lifter(self): + """Lifting transformation.""" + return self.__lifter + + @property + def transformer(self): + """Preprocesser.""" + return self.__transformer + + @property + def basis(self): + """Dimensionality reducer.""" + return self.__basis + + @property + def ddt_estimator(self): + """Time derivative estimator.""" + return self.__ddter + + @property + def model(self): + """System model.""" + return self.__model + + @property + def _iscontinuous(self): + """``True`` if the model is time continuous (semi-discrete), + ``False`` if the model if fully discrete. 
+ """ + return modutils.is_continuous(self.model) + + # Printing ---------------------------------------------------------------- + def __str__(self): + """String representation.""" + lines = [] + for label, obj in [ + ("lifter", self.lifter), + ("transformer", self.transformer), + ("basis", self.basis), + ("ddt_estimator", self.ddt_estimator), + ("model", self.model), + ]: + if obj is not None: + lines.append(f"{label}: {str(obj)}") + + body = "\n ".join("\n".join(lines).split("\n")) + return f"{self.__class__.__name__}\n {body}" + + def __repr__(self): + """Repr: address + string representatation.""" + return utils.str2repr(self) + + # Mappings between original and latent state spaces ----------------------- + def encode( + self, + states, + lhs=None, + inplace: bool = False, + *, + fit_transformer: bool = False, + fit_basis: bool = False, + ): + """Map high-dimensional data to its low-dimensional representation. + + Parameters + ---------- + states : (n,) or (n, k) ndarray + State snapshots in the original state space. + lhs : (n,) or (n, k) ndarray or None + Left-hand side regression data. + + - If the model is time continuous, these are the time derivatives + of the state snapshots. + - If the model is fully discrete, these are the "next states" + corresponding to the state snapshots. + inplace : bool + If ``True``, modify the ``states`` and ``lhs`` in-place in the + preprocessing transformation (if applicable). + + Returns + ------- + states_encoded : (r,) or (r, k) ndarray + Low-dimensional representation of ``states`` + in the latent reduced state space. + lhs_encoded : (r,) or (r, k) ndarray + Low-dimensional representation of ``lhs`` + in the latent reduced state space. + **Only returned** if ``lhs`` is not ``None``. + """ + # Lifting. + if self.lifter is not None: + if lhs is not None: + if self._iscontinuous: + lhs = self.lifter.lift_ddts(states, lhs) + else: + lhs = self.lifter.lift(lhs) + states = self.lifter.lift(states) + + # Preprocessing. 
+ if self.transformer is not None: + if fit_transformer: + states = self.transformer.fit_transform( + states, + inplace=inplace, + ) + else: + states = self.transformer.transform(states, inplace=inplace) + if lhs is not None: + if self._iscontinuous: + lhs = self.transformer.transform_ddts(lhs, inplace=inplace) + else: + lhs = self.transformer.transform(lhs, inplace=inplace) + + # Dimensionality reduction. + if self.basis is not None: + if fit_basis: + self.basis.fit(states) + states = self.basis.compress(states) + if lhs is not None: + lhs = self.basis.compress(lhs) + + if lhs is not None: + return states, lhs + return states + + def decode(self, states_encoded, locs=None): + """Map low-dimensional data to the original state space. + + Parameters + ---------- + states_encoded : (r, ...) ndarray + Low-dimensional state or states + in the latent reduced state space. + locs : slice or (p,) ndarray of integers or None + If given, return the decoded state at only the p specified + locations (indices) described by ``locs``. + + Returns + ------- + states_decoded : (n, ...) ndarray + Version of ``states_compressed`` in the original state space. + """ + inplace = False + # Reverse dimensionality reduction. + states = states_encoded + if self.basis is not None: + inplace = True + states = self.basis.decompress(states, locs=locs) + + # Reverse preprocessing. + if self.transformer is not None: + states = self.transformer.inverse_transform( + states, + inplace=inplace, + locs=locs, + ) + + # Reverse lifting. + if self.lifter is not None: + states = self.lifter.unlift(states) + + return states + + def project(self, states): + """Project a high-dimensional state vector to the subset of the + high-dimensional space that can be represented by the basis. + + This is done by + + 1. expressing the state in low-dimensional latent coordinates, then + 2. reconstructing the high-dimensional state corresponding to those + coordinates. 
+ + In other words, ``project(Q)`` is equivalent to ``decode(encode(Q))``. + + Parameters + ---------- + states : (n, ...) ndarray + Matrix of `n`-dimensional state vectors, or a single state vector. + + Returns + ------- + state_projected : (n, ...) ndarray + Matrix of `n`-dimensional projected state vectors, or a single + projected state vector. + """ + return self.decode(self.encode(states)) + + # Abstract methods -------------------------------------------------------- + def _check_fit_args(self, lhs, inputs): + """Verify required arguments for :meth:`fit()`.""" + + # Make sure lhs is given if required. + if lhs is None and self._iscontinuous and self.ddt_estimator is None: + raise ValueError( + "argument 'lhs' required when model is time-continuous" + " and ddt_estimator=None" + ) + + # Make sure inputs are passed in correctly when requried. + if inputs is None and self.model._has_inputs: + raise ValueError( + "argument 'inputs' required (model depends on external inputs)" + ) + + # Training ---------------------------------------------------------------- + @abc.abstractmethod + def fit( + self, + states, + lhs, + inputs, + fit_transformer: bool, + fit_basis: bool, + ): + """Calibrate the model to training data. + + Child classes should overwrite this method to include a call to + the ``fit()`` method of :attr:`model`. + + Parameters + ---------- + states : list of s (n, k_i) ndarrays + State snapshots in the original state space. Each array + ``states[i]`` is data for a single trajectory; each column + ``states[i][:, j]`` is one snapshot. + lhs : list of s (n, k_i) ndarrays or None + Left-hand side regression data. Each array ``lhs[i]`` is the data + corresponding to parameter value ``parameters[i]``; each column + ``lhs[i][:, j]`` corresponds to the snapshot ``states[i][:, j]``. + + - If the model is time continuous, these are the time derivatives + of the state snapshots. 
+ - If the model is fully discrete, these are the "next states" + corresponding to the state snapshots. + + If ``None``, these are estimated using :attr:`ddt_estimator` + (time continuous) or extracted from ``states`` (fully discrete). + inputs : list of s (m, k_i) ndarrays or None + Input training data. Each array ``inputs[i]`` is the data + corresponding to parameter value ``parameters[i]``; each column + ``inputs[i][:, j]`` corresponds to the snapshot ``states[:, j]``. + May be a two-dimensional array if :math:`m=1` (scalar input). + Only required if one or more model operators depend on inputs. + fit_transformer : bool + If ``True``, calibrate the preprocessing transformation + using the ``states``. If ``False``, assume the transformer is + already calibrated. + fit_basis : bool + If ``True``, calibrate the high-to-low dimensional mapping + using the ``states``. + If ``False``, assume the basis is already calibrated. + + Returns + ------- + self + """ + # Lifting. + if self.lifter is not None: + if lhs is not None: + if self._iscontinuous: + lhs = [ + self.lifter.lift_ddts(Q, Z) + for Q, Z in zip(states, lhs) + ] + else: + lhs = [self.lifter.lift(Z) for Z in lhs] + states = [self.lifter.lift(Q) for Q in states] + + # Preprocessing. + if self.transformer is not None: + if fit_transformer: + self.transformer.fit(np.hstack(states)) + states = [self.transformer.transform(Q) for Q in states] + if lhs is not None: + if self._iscontinuous: + lhs = [self.transformer.transform_ddts(Z) for Z in lhs] + else: + lhs = [self.transformer.transform(Z) for Z in lhs] + + # Dimensionality reduction. 
+ if self.basis is not None: + if fit_basis: + self.basis.fit(np.hstack(states)) + states = [self.basis.compress(Q) for Q in states] + if lhs is not None: + lhs = [self.basis.compress(Z) for Z in lhs] + + # Time derivative estimation / discrete LHS + if lhs is None: + if self._iscontinuous: + if inputs is None: + states, lhs = zip( + *[self.ddt_estimator.estimate(Q) for Q in states] + ) + else: + states, lhs, inputs = zip( + *[ + self.ddt_estimator.estimate(Q, U) + for Q, U in zip(states, inputs) + ] + ) + else: + lhs = [Q[:, 1:] for Q in states] + states = [Q[:, :-1] for Q in states] + if inputs is not None: + inputs = [ + U[..., : Q.shape[1]] for Q, U in zip(states, inputs) + ] + + return states, lhs, inputs + + @abc.abstractmethod + def predict(self, *args, **kwargs): + """Evaluate the model.""" + raise NotImplementedError # pragma: no cover diff --git a/src/opinf/roms/_nonparametric.py b/src/opinf/roms/_nonparametric.py index da9f271b..02bb4b5b 100644 --- a/src/opinf/roms/_nonparametric.py +++ b/src/opinf/roms/_nonparametric.py @@ -5,31 +5,37 @@ "ROM", ] -import warnings +import numpy as np -from .. import errors, models, utils +from ..models import _utils as modutils +from ._base import _BaseROM -class ROM: - """Nonparametric reduced-order model class. +class ROM(_BaseROM): + r"""Nonparametric reduced-order model. This class connects classes from the various submodules to form a complete reduced-order modeling workflow. - High-dimensional data -> transformed / preprocessed data -> compressed data - -> low-dimensional model. + High-dimensional data + :math:`\to` transformed / preprocessed data + :math:`\to` compressed data + :math:`\to` low-dimensional model. Parameters ---------- - model : opinf.models.ContinuousModel or opinf.models.DiscreteModel - System model. 
- lifter : opinf.lift.LifterTemplate or None + model : :mod:`opinf.models` object + Nonparametric system model, an instance of one of the following: + + * :class:`opinf.models.ContinuousModel` + * :class:`opinf.models.DiscreteModel` + lifter : :mod:`opinf.lift` object or None Lifting transformation. - transformer : opinf.pre.TransformerTemplate or None + transformer : :mod:`opinf.pre` object or None Preprocesser. - basis : opinf.basis.BasisTemplate + basis : :mod:`opinf.basis` object or None Dimensionality reducer. - ddt_estimator : opinf.ddt.DerivativeEstimatorTemplate + ddt_estimator : :mod:`opinf.ddt` object or None Time derivative estimator. Ignored if ``model`` is not time continuous. """ @@ -44,225 +50,53 @@ def __init__( ddt_estimator=None, ): """Store each argument as an attribute.""" - # TODO: verify each argument here. - self.__model = model - self.__lifter = lifter - self.__transformer = transformer - self.__basis = basis - self.__ddter = ddt_estimator - - # Properties -------------------------------------------------------------- - @property - def lifter(self): - """Lifting transformation.""" - return self.__lifter - - @property - def transformer(self): - """Preprocesser.""" - return self.__transformer - - @property - def basis(self): - """Dimensionality reducer.""" - return self.__basis - - @property - def ddt_estimator(self): - """Time derivative estimator.""" - return self.__ddter - - @property - def model(self): - """System model.""" - return self.__model - - @property - def iscontinuous(self): - """``True`` if the model is time continuous (semi-discrete), - ``False`` if the model if fully discrete. 
- """ - return isinstance(self.model, models.ContinuousModel) - - # Printing ---------------------------------------------------------------- - def __str__(self): - """String representation.""" - lines = ["Nonparametric reduced-order model"] - - def indent(text): - return "\n".join(f" {line}" for line in text.rstrip().split("\n")) - - for label, obj in [ - ("Lifting", self.lifter), - ("Transformer", self.transformer), - ("Basis", self.basis), - ("Time derivative estimator", self.ddt_estimator), - ("Model", self.model), - ]: - if obj is not None: - lines.append(f"{label}:") - lines.append(indent(str(obj))) - - return "\n".join(lines) - - def __repr__(self): - """Repr: address + string representatation.""" - return utils.str2repr(self) - - # Mappings between original and latent state spaces ----------------------- - def encode( - self, - states, - lhs=None, - inplace: bool = False, - *, - fit_transformer: bool = False, - fit_basis: bool = False, - ): - """Map high-dimensional data to its low-dimensional representation. - - Parameters - ---------- - states : (n, ...) ndarray - State snapshots in the original state space. - lhs : (n, ...) ndarray or None - Left-hand side regression data. - - - If the model is time continuous, these are the time derivatives - of the state snapshots. - - If the model is fully discrete, these are the "next states" - corresponding to the state snapshots. - inplace : bool - If ``True``, modify the ``states`` and ``lhs`` in-place in the - preprocessing transformation (if applicable). - - Returns - ------- - states_encoded : (r, ...) ndarray - Low-dimensional representation of ``states`` - in the latent reduced state space. - lhs_encoded : (r, ...) ndarray - Low-dimensional representation of ``lhs`` - in the latent reduced state space. - **Only returned** if ``lhs`` is not ``None``. - """ - # Lifting. 
- if self.lifter is not None: - if self.iscontinuous and lhs is not None: - lhs = self.lifter.lift_ddts(lhs) - states = self.lifter.lift(states) + if not modutils.is_nonparametric(model): + raise TypeError("'model' must be a nonparametric model instance") + super().__init__(model, lifter, transformer, basis, ddt_estimator) - # Preprocessing. - if self.transformer is not None: - if fit_transformer: - states = self.transformer.fit_transform( - states, - inplace=inplace, - ) - else: - states = self.tranformer.tranform(states, inplace=inplace) - if lhs is not None: - if self.iscontinuous: - lhs = self.transformer.transform_ddts(lhs, inplace=inplace) - else: - lhs = self.transformer.tranform(lhs, inplace=inplace) - - # Dimensionality reduction. - if self.basis is not None: - if fit_basis: - self.basis.fit(states) - states = self.basis.compress(states) - if lhs is not None: - lhs = self.basis.compress(lhs) - - if lhs is not None: - return states, lhs - return states - - def decode(self, states_encoded): - """Map low-dimensional data to the original state space. - - Parameters - ---------- - states_encoded : (r, ...) ndarray - Low-dimensional state or states - in the latent reduced state space. - - Returns - ------- - states_decoded : (n, ...) ndarray - Version of ``states_compressed`` in the original state space. - """ - # Reverse dimensionality reduction. - states = states_encoded - if self.basis is not None: - states = self.basis.decompress(states) - - # Reverse preprocessing. - if self.transformer is not None: - states = self.transformer.inverse_transform(states, inplace=True) - - # Reverse lifting. - if self.lifter is not None: - states = self.lifter.unlift(states) - - return states - - def project(self, states): - """Project a high-dimensional state vector to the subset of the - high-dimensional space that can be represented by the basis. - - This is done by - - 1. expressing the state in low-dimensional latent coordinates, then - 2. 
reconstructing the high-dimensional state corresponding to those - coordinates. - - In other words, ``project(Q)`` is equivalent to ``decode(encode(Q))``. - - Parameters - ---------- - states : (n, ...) ndarray - Matrix of `n`-dimensional state vectors, or a single state vector. - - Returns - ------- - state_projected : (n, ...) ndarray - Matrix of `n`-dimensional projected state vectors, or a single - projected state vector. - """ - return self.decode(self.encode(states)) - - # Training ---------------------------------------------------------------- + # Training and evaluation ------------------------------------------------- def fit( self, states, lhs=None, inputs=None, - inplace: bool = False, fit_transformer: bool = True, fit_basis: bool = True, ): - """Calibrate the model to the data. + """Calibrate the model to training data. Parameters ---------- - states : (n, k) ndarray - State snapshots in the original state space. - lhs : (n, k) ndarray or None - Left-hand side regression data. + states : list of s (n, k_i) ndarrays + State snapshots in the original state space. Each array + ``states[i]`` is data corresponding to a different trajectory; + each column ``states[i][:, j]`` is one snapshot. + If there is only one trajectory of training data (s = 1), + ``states`` may be an (n, k) ndarray. In this case, it is assumed + that ``lhs`` and ``inputs`` (if given) are arrays, not a sequence + of arrays. + lhs : list of s (n, k_i) ndarrays or None + Left-hand side regression data. Each array ``lhs[i]`` is the data + corresponding to parameter value ``parameters[i]``; each column + ``lhs[i][:, j]`` corresponds to the snapshot ``states[i][:, j]``. - If the model is time continuous, these are the time derivatives of the state snapshots. - If the model is fully discrete, these are the "next states" corresponding to the state snapshots. - inplace : bool - If ``True``, modify the ``states`` and ``lhs`` in-place in the - preprocessing transformation (if applicable). 
+ + If ``None``, these are estimated using :attr:`ddt_estimator` + (time continuous) or extracted from ``states`` (fully discrete). + inputs : list of s (m, k_i) ndarrays or None + Input training data. Each array ``inputs[i]`` is the data + corresponding to parameter value ``parameters[i]``; each column + ``inputs[i][:, j]`` corresponds to the snapshot ``states[:, j]``. + May be a two-dimensional array if :math:`m=1` (scalar input). + Only required if one or more model operators depend on inputs. fit_transformer : bool - If ``True`` (default), calibrate the high-to-low dimensional - mapping using the ``states``. - If ``False``, assume the transformer is already calibrated. + If ``True`` (default), calibrate the preprocessing transformation + using the ``states``. If ``False``, assume the transformer is + already calibrated. fit_basis : bool If ``True``, calibrate the high-to-low dimensional mapping using the ``states``. @@ -272,64 +106,46 @@ def fit( ------- self """ + _BaseROM._check_fit_args(self, lhs=lhs, inputs=inputs) - # Express the states and the LHS in the latent state space. - reduced = self.encode( - states, + # Single trajectory case. + if states[0].ndim == 1: + states = [states] + if lhs is not None: + lhs = [lhs] + if inputs is not None: + inputs = [inputs] + + states, lhs, inputs = _BaseROM.fit( + self, + states=states, lhs=lhs, - inplace=inplace, + inputs=inputs, fit_transformer=fit_transformer, fit_basis=fit_basis, ) - if lhs is None: - states = reduced - else: - states, lhs = reduced - - # If needed, estimate time derivatives. 
- if self.iscontinuous: - if lhs is None: - if self.ddt_estimator is None: - raise ValueError( - "ddt_estimator required for time-continuous model " - "and lhs=None" - ) - estimated = self.ddt_estimator.estimate(states, inputs) - if inputs is None: - states, lhs = estimated - else: - states, lhs, inputs = estimated - elif self.ddt_estimator is not None: - warnings.warn( - "using provided time derivatives, ignoring ddt_estimator", - errors.OpInfWarning, - ) - # Calibrate the model. - kwargs = dict(inputs=inputs) - if self.iscontinuous: - self.model.fit(states, lhs, **kwargs) - else: - if lhs is not None: - kwargs["nextstates"] = lhs - self.model.fit(states, **kwargs) + # Concatentate trajectories. + if inputs is not None: + inputs = np.hstack(inputs) + self.model.fit(np.hstack(states), np.hstack(lhs), inputs) return self - # Evaluation -------------------------------------------------------------- def predict(self, state0, *args, **kwargs): """Evaluate the reduced-order model. - Parameters are the same as the model's ``predict()`` method. + Arguments are the same as the ``predict()`` method of :attr:`model`. Parameters ---------- state0 : (n,) ndarray Initial state, expressed in the original state space. *args : list - Other positional arguments to ``model.predict()``. + Other positional arguments to the ``predict()`` method of + :attr:`model`. **kwargs : dict - Keyword arguments to ``model.predict()``. + Keyword arguments to the ``predict()`` method of :attr:`model`. Returns ------- diff --git a/src/opinf/roms/_parametric.py b/src/opinf/roms/_parametric.py index e5aa6767..2b950a89 100644 --- a/src/opinf/roms/_parametric.py +++ b/src/opinf/roms/_parametric.py @@ -1,4 +1,144 @@ # roms/_parametric.py """Parametric ROM classes.""" -__all__ = [] +__all__ = [ + "ParametricROM", +] + +from ..models import _utils as modutils +from ._base import _BaseROM + + +class ParametricROM(_BaseROM): + r"""Parametric reduced-order model. 
+ + This class connects classes from the various submodules to form a complete + reduced-order modeling workflow. + + High-dimensional data + :math:`\to` transformed / preprocessed data + :math:`\to` compressed data + :math:`\to` low-dimensional model. + + Parameters + ---------- + model : :mod:`opinf.models` object + Parametric system model, an instance of one of the following: + + * :class:`opinf.models.ParametricContinuousModel` + * :class:`opinf.models.ParametricDiscreteModel` + * :class:`opinf.models.InterpContinuousModel` + * :class:`opinf.models.InterpDiscreteModel` + lifter : :mod:`opinf.lift` object or None + Lifting transformation. + transformer : :mod:`opinf.pre` object or None + Preprocesser. + basis : :mod:`opinf.basis` object or None + Dimensionality reducer. + ddt_estimator : :mod:`opinf.ddt` object or None + Time derivative estimator. + Ignored if ``model`` is not time continuous. + """ + + def __init__( + self, + model, + *, + lifter=None, + transformer=None, + basis=None, + ddt_estimator=None, + ): + """Store each argument as an attribute.""" + if not modutils.is_parametric(model): + raise TypeError("'model' must be a parametric model instance") + super().__init__(model, lifter, transformer, basis, ddt_estimator) + + # Training and evaluation ------------------------------------------------- + def fit( + self, + parameters, + states, + lhs=None, + inputs=None, + fit_transformer: bool = True, + fit_basis: bool = True, + ): + """Calibrate the model to training data. + + Parameters + ---------- + parameters : list of s (floats or (p,) ndarrays) + Parameter values for which training data are available. + states : list of s (n, k_i) ndarrays + State snapshots in the original state space. Each array + ``states[i]`` is the data corresponding to parameter value + ``parameters[i]``; each column ``states[i][:, j]`` is one snapshot. + lhs : list of s (n, k_i) ndarrays or None + Left-hand side regression data. 
Each array ``lhs[i]`` is the data + corresponding to parameter value ``parameters[i]``; each column + ``lhs[i][:, j]`` corresponds to the snapshot ``states[i][:, j]``. + + - If the model is time continuous, these are the time derivatives + of the state snapshots. + - If the model is fully discrete, these are the "next states" + corresponding to the state snapshots. + + If ``None``, these are estimated using :attr:`ddt_estimator` + (time continuous) or extracted from ``states`` (fully discrete). + inputs : list of s (m, k_i) ndarrays or None + Input training data. Each array ``inputs[i]`` is the data + corresponding to parameter value ``parameters[i]``; each column + ``inputs[i][:, j]`` corresponds to the snapshot ``states[:, j]``. + May be a two-dimensional array if :math:`m=1` (scalar input). + Only required if one or more model operators depend on inputs. + fit_transformer : bool + If ``True`` (default), calibrate the preprocessing transformation + using the ``states``. If ``False``, assume the transformer is + already calibrated. + fit_basis : bool + If ``True`` (default), calibrate the high-to-low dimensional + mapping using the ``states``. + If ``False``, assume the basis is already calibrated. + + Returns + ------- + self + """ + _BaseROM._check_fit_args(self, lhs=lhs, inputs=inputs) + states, lhs, inputs = _BaseROM.fit( + self, + states=states, + lhs=lhs, + inputs=inputs, + fit_transformer=fit_transformer, + fit_basis=fit_basis, + ) + self.model.fit(parameters, states, lhs, inputs) + return self + + def predict(self, parameter, state0, *args, **kwargs): + r"""Evaluate the reduced-order model. + + Arguments are the same as the ``predict()`` method of :attr:`model`. + + Parameters + ---------- + parameter : (p,) ndarray + Parameter value :math:`\bfmu`. + state0 : (n,) ndarray + Initial state, expressed in the original state space. + *args : list + Other positional arguments to the ``predict()`` method of + :attr:`model`. 
+ **kwargs : dict + Keyword arguments to the ``predict()`` method of :attr:`model`. + + Returns + ------- + states: (n, k) ndarray + Solution to the model, expressed in the original state space. + """ + q0_ = self.encode(state0, fit_transformer=False, fit_basis=False) + states = self.model.predict(parameter, q0_, *args, **kwargs) + return self.decode(states) diff --git a/src/opinf/utils/_hdf5.py b/src/opinf/utils/_hdf5.py index 64000bd7..5de10ff7 100644 --- a/src/opinf/utils/_hdf5.py +++ b/src/opinf/utils/_hdf5.py @@ -4,15 +4,19 @@ __all__ = [ "hdf5_savehandle", "hdf5_loadhandle", + "save_sparray", + "load_sparray", ] import os import h5py import warnings +import scipy.sparse as sparse from .. import errors +# File handle classes ========================================================= class _hdf5_filehandle: """Get a handle to an open HDF5 file to read or write to. @@ -122,3 +126,115 @@ def __exit__(self, exc_type, exc_value, exc_traceback): raise except Exception as ex: raise errors.LoadfileFormatError(ex.args[0]) from ex + + +# Other tools ================================================================= +def save_sparray(group: h5py.Group, arr: sparse.sparray) -> None: + """Save a :mod:`scipy.sparse` matrix efficiently in an HDF5 group. + + This method mimics the behavior of :meth:`scipy.sparse.save_npz()` but + for an open HDF5 file. See :func:`load_sparray()`. + + Parameters + ---------- + arr : scipy.sparse.sparray + Sparse SciPy array, in any sparse format. + group : h5py.Group + HDF5 group to save the sparse array to. + + Examples + -------- + >>> import h5py + >>> import scipy.sparse as sparse + >>> from opinf.utils import save_sparray, load_sparray + + # Create a sparse array. 
+ >>> A = sparse.dok_array((100, 100), dtype=float) + >>> A[0, 5] = 12 + >>> A[4, 1] = 123.456 + >>> A + <100x100 sparse array of type '' + with 2 stored elements in Dictionary Of Keys format> + >>> print(A) + (np.int32(0), np.int32(5)) 12.0 + (np.int32(4), np.int32(1)) 123.456 + + # Save the sparse array to an HDF5 file. + >>> with h5py.File("myfile.h5", "w") as hf: + ... save_sparray(hf.create_group("sparsearray"), A) + + # Load the sparse array from the file. + >>> with h5py.File("myfile.h5", "r") as hf: + ... B = load_sparray(hf["sparsearray"]) + >>> B + <100x100 sparse array of type '' + with 2 stored elements in Dictionary Of Keys format> + >>> print(B) + (np.int32(0), np.int32(5)) 12.0 + (np.int32(4), np.int32(1)) 123.456 + """ + if not sparse.issparse(arr): + raise TypeError("second arg must be a scipy.sparse array") + + # Convert to COO format and save data attributes. + A = arr.tocoo() + group.create_dataset("data", data=A.data) + group.create_dataset("row", data=A.row) + group.create_dataset("col", data=A.col) + group.attrs["shape"] = A.shape + group.attrs["arrtype"] = type(arr).__name__[:3] + + +def load_sparray(group: h5py.Group) -> sparse.sparray: + """Save a :mod:`scipy.sparse` matrix efficiently in an HDF5 group. + + This method mimics the behavior of :meth:`scipy.sparse.load_npz()` but + for an open HDF5 file. See :func:`save_sparray()`. + + Parameters + ---------- + group : h5py.Group + HDF5 group create and save the sparse array to. + + Returns + ------- + arr : scipy.sparse.sparray + Sparse SciPy array, in the sparse format it was in before saving. + + Examples + -------- + >>> import h5py + >>> import scipy.sparse as sparse + >>> from opinf.utils import save_sparray, load_sparray + + # Create a sparse array. 
+ >>> A = sparse.dok_array((100, 100), dtype=float) + >>> A[0, 5] = 12 + >>> A[4, 1] = 123.456 + >>> A + <100x100 sparse array of type '' + with 2 stored elements in Dictionary Of Keys format> + >>> print(A) + (np.int32(0), np.int32(5)) 12.0 + (np.int32(4), np.int32(1)) 123.456 + + # Save the sparse array to an HDF5 file. + >>> with h5py.File("myfile.h5", "w") as hf: + ... save_sparray(hf.create_group("sparsearray"), A) + + # Load the sparse array from the file. + >>> with h5py.File("myfile.h5", "r") as hf: + ... B = load_sparray(hf["sparsearray"]) + >>> B + <100x100 sparse array of type '' + with 2 stored elements in Dictionary Of Keys format> + >>> print(B) + (np.int32(0), np.int32(5)) 12.0 + (np.int32(4), np.int32(1)) 123.456 + """ + A = sparse.coo_matrix( + (group["data"], (group["row"], group["col"])), + shape=group.attrs["shape"], + ) + arrtype = str(group.attrs["arrtype"]) + return getattr(A, f"to{arrtype}")() diff --git a/src/opinf/utils/_timer.py b/src/opinf/utils/_timer.py index 0eab2d4c..32f7752a 100644 --- a/src/opinf/utils/_timer.py +++ b/src/opinf/utils/_timer.py @@ -1,84 +1,149 @@ # utils/_timer.py """Context manager for timing blocks of code.""" +__all__ = [ + "TimedBlock", +] +import os import time import signal import logging -class timed_block: - """Context manager for timing a block of code and reporting the timing. - - **WARNING**: this context manager may only function on Linux/Unix machines - (Windows is not supported). +class TimedBlock: + r"""Context manager for timing a block of code and reporting the timing. Parameters ---------- message : str Message to log / print. - timelimit : float + timelimit : int Number of seconds to wait before raising an error. + Floats are rounded down to an integer. + + Warnings + -------- + This context manager may only function on Linux/Unix machines + (Windows is not currently supported). 
Examples -------- - >>> with timed_block("This is a test"): + >>> import time + >>> import opinf + + Without a time limit. + + >>> with opinf.utils.TimedBlock(): ... # Code to be timed ... time.sleep(2) - ... - This is a test...done in 2.00 s. + Running code block...done in 2.00 s. - >>> with timed_block("Another test", timelimit=3): + With a custom message. + + >>> with opinf.utils.TimedBlock("This is a test"): + ... time.sleep(3) + This is a test...done in 3.00 s. + + With a time limit. + + >>> with opinf.utils.TimedBlock("Another test", timelimit=3): ... # Code to be timed and halted within the specified time limit. ... i = 0 ... while True: ... i += 1 - Another test...TIMED OUT after 3.00 s. + Another test... + TimeoutError: TIMED OUT after 3.00s. + + Set up a logfile to record messages to. + + >>> opinf.utils.TimedBlock.setup_logfile("log.log") + Logging to '/path/to/current/folder/log.log' + + ``TimedBlock()`` will now write to the log file as well as print to screen. + + >>> with opinf.utils.TimedBlock("logfile test"): + ... time.sleep(1) + logfile test...done in 1.00 s. + >>> with open("log.log", "r") as infile: + ... print(infile.read().strip()) + INFO: logfile test...done in 1.001150 s. + + Turn off print statements (but keep logging). + + >>> opinf.utils.TimedBlock.verbose = False + >>> with opinf.utils.TimedBlock("not printed to the screen"): + ... time.sleep(1) + >>> with open("log.log", "r") as infile: + ... print(infile.read().strip()) + INFO: logfile test...done in 1.001150 s. + INFO: not printed to the screen...done in 1.002232 s. + + Capture the time elapsed for later use. + + >>> with opinf.utils.TimedBlock("how long?") as timer: + ... 
time.sleep(2) + >>> timer.elapsed + 2.002866268157959 """ verbose = True - @staticmethod - def _signal_handler(signum, frame): - raise TimeoutError("timed out!") + formatter = logging.Formatter( + fmt="%(asctime)s %(levelname)s:\t%(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + ) + + def __init__( + self, + message: str = "Running code block", + timelimit: int = None, + ): + """Store print/log message.""" + self.__front = "\n" if message.endswith("\n") else "" + self.message = message.rstrip() + self.__back = "\n" if "\r" not in message else "" + if timelimit is not None: + timelimit = max(int(timelimit), 1) + self.__timelimit = timelimit + self.__elapsed = None @property def timelimit(self): """Time limit (in seconds) for the block to complete.""" - return self._timelimit + return self.__timelimit - def __init__(self, message, timelimit=None): - """Store print/log message.""" - self._frontend = "\n" if message.endswith("\n") else "" - self.message = message.rstrip() - self._backend = "\n" if "\r" not in message else "" - self._timelimit = timelimit + @property + def elapsed(self): + """Actual time (in seconds) the block took to complete.""" + return self.__elapsed + + @staticmethod + def _signal_handler(signum, frame): + raise TimeoutError("timed out!") def __enter__(self): """Print the message and record the current time.""" if self.verbose: - print(f"{self.message}...", end=self._frontend, flush=True) + print(f"{self.message}...", end=self.__front, flush=True) self._tic = time.time() - if self._timelimit is not None: + if self.timelimit is not None: signal.signal(signal.SIGALRM, self._signal_handler) - signal.alarm(self._timelimit) + signal.alarm(self.timelimit) return self def __exit__(self, exc_type, exc_value, exc_traceback): """Calculate and report the elapsed time.""" self._toc = time.time() - if self._timelimit is not None: + if self.timelimit is not None: signal.alarm(0) elapsed = self._toc - self._tic if exc_type: # Report an exception if present. 
- if self._timelimit is not None and exc_type is TimeoutError: - print( - f"TIMED OUT after {elapsed:.2f} s.", - flush=True, - end=self._backend, - ) - logging.info(f"TIMED OUT after {elapsed:.2f} s.") - raise + if self.timelimit is not None and exc_type is TimeoutError: + print(flush=True) + report = f"TIMED OUT after {elapsed:.2f} s." + logging.info(f"{self.message}...{report}") + raise TimeoutError(report) print(f"{exc_type.__name__}: {exc_value}") logging.info(self.message) logging.error( @@ -88,11 +153,38 @@ def __exit__(self, exc_type, exc_value, exc_traceback): raise else: # If no exception, report execution time. if self.verbose: - print( - f"done in {elapsed:.2f} s.", - flush=True, - end=self._backend, - ) + print(f"done in {elapsed:.2f} s.", flush=True, end=self.__back) logging.info(f"{self.message}...done in {elapsed:.6f} s.") - self.elapsed = elapsed + self.__elapsed = elapsed return + + @classmethod + def add_logfile(cls, logfile: str = "log.log") -> None: + """Instruct :class:`TimedBlock` to log messages to the ``logfile``. + + Parameters + ---------- + logfile : str + File to log to. + """ + logger = logging.getLogger() + logpath = os.path.abspath(logfile) + + # Check that we aren't already logging to this file. + for handler in logger.handlers: + if ( + isinstance(handler, logging.FileHandler) + and os.path.abspath(handler.baseFilename) == logpath + ): + if cls.verbose: + print(f"Already logging to {logpath}") + return + + # Add a new handler for this file. 
+ newhandler = logging.FileHandler(logpath, "a") + newhandler.setFormatter(cls.formatter) + newhandler.setLevel(logging.INFO) + logger.setLevel(logging.INFO) + logger.addHandler(newhandler) + if cls.verbose: + print(f"Logging to '{os.path.abspath(logfile)}'") diff --git a/tests/basis/test_base.py b/tests/basis/test_base.py index 1e70422a..b3e24d71 100644 --- a/tests/basis/test_base.py +++ b/tests/basis/test_base.py @@ -48,21 +48,16 @@ def test_state_dimensions(self): assert basis.reduced_state_dimension is None def test_str(self): - """Test __str__() and __repr__().""" + """Lightly test __str__() and __repr__().""" basis = self.Dummy() - assert str(basis) == "Dummy" + str(basis) basis.full_state_dimension = 10 - assert str(basis) == "Dummy\n Full state dimension n = 10" + str(basis) basis.name = "varname" basis.reduced_state_dimension = 5 - assert str(basis) == ( - "Dummy for variable 'varname'" - "\n Full state dimension n = 10" - "\n Reduced state dimension r = 5" - ) assert repr(basis).count(str(basis)) == 1 def test_project(self, q=5): diff --git a/tests/basis/test_linear.py b/tests/basis/test_linear.py index cdba410b..7ea6217c 100644 --- a/tests/basis/test_linear.py +++ b/tests/basis/test_linear.py @@ -89,20 +89,11 @@ def test_init(self, n=10, r=3): assert ex.value.args[0] == "expected one- or two-dimensional weights" def test_str(self): - """Test __str__() and __repr__().""" + """Lightly test __str__() and __repr__().""" basis = self.Basis(self._orth(10, 4)) - assert str(basis) == ( - "LinearBasis" - "\n Full state dimension n = 10" - "\n Reduced state dimension r = 4" - ) + str(basis) basis = self.Basis(self._orth(9, 5), name="varname") - assert str(basis) == ( - "LinearBasis for variable 'varname'" - "\n Full state dimension n = 9" - "\n Reduced state dimension r = 5" - ) assert repr(basis).count(str(basis)) == 1 # Dimension reduction ---------------------------------------------------- diff --git a/tests/basis/test_multi.py b/tests/basis/test_multi.py 
index 28e843b5..62b7f51a 100644 --- a/tests/basis/test_multi.py +++ b/tests/basis/test_multi.py @@ -64,7 +64,7 @@ class Dummy3(Dummy2): pass def test_init(self): - """Test BasisMulti.__init__(), bases, dimensions.""" + """Test __init__(), bases, dimensions.""" bases = [self.Dummy(), self.Dummy2(), self.Dummy3(name="third")] basis = self.Basis(bases) assert basis.num_variables == len(bases) @@ -123,7 +123,7 @@ class ExtraDummy: # Magic methods ----------------------------------------------------------- def test_getitem(self): - """Test BasisMulti.__getitem__().""" + """Test __getitem__().""" bases = [self.Dummy(), self.Dummy2(), self.Dummy()] basis = self.Basis(bases) for i, bs in enumerate(bases): @@ -135,7 +135,7 @@ def test_getitem(self): assert basis[name] is bases[i] def test_eq(self): - """Test BasisMulti.__eq__().""" + """Test __eq__().""" bases = [self.Dummy(), self.Dummy2(), self.Dummy3()] basis1 = self.Basis(bases) @@ -152,23 +152,13 @@ def test_eq(self): assert basis1 == basis2 def test_str(self): - """Test BasisMulti.__str__().""" - bases = [self.Dummy(), self.Dummy2()] - basis = self.Basis(bases) - - stringrep = str(basis) - assert stringrep.startswith("2-variable BasisMulti\n") - for bs in bases: - assert str(bs) in stringrep - - # Quick repr() test. 
- rep = repr(basis) - assert stringrep in rep - assert str(hex(id(basis))) in rep + """Lightly test __str__() and __repr__().""" + basis = self.Basis([self.Dummy(), self.Dummy2()]) + assert repr(basis).count(str(basis)) == 1 # Convenience methods ----------------------------------------------------- def test_get_var(self, ns=(4, 5, 6), rs=(2, 3, 4), k=5): - """Test BasisMulti.get_var().""" + """Test get_var().""" basis_A = self.Dummy(name="A") basis_B = self.Dummy(name="B") basis_C = self.Dummy(name="C") @@ -216,7 +206,7 @@ def test_get_var(self, ns=(4, 5, 6), rs=(2, 3, 4), k=5): assert ex.value.args[0].startswith("states.shape[0] must be") def test_split(self, ns=(11, 13), rs=(5, 7), k=5): - """Test BasisMulti.split().""" + """Test split().""" bases = [self.Dummy(), self.Dummy2()] basis = self.Basis(bases, ns) @@ -332,7 +322,7 @@ def test_save(self): os.remove(target) def test_load(self): - """Test BasisMulti.load().""" + """Test load().""" target = "_loadbasismultitest.h5" if os.path.isfile(target): # pragma: no cover os.remove(target) diff --git a/tests/basis/test_pod.py b/tests/basis/test_pod.py index c33e2c38..522b0ec3 100644 --- a/tests/basis/test_pod.py +++ b/tests/basis/test_pod.py @@ -203,45 +203,23 @@ def test_set_dimension(self, n=40, k=11, r=9): assert basis.projection_error(Q, relative=True) < 0.02 def test_str(self, n=30, k=20, r=10): - """Test __str__().""" + """Lightly test __str__() and __repr__().""" basis = self.Basis(num_vectors=r) - strbasis = str(basis) - assert strbasis.count("\n") == 1 - assert strbasis.endswith("SVD solver: scipy.linalg.svd()") + str(basis) Q = np.random.random((n, k)) basis.fit(Q) - strbasis = str(basis) - assert strbasis.count(f"Full state dimension n = {n}") == 1 - assert strbasis.count(f"Reduced state dimension r = {r}") == 1 - assert strbasis.count(f"{k} basis vectors available") == 1 - assert strbasis.count("Cumulative energy:") == 1 - assert strbasis.count("Residual energy:") == 1 - assert strbasis.endswith("SVD 
solver: scipy.linalg.svd()") + str(basis) basis = self.Basis( num_vectors=r, max_vectors=r, svdsolver="randomized", ).fit(Q) - strbasis = str(basis) - assert strbasis.count(f"Full state dimension n = {n}") == 1 - assert strbasis.count(f"Reduced state dimension r = {r}") == 1 - assert strbasis.count(f"{r} basis vectors available") == 1 - assert strbasis.count("Approximate cumulative energy:") == 1 - assert strbasis.count("Approximate residual energy:") == 1 - assert strbasis.endswith("sklearn.utils.extmath.randomized_svd()") + str(basis) basis = self.Basis(num_vectors=r, svdsolver=lambda s: s) - strbasis = str(basis) - assert strbasis.endswith("SVD solver: custom lambda function") - - def mysvdsolver(*args): - pass - - basis = self.Basis(num_vectors=r, svdsolver=mysvdsolver) - strbasis = str(basis) - assert strbasis.endswith("SVD solver: mysvdsolver()") + assert repr(basis).count(str(basis)) == 1 def test_fit(self, n=60, k=20, r=4): """Test fit().""" diff --git a/tests/ddt/test_interpolation.py b/tests/ddt/test_interpolation.py index c8165003..3cb1862b 100644 --- a/tests/ddt/test_interpolation.py +++ b/tests/ddt/test_interpolation.py @@ -11,10 +11,10 @@ _module = opinf.ddt._interpolation -class TestInterpolationDerivativeEstimator: - """Test opinf.ddt.InterpolationDerivativeEstimator.""" +class TestInterpDerivativeEstimator: + """Test opinf.ddt.InterpDerivativeEstimator.""" - Estimator = _module.InterpolationDerivativeEstimator + Estimator = _module.InterpDerivativeEstimator def test_init(self, k=100): """Test __init__() and properties.""" diff --git a/tests/lstsq/test_base.py b/tests/lstsq/test_base.py index 9cbda199..cd5cf566 100644 --- a/tests/lstsq/test_base.py +++ b/tests/lstsq/test_base.py @@ -106,29 +106,14 @@ def test_fit(self, k=30, d=20, r=5): # String representations -------------------------------------------------- def test_str(self, k=20, d=6, r=3): - """Test __str__() and __repr__().""" - # Before fitting. 
+ """Lightly test __str__() and __repr__().""" solver = self.Dummy() - assert str(solver) == "Dummy (not trained)" - - rep = repr(solver) - assert rep.startswith(" 0: + op3 = opinf.operators.AffineInputOperator( + coeffs=p, + entries=[np.random.random((r, m)) for _ in range(p)], + ) + operators.append(op3) + return operators, np.random.random(p) - operators = [DummyParametricOperator(), DummyOpInfOperator()] + def test_check_operator_types_unique(self, p=2): + """Test _check_operator_types_unique().""" + operators = [ + opinf.operators.AffineLinearOperator(p), + opinf.operators.LinearOperator(), + ] with pytest.raises(ValueError) as ex: - self.Dummy._check_operator_types_unique(operators) + self.Model._check_operator_types_unique(operators) assert ex.value.args[0] == ( "duplicate type in list of operators to infer" ) - operators = [DummyParametricOperator(), DummyParametricOperator2()] - self.Dummy._check_operator_types_unique(operators) + operators[1] = opinf.operators.ConstantOperator() + self.Model._check_operator_types_unique(operators) - def test_set_operators(self): - """Test _ParametricModel.operators.fset().""" - operators = [DummyOpInfOperator()] + def test_set_operators(self, p=3): + """Test operators.fset().""" + operators = [opinf.operators.LinearOperator()] with pytest.warns(opinf.errors.OpInfWarning) as wn: - self.Dummy(operators) + self.Model(operators) assert wn[0].message.args[0] == ( "no parametric operators detected, " "consider using a nonparametric model class" ) - operators = [DummyInterpolatedOperator()] - + operators = [opinf.operators.InterpLinearOperator()] with pytest.warns(opinf.errors.OpInfWarning) as wn: - self.Dummy(operators) + self.Model(operators) assert wn[0].message.args[0] == ( "all operators interpolatory, " - "consider using an InterpolatedModel class" + "consider using an InterpModel class" ) - operators = [DummyParametricOperator(), DummyParametricOperator2()] - model = self.Dummy(operators) - assert 
model.parameter_dimension is None + # Several operators provided. + operators = [ + opinf.operators.ConstantOperator(), + opinf.operators.AffineLinearOperator(p), + ] + model = self.Model(operators) + assert len(model.operators) == 2 + for modelop, op in zip(model.operators, operators): + assert modelop is op - def test_get_operator_of_type(self): - """Test _ParametricModel._get_operator_of_type().""" - op1 = DummyParametricOperator() - op2 = DummyParametricOperator2() - model = self.Dummy([op1, op2]) + # Single operator provided + model = self.Model(operators[1]) + assert len(model.operators) == 1 + assert model.operators[0] is operators[1] - op = model._get_operator_of_type(DummyOpInfOperator) - assert op is op1 + def test_get_operator_of_type(self, p=2): + """Test _get_operator_of_type().""" + operators = [ + opinf.operators.ConstantOperator(), + opinf.operators.AffineLinearOperator(p), + ] + model = self.Model(operators) - op = model._get_operator_of_type(DummyOpInfOperator2) - assert op is op2 + op = model._get_operator_of_type(opinf.operators.ConstantOperator) + assert op is operators[0] + + op = model._get_operator_of_type(opinf.operators.LinearOperator) + assert op is operators[1] op = model._get_operator_of_type(float) assert op is None - def test_check_parameter_dimension_consistency(self, s=3): - """Test _check_parameter_dimension_consistency().""" - op = DummyOpInfOperator() - p = self.Dummy._check_parameter_dimension_consistency([op]) - assert p is None + def test_parameter_dimension(self, p=4): + """Test parameter_dimension and _synchronize_parameter_dimensions().""" + op0 = opinf.operators.ConstantOperator() + op1 = opinf.operators.AffineLinearOperator(np.sin, nterms=p) + model = self.Model([op0, op1]) + assert model.parameter_dimension is None - op1 = DummyParametricOperator() - op1._set_parameter_dimension_from_data(np.empty((s, 10))) - p = self.Dummy._check_parameter_dimension_consistency([op1]) - assert p == 10 + op1.parameter_dimension = p + 
model._synchronize_parameter_dimensions() + assert model.parameter_dimension == p - op2 = DummyParametricOperator2() - op2._set_parameter_dimension_from_data(np.empty((s, 20))) + op1 = opinf.operators.AffineLinearOperator(np.sin, nterms=p) + op2 = opinf.operators.AffineInputOperator(p) + assert op1.parameter_dimension is None + model = self.Model([op0, op1, op2]) + assert op1.parameter_dimension == p + assert model.parameter_dimension == p with pytest.raises(opinf.errors.DimensionalityError) as ex: - self.Dummy._check_parameter_dimension_consistency([op1, op2]) + model._synchronize_parameter_dimensions(p + 2) assert ex.value.args[0] == ( - "operators not aligned " - "(parameter_dimension must be the same for all operators)" + f"{p} = each operator.parameter_dimension " + f"!= parameter dimension = {p + 2}" ) - - def test_parameter_dimension(self, s=3, p=4): - """Test _ParametricModel.parameter_dimension.""" - op = DummyParametricOperator() - model = self.Dummy([op, DummyOpInfOperator2()]) - - model._set_parameter_dimension_from_data(np.empty((s, p))) assert model.parameter_dimension == p + assert op1.parameter_dimension == p + assert op2.parameter_dimension == p - model.parameter_dimension = 10 - assert model.parameter_dimension == 10 - - op._set_parameter_dimension_from_data(np.empty((s, 20))) - - with pytest.raises(AttributeError) as ex: - model.parameter_dimension = 15 + op1 = opinf.operators.AffineLinearOperator(p) + op2 = opinf.operators.AffineInputOperator(p + 1) + with pytest.raises(opinf.errors.DimensionalityError) as ex: + self.Model([op0, op1, op2]) assert ex.value.args[0] == ( - "can't set attribute (existing operators have p = 10)" + "operators not aligned " + "(parameter_dimension must be the same for all operators)" ) - model.parameter_dimension = 20 - assert model.parameter_dimension == 20 + def test_process_fit_arguments(self, s=10, p=2, m=4, r=3, k=10): + """Test _process_fit_arguments().""" + params = np.random.random((s, p)) + states = 
[np.ones((r, k)) for _ in range(s)] + lhs = [np.ones((r, k)) for _ in range(s)] + inputs = [np.empty((m, k)) for _ in range(s)] - model = self.Dummy(DummyParametricOperator()) - model._set_parameter_dimension_from_data(np.empty(s)) - assert model.parameter_dimension == 1 + op = self._get_single_operator() + model = self.Model([op]) - with pytest.raises(ValueError) as ex: - model._set_parameter_dimension_from_data(np.empty((s, s, s))) + # Invalid parameters. + with pytest.raises(opinf.errors.DimensionalityError) as ex: + model._process_fit_arguments(np.empty((3, 3, 3)), None, None, None) assert ex.value.args[0] == ( - "parameter values must be scalars or 1D arrays" + "'parameters' must be a sequence of scalars or 1D arrays" ) - def test_process_fit_arguments(self, s=5, p=2, m=4, r=3, k=10): - """Test _ParametricModel._process_fit_arguments().""" - op = DummyParametricOperator() - model = self.Dummy([op]) - params = np.empty((s, p)) - states = [np.empty((r, k)) for _ in range(s)] - lhs = [np.empty((r, k)) for _ in range(s)] - - # Inconsistent number of parameter values. + # Inconsistent number of datasets across arguments. 
with pytest.raises(opinf.errors.DimensionalityError) as ex: model._process_fit_arguments(params, states[1:], None, None) assert ex.value.args[0] == ( f"len(states) = {s-1} != {s} = len(parameters)" ) + with pytest.raises(opinf.errors.DimensionalityError) as ex: + model._process_fit_arguments(params, states, lhs[:-1], None) + assert ex.value.args[0] == ( + f"len({self.Model._ModelClass._LHS_ARGNAME}) = {s-1} " + f"!= {s} = len(parameters)" + ) + model._has_inputs = True + with pytest.raises(opinf.errors.DimensionalityError) as ex: + model._process_fit_arguments(params, states, lhs, inputs[1:]) + assert ex.value.args[0] == ( + f"len(inputs) = {s-1} != {s} = len(parameters)" + ) + inputs1D = np.empty((s - 1, k)) + with pytest.raises(opinf.errors.DimensionalityError) as ex: + model._process_fit_arguments(params, states, lhs, inputs1D) + assert ex.value.args[0] == ( + f"len(inputs) = {s-1} != {s} = len(parameters)" + ) + model._has_inputs = False # Inconsistent state dimension. states[1] = np.empty((r - 1, k)) @@ -254,12 +201,12 @@ def test_process_fit_arguments(self, s=5, p=2, m=4, r=3, k=10): with pytest.raises(opinf.errors.DimensionalityError) as ex: model._process_fit_arguments(params, states, lhs, None) assert ex.value.args[0] == ( - f"mylhs[1].shape[-1] = {k} != {k-1} = states[1].shape[-1]" + f"{model._LHS_ARGNAME}[1].shape[-1] = {k} " + f"!= {k-1} = states[1].shape[-1]" ) # Inconsistent input dimension. 
states[1] = np.empty((r, k)) - inputs = [np.empty((m, k)) for _ in range(s)] inputs[1] = np.empty((m - 1, k)) model._has_inputs = True with pytest.raises(opinf.errors.DimensionalityError) as ex: @@ -267,126 +214,440 @@ def test_process_fit_arguments(self, s=5, p=2, m=4, r=3, k=10): assert ex.value.args[0] == f"inputs[1].shape[0] = {m-1} != {m} = m" # Correct usage, partially intrusive - op2 = DummyParametricOperator2(np.random.random((r, r))) - model = self.Dummy([op, op2]) + op2 = opinf.operators.AffineConstantOperator( + p, + entries=[np.random.random(r) for _ in range(p)], + ) + if isinstance(self, _TestInterpModel): + op2 = opinf.operators.InterpConstantOperator( + training_parameters=params, + entries=[np.zeros(r) for _ in range(s)], + ) + + model = self.Model([op, op2]) model._process_fit_arguments(params, states, lhs, None) model._has_inputs = True inputs[1] = np.empty((m, k)) model._process_fit_arguments(params, states, lhs, inputs) - def test_evaluate(self, r=4): - """Test _ParametricModel.evaluate().""" - op1 = DummyParametricOperator(np.random.random((r, r))) - op2 = DummyParametricOperator2(np.random.random((r, r))) - model = self.Dummy([op1, op2]) - model_evaluated = model.evaluate(None) - assert isinstance(model_evaluated, DummyNonparametricModel) - assert len(model_evaluated.operators) == 2 - assert isinstance(model_evaluated.operators[0], DummyOpInfOperator) - assert isinstance(model_evaluated.operators[1], DummyOpInfOperator2) - assert model_evaluated.state_dimension == r - - def test_rhs(self, r=2): - """Test _ParametricModel.rhs().""" - op1 = DummyParametricOperator(np.random.random((r, r))) - op2 = DummyParametricOperator2(np.random.random((r, r))) - model = self.Dummy([op1, op2]) - assert model.state_dimension == r - assert model.rhs(np.empty(r), None, None) == 2 * _applyvalue - - def test_jacobian(self, r=3): - """Test _ParametricModel.jacobian().""" - op1 = DummyParametricOperator(np.random.random((r, r))) - op2 = 
DummyParametricOperator2(np.random.random((r, r))) - model = self.Dummy([op1, op2]) - assert model.state_dimension == r - assert np.all(model.jacobian(np.empty(r), None, None) == 2 * _jacvalue) - - def test_predict(self, r=4): - """Test _ParametricModel.predict().""" - op1 = DummyParametricOperator(np.random.random((r, r))) - op2 = DummyParametricOperator2(np.random.random((r, r))) - model = self.Dummy([op1, op2]) - assert model.state_dimension == r - assert model.predict(None) == _predictvalue - - -class TestInterpolatedModel: - """Test models.mono._parametric._InterpolatedModel.""" - - class Dummy(_module._InterpolatedModel): - _ModelClass = DummyNonparametricModel2 - - def test_from_models(self, r=4): - """Test _InterpolatedModel._from_models().""" - mu = np.sort(np.random.random(2)) - model1 = DummyNonparametricModel( - [DummyOpInfOperator2(np.random.random(r))] - ) - - # Wrong type of model. - model2 = self.Dummy([opinf.operators.InterpolatedCubicOperator()]) + def test_fit(self, s=10, p=3, m=2, r=4, k=20): + """Test fit() and refit() (but not all intermediate steps).""" + params = np.random.random((s, p)) + states = [np.ones((r, k)) for _ in range(s)] + lhs = [np.ones((r, k)) for _ in range(s)] + inputs = [np.ones((m, k)) for _ in range(s)] + + operators, _ = self._get_parametric_operators(p, r, m) + + # Fully intrusive case. + model = self.Model(operators) + with pytest.warns(opinf.errors.OpInfWarning) as wn: + out = model.fit(params, states, lhs, inputs) + assert len(wn) == 1 + assert wn[0].message.args[0] == ( + "all operators initialized explicitly, nothing to learn" + ) + assert out is model + + with pytest.warns(opinf.errors.OpInfWarning) as wn: + out = model.refit() + assert len(wn) == 1 + assert wn[0].message.args[0] == ( + "all operators initialized explicitly, nothing to learn" + ) + assert out is model + + # One affine operator. 
+ model = self.Model([opinf.operators.AffineLinearOperator(p)]) + out = model.fit(params, states, lhs) + assert out is model + for op in model.operators: + assert op.parameter_dimension == p + assert op.entries is not None + + # Multiple affine operators. + model = self.Model( + [ + opinf.operators.AffineLinearOperator(p), + opinf.operators.AffineInputOperator(p), + ] + ) + out = model.fit(params, states, lhs, inputs) # BUG + assert out is model + for op in model.operators: + assert op.parameter_dimension == p + assert op.entries is not None + + # Mix of affine and interpolatory operators. + model = self.Model( + [ + opinf.operators.AffineLinearOperator(p), + opinf.operators.InterpInputOperator(), + ] + ) + out = model.fit(params, states, lhs, inputs) + assert out is model + for op in model.operators: + assert op.parameter_dimension == p + assert op.entries is not None + + # Mix of nonparametric, affine, and interpolatory operators. + model = self.Model( + [ + opinf.operators.ConstantOperator(), + opinf.operators.AffineLinearOperator(p), + opinf.operators.InterpInputOperator(), + ] + ) + out = model.fit(params, states, lhs, inputs) + assert out is model + assert model.operators[0].entries is not None + for op in model.operators[1:]: + assert op.parameter_dimension == p + assert op.entries is not None + + def test_evaluate(self, p=8, r=4, m=2): + """Test evaluate().""" + operators, testparam = self._get_parametric_operators(p, r, m) + + # Some operators not populated. + model = self.Model([self._get_single_operator()]) + with pytest.raises(AttributeError): + model.evaluate(testparam) + + # Test with and without input operators. 
+ for ops in operators[:-1], operators: + model = self.Model(ops) + model_evaluated = model.evaluate(testparam) + assert isinstance(model_evaluated, self.Model._ModelClass) + assert len(model_evaluated.operators) == len(model.operators) + assert model_evaluated.state_dimension == r + for pop, op in zip(model.operators, model_evaluated.operators): + pop_evaluated = pop.evaluate(testparam) + assert isinstance(op, pop_evaluated.__class__) + assert np.array_equal(op.entries, pop_evaluated.entries) + assert model_evaluated.input_dimension == model.input_dimension + + def test_rhs(self, p=7, r=2, m=4): + """Lightly test rhs().""" + operators, testparam = self._get_parametric_operators(p, r, m) + teststate = np.random.random(r) + args = [testparam, teststate] + if self._iscontinuous: + args.insert(0, np.random.random()) # time argument + + def testinput(t): + return np.random.random(m) + + else: + testinput = np.random.random(m) + + # Some operators not populated. + model = self.Model([self._get_single_operator()]) + with pytest.raises(AttributeError): + model.rhs(*args) + + # Without inputs. + model = self.Model(operators[:-1]) + out = model.rhs(*args) + assert isinstance(out, np.ndarray) + assert out.shape == (r,) + + # With inputs. + args.append(testinput) + model = self.Model(operators) + out = model.rhs(*args) + assert isinstance(out, np.ndarray) + assert out.shape == (r,) + + def test_jacobian(self, p=9, r=3, m=2): + """Lightly test jacobian().""" + operators, testparam = self._get_parametric_operators(p, r, m) + teststate = np.random.random(r) + args = [testparam, teststate] + if self._iscontinuous: + args.insert(0, np.random.random()) # time argument + + def testinput(t): + return np.random.random(m) + + else: + testinput = np.random.random(m) + + # Some operators not populated. + model = self.Model([self._get_single_operator()]) + with pytest.raises(AttributeError): + model.jacobian(*args) + + # Without inputs. 
+ model = self.Model(operators[:-1]) + out = model.jacobian(*args) + assert isinstance(out, np.ndarray) + assert out.shape == (r, r) + + # With inputs. + args.append(testinput) + model = self.Model(operators) + out = model.jacobian(*args) + assert isinstance(out, np.ndarray) + assert out.shape == (r, r) + + +class TestParametricDiscreteModel(_TestParametricModel): + """Test opinf.models.ParametricDiscreteModel.""" + + Model = _module.ParametricDiscreteModel + _iscontinuous = False + + def test_predict(self, p=5, r=3, m=2, niters=10): + """Lightly test InterpDiscreteModel.predict().""" + testparam = np.random.random(p) + state0 = np.random.random(r) + + model = self.Model( + opinf.operators.AffineLinearOperator( + p, + entries=np.zeros((p, r, r)), + ) + ) + out = model.predict(testparam, state0, niters) + assert isinstance(out, np.ndarray) + assert out.shape == (r, niters) + assert np.all(out[:, 0] == state0) + assert np.all(out[:, 1:] == 0) + + inputs = np.random.random((m, niters)) + model = self.Model( + opinf.operators.AffineInputOperator( + p, + entries=np.zeros((p, r, m)), + ) + ) + out = model.predict(testparam, state0, niters, inputs) + assert isinstance(out, np.ndarray) + assert out.shape == (r, niters) + assert np.all(out[:, 0] == state0) + assert np.all(out[:, 1:] == 0) + + +class TestParametricContinuousModel(_TestParametricModel): + """Test opinf.models.ParametricContinuousModel.""" + + Model = _module.ParametricContinuousModel + _iscontinuous = True + + def test_predict(self, p=4, r=4, m=2, k=40): + """Lightly test predict().""" + testparam = np.random.random(p) + state0 = np.random.random(r) + t = np.linspace(0, 1, k) + + model = self.Model( + opinf.operators.AffineLinearOperator( + p, + entries=np.zeros((p, r, r)), + ) + ) + out = model.predict(testparam, state0, t) + assert isinstance(out, np.ndarray) + assert out.shape == (r, k) + for j in range(k): + assert np.allclose(out[:, j], state0) + + def input_func(t): + return np.random.random(m) + + 
model = self.Model( + opinf.operators.AffineInputOperator( + p, + entries=np.zeros((p, r, m)), + ) + ) + out = model.predict(testparam, state0, t, input_func) + assert isinstance(out, np.ndarray) + assert out.shape == (r, k) + for j in range(k): + assert np.allclose(out[:, j], state0) + + +# Interpolatory models ======================================================== +class _TestInterpModel(_TestParametricModel): + """Test models.mono._parametric._InterpModel.""" + + def _get_single_operator(self): + """Get a single uncalibrated operator.""" + return opinf.operators.InterpLinearOperator() + + def _get_parametric_operators(self, s, r, m=0): + """Get calibrated constant + linear + input interpolatory operators.""" + params = np.sort(np.random.random(s)) + op1 = opinf.operators.InterpConstantOperator( + params, + entries=[np.random.random(r) for _ in range(s)], + ) + op2 = opinf.operators.InterpLinearOperator( + params, + entries=[np.random.random((r, r)) for _ in range(s)], + ) + operators = [op1, op2] + if m > 0: + op3 = opinf.operators.InterpInputOperator( + params, + entries=[np.random.random((r, m)) for _ in range(s)], + ) + operators.append(op3) + return operators, (params[-1] + params[0]) / 2 + + def test_set_operators(self): + """Test operators.fset().""" + operators = [opinf.operators.LinearOperator()] + with pytest.raises(TypeError) as ex: - self.Dummy._from_models(mu, [model2, model1]) + self.Model(operators) + assert ex.value.args[0] == "invalid operator of type 'LinearOperator'" + + # Several operators provided. 
+ operators = [ + opinf.operators.InterpConstantOperator(), + opinf.operators.InterpLinearOperator(), + ] + model = self.Model(operators) + assert len(model.operators) == 2 + for modelop, op in zip(model.operators, operators): + assert modelop is op + + # Single operator provided + model = self.Model(operators[1]) + assert len(model.operators) == 1 + assert model.operators[0] is operators[1] + + def test_get_operator_of_type(self): + """Test _get_operator_of_type().""" + operators = [ + opinf.operators.InterpConstantOperator(), + opinf.operators.InterpLinearOperator(), + ] + model = self.Model(operators) + + op = model._get_operator_of_type(opinf.operators.ConstantOperator) + assert op is operators[0] + + op = model._get_operator_of_type(opinf.operators.LinearOperator) + assert op is operators[1] + + op = model._get_operator_of_type(float) + assert op is None + + def test_parameter_dimension(self, p=4): + """Test parameter_dimension and _synchronize_parameter_dimensions().""" + op1 = opinf.operators.InterpLinearOperator() + assert op1.parameter_dimension is None + model = self.Model([op1]) + assert model.parameter_dimension is None + + op1.parameter_dimension = p + model._synchronize_parameter_dimensions() + assert model.parameter_dimension == p + + op1 = opinf.operators.InterpLinearOperator() + op2 = opinf.operators.InterpInputOperator() + op2.parameter_dimension = p + assert op1.parameter_dimension is None + model = self.Model([op1, op2]) + assert op1.parameter_dimension == p + assert op2.parameter_dimension == p + assert model.parameter_dimension == p + + with pytest.raises(opinf.errors.DimensionalityError) as ex: + model._synchronize_parameter_dimensions(p + 2) assert ex.value.args[0] == ( - "expected models of type 'DummyNonparametricModel'" + f"{p} = each operator.parameter_dimension " + f"!= parameter dimension = {p + 2}" ) + assert model.parameter_dimension == p + assert op1.parameter_dimension == p + assert op2.parameter_dimension == p - # Inconsistent 
number of operators. - model2 = DummyNonparametricModel( - [DummyOpInfOperator(), DummyOpInfOperator2()] + op1 = opinf.operators.InterpLinearOperator() + op2 = opinf.operators.InterpInputOperator() + op1.parameter_dimension = p + op2.parameter_dimension = p + 1 + with pytest.raises(opinf.errors.DimensionalityError) as ex: + self.Model([op1, op2]) + assert ex.value.args[0] == ( + "operators not aligned " + "(parameter_dimension must be the same for all operators)" ) + + def test_from_models(self, s=10, r=4, m=2): + """Test _InterpModel._from_models().""" + operators = [ + [ + opinf.operators.ConstantOperator(np.random.random(r)), + opinf.operators.LinearOperator(np.random.random((r, r))), + opinf.operators.InputOperator(np.random.random((r, m))), + ] + for _ in range(s) + ] + mu = np.sort(np.random.random(s)) + + # Inconsistent number of operators. + model1 = self.Model._ModelClass(operators[0]) + model2 = self.Model._ModelClass(operators[1][:-1]) with pytest.raises(ValueError) as ex: - self.Dummy._from_models(mu, [model1, model2]) + self.Model._from_models(mu, [model1, model2]) assert ex.value.args[0] == ( "models not aligned (inconsistent number of operators)" ) # Inconsistent operator types. 
- model2 = DummyNonparametricModel( - [DummyOpInfOperator(np.random.random(r))] - ) + model1 = self.Model._ModelClass(operators[0][1:]) + model2 = self.Model._ModelClass(operators[1][:-1]) with pytest.raises(ValueError) as ex: - self.Dummy._from_models(mu, [model1, model2]) + self.Model._from_models(mu, [model1, model2]) assert ex.value.args[0] == ( "models not aligned (inconsistent operator types)" ) # Correct usage - OpClass = opinf.operators.ConstantOperator - model1 = DummyNonparametricModel([OpClass(np.random.random(r))]) - model2 = DummyNonparametricModel([OpClass(np.random.random(r))]) - model = self.Dummy._from_models(mu, [model1, model2]) - assert isinstance(model, self.Dummy) - assert len(model.operators) == 1 + models = [self.Model._ModelClass(ops) for ops in operators] + model = self.Model._from_models(mu, models) + assert isinstance(model, self.Model) + assert len(model.operators) == 3 assert isinstance( model.operators[0], - opinf.operators.InterpolatedConstantOperator, + opinf.operators.InterpConstantOperator, ) + # Check the interpolation is as expected. 
+ testparam = np.random.random() + IClass = type(model.operators[0].interpolator) + c00 = IClass(mu, [ops[0][0] for ops in operators]) + assert c00(testparam) == model.evaluate(testparam).operators[0][0] + def test_set_interpolator(self, s=10, p=2, r=2): - """Test _InterpolatedModel._set_interpolator().""" + """Test _InterpModel._set_interpolator().""" mu = np.random.random((s, p)) operators = [ - opinf.operators.InterpolatedConstantOperator( + opinf.operators.InterpConstantOperator( training_parameters=mu, entries=np.random.random((s, r)), InterpolatorClass=interp.NearestNDInterpolator, ), - opinf.operators.InterpolatedLinearOperator( + opinf.operators.InterpLinearOperator( training_parameters=mu, entries=np.random.random((s, r, r)), InterpolatorClass=interp.NearestNDInterpolator, ), ] - model = self.Dummy(operators) + model = self.Model(operators) for op in operators: assert isinstance(op.interpolator, interp.NearestNDInterpolator) - model = self.Dummy( + model = self.Model( operators, InterpolatorClass=interp.LinearNDInterpolator, ) @@ -402,16 +663,16 @@ def test_set_interpolator(self, s=10, p=2, r=2): assert isinstance(op.interpolator, interp.NearestNDInterpolator) def test_fit_solver(self, s=10, r=3, k=20): - """Test _InterpolatedModel._fit_solver().""" + """Test _InterpModel._fit_solver().""" operators = [ - opinf.operators.InterpolatedConstantOperator(), - opinf.operators.InterpolatedLinearOperator(), + opinf.operators.InterpConstantOperator(), + opinf.operators.InterpLinearOperator(), ] params = np.sort(np.random.random(s)) states = np.random.random((s, r, k)) lhs = np.random.random((s, r, k)) - model = self.Dummy(operators) + model = self.Model(operators) model._fit_solver(params, states, lhs) assert hasattr(model, "solvers") @@ -422,7 +683,10 @@ def test_fit_solver(self, s=10, r=3, k=20): assert hasattr(model, "_submodels") assert len(model._submodels) == s for mdl in model._submodels: - assert isinstance(mdl, DummyNonparametricModel) + assert isinstance( 
+ mdl, + opinf.models.mono._nonparametric._NonparametricModel, + ) assert len(mdl.operators) == len(operators) for op in mdl.operators: assert op.entries is None @@ -432,16 +696,16 @@ def test_fit_solver(self, s=10, r=3, k=20): assert np.all(model._training_parameters == params) def test_refit(self, s=10, r=3, k=15): - """Test _InterpolatedModel.refit().""" + """Test _InterpModel.refit().""" operators = [ - opinf.operators.InterpolatedConstantOperator(), - opinf.operators.InterpolatedLinearOperator(), + opinf.operators.InterpConstantOperator(), + opinf.operators.InterpLinearOperator(), ] params = np.sort(np.random.random(s)) states = np.random.random((s, r, k)) lhs = np.random.random((s, r, k)) - model = self.Dummy(operators) + model = self.Model(operators) with pytest.raises(RuntimeError) as ex: model.refit() @@ -453,20 +717,23 @@ def test_refit(self, s=10, r=3, k=15): assert hasattr(model, "_submodels") assert len(model._submodels) == s for mdl in model._submodels: - assert isinstance(mdl, DummyNonparametricModel) + assert isinstance( + mdl, + opinf.models.mono._nonparametric._NonparametricModel, + ) assert len(mdl.operators) == len(operators) for op in mdl.operators: assert op.entries is not None def test_save(self, target="_interpmodelsavetest.h5"): - """Test _InterpolatedModel._save().""" + """Test _InterpModel._save().""" if os.path.isfile(target): os.remove(target) - model = self.Dummy( + model = self.Model( [ - opinf.operators.InterpolatedConstantOperator(), - opinf.operators.InterpolatedLinearOperator(), + opinf.operators.InterpConstantOperator(), + opinf.operators.InterpLinearOperator(), ] ) model.save(target) @@ -490,34 +757,34 @@ def test_save(self, target="_interpmodelsavetest.h5"): os.remove(target) def test_load(self, target="_interpmodelloadtest.h5"): - """Test _InterpolatedModel._load().""" + """Test _InterpModel._load().""" if os.path.isfile(target): os.remove(target) operators = [ - opinf.operators.InterpolatedConstantOperator(), - 
opinf.operators.InterpolatedLinearOperator(), + opinf.operators.InterpConstantOperator(), + opinf.operators.InterpLinearOperator(), ] - model = self.Dummy(operators, InterpolatorClass=float) + model = self.Model(operators, InterpolatorClass=float) with pytest.warns(opinf.errors.OpInfWarning): model.save(target) with pytest.raises(opinf.errors.LoadfileFormatError) as ex: - self.Dummy.load(target) + self.Model.load(target) assert ex.value.args[0] == ( f"unknown InterpolatorClass 'float', call load({target}, float)" ) - self.Dummy.load(target, float) + self.Model.load(target, float) - model1 = self.Dummy( + model1 = self.Model( operators, InterpolatorClass=interp.NearestNDInterpolator, ) model1.save(target, overwrite=True) with pytest.warns(opinf.errors.OpInfWarning) as wn: - model2 = self.Dummy.load(target, float) + model2 = self.Model.load(target, float) assert wn[0].message.args[0] == ( "InterpolatorClass=float does not match loadfile " "InterpolatorClass 'NearestNDInterpolator'" @@ -525,10 +792,10 @@ def test_load(self, target="_interpmodelloadtest.h5"): model2.set_interpolator(interp.NearestNDInterpolator) assert model2 == model1 - model2 = self.Dummy.load(target) + model2 = self.Model.load(target) assert model2 == model1 - model1 = self.Dummy( + model1 = self.Model( "AB", InterpolatorClass=interp.NearestNDInterpolator, ) @@ -536,28 +803,28 @@ def test_load(self, target="_interpmodelloadtest.h5"): model1.input_dimension = 4 model1.save(target, overwrite=True) - model2 = self.Dummy.load(target) + model2 = self.Model.load(target) assert model2 == model1 os.remove(target) def test_copy(self, s=10, p=2, r=3): - """Test _InterpolatedModel._copy().""" + """Test _InterpModel._copy().""" - model1 = self.Dummy( + model1 = self.Model( [ - opinf.operators.InterpolatedConstantOperator(), - opinf.operators.InterpolatedLinearOperator(), + opinf.operators.InterpConstantOperator(), + opinf.operators.InterpLinearOperator(), ] ) mu = np.random.random((s, p)) - model2 = self.Dummy( 
+ model2 = self.Model( [ - opinf.operators.InterpolatedConstantOperator( + opinf.operators.InterpConstantOperator( mu, entries=np.random.random((s, r)) ), - opinf.operators.InterpolatedLinearOperator( + opinf.operators.InterpLinearOperator( mu, entries=np.random.random((s, r, r)) ), ], @@ -566,85 +833,38 @@ def test_copy(self, s=10, p=2, r=3): for model in (model1, model2): model_copied = model.copy() - assert isinstance(model_copied, self.Dummy) + assert isinstance(model_copied, self.Model) assert model_copied is not model assert model_copied == model -class TestInterpolatedDiscreteModel: - """Test models.mono._parametric.InterpolatedDiscreteModel.""" +class TestInterpDiscreteModel(_TestInterpModel): + """Test models.mono._parametric.InterpDiscreteModel.""" - ModelClass = _module.InterpolatedDiscreteModel + Model = _module.InterpDiscreteModel + _iscontinuous = False def test_fit(self, s=10, p=2, r=3, m=2, k=20): - """Lightly test InterpolatedDiscreteModel.fit().""" + """Lightly test InterpDiscreteModel.fit().""" params = np.random.random((s, p)) states = np.random.random((s, r, k)) nextstates = np.random.random((s, r, k)) inputs = np.random.random((s, m, k)) - model = self.ModelClass("A") + model = self.Model("A") out = model.fit(params, states) assert out is model - model = self.ModelClass("AB") + model = self.Model("AB") out = model.fit(params, states, nextstates, inputs) assert out is model - def test_rhs(self, s=10, r=3, m=2): - """Lightly test InterpolatedDiscreteModel.rhs().""" - params = np.sort(np.random.random(s)) - state = np.random.random(r) - model = self.ModelClass( - opinf.operators.InterpolatedLinearOperator( - params, np.random.random((s, r, r)) - ) - ) - out = model.rhs(params[2], state) - assert isinstance(out, np.ndarray) - assert out.shape == (r,) - - input_ = np.random.random(m) - model = self.ModelClass( - opinf.operators.InterpolatedInputOperator( - params, np.random.random((s, r, m)) - ) - ) - out = model.rhs(params[-2], state, input_) - 
assert isinstance(out, np.ndarray) - assert out.shape == (r,) - - def test_jacobian(self, s=9, r=2, m=3): - """Lightly test InterpolatedDiscreteModel.jacobian().""" - params = np.sort(np.random.random(s)) - state = np.random.random(r) - model = self.ModelClass( - opinf.operators.InterpolatedLinearOperator( - params, np.random.random((s, r, r)) - ) - ) - out = model.jacobian(params[2], state) - assert isinstance(out, np.ndarray) - assert out.shape == (r, r) - - input_ = np.random.random(m) - model = self.ModelClass( - opinf.operators.InterpolatedInputOperator( - params, np.random.random((s, r, m)) - ) - ) - out = model.jacobian(params[-2], state, input_) - assert isinstance(out, np.ndarray) - assert out.shape == (r, r) - def test_predict(self, s=11, r=4, m=2, niters=10): - """Lightly test InterpolatedDiscreteModel.predict().""" + """Lightly test InterpDiscreteModel.predict().""" params = np.sort(np.random.random(s)) state0 = np.random.random(r) - model = self.ModelClass( - opinf.operators.InterpolatedLinearOperator( - params, np.zeros((s, r, r)) - ) + model = self.Model( + opinf.operators.InterpLinearOperator(params, np.zeros((s, r, r))) ) out = model.predict(params[2], state0, niters) assert isinstance(out, np.ndarray) @@ -653,10 +873,8 @@ def test_predict(self, s=11, r=4, m=2, niters=10): assert np.all(out[:, 1:] == 0) inputs = np.random.random((m, niters)) - model = self.ModelClass( - opinf.operators.InterpolatedInputOperator( - params, np.zeros((s, r, m)) - ) + model = self.Model( + opinf.operators.InterpInputOperator(params, np.zeros((s, r, m))) ) out = model.predict(params[-2], state0, niters, inputs) assert isinstance(out, np.ndarray) @@ -665,85 +883,34 @@ def test_predict(self, s=11, r=4, m=2, niters=10): assert np.all(out[:, 1:] == 0) -class TestInterpolatedContinuousModel: - """Test models.mono._parametric.InterpolatedContinuousModel.""" +class TestInterpContinuousModel(_TestInterpModel): + """Test models.mono._parametric.InterpContinuousModel.""" - 
ModelClass = _module.InterpolatedContinuousModel + Model = _module.InterpContinuousModel + _iscontinuous = True def test_fit(self, s=10, p=2, r=3, m=2, k=20): - """Test InterpolatedContinuousModel.fit().""" + """Test InterpContinuousModel.fit().""" params = np.random.random((s, p)) states = np.random.random((s, r, k)) ddts = np.random.random((s, r, k)) inputs = np.random.random((s, m, k)) - model = self.ModelClass("A") + model = self.Model("A") out = model.fit(params, states, ddts) assert out is model - model = self.ModelClass("AB") + model = self.Model("AB") out = model.fit(params, states, ddts, inputs) assert out is model - def test_rhs(self, s=10, r=3, m=2): - """Lightly test InterpolatedContinuousModel.rhs().""" - params = np.sort(np.random.random(s)) - state = np.random.random(r) - model = self.ModelClass( - opinf.operators.InterpolatedLinearOperator( - params, np.random.random((s, r, r)) - ) - ) - out = model.rhs(None, params[2], state) - assert isinstance(out, np.ndarray) - assert out.shape == (r,) - - def input_func(t): - return np.random.random(m) - - model = self.ModelClass( - opinf.operators.InterpolatedInputOperator( - params, np.random.random((s, r, m)) - ) - ) - out = model.rhs(np.pi, params[-2], state, input_func) - assert isinstance(out, np.ndarray) - assert out.shape == (r,) - - def test_jacobian(self, s=9, r=2, m=3): - """Lightly test InterpolatedContinuousModel.jacobian().""" - params = np.sort(np.random.random(s)) - state = np.random.random(r) - model = self.ModelClass( - opinf.operators.InterpolatedLinearOperator( - params, np.random.random((s, r, r)) - ) - ) - out = model.jacobian(None, params[2], state) - assert isinstance(out, np.ndarray) - assert out.shape == (r, r) - - def input_func(t): - return np.random.random(m) - - model = self.ModelClass( - opinf.operators.InterpolatedInputOperator( - params, np.random.random((s, r, m)) - ) - ) - out = model.jacobian(np.pi, params[-2], state, input_func) - assert isinstance(out, np.ndarray) - assert 
out.shape == (r, r) - def test_predict(self, s=11, r=4, m=2, k=40): - """Lightly test InterpolatedContinuousModel.predict().""" + """Lightly test InterpContinuousModel.predict().""" params = np.sort(np.random.random(s)) state0 = np.random.random(r) t = np.linspace(0, 1, k) - model = self.ModelClass( - opinf.operators.InterpolatedLinearOperator( - params, np.zeros((s, r, r)) - ) + model = self.Model( + opinf.operators.InterpLinearOperator(params, np.zeros((s, r, r))) ) out = model.predict(params[2], state0, t) assert isinstance(out, np.ndarray) @@ -754,10 +921,8 @@ def test_predict(self, s=11, r=4, m=2, k=40): def input_func(t): return np.random.random(m) - model = self.ModelClass( - opinf.operators.InterpolatedInputOperator( - params, np.zeros((s, r, m)) - ) + model = self.Model( + opinf.operators.InterpInputOperator(params, np.zeros((s, r, m))) ) out = model.predict(params[-2], state0, t, input_func) assert isinstance(out, np.ndarray) @@ -766,17 +931,13 @@ def input_func(t): assert np.allclose(out[:, j], state0) -def test_publics(): - """Ensure all public ParametricModel classes can be instantiated.""" - operators = [opinf.operators.InterpolatedConstantOperator()] - for ModelClassName in _module.__all__: - ModelClass = getattr(_module, ModelClassName) - if not isinstance(ModelClass, type) or not issubclass( - ModelClass, _module._ParametricModel - ): # pragma: no cover - continue - model = ModelClass(operators) - assert issubclass( - model.ModelClass, - opinf.models.mono._nonparametric._NonparametricModel, - ) +# Deprecations models ========================================================= +def test_deprecations(): + """Ensure deprecated classes still work.""" + for ModelClass in [ + _module.InterpolatedContinuousModel, + _module.InterpolatedDiscreteModel, + ]: + with pytest.warns(DeprecationWarning) as wn: + ModelClass("A") + assert len(wn) == 1 diff --git a/tests/operators/test_affine.py b/tests/operators/test_affine.py new file mode 100644 index 
00000000..b8ba449c --- /dev/null +++ b/tests/operators/test_affine.py @@ -0,0 +1,416 @@ +# operators/test_affine.py +"""Tests for operators._affine.""" + +import os +import abc +import pytest +import numpy as np +import scipy.linalg as la +import scipy.sparse as sparse + +import opinf + + +_module = opinf.operators +_submodule = _module._affine + + +class _TestAffineOperator: + """Test operators._affine._AffineOperator.""" + + OpClass = NotImplemented + + thetas1 = [ + (lambda mu: mu[0]), + (lambda mu: mu[1]), + (lambda mu: mu[2]), + (lambda mu: mu[1] * mu[2] ** 2), + ] + + @staticmethod + def thetas2(mu): + return np.array([mu[0], mu[1], mu[2], mu[1] * mu[2] ** 2]) + + p = 3 + + @abc.abstractmethod + def entries_shape(self, r, m): + raise NotImplementedError + + def test_init(self, p=6): + """Test __init__() and properties.""" + + # Bad input for coeffs. + bad_thetas = 3.14159265358979 + with pytest.raises(TypeError) as ex: + self.OpClass(bad_thetas) + assert ex.value.args[0] == ( + "argument 'coeffs' must be callable, iterable, or a positive int" + ) + bad_thetas = [1, 2, 3] + with pytest.raises(TypeError) as ex: + self.OpClass(bad_thetas) + assert ex.value.args[0] == ( + "if 'coeffs' is iterable each entry must be callable" + ) + ncoeffs = len(self.thetas1) + + # Bad input for nterms. + with pytest.raises(TypeError) as ex: + self.OpClass(None, -10) + assert ex.value.args[0] == ( + "when provided, argument 'nterms' must be a positive integer" + ) + + # coeffs as an iterable of callables. 
+ with pytest.warns(opinf.errors.OpInfWarning) as wn: + op = self.OpClass(self.thetas1, 100) + assert len(wn) == 1 + assert wn[0].message.args[0] == ( + f"{ncoeffs} = len(coeffs) != nterms = 100, ignoring " + f"argument 'nterms' and setting nterms = {ncoeffs}" + ) + assert op.nterms == ncoeffs + assert op.parameter_dimension is None + assert op.entries is None + mu = np.random.random(ncoeffs) + opmu = op.coeffs(mu) + assert all(opmu[i] == thta(mu) for i, thta in enumerate(self.thetas1)) + + # coeffs as a single callable. + with pytest.raises(ValueError) as ex: + self.OpClass(self.thetas2) + assert ex.value.args[0] == ( + "argument 'nterms' required when argument 'coeffs' is callable" + ) + op = self.OpClass(self.thetas2, nterms=ncoeffs) + assert op.parameter_dimension is None + assert op.entries is None + mu = np.random.random(ncoeffs) + opmu = op.coeffs(mu) + assert all(opmu[i] == thta(mu) for i, thta in enumerate(self.thetas1)) + + # coeffs as an integer. + with pytest.warns(opinf.errors.OpInfWarning) as wn: + op = self.OpClass(p, p + 1) + assert len(wn) == 1 + assert wn[0].message.args[0] == ( + f"{p} = coeffs != nterms = {p + 1}, ignoring " + f"argument 'nterms' and setting nterms = {p}" + ) + assert op.nterms == p + assert op.parameter_dimension == p + assert op.entries is None + mu = np.random.random(p) + assert np.array_equal(op.coeffs(mu), mu) + + assert repr(op).count(f"expansion terms: {p}") == 1 + + def test_entries(self, r=10, m=3): + """Test set_entries() and entries property.""" + ncoeffs = len(self.thetas1) + shape = self.entries_shape(r, m) + arrays = [np.random.random(shape) for _ in range(ncoeffs)] + + op = self.OpClass(self.thetas1) + with pytest.raises(ValueError) as ex: + op.set_entries(np.random.random((2, 3, 2)), fromblock=True) + assert ex.value.args[0] == ( + "entries must be a 1- or 2-dimensional ndarray " + "when fromblock=True" + ) + with pytest.raises(ValueError) as ex: + op.set_entries(arrays[:-1]) + assert ex.value.args[0] == ( + 
f"{ncoeffs} = number of affine expansion terms " + f"!= len(entries) = {ncoeffs - 1}" + ) + + op = self.OpClass(self.thetas2, ncoeffs) + assert op.entries is None + op.set_entries(arrays) + for i in range(ncoeffs): + assert np.all(op.entries[i] == arrays[i]) + + op = self.OpClass(self.thetas1, entries=arrays) + for i in range(ncoeffs): + assert np.all(op.entries[i] == arrays[i]) + + op = self.OpClass( + self.thetas2, + ncoeffs, + entries=np.hstack(arrays), + fromblock=True, + ) + for i in range(ncoeffs): + assert np.all(op.entries[i] == arrays[i]) + + def test_evaluate(self, r=9, m=4): + """Test evaluate().""" + ncoeffs = len(self.thetas1) + shape = self.entries_shape(r, m) + arrays = [np.random.random(shape) for _ in range(ncoeffs)] + op = self.OpClass(self.thetas1, entries=arrays) + + mu = np.random.random(self.p) + op_mu = op.evaluate(mu) + assert isinstance(op_mu, op._OperatorClass) + assert op_mu.entries.shape == arrays[0].shape + Amu = np.sum( + [theta(mu) * A for theta, A in zip(self.thetas1, arrays)], + axis=0, + ) + assert np.allclose(op_mu.entries, Amu) + + # Special case: scalar parameter A(mu) = mu A0. 
+ + def _check(newop): + op_mu = newop.evaluate(0.5) + assert isinstance(op_mu, newop._OperatorClass) + assert op_mu.entries.shape == arrays[0].shape + assert np.allclose(op_mu.entries, arrays[0] / 2) + + _check(self.OpClass(lambda mu: mu, nterms=1, entries=[arrays[0]])) + _check(self.OpClass(1, entries=[arrays[0]])) + + def test_galerkin(self, r=9, m=4): + """Test galerkin().""" + ncoeffs = len(self.thetas1) + shape = self.entries_shape(r, m) + arrays = [np.random.random(shape) for _ in range(ncoeffs)] + op = self.OpClass(self.thetas1, entries=arrays) + + Vr = la.qr(np.random.random((r, r // 2)), mode="economic")[0] + Wr = la.qr(np.random.random((r, r // 2)), mode="economic")[0] + for testbasis in (None, Wr): + newop = op.galerkin(Vr, testbasis) + assert isinstance(newop, self.OpClass) + assert newop.state_dimension == r // 2 + + def test_opinf(self, s=10, k=15, r=11, m=3): + """Test operator_dimension() and datablock().""" + ncoeffs = len(self.thetas1) + shape = self.entries_shape(r, m) + arrays = [np.random.random(shape) for _ in range(ncoeffs)] + op = self.OpClass(self.thetas1, entries=arrays) + + parameters = [np.random.random(self.p) for _ in range(s)] + states = np.random.random((s, r, k)) + inputs = np.random.random((s, m, k)) + + block = op.datablock(parameters, states, inputs) + dim = op.operator_dimension(s, r, m) + assert block.shape[0] == dim + assert block.shape[1] == s * k + + # One-dimensional inputs. + block = op.datablock(parameters, states, np.random.random((s, k))) + dim = op.operator_dimension(s, r, 1) + assert block.shape[0] == dim + assert block.shape[1] == s * k + + # Special case: scalar parameter A(mu) = mu A0. 
+ + def _check(newop): + block = newop.datablock(np.linspace(0, 1, s), states, inputs) + dim = newop.operator_dimension(1, r, m) + assert block.shape[0] == dim + assert block.shape[1] == s * k + + _check(self.OpClass(lambda mu: mu, nterms=1, entries=[arrays[0]])) + _check(self.OpClass(1, entries=[arrays[0]])) + + def test_copysaveload(self, r=10, m=2, target="_affinesavetest.h5"): + """Test copy(), save(), and load().""" + ncoeffs = len(self.thetas1) + shape = self.entries_shape(r, m) + arrays = [np.random.random(shape) for _ in range(ncoeffs)] + + def sparsearray(A): + B = A.copy() + B[B < 0.9] = 0 + B = np.atleast_2d(B) + if B.shape[0] == 1: + B = B.T + return sparse.csr_array(B) + + sparrays = [sparsearray(A) for A in arrays] + + def _checksame(original, copied): + assert copied is not original + assert isinstance(copied, self.OpClass) + if original.entries is None: + assert copied.entries is None + elif isinstance(original.entries[0], np.ndarray): + for i, Ai in enumerate(copied.entries): + assert isinstance(Ai, np.ndarray) + assert np.all(Ai == original.entries[i]) + elif sparse.issparse(original.entries[0]): + for i, Ai in enumerate(copied.entries): + assert sparse.issparse(Ai) + assert (Ai - original.entries[i]).sum() == 0 + if (p := original.parameter_dimension) is not None: + assert copied.parameter_dimension == p + + # Test copy() without entries set. + op = self.OpClass(self.thetas1) + _checksame(op, op.copy()) + + op.parameter_dimension = self.p + _checksame(op, op.copy()) + + # Test copy() with entries set. + op.set_entries(arrays) + _checksame(op, op.copy()) + + op.set_entries(sparrays) + _checksame(op, op.copy()) + + # Test save() and load() together. 
+ + class Dummy(self.OpClass): + pass + + op = Dummy(self.thetas2, nterms=ncoeffs) + op.save(target, overwrite=True) + with pytest.raises(opinf.errors.LoadfileFormatError) as ex: + self.OpClass.load(target, self.thetas2) + assert ex.value.args[0] == ( + f"file '{target}' contains 'Dummy' object, use 'Dummy.load()'" + ) + + def _checkload(original): + if os.path.isfile(target): + os.remove(target) + original.save(target) + copied = self.OpClass.load(target, original.coeffs) + return _checksame(original, copied) + + # Test save()/load() without entries set. + op = self.OpClass(self.thetas1) + _checkload(op) + + op.parameter_dimension = self.p + _checkload(op) + + # Test save()/load() with entries set. + op.set_entries(arrays) + _checkload(op) + + op.set_entries(sparrays) + _checkload(op) + + if os.path.isfile(target): + os.remove(target) + + +# Test public classes ========================================================= +class TestAffineConstantOperator(_TestAffineOperator): + """Test AffineConstantOperator.""" + + OpClass = _module.AffineConstantOperator + + @staticmethod + def entries_shape(r, m): + return (r,) + + +class TestAffineLinearOperator(_TestAffineOperator): + """Test AffineLinearOperator.""" + + OpClass = _module.AffineLinearOperator + + @staticmethod + def entries_shape(r, m): + return (r, r) + + +class TestAffineQuadraticOperator(_TestAffineOperator): + """Test AffineQuadraticOperator.""" + + OpClass = _module.AffineQuadraticOperator + + @staticmethod + def entries_shape(r, m): + return (r, int(r * (r + 1) / 2)) + + +class TestAffineCubicOperator(_TestAffineOperator): + """Test AffineCubicOperator.""" + + OpClass = _module.AffineCubicOperator + + @staticmethod + def entries_shape(r, m): + return (r, int(r * (r + 1) * (r + 2) / 6)) + + +class TestAffineInputOperator(_TestAffineOperator): + """Test AffineInputOperator.""" + + OpClass = _module.AffineInputOperator + + @staticmethod + def entries_shape(r, m): + return (r, m) + + def 
test_input_dimension(self, r=8, m=3, p=3): + """Test input_dimension.""" + Bs = [np.random.random((r, m)) for _ in range(p)] + op = self.OpClass(p) + assert op.input_dimension is None + op.set_entries(Bs) + assert op.input_dimension == m + + +class TestAffineStateInputOperator(_TestAffineOperator): + OpClass = _module.AffineStateInputOperator + + @staticmethod + def entries_shape(r, m): + return (r, r * m) + + def test_input_dimension(self, r=7, m=4, p=5): + """Test input_dimension.""" + Ns = [np.random.random((r, r * m)) for _ in range(p)] + op = self.OpClass(p) + assert op.input_dimension is None + op.set_entries(Ns) + assert op.input_dimension == m + + +def test_publics(): + """Ensure all public AffineOperator classes can be instantiated.""" + for OpClassName in _submodule.__all__: + OpClass = getattr(_module, OpClassName) + if not isinstance(OpClass, type) or not issubclass( + OpClass, _submodule._AffineOperator + ): + continue + op = OpClass(_TestAffineOperator.thetas1) + assert issubclass( + op._OperatorClass, + opinf.operators.OpInfOperator, + ) + + +def test_is_affine(): + """Test operators._affine.is_affine().""" + + class Dummy(_submodule._AffineOperator): + pass + + op = Dummy(_TestAffineOperator.thetas1) + assert _submodule.is_affine(op) + assert not _submodule.is_affine(-2) + + +def test_nonparametric_to_affine(): + """Test operators._affine.nonparametric_to_affine().""" + + with pytest.raises(TypeError) as ex: + _submodule.nonparametric_to_affine(list) + assert ex.value.args[0] == "_AffineOperator for class 'list' not found" + + OpClass = _submodule.nonparametric_to_affine(opinf.operators.CubicOperator) + assert OpClass is opinf.operators.AffineCubicOperator diff --git a/tests/operators/test_base.py b/tests/operators/test_base.py index 4bb0fff4..15056753 100644 --- a/tests/operators/test_base.py +++ b/tests/operators/test_base.py @@ -6,6 +6,7 @@ import pytest import numpy as np import scipy.linalg as la +import scipy.sparse as sparse import 
matplotlib.pyplot as plt import opinf @@ -16,14 +17,15 @@ def test_has_inputs(): """Test operators._base.has_inputs().""" + has_inputs = opinf.operators._base.has_inputs class Dummy(_module.InputMixin): def input_dimension(self): return -1 op = Dummy() - assert opinf.operators.has_inputs(op) - assert not opinf.operators.has_inputs(5) + assert has_inputs(op) + assert not has_inputs(5) # Nonparametric operators ===================================================== @@ -32,77 +34,56 @@ class TestOperatorTemplate: Operator = _module.OperatorTemplate - def test_str(self, r=11, m=3): - """Test __str__() and _str().""" + class Dummy(_module.OperatorTemplate): + """Instantiable version of OperatorTemplate.""" - class Dummy(self.Operator): - """Instantiable version of OperatorTemplate.""" + def __init__(self, state_dimension): + self.__r = state_dimension - def __init__(self, state_dimension=r): - self.__r = state_dimension + @property + def state_dimension(self): + return self.__r - @property - def state_dimension(self): - return self.__r + def apply(self, state, input_=None): + return state - def apply(self, state, input_=None): - return state + def jacobian(self, state, input_=None): + return np.eye(state.size) - class InputDummy(Dummy, _module.InputMixin): - """Instantiable version of OperatorTemplate with inputs.""" + class InputDummy(Dummy, _module.InputMixin): + """Instantiable version of OperatorTemplate with inputs.""" - def __init__(self, state_dimension=r, input_dimension=m): - Dummy.__init__(self, state_dimension) - self.__m = input_dimension + def __init__(self, state_dimension, input_dimension): + TestOperatorTemplate.Dummy.__init__(self, state_dimension) + self.__m = input_dimension - @property - def input_dimension(self): - return self.__m + @property + def input_dimension(self): + return self.__m + + def test_str(self, r=11, m=3): + """Test __str__() and _str().""" - def _test(DummyClass): - dummystr = str(DummyClass()) + def _test(DummyClass, args): + 
dummystr = str(DummyClass(*args)) assert dummystr.startswith(DummyClass.__name__) for line in (lines := dummystr.split("\n")[1:]): assert line.startswith(" ") assert lines[0].endswith(f"{r}") return lines - _test(Dummy) - assert _test(InputDummy)[-1].endswith(f"{m}") + _test(self.Dummy, [r]) + assert _test(self.InputDummy, [r, m])[-1].endswith(f"{m}") - assert Dummy._str("q", "u") == "f(q, u)" + assert self.Dummy._str("q", "u") == "f(q, u)" def test_verify(self, r=10, m=4): """Test verify().""" - class Dummy(self.Operator): - """Instantiable version of OperatorTemplate.""" - - def __init__(self, state_dimension=r): - self.__r = state_dimension - - @property - def state_dimension(self): - return self.__r - - def apply(self, state, input_=None): - return state - - class InputDummy(Dummy, _module.InputMixin): - """Instantiable version of OperatorTemplate with inputs.""" - - def __init__(self, state_dimension=r, input_dimension=m): - Dummy.__init__(self, state_dimension) - self.__m = input_dimension - - @property - def input_dimension(self): - return self.__m - - op = Dummy() + op = self.Dummy(r) op.verify() - op = InputDummy() + op = self.InputDummy(r, m) op.verify() def _single(DummyClass, message): @@ -112,6 +93,16 @@ def _single(DummyClass, message): assert ex.value.args[0] == message # Verification failures for apply(). 
+ BaseDummy = self.Dummy + BaseInputDummy = self.InputDummy + + class Dummy(BaseDummy): + def __init__(self, rr=r): + BaseDummy.__init__(self, rr) + + class InputDummy(BaseInputDummy): + def __init__(self, rr=r, mm=m): + BaseInputDummy.__init__(self, rr, mm) class Dummy1(Dummy): def __init__(self): @@ -125,8 +116,9 @@ class Dummy2(Dummy): def apply(self, state, input_=None): return state[:-1] - class Dummy2I(Dummy2, InputDummy): - pass + class Dummy2I(InputDummy): + def apply(self, state, input_=None): + return state[:-1] class Dummy3(Dummy): def apply(self, state, input_=None): @@ -134,8 +126,11 @@ def apply(self, state, input_=None): return state return state[:, :-1] - class Dummy3I(Dummy3, InputDummy): - pass + class Dummy3I(InputDummy): + def apply(self, state, input_=None): + if state.ndim == 1: + return state + return state[:, :-1] _single( Dummy1, @@ -180,8 +175,9 @@ class Dummy4(Dummy): def jacobian(self, state, input_=None): return state - class Dummy4I(Dummy4, InputDummy): - pass + class Dummy4I(InputDummy): + def jacobian(self, state, input_=None): + return state _single( Dummy4, @@ -444,6 +440,11 @@ def test_validate_entries(self): "operator entries must be NumPy or scipy.sparse array" ) + A = sparse.dok_array((3, 4), dtype=np.float64) + A[1, 2] = 2 + A[0, 1] = -1 + func(A) + A = np.arange(12, dtype=float).reshape((4, 3)).T A[0, 0] = np.nan with pytest.raises(ValueError) as ex: @@ -697,11 +698,87 @@ def test_is_nonparametric(): """Test operators._base.is_nonparametric().""" op = TestOpInfOperator.Dummy() - assert opinf.operators.is_nonparametric(op) - assert not opinf.operators.is_nonparametric(10) + assert _module.is_nonparametric(op) + assert not _module.is_nonparametric(10) # Parametric operators ======================================================== +class TestParametricOperatorTemplate: + """Test operators._base.ParametricOperatorTemplate.""" + + Operator = _module.ParametricOperatorTemplate + + class Dummy(_module.ParametricOperatorTemplate): + 
"""Instantiable version of ParametricOperatorTemplate.""" + + _OperatorClass = TestOperatorTemplate.Dummy + + def __init__(self, state_dim, param_dim): + self.__r = state_dim + self.__p = param_dim + + @property + def state_dimension(self) -> int: + return self.__r + + @property + def parameter_dimension(self) -> int: + return self.__p + + def evaluate(self, parameter): + return self._OperatorClass(self.state_dimension) + + def test_check_parametervalue_dimension(self, r=8, p=3): + """Test _check_parametervalue_dimension().""" + op = self.Dummy(r, None) + + with pytest.raises(RuntimeError) as ex: + op._check_parametervalue_dimension(10) + assert ex.value.args[0] == "parameter_dimension not set" + + op = self.Dummy(r, p) + + val = np.empty(p - 1) + with pytest.raises(ValueError) as ex: + op._check_parametervalue_dimension(val) + assert ex.value.args[0] == f"expected parameter of shape ({p:d},)" + + op._check_parametervalue_dimension(np.empty(p)) + + def test_evals(self, r=10, p=3): + """Test evaluate() and apply().""" + op = self.Dummy(r, p) + assert op.state_dimension == r + assert op.parameter_dimension == p + + param = np.random.random(p) + npop = op.evaluate(param) + assert isinstance(npop, self.Dummy._OperatorClass) + assert npop.state_dimension == r + + q = np.random.random(r) + npop_out = npop.apply(q) + op_out = op.apply(param, q) + assert np.all(op_out == npop_out) + + npop_jac = npop.jacobian(q) + op_jac = op.jacobian(param, q) + assert np.all(op_jac == npop_jac) + + op.verify() + + def test_str(self, r=7, p=2): + """Lightly test __str__() and __repr__().""" + repr(self.Dummy(r, p)) + + class InputDummy(self.Dummy, _module.InputMixin): + @property + def input_dimension(self): + return 10000 + + repr(InputDummy(r, p)) + + class TestParametricOpInfOperator: """Test operators._base.ParametricOpInfOperator.""" @@ -710,59 +787,60 @@ class Dummy(_module.ParametricOpInfOperator): _OperatorClass = TestOpInfOperator.Dummy - def __init__(self): - 
_module.ParametricOpInfOperator.__init__(self) - - def _clear(self): - pass - - def state_dimension(self): - pass - - def shape(self): - pass + def set_entries(self, entries): + _module.ParametricOpInfOperator.set_entries(self, entries) def evaluate(self, parameter): + self._check_parametervalue_dimension(parameter) op = self._OperatorClass() - op.set_entries(np.random.random((2, 2))) + op.set_entries(self.entries[0]) return op - def galerkin(self, *args, **kwargs): - pass + def operator_dimension(self, r, m): + return 4 - def datablock(self, *args, **kwargs): - pass + def datablock(self, states, inputs=None): + K = sum([Q.shape[-1] for Q in states]) + return np.random.random(4, K) - def operator_dimension(self, *args, **kwargs): - pass + def test_parameter_dimension(self): + """Test parameter_dimension and its setter.""" + op = self.Dummy() - def copy(self, *args, **kwargs): - pass + with pytest.raises(ValueError) as ex: + op.parameter_dimension = -40 + assert ex.value.args[0] == ( + "parameter_dimension must be a positive integer" + ) - def save(self, *args, **kwargs): - pass + op.parameter_dimension = 100 - def load(self, *args, **kwargs): - pass + with pytest.raises(AttributeError) as ex: + op.parameter_dimension = 10 + assert ex.value.args[0] == ( + "can't set property 'parameter_dimension' twice" + ) - def test_set_parameter_dimension_from_data(self): - """Test _set_parameter_dimension_from_data().""" + def test_set_parameter_dimension_from_values(self): + """Test _set_parameter_dimension_from_values().""" op = self.Dummy() assert op.parameter_dimension is None # One-dimensional parameters. - op._set_parameter_dimension_from_data(np.arange(10)) + op._set_parameter_dimension_from_values(np.arange(10)) assert op.parameter_dimension == 1 - op._set_parameter_dimension_from_data(np.arange(5).reshape((-1, 1))) + op._set_parameter_dimension_from_values(np.arange(5).reshape((-1, 1))) assert op.parameter_dimension == 1 # n-dimensional parameters. 
n = np.random.randint(2, 20) - op._set_parameter_dimension_from_data(np.random.random((5, n))) + op._set_parameter_dimension_from_values(np.random.random((5, n))) assert op.parameter_dimension == n with pytest.raises(ValueError) as ex: - op._set_parameter_dimension_from_data(np.random.random((2, 2, 2))) + op._set_parameter_dimension_from_values( + np.random.random((2, 2, 2)) + ) assert ex.value.args[0] == ( "parameter values must be scalars or 1D arrays" ) @@ -777,43 +855,26 @@ def test_check_shape_consistency(self): arrays[1] = arrays[1].T self.Dummy._check_shape_consistency(arrays, "array") - def test_check_parametervalue_dimension(self, p=3): - """Test _check_parametervalue_dimension().""" + def test_entries(self, r=8, p=2): + """Test entries, shape, and set_entries().""" op = self.Dummy() - - with pytest.raises(RuntimeError) as ex: - op._check_parametervalue_dimension(10) - assert ex.value.args[0] == "parameter_dimension not set" - - op._set_parameter_dimension_from_data(np.empty((5, p))) - - val = np.empty(p - 1) - with pytest.raises(ValueError) as ex: - op._check_parametervalue_dimension(val) - assert ex.value.args[0] == f"expected parameter of shape ({p:d},)" - - op._check_parametervalue_dimension(np.empty(p)) - - def test_apply(self): - """Test apply().""" - assert self.Dummy().apply(None, None, None) == -1 - - def test_jacobian(self): - """Test jacobian().""" - assert self.Dummy().jacobian(None, None, None) == 0 + assert op.entries is None + assert op.shape is None + op.set_entries([np.random.random((r, r)) for _ in range(r)]) + assert op.shape == (r, r) def test_is_parametric(): """Test operators._base.is_parametric().""" op = TestParametricOpInfOperator.Dummy() - assert opinf.operators.is_parametric(op) - assert not opinf.operators.is_nonparametric(-1) + assert _module.is_parametric(op) + assert not _module.is_parametric(100) def test_is_uncalibrated(): """Test operators._base.is_uncalibrated().""" - func = opinf.operators.is_uncalibrated + func = 
_module.is_uncalibrated class Dummy(opinf.operators.OperatorTemplate): """Instantiable version of OperatorTemplate.""" diff --git a/tests/operators/test_interpolate.py b/tests/operators/test_interpolate.py index b42c443f..a19bb6c9 100644 --- a/tests/operators/test_interpolate.py +++ b/tests/operators/test_interpolate.py @@ -9,11 +9,13 @@ import scipy.interpolate as interp import opinf +import opinf.operators._utils as oputils from . import _get_operator_entries -_module = opinf.operators._interpolate +_module = opinf.operators +_submodule = _module._interpolate _d = 8 _Dblock = np.random.random((4, _d)) @@ -56,16 +58,16 @@ class _DummyInterpolator2(_DummyInterpolator): pass -class TestInterpolatedOperator: - """Test operators._interpolate._InterpolatedOperator.""" +class TestInterpOperator: + """Test operators._interpolate._InterpOperator.""" - class Dummy(_module._InterpolatedOperator): - """Instantiable version of _InterpolatedOperator.""" + class Dummy(_submodule._InterpOperator): + """Instantiable version of _InterpOperator.""" _OperatorClass = _DummyOperator def test_from_operators(self, s=7, p=2, r=5): - """Test _InterpolatedOperator._from_operators().""" + """Test _InterpOperator._from_operators().""" mu = np.random.random((s, p)) with pytest.raises(TypeError) as ex: @@ -96,14 +98,13 @@ def test_from_operators(self, s=7, p=2, r=5): assert isinstance(op.interpolator, _DummyInterpolator) def test_set_training_parameters(self, s=10, p=2, r=4): - """Test _InterpolatedOperator.set_training_parameters(), + """Test _InterpOperator.set_training_parameters(), the training_parameter property, and __len__(). 
""" op = self.Dummy() assert op.training_parameters is None assert op.parameter_dimension is None assert op.state_dimension is None - assert len(op) == 0 mu_bad = np.empty((s, p, p)) with pytest.raises(ValueError) as ex: @@ -115,17 +116,20 @@ def test_set_training_parameters(self, s=10, p=2, r=4): mu = np.empty((s, p)) op.set_training_parameters(mu) assert np.all(op.training_parameters == mu) - assert len(op) == s assert op.state_dimension is None assert op.interpolator is None + assert op.parameter_dimension == p op.set_training_parameters(mu[:, 0]) assert np.all(op.training_parameters == mu[:, 0]) - assert len(op) == s + assert op.parameter_dimension == 1 + + op.set_training_parameters(mu[:, 0].reshape((-1, 1))) + assert np.all(op.training_parameters == mu[:, 0]) + assert op.parameter_dimension == 1 entries = np.random.standard_normal((s, r, r)) op = self.Dummy(mu, entries) - assert len(op) == s with pytest.raises(AttributeError) as ex: op.set_training_parameters(mu) @@ -134,7 +138,7 @@ def test_set_training_parameters(self, s=10, p=2, r=4): ) def test_set_entries(self, s=5, p=3, r=4): - """Test _InterpolatedOperator.set_entries(), _clear(), and the + """Test _InterpOperator.set_entries(), _clear(), and the the entries and shape properties. """ mu = np.random.random((s, p)) @@ -143,7 +147,7 @@ def test_set_entries(self, s=5, p=3, r=4): # Try without training_parameters set. op = self.Dummy() with pytest.raises(AttributeError) as ex: - op.entries = entries + op.set_entries(entries) assert ex.value.args[0] == ( "training_parameters have not been set, " "call set_training_parameters() first" @@ -186,14 +190,14 @@ def test_set_entries(self, s=5, p=3, r=4): ) # Test deletion. 
- del op.entries + op._clear() assert op.entries is None assert op.interpolator is None assert op.shape is None assert op.state_dimension is None def test_set_interpolator(self, s=4, p=2, r=5): - """Test _InterpolatedOperator.set_interpolator() and the + """Test _InterpOperator.set_interpolator() and the interpolator property. """ op = self.Dummy() @@ -211,8 +215,10 @@ def test_set_interpolator(self, s=4, p=2, r=5): op.set_interpolator(_DummyInterpolator2) assert isinstance(op.interpolator, _DummyInterpolator2) + assert isinstance(repr(op), str) + def test_eq(self, s=4, p=3, r=2): - """Test _InterpolatedOperator.__eq__().""" + """Test _InterpOperator.__eq__().""" op1 = self.Dummy() op2 = self.Dummy() assert op1 == op2 @@ -249,7 +255,7 @@ def test_eq(self, s=4, p=3, r=2): assert op1 == op2 def test_evaluate(self, s=3, p=5, r=4): - """Test _InterpolatedOperator.evaluate().""" + """Test _InterpOperator.evaluate().""" mu = np.random.random((s, p)) op = self.Dummy(mu, InterpolatorClass=_DummyInterpolator) @@ -263,8 +269,20 @@ def test_evaluate(self, s=3, p=5, r=4): assert op_evaluated.entries.shape == (r, r) assert np.all(op_evaluated.entries == entries[0]) + # Scalar parameters. 
+ op = self.Dummy( + mu[:, 0], + entries=entries, + InterpolatorClass=_DummyInterpolator, + fromblock=False, + ) + op_evaluated = op.evaluate(np.array([[mu[0, 0]]])) + assert isinstance(op_evaluated, self.Dummy._OperatorClass) + assert op_evaluated.entries.shape == (r, r) + assert np.all(op_evaluated.entries == entries[0]) + def test_galerkin(self, s=5, p=2, n=10, r=4): - """Test _InterpolatedOperator.galerkin().""" + """Test _InterpOperator.galerkin().""" Vr = np.empty((n, r)) mu = np.random.random((s, p)) entries = np.random.random((s, n, n)) @@ -275,20 +293,20 @@ def test_galerkin(self, s=5, p=2, n=10, r=4): assert np.all(op_reduced.entries == entries) def test_datablock(self, s=4, p=2, r=2, k=3): - """Test _InterpolatedOperator.datablock().""" + """Test _InterpOperator.datablock().""" mu = np.random.random((s, p)) states = np.random.random((s, r, k)) op = self.Dummy(mu, InterpolatorClass=_DummyInterpolator) - block = op.datablock(states, states) + block = op.datablock(mu, states, states) assert block.shape == (s * _Dblock.shape[0], s * _Dblock.shape[1]) assert np.all(block == la.block_diag(*[_Dblock for _ in range(s)])) def test_operator_dimension(self, s=3): - """Test _InterpolatedOperator.operator_dimension().""" + """Test _InterpOperator.operator_dimension().""" assert self.Dummy.operator_dimension(s, None, None) == _d * s def test_copy(self, s=4, p=2, r=5): - """Test _InterpolatedOperator.copy().""" + """Test _InterpOperator.copy().""" op1 = self.Dummy() op2 = op1.copy() assert op2 is not op1 @@ -317,7 +335,7 @@ def test_copy(self, s=4, p=2, r=5): assert isinstance(op2.interpolator, _DummyInterpolator2) def test_save(self, s=5, p=2, r=3, target="_interpolatedopsavetest.h5"): - """Lightly test _InterpolatedOperator.save().""" + """Lightly test _InterpOperator.save().""" if os.path.isfile(target): # pragma: no cover os.remove(target) @@ -340,7 +358,7 @@ def test_save(self, s=5, p=2, r=3, target="_interpolatedopsavetest.h5"): os.remove(target) def 
test_load(self, s=15, p=3, r=3, target="_interpolatedoploadtest.h5"): - """Test _InterpolatedOperator.load().""" + """Test _InterpOperator.load().""" if os.path.isfile(target): os.remove(target) @@ -394,24 +412,27 @@ def test_load(self, s=15, p=3, r=3, target="_interpolatedoploadtest.h5"): def test_publics(): - """Ensure all public InterpolatedOperator classes can be instantiated + """Ensure all public InterpOperator classes can be instantiated without arguments. """ - for OpClassName in _module.__all__: + for OpClassName in _submodule.__all__: + if "Interpolated" in OpClassName: + # Skip deprecations + continue OpClass = getattr(_module, OpClassName) if not isinstance(OpClass, type) or not issubclass( - OpClass, _module._InterpolatedOperator + OpClass, _submodule._InterpOperator ): continue op = OpClass() assert issubclass( - op.OperatorClass, + op._OperatorClass, opinf.operators.OpInfOperator, ) def test_1Doperators(r=10, m=3, s=5): - """Test InterpolatedOperator classes with using all 1D interpolators + """Test InterpOperator classes with using all 1D interpolators from scipy.interpolate. 
""" InterpolatorClass = interp.CubicSpline @@ -424,12 +445,12 @@ def test_1Doperators(r=10, m=3, s=5): mu_new = 0.314159 for OpClass, Ohat in [ - (_module.InterpolatedConstantOperator, c), - (_module.InterpolatedLinearOperator, A), - (_module.InterpolatedQuadraticOperator, H), - (_module.InterpolatedCubicOperator, G), - (_module.InterpolatedInputOperator, B), - (_module.InterpolatedStateInputOperator, N), + (_module.InterpConstantOperator, c), + (_module.InterpLinearOperator, A), + (_module.InterpQuadraticOperator, H), + (_module.InterpCubicOperator, G), + (_module.InterpInputOperator, B), + (_module.InterpStateInputOperator, N), ]: entries = [ Ohat + p**2 + np.random.standard_normal(Ohat.shape) / 20 @@ -443,10 +464,10 @@ def test_1Doperators(r=10, m=3, s=5): interp.PchipInterpolator, ]: op = OpClass(params, InterpolatorClass=InterpolatorClass) - if opinf.operators.has_inputs(op): + if oputils.has_inputs(op): assert op.input_dimension is None op.set_entries(entries) - if opinf.operators.has_inputs(op): + if oputils.has_inputs(op): assert op.input_dimension == m op_evaluated = op.evaluate(mu_new) assert isinstance(op_evaluated, OpClass._OperatorClass) @@ -465,21 +486,34 @@ def test_1Doperators(r=10, m=3, s=5): def test_is_interpolated(): """Test operators._interpolate.is_interpolated().""" - op = TestInterpolatedOperator.Dummy() - assert _module.is_interpolated(op) - assert not _module.is_interpolated(-1) + op = TestInterpOperator.Dummy() + assert _submodule.is_interpolated(op) + assert not _submodule.is_interpolated(-1) def test_nonparametric_to_interpolated(): """Test operators._interpolate.nonparametric_to_interpolated().""" with pytest.raises(TypeError) as ex: - _module.nonparametric_to_interpolated(float) - assert ex.value.args[0] == ( - "_InterpolatedOperator for class 'float' not found" - ) + _submodule.nonparametric_to_interpolated(float) + assert ex.value.args[0] == ("_InterpOperator for class 'float' not found") - OpClass = 
_module.nonparametric_to_interpolated( + OpClass = _submodule.nonparametric_to_interpolated( opinf.operators.QuadraticOperator ) - assert OpClass is opinf.operators.InterpolatedQuadraticOperator + assert OpClass is opinf.operators.InterpQuadraticOperator + + +def test_deprecations(): + """Ensure deprecated classes still work.""" + for OpClass in [ + _module.InterpolatedConstantOperator, + _module.InterpolatedLinearOperator, + _module.InterpolatedQuadraticOperator, + _module.InterpolatedCubicOperator, + _module.InterpolatedInputOperator, + _module.InterpolatedStateInputOperator, + ]: + with pytest.warns(DeprecationWarning) as wn: + OpClass() + assert len(wn) == 1 diff --git a/tests/operators/test_nonparametric.py b/tests/operators/test_nonparametric.py index 10aa3253..fa273122 100644 --- a/tests/operators/test_nonparametric.py +++ b/tests/operators/test_nonparametric.py @@ -4,6 +4,7 @@ import pytest import numpy as np import scipy.linalg as la +import scipy.sparse as sparse import opinf @@ -45,8 +46,10 @@ def test_verify(self, shape=None): op = self.Operator() op.verify() - op.set_entries(np.random.random(shape)) + op.entries = np.random.random(shape) op.verify() + del op.entries + assert op.entries is None # No dependence on state or input ============================================= @@ -208,6 +211,15 @@ def test_set_entries(self): assert op.state_dimension == 1 assert op[0, 0] == a + # Sparse matrix. 
+ A = np.random.random((100, 100)) + A[A < 0.95] = 0 + A = sparse.csr_matrix(A) + op.set_entries(A) + assert op.state_dimension == 100 + assert op.shape == (100, 100) + assert sparse.issparse(op.entries) + def test_apply(self, k=20): """Test apply()/__call__().""" op = self.Operator() diff --git a/tests/roms/__init__.py b/tests/roms/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/roms/test_base.py b/tests/roms/test_base.py new file mode 100644 index 00000000..3388bca4 --- /dev/null +++ b/tests/roms/test_base.py @@ -0,0 +1,294 @@ +# roms/test_base.py +"""Tests for roms._base.""" + +import abc +import pytest +import numpy as np + +import opinf +from opinf.models import _utils as modutils + + +class _TestBaseROM(abc.ABC): + """Test opinf.roms._base._BaseROM.""" + + ROM = NotImplemented + ModelClasses = NotImplemented + + @abc.abstractmethod + def _get_models(self): + """Return a list of valid model instantiations.""" + pass + + @staticmethod + def _get(*keys): + args = dict( + lifter=opinf.lift.QuadraticLifter(), + transformer=opinf.pre.ShiftScaleTransformer(centering=True), + transformer2=opinf.pre.ShiftScaleTransformer(scaling="standard"), + basis=opinf.basis.PODBasis(num_vectors=3), + basis2=opinf.basis.PODBasis(num_vectors=4), + ddt_estimator=opinf.ddt.UniformFiniteDifferencer( + np.linspace(0, 1, 100) + ), + ) + args["multi_transformer"] = opinf.pre.TransformerMulti( + [args["transformer"], args["transformer2"]] + ) + args["multi_basis"] = opinf.basis.BasisMulti( + [args["basis"], args["basis2"]] + ) + if len(keys) == 1: + return args[keys[0]] + return [args[k] for k in keys] + + def test_init(self): + """Test __init__() and properties.""" + + for model in self._get_models(): + # Warnings for non-model arguments. 
+ with pytest.warns(opinf.errors.OpInfWarning) as wn: + self.ROM( + model, + lifter=10, + transformer=8, + basis=6, + ddt_estimator=4, + ) + assert len(wn) == 4 + assert wn[0].message.args[0] == ( + "lifter not derived from LifterTemplate, " + "unexpected behavior may occur" + ) + assert wn[1].message.args[0] == ( + "transformer not derived from TransformerTemplate " + "or TransformerMulti, unexpected behavior may occur" + ) + assert wn[2].message.args[0] == ( + "basis not derived from BasisTemplate or BasisMulti, " + "unexpected behavior may occur" + ) + if modutils.is_continuous(model): + assert wn[3].message.args[0] == ( + "ddt_estimator not derived from " + "DerivativeEstimatorTemplate, " + "unexpected behavior may occur" + ) + else: + assert wn[3].message.args[0] == ( + "ddt_estimator ignored for discrete models" + ) + + # Given ddt_estimator with non-continuous model. + if modutils.is_discrete(model): + with pytest.warns(opinf.errors.OpInfWarning) as wn: + rom = self.ROM( + model, + ddt_estimator=self._get("ddt_estimator"), + ) + assert len(wn) == 1 + assert wn[0].message.args[0] == ( + "ddt_estimator ignored for discrete models" + ) + assert rom.ddt_estimator is None + assert not rom._iscontinuous + + # Correct usage. 
+ if modutils.is_continuous(model): + lifter, ddt_estimator = self._get("lifter", "ddt_estimator") + rom = self.ROM( + model, + lifter=lifter, + ddt_estimator=ddt_estimator, + ) + assert rom.lifter is lifter + assert rom.transformer is None + assert rom.basis is None + assert rom.ddt_estimator is ddt_estimator + + transformer, basis = self._get("multi_transformer", "multi_basis") + rom = self.ROM( + model, + transformer=transformer, + basis=basis, + ) + assert rom.lifter is None + assert rom.transformer is transformer + assert rom.basis is basis + assert rom.ddt_estimator is None + + def test_str(self): + """Lightly test __str__() and __repr__().""" + for model in self._get_models(): + a1, a2, a3 = self._get("lifter", "transformer", "basis") + repr(self.ROM(model, lifter=a1, transformer=a2, basis=a3)) + + def test_encode(self, n=40, k=20): + """Test encode().""" + states = np.random.random((n, k)) + lhs = np.random.random((n, k)) + + def _check(arr, shape): + assert isinstance(arr, np.ndarray) + assert arr.shape == shape + + for model in self._get_models(): + # Lifter only. + rom = self.ROM(model, lifter=self._get("lifter")) + _check(rom.encode(states), (2 * n, k)) + out1, out2 = rom.encode(states, lhs) + for out in out1, out2: + _check(out, (2 * n, k)) + _check(rom.encode(states[:, 0]), (2 * n,)) + out1, out2 = rom.encode(states[:, 0], lhs[:, 0]) + for out in out1, out2: + _check(out, (2 * n,)) + + # Transformer only. 
+ rom = self.ROM(model, transformer=self._get("transformer")) + with pytest.raises(AttributeError) as ex: + rom.encode(states) + assert ex.value.args[0] == ( + "transformer not trained (call fit() or fit_transform())" + ) + + out = rom.encode(states, fit_transformer=True, inplace=False) + _check(out, (n, k)) + out1, out2 = rom.encode(states, lhs) + for out in out1, out2: + _check(out, (n, k)) + out = rom.encode(states[:, 0]) + _check(out, (n,)) + out1, out2 = rom.encode(states[:, 0], lhs[:, 0]) + for out in out1, out2: + _check(out, (n,)) + + # Basis only. + rom = self.ROM(model, basis=self._get("basis")) + with pytest.raises(AttributeError) as ex: + rom.encode(states) + assert ex.value.args[0] == "basis entries not initialized" + + out = rom.encode(states, fit_basis=True) + r = rom.basis.reduced_state_dimension + _check(out, (r, k)) + out1, out2 = rom.encode(states, lhs) + for out in out1, out2: + _check(out, (r, k)) + _check(rom.encode(states[:, 0]), (r,)) + out1, out2 = rom.encode(states[:, 0], lhs[:, 0]) + for out in out1, out2: + _check(out, (r,)) + + # Lifter, transformer, and basis. + a1, a2, a3 = self._get("lifter", "transformer", "basis") + rom = self.ROM(model, lifter=a1, transformer=a2, basis=a3) + out = rom.encode(states, fit_transformer=True, fit_basis=True) + r = rom.basis.reduced_state_dimension + _check(out, (r, k)) + out1, out2 = rom.encode(states, lhs) + for out in out1, out2: + _check(out, (r, k)) + _check(rom.encode(states[:, 0]), (r,)) + out1, out2 = rom.encode(states[:, 0], lhs[:, 0]) + for out in out1, out2: + _check(out, (r,)) + + def test_decode(self, n=22, k=18): + """Test decode().""" + + def _check(arr, shape): + assert isinstance(arr, np.ndarray) + assert arr.shape == shape + + for model in self._get_models(): + # Lifter only. + rom = self.ROM(model, lifter=self._get("lifter")) + states = np.random.random((2 * n, k)) + _check(rom.decode(states), (n, k)) + _check(rom.decode(states[:, 0]), (n,)) + + # Transformer only. 
+ rom = self.ROM(model, transformer=self._get("transformer")) + with pytest.raises(AttributeError) as ex: + rom.decode(states) + assert ex.value.args[0] == ( + "transformer not trained (call fit() or fit_transform())" + ) + states = np.random.random((n, k)) + states_ = rom.encode(states, fit_transformer=True) + out = rom.decode(states_) + _check(out, (n, k)) + assert np.allclose(out, states) + out = rom.decode(states_[:, 0]) + _check(out, (n,)) + assert np.allclose(out, states[:, 0]) + + # Basis only. + rom = self.ROM(model, basis=self._get("basis")) + with pytest.raises(AttributeError) as ex: + rom.decode(states) + assert ex.value.args[0] == "basis entries not initialized" + states_ = rom.encode(states, fit_basis=True) + _check(rom.decode(states_), (n, k)) + _check(rom.decode(states_[:, 0]), (n,)) + + # Lifter, transformer, and basis. + a1, a2, a3 = self._get("lifter", "transformer", "basis") + rom = self.ROM(model, lifter=a1, transformer=a2, basis=a3) + states_ = rom.encode(states, fit_transformer=True, fit_basis=True) + out1 = rom.decode(states_) + _check(out1, (n, k)) + out2 = rom.decode(states_[:, 0]) + _check(out2, (n,)) + assert np.allclose(out2, out1[:, 0]) + + # With the locs argument. 
+ a2, a3 = self._get("transformer", "basis") + rom = self.ROM(model, transformer=a2, basis=a3) + states_ = rom.encode(states, fit_transformer=True, fit_basis=True) + out1 = rom.decode(states_) + locs = np.sort(np.random.choice(n, n // 3)) + out2 = rom.decode(states_, locs=locs) + _check(out2, (n // 3, k)) + assert np.allclose(out2, out1[locs]) + + def test_project(self, n=30, k=19): + """Test project().""" + states = np.random.random((n, k)) + + def _check(rom, preserved=False): + rom.encode(states, fit_transformer=True, fit_basis=True) + out = rom.project(states) + assert isinstance(out, np.ndarray) + assert out.shape == (n, k) + if preserved: + assert np.allclose(out, states) + out0 = rom.project(states[:, 0]) + assert isinstance(out0, np.ndarray) + assert out0.shape == (n,) + assert np.allclose(out0, out[:, 0]) + + for model in self._get_models(): + # Lifter only. + _check(self.ROM(model, lifter=self._get("lifter")), preserved=True) + + # Transformer only. + rom = self.ROM(model, transformer=self._get("transformer")) + with pytest.raises(AttributeError) as ex: + rom.project(states) + assert ex.value.args[0] == ( + "transformer not trained (call fit() or fit_transform())" + ) + _check(rom, preserved=True) + + # Basis only. + rom = self.ROM(model, basis=self._get("basis")) + with pytest.raises(AttributeError) as ex: + rom.project(states) + assert ex.value.args[0] == "basis entries not initialized" + _check(rom, preserved=False) + + # Lifter, transformer, and basis. 
+ a1, a2, a3 = self._get("lifter", "transformer", "basis") + _check(self.ROM(model, lifter=a1, transformer=a2, basis=a3)) diff --git a/tests/roms/test_nonparametric.py b/tests/roms/test_nonparametric.py new file mode 100644 index 00000000..bf53c831 --- /dev/null +++ b/tests/roms/test_nonparametric.py @@ -0,0 +1,178 @@ +# roms/test_nonparametric.py +"""Tests for roms._nonparametric.py.""" + +import pytest +import numpy as np + +import opinf + +from .test_base import _TestBaseROM + + +_module = opinf.roms + + +class TestROM(_TestBaseROM): + """Test roms.ROM.""" + + ROM = _module.ROM + ModelClasses = ( + opinf.models.ContinuousModel, + opinf.models.DiscreteModel, + ) + + def _get_models(self): + """Return a list of valid model instantiations.""" + return [ + opinf.models.ContinuousModel("A"), + opinf.models.DiscreteModel("AB"), + ] + + def test_init(self): + """Test __init__() and properties.""" + + # Model error. + with pytest.raises(TypeError) as ex: + self.ROM(10) + assert ex.value.args[0] == ( + "'model' must be a nonparametric model instance" + ) + + # Other arguments. 
+ super().test_init() + + def test_fit(self, n=10, m=3, s=3, k0=50): + """Test fit().""" + states = [np.random.standard_normal((n, k0 + i)) for i in range(s)] + lhs = [np.zeros_like(Q) for Q in states] + inputs = [np.ones((m, Q.shape[-1])) for Q in states] + + rom = self.ROM(model=opinf.models.ContinuousModel("cBH")) + with pytest.raises(ValueError) as ex: + rom.fit(states, inputs) + assert ex.value.args[0] == ( + "argument 'inputs' required (model depends on external inputs)" + ) + + with pytest.raises(ValueError) as ex: + rom.fit(states, inputs=inputs) + assert ex.value.args[0] == ( + "argument 'lhs' required when model is time-continuous" + " and ddt_estimator=None" + ) + + def _fit(prom, withlhs=True, singletrajectory=False): + kwargs = dict(states=states) + if withlhs: + kwargs["lhs"] = lhs + if prom.model._has_inputs: + kwargs["inputs"] = inputs + if singletrajectory: + kwargs = {key: val[0] for key, val in kwargs.items()} + prom.fit(**kwargs) + assert rom.model.operators[0].entries is not None + if rom.basis is not None: + assert (r := rom.basis.reduced_state_dimension) == 3 + assert model.state_dimension == r + + for model in self._get_models(): + # Model only. + rom = self.ROM(model) + _fit(rom) + assert model.state_dimension == n + + # Model and basis. + rom = self.ROM(model, basis=self._get("basis")) + _fit(rom) + assert rom.basis.full_state_dimension == n + oldbasisentries = rom.basis.entries.copy() + + # Make sure fit_basis=False doesn't change the basis. + rom.fit( + states=[Q + 1 for Q in states], + lhs=lhs, + inputs=inputs if model._has_inputs else None, + fit_basis=False, + ) + assert np.array_equal(rom.basis.entries, oldbasisentries) + + # Model and basis and transformer. + trans, base = self._get("transformer", "basis") + rom = self.ROM(model, transformer=trans, basis=base) + _fit(rom) + assert rom.transformer.state_dimension == n + + # Make sure fit_transformer=False doesn't change the basis. 
+ z = np.random.random(n) + ztrans = rom.transformer.transform(z) + rom.fit( + states=[Q + 1 for Q in states], + lhs=lhs, + inputs=inputs if model._has_inputs else None, + fit_transformer=False, + ) + ztrans2 = rom.transformer.transform(z) + assert np.allclose(ztrans2, ztrans) + + # Model and lifter and basis and transformer. + lift, trans, base = self._get("lifter", "transformer", "basis") + rom = self.ROM(model, lifter=lift, transformer=trans, basis=base) + _fit(rom) + assert rom.transformer.state_dimension == 2 * n + assert rom.basis.full_state_dimension == 2 * n + + # Without lhs. + ddter = None + if rom._iscontinuous: + # Without ddt_estimator either. + rom = self.ROM(model) + with pytest.raises(ValueError) as ex: + _fit(rom, withlhs=False) + assert ex.value.args[0] == ( + "argument 'lhs' required when model is time-continuous " + "and ddt_estimator=None" + ) + + ddter = self._get("ddt_estimator") + + lift, trans, base = self._get("lifter", "transformer", "basis") + rom = self.ROM( + model, + lifter=lift, + transformer=trans, + basis=base, + ddt_estimator=ddter, + ) + _fit(rom, withlhs=False) + _fit(rom, singletrajectory=True) + + def test_predict(self, n=50, m=2, k=100): + """Test predict().""" + states = np.random.standard_normal((n, k)) + inputs = np.ones((m, k)) + t = np.linspace(0, 0.1, k) + q0 = states[:, 0] + + cmodel, dmodel = self._get_models() + + # Continuous model. + lift, trans, base, ddter = self._get( + "lifter", "transformer", "basis", "ddt_estimator" + ) + rom = self.ROM( + cmodel, + lifter=lift, + transformer=trans, + basis=base, + ddt_estimator=ddter, + ) + rom.fit(states) + out = rom.predict(q0, t, input_func=None) + assert out.shape == (n, t.size) + + # Discrete model. 
+ lift, trans, base = self._get("lifter", "transformer", "basis") + rom = self.ROM(dmodel, lifter=lift, transformer=trans, basis=base) + rom.fit(states, inputs=inputs) + out = rom.predict(q0, k, inputs=inputs) + assert out.shape == (n, k) diff --git a/tests/roms/test_parametric.py b/tests/roms/test_parametric.py new file mode 100644 index 00000000..1b06f724 --- /dev/null +++ b/tests/roms/test_parametric.py @@ -0,0 +1,194 @@ +# roms/test_parametric.py +"""Tests for roms._parametric.py.""" + +import pytest +import numpy as np + +import opinf + +from .test_base import _TestBaseROM + + +_module = opinf.roms + + +class TestROM(_TestBaseROM): + """Test roms.ROM.""" + + ROM = _module.ParametricROM + ModelClasses = ( + opinf.models.ParametricContinuousModel, + opinf.models.ParametricDiscreteModel, + opinf.models.InterpContinuousModel, + opinf.models.InterpDiscreteModel, + ) + + def _get_models(self): + """Return a list of valid model instantiations.""" + return [ + opinf.models.ParametricContinuousModel( + [ + opinf.operators.ConstantOperator(), + opinf.operators.AffineLinearOperator(3), + ] + ), + opinf.models.ParametricDiscreteModel( + [ + opinf.operators.AffineLinearOperator(3), + opinf.operators.InterpInputOperator(), + ] + ), + opinf.models.InterpContinuousModel("AB"), + opinf.models.InterpDiscreteModel("A"), + ] + + def test_init(self): + """Test __init__() and properties.""" + + # Model error. + with pytest.raises(TypeError) as ex: + self.ROM(10) + assert ex.value.args[0] == ( + "'model' must be a parametric model instance" + ) + + # Other arguments. 
+ super().test_init() + + def test_fit(self, n=20, m=3, s=8, k0=50): + """Test fit().""" + parameters = [np.sort(np.random.random(3)) for _ in range(s)] + states = [np.random.standard_normal((n, k0 + i)) for i in range(s)] + lhs = [np.zeros_like(Q) for Q in states] + inputs = [np.ones((m, Q.shape[-1])) for Q in states] + + rom = self.ROM(model=opinf.models.InterpContinuousModel("AB")) + with pytest.raises(ValueError) as ex: + rom.fit(parameters, states, inputs) + assert ex.value.args[0] == ( + "argument 'inputs' required (model depends on external inputs)" + ) + + with pytest.raises(ValueError) as ex: + rom.fit(parameters, states, inputs=inputs) + assert ex.value.args[0] == ( + "argument 'lhs' required when model is time-continuous" + " and ddt_estimator=None" + ) + + def _fit(prom, withlhs=True): + kwargs = dict(parameters=parameters, states=states) + if withlhs: + kwargs["lhs"] = lhs + if prom.model._has_inputs: + kwargs["inputs"] = inputs + prom.fit(**kwargs) + assert rom.model.operators[0].entries is not None + if rom.basis is not None: + assert (r := rom.basis.reduced_state_dimension) == 3 + assert model.state_dimension == r + + for model in self._get_models(): + # Model only. + rom = self.ROM(model) + _fit(rom) + assert model.state_dimension == n + + # Model and basis. + rom = self.ROM(model, basis=self._get("basis")) + _fit(rom) + assert rom.basis.full_state_dimension == n + oldbasisentries = rom.basis.entries.copy() + + # Make sure fit_basis=False doesn't change the basis. + rom.fit( + parameters=parameters, + states=[Q + 1 for Q in states], + lhs=lhs, + inputs=inputs if model._has_inputs else None, + fit_basis=False, + ) + assert np.array_equal(rom.basis.entries, oldbasisentries) + + # Model and basis and transformer. + trans, base = self._get("transformer", "basis") + rom = self.ROM(model, transformer=trans, basis=base) + _fit(rom) + assert rom.transformer.state_dimension == n + + # Make sure fit_transformer=False doesn't change the basis. 
+ z = np.random.random(n) + ztrans = rom.transformer.transform(z) + rom.fit( + parameters=parameters, + states=[Q + 1 for Q in states], + lhs=lhs, + inputs=inputs if model._has_inputs else None, + fit_transformer=False, + ) + ztrans2 = rom.transformer.transform(z) + assert np.allclose(ztrans2, ztrans) + + # Model and lifter and basis and transformer. + lift, trans, base = self._get("lifter", "transformer", "basis") + rom = self.ROM(model, lifter=lift, transformer=trans, basis=base) + _fit(rom) + assert rom.transformer.state_dimension == 2 * n + assert rom.basis.full_state_dimension == 2 * n + + # Without lhs. + ddter = None + if rom._iscontinuous: + # Without ddt_estimator either. + rom = self.ROM(model) + with pytest.raises(ValueError) as ex: + _fit(rom, withlhs=False) + assert ex.value.args[0] == ( + "argument 'lhs' required when model is time-continuous " + "and ddt_estimator=None" + ) + + ddter = self._get("ddt_estimator") + + lift, trans, base = self._get("lifter", "transformer", "basis") + rom = self.ROM( + model, + lifter=lift, + transformer=trans, + basis=base, + ddt_estimator=ddter, + ) + _fit(rom, withlhs=False) + + def test_predict(self, n=50, m=2, s=10, k0=40): + """Test predict().""" + parameters = [np.sort(np.random.random(3)) for _ in range(s)] + states = [np.random.standard_normal((n, k0 + i)) for i in range(s)] + inputs = [np.ones((m, Q.shape[-1])) for Q in states] + t = np.linspace(0, 0.1, k0) + testparam = np.mean(parameters, axis=0) + testinit = states[0][:, s // 2] + + cmodel, dmodel, _, _ = self._get_models() + + # Continuous model. + lift, trans, base, ddter = self._get( + "lifter", "transformer", "basis", "ddt_estimator" + ) + rom = self.ROM( + cmodel, + lifter=lift, + transformer=trans, + basis=base, + ddt_estimator=ddter, + ) + rom.fit(parameters, states) + out = rom.predict(testparam, testinit, t, input_func=None) + assert out.shape == (n, t.size) + + # Discrete model. 
+ lift, trans, base = self._get("lifter", "transformer", "basis") + rom = self.ROM(dmodel, lifter=lift, transformer=trans, basis=base) + rom.fit(parameters, states, inputs=inputs) + out = rom.predict(testparam, testinit, k0, inputs=inputs[0]) + assert out.shape == (n, k0) diff --git a/tests/utils/test_hdf5.py b/tests/utils/test_hdf5.py index 3a1a5f23..21558b71 100644 --- a/tests/utils/test_hdf5.py +++ b/tests/utils/test_hdf5.py @@ -5,6 +5,8 @@ import h5py import pytest import warnings +import numpy as np +import scipy.sparse as sparse import opinf @@ -184,3 +186,30 @@ class DummyWarning(Warning): with subject(target): pass assert ex.value.args[0] == target + + +def test_saveload_sparray(n=100, target="_saveloadsparraytest.h5"): + """Test save_sparray() and load_sparray().""" + + with pytest.raises(TypeError) as ex: + opinf.utils.save_sparray(None, None) + assert ex.value.args[0] == "second arg must be a scipy.sparse array" + + A = sparse.dok_array((n, n), dtype=float) + for _ in range(n // 10): + i, j = np.random.randint(0, n, size=2) + A[i, j] = np.random.random() + + if os.path.isfile(target): + os.remove(target) + + with h5py.File(target, "w") as hf: + opinf.utils.save_sparray(hf.create_group("sparsearray"), A) + + with h5py.File(target, "r") as hf: + B = opinf.utils.load_sparray(hf["sparsearray"]) + + diff = np.abs((A - B).data) + assert np.allclose(diff, 0) + + os.remove(target) diff --git a/tests/utils/test_timer.py b/tests/utils/test_timer.py new file mode 100644 index 00000000..22b42398 --- /dev/null +++ b/tests/utils/test_timer.py @@ -0,0 +1,111 @@ +# utils/test_timer.py +"""Tests for utils._timer.""" + +import os +import time +import pytest +import platform + +import opinf + + +SYSTEM = platform.system() + + +def skipwindows(func): + + def skip(self, *args, **kwargs): + pass + + return skip if SYSTEM == "Windows" else func + + +class MyException(Exception): + pass + + +class TestTimedBlock: + """Test utils.TimedBlock.""" + + Timer = opinf.utils.TimedBlock 
+ + @skipwindows + def test_standard(self, message="TimedBlock test, no timelimit"): + # No time limit. + with self.Timer() as obj: + pass + assert obj.timelimit is None + assert isinstance(obj.elapsed, float) + + # Time limit that does not expire. + with self.Timer(message, timelimit=100) as obj: + pass + assert obj.message == message + + @skipwindows + def test_timeout(self, message="TimedBlock test with problems"): + # Time limit expires. + with pytest.raises(TimeoutError) as ex: + with self.Timer(message, timelimit=1): + time.sleep(10) + assert ex.value.args[0].startswith("TIMED OUT after ") + + # Exception occurs in the block. + with pytest.raises(MyException) as ex: + with self.Timer(message): + raise MyException("failure in the block") + assert ex.value.args[0] == "failure in the block" + + @skipwindows + def test_log( + self, + message: str = "TimedBlock test with log", + target: str = "_timedblocktest.log", + ): + if os.path.isfile(target): + os.remove(target) + + # Set up a log file. + self.Timer.add_logfile(target) + + # See if we write to the log file. + with self.Timer(message, timelimit=100): + pass + + assert os.path.isfile(target) + with open(target, "r") as infile: + text = infile.read().strip() + assert text.count(message) == 1 + + with pytest.raises(TimeoutError) as ex: + with self.Timer(message, timelimit=1): + time.sleep(10) + assert ex.value.args[0].startswith("TIMED OUT after ") + + with open(target, "r") as infile: + text = infile.read().strip() + assert text.count(message) == 2 + assert text.count("TIMED OUT after ") == 1 + + with pytest.raises(MyException) as ex: + with self.Timer(message): + raise MyException("failure in the block") + assert ex.value.args[0] == "failure in the block" + + # Log to the same file. + newmessage = f"{message} AGAIN!" + self.Timer.add_logfile(target) + + # Log to another file. 
+ newtarget = f"_{target}" + if os.path.isfile(newtarget): + os.remove(newtarget) + + self.Timer.add_logfile(newtarget) + with self.Timer(newmessage): + pass + for tfile in target, newtarget: + with open(tfile, "r") as infile: + text = infile.read().strip() + assert text.count(newmessage) == 1 + os.remove(tfile)