From 7fe3c2c512829e7308e3c96bd310e8e0584ceb11 Mon Sep 17 00:00:00 2001 From: Shane Date: Mon, 12 Aug 2024 18:07:01 -0600 Subject: [PATCH 01/48] start parametric operator docs --- docs/source/api/operators.ipynb | 179 ++++++++++++++++++++++++++------ 1 file changed, 150 insertions(+), 29 deletions(-) diff --git a/docs/source/api/operators.ipynb b/docs/source/api/operators.ipynb index d4f3595b..cf6cdde7 100644 --- a/docs/source/api/operators.ipynb +++ b/docs/source/api/operators.ipynb @@ -393,7 +393,7 @@ "Nonparametric OpInf operator classes have two static methods that facilitate constructing the operator regression problem.\n", "\n", "- [`operator_dimension()`](OpInfOperator.operator_dimension): given the state dimension $r$ and the input dimension $r$, return the data dimension $d_\\ell$.\n", - "- [`datablock()`](OpInfOperator.datablock): given the state-input data pairs $\\{(\\qhat_j,\\u_j)\\}_{j=0}^{k-1}$, forms the matrix\n", + "- [`datablock()`](OpInfOperator.datablock): given the state-input data pairs $\\{(\\qhat_j,\\u_j)\\}_{j=0}^{k-1}$, form the matrix\n", "\n", "$$\n", "\\begin{aligned}\n", @@ -480,7 +480,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - ":::{admonition} Operators With Entries Are Not Recalibrated\n", + ":::{admonition} Operators with Entries are _Not_ Recalibrated\n", ":class: important\n", "\n", "Only operators whose entries are _not initialized_ (set to `None`) when a model is constructed are learned with Operator Inference when [`fit()`](opinf.models.ContinuousModel.fit) is called.\n", @@ -515,7 +515,7 @@ "\n", "$$\n", "\\begin{aligned}\n", - " &\\min_{\\Ahat,}\\sum_{j=0}^{k-1}\\left\\|\n", + " &\\min_{\\Ahat}\\sum_{j=0}^{k-1}\\left\\|\n", " \\Ahat\\qhat_j - (\\dot{\\qhat}_j - \\Bhat\\u_j)\n", " \\right\\|_2^2.\n", "\\end{aligned}\n", @@ -761,7 +761,7 @@ "\n", "$$\n", "\\begin{aligned}\n", - " \\frac{\\partial}{\\partial \\hat{\\q}_j}\\left[\\hat{q}_i\\hat{s}_i\\right]\n", + " \\frac{\\partial}{\\partial 
\\hat{q}_j}\\left[\\hat{q}_i\\hat{s}_i\\right]\n", " = \\begin{cases}\n", " \\hat{s}_i & \\textrm{if}~i = j,\n", " \\\\\n", @@ -1000,15 +1000,141 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Operators are called _parametric_ if the operator entries depend on an independent parameter vector\n", - "$\\bfmu\\in\\RR^{p}$, i.e., $\\Ophat_{\\ell}(\\qhat,\\u;\\bfmu) = \\Ohat_{\\ell}(\\bfmu)\\d_{\\ell}(\\qhat,\\u)$ where now $\\Ohat:\\RR^{p}\\to\\RR^{r\\times d}$.\n", - "\n", + "An operator is called _parametric_ if it depends on an independent parameter vector\n", + "$\\bfmu\\in\\RR^{p}$, i.e., $\\Ophat = \\Ophat_{\\ell}(\\qhat,\\u;\\bfmu)$\n", + "When the parameter vector is fixed, a parametric operator becomes nonparametric.\n", + "In particular, a parametric operator's [`evaluate()`](ParametricOperatorTemplate.evaluate) method accepts a parameter vector $\\bfmu$ and returns an instance of a nonparametric operator whose type is given by the parametric operator's [`OperatorClass`](ParametricOperatorTemplate.OperatorClass) property." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Parametric OpInf operators have the form\n", + "$\\Ophat_{\\ell}(\\qhat,\\u;\\bfmu) = \\Ohat_{\\ell}(\\bfmu)\\d_{\\ell}(\\qhat,\\u)$ defined by the matrix-valued function $\\Ohat_{\\ell}:\\RR^{p}\\to\\RR^{r\\times d_\\ell}$ and (as in the nonparametric case) the data vector $\\d_{\\ell}:\\RR^{r}\\times\\RR^{m}\\to\\RR^{d_\\ell}$.\n", + "This module provides two options for the parameterization of $\\Ohat_{\\ell}(\\bfmu)$: [affine expansion](sec-operators-affine) and [elementwise interpolation](sec-operators-interpolated).\n", + "In each case, Operator Inference begins with $s$ training parameter values $\\bfmu_1,\\ldots,\\bfmu_s$ and corresponding state, input, and derivative data $\\{(\\qhat_{i,j},\\u_{i,j},\\dot{\\qhat}_{i,j})\\}_{j=0}^{k_{i}-1}$ for each training parameter value $\\bfmu_i$.\n", + "A regression of the form $\\Z \\approx \\Ohat\\D\\trp$ is formed as in the nonparametric case, with the structure of the matrices $\\Ohat$ and $\\D$ depending on the choice of parameterization for each $\\Ohat_{\\ell}(\\bfmu)$." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ ":::{admonition} Example\n", ":class: tip\n", "Let $\\bfmu = [~\\mu_{1}~~\\mu_{2}~]\\trp$.\n", - "The linear operator\n", - "$\\Ophat_1(\\qhat,\\u;\\bfmu) = (\\mu_{1}\\Ahat_{1} + \\mu_{2}\\Ahat_{2})\\qhat$\n", - "is a parametric operator with parameter-dependent entries $\\Ohat_1(\\bfmu) = \\mu_{1}\\Ahat_{1} + \\mu_{2}\\Ahat_{2}$.\n", + "The operator\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\Ophat_1(\\qhat,\\u;\\bfmu) = (\\mu_{1}\\Ahat_{1} + \\mu_{2}^2\\Ahat_{2})\\qhat\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "is a parametric OpInf operator because it can be written as $\\Ophat_1(\\qhat,\\u;\\bfmu) = \\Ohat_1(\\bfmu)\\d_1(\\qhat,\\u)$ with $\\Ohat_1(\\bfmu) = \\mu_{1}\\Ahat_{1} + \\mu_{2}\\Ahat_{2}$ and $\\d_1(\\qhat,\\u) = \\qhat$.\n", + "\n", + "This operator can be represented with an {class}`AffineLinearOperator`.\n", + "For a given parameter value, the [`evaluate()`](AffineLinearOperator.evaluate) method returns a {class}`LinearOperator` instance.\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "(sec-operators-affine)=\n", + "### Affine Operators" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Affine parametric OpInf operators $\\Ophat_{\\ell}(\\qhat,\\u;\\bfmu) = \\Ohat_{\\ell}(\\bfmu)\\d_{\\ell}(\\qhat,\\u)$ parameterize the operator matrix $\\Ohat_{\\ell}(\\bfmu)$ as a sum of constant matrices with parameter-dependent scalar coefficients,\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\Ohat_{\\ell}(\\bfmu)\n", + " &= \\sum_{a=1}^{s_{\\ell}}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,\\Ohat_{\\ell}^{(a)},\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "where each $\\theta_{\\ell}^{(a)}:\\RR^{p}\\to\\RR$ is a scalar-valued function and each $\\Ohat_{\\ell}^{(a)}\\in\\RR^{r\\times d_\\ell}$ is constant.\n", + "Affine expansions are grouped such that the coefficient functions 
$\\theta_{\\ell}^{(1)},\\ldots,\\theta_{\\ell}^{(s_{\\ell})}$ are linearly independent." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Affine parametric operators arise in Operator Inference settings because linear projection preserves affine structure.\n", + "\n", + ":::{dropdown} Preservation of Affine Structure\n", + "\n", + "Consider a full-order affine parametric OpInf operator\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\Op_{\\ell}(\\q,\\u;\\bfmu)\n", + " = \\sum_{a=1}^{s_{\\ell}}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,\\Op_{\\ell}^{(a)}\\!(\\q, \\u).\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "Given a trial basis $\\Vr\\in\\RR^{n\\times r}$ and a test basis $\\Wr\\in\\RR^{n\\times r}$, the [intrusive projection](sec-operators-projection) of $\\Op_{\\ell}$ is the operator\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\Ophat_{\\ell}(\\qhat, \\u; \\bfmu)\n", + " &= (\\Wr\\trp\\Vr)^{-1}\\Wr\\trp\\Op_{\\ell}(\\Vr\\qhat, \\u; \\bfmu)\n", + " \\\\\n", + " &= (\\Wr\\trp\\Vr)^{-1}\\Wr\\trp \\sum_{a=1}^{s_{\\ell}}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,\\Op_{\\ell}^{(a)}\\!(\\V\\qhat, \\u)\n", + " \\\\\n", + " &= \\sum_{a=1}^{s_{\\ell}}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,(\\Wr\\trp\\Vr)^{-1}\\Wr\\trp\\Op_{\\ell}^{(a)}\\!(\\V\\qhat, \\u)\n", + " = \\sum_{a=1}^{s_{\\ell}}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,\\Ophat_{\\ell}^{(a)}\\!(\\qhat, \\u),\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "where $\\Ophat_{\\ell}^{(a)}\\!(\\qhat, \\u) = (\\Wr\\trp\\Vr)^{-1}\\Wr\\trp\\Op_{\\ell}^{(a)}\\!(\\V\\qhat, \\u)$ is the intrusive projection of $\\Op_{\\ell}^{(a)}$.\n", + "That is, the intrusive projection of an affine expansion is an affine expansion of intrusive projections, and both expansions feature the same coefficient functions.\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Available affine parametric operators are listed below.\n", + "\n", + "```{eval-rst}\n", + ".. 
currentmodule:: opinf.operators\n", + "\n", + ".. autosummary::\n", + " :toctree: _autosummaries\n", + " :nosignatures:\n", + "\n", + " AffineConstantOperator\n", + " AffineLinearOperator\n", + " AffineQuadraticOperator\n", + " AffineCubicOperator\n", + " AffineInputOperator\n", + " AffineStateInputOperator\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Affine parametric operators are instantiated with a list of the coefficient functions $\\theta_{\\ell}^{(1)},\\ldots,\\theta_{\\ell}^{(s_{\\ell})}$ and with or without the operator matrices $\\Ohat_{\\ell}^{(1)},\\ldots,\\Ohat_{\\ell}^{(s_{\\ell})}$." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{admonition} TODO\n", + "Demonstration.\n", ":::" ] }, @@ -1024,17 +1150,25 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "These operators handle the parametric dependence on $\\bfmu$ by using elementwise interpolation:\n", + "Interpolated operators define the parametric dependence on $\\bfmu$ through elementwise interpolation.\n", + "That is,\n", "\n", "$$\n", "\\begin{aligned}\n", - " \\Ohat_{\\ell}(\\bfmu)\n", - " = \\text{interpolate}(\n", - " (\\bfmu_{1},\\Ohat_{\\ell}^{(1)}),\\ldots,(\\bfmu_{s},\\Ohat_{\\ell}^{(s)}); \\bfmu),\n", + " \\Ophat_{\\ell}(\\qhat,\\u;\\bfmu)\n", + " = \\Ohat_{\\ell}(\\bfmu)\\d_{\\ell}(\\qhat,\\u),\n", "\\end{aligned}\n", "$$\n", "\n", - "where $\\bfmu_1,\\ldots,\\bfmu_s$ are training parameter values and $\\Ohat_{\\ell}^{(i)} = \\Ohat_{\\ell}(\\bfmu_i)$ for $i=1,\\ldots,s$.\n", + "where $\\Ohat_{\\ell}(\\bmfu)$ is determined by interpolating $s$ matrices $\\Ohat_{\\ell}^{(1)},\\ldots,\\Ohat_{\\ell}^{(s)}$.\n", + "In the context of Operator Inference, $s$ is the number of training parameter values." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Available interpolated operators are listed below.\n", "\n", "```{eval-rst}\n", ".. 
currentmodule:: opinf.operators\n", @@ -1049,20 +1183,7 @@ " InterpolatedCubicOperator\n", " InterpolatedInputOperator\n", " InterpolatedStateInputOperator\n", - "```\n", - "\n", - "" + "```" ] } ], From 4c510b35987eaec2d05c35947ebdce1b23e7ea4c Mon Sep 17 00:00:00 2001 From: Shane Date: Tue, 13 Aug 2024 17:39:40 -0600 Subject: [PATCH 02/48] nonparametric operator docstring tweaks --- docs/source/api/operators.ipynb | 2 +- src/opinf/operators/_nonparametric.py | 197 ++++++++++++++++++-------- 2 files changed, 140 insertions(+), 59 deletions(-) diff --git a/docs/source/api/operators.ipynb b/docs/source/api/operators.ipynb index cf6cdde7..ca71663f 100644 --- a/docs/source/api/operators.ipynb +++ b/docs/source/api/operators.ipynb @@ -1160,7 +1160,7 @@ "\\end{aligned}\n", "$$\n", "\n", - "where $\\Ohat_{\\ell}(\\bmfu)$ is determined by interpolating $s$ matrices $\\Ohat_{\\ell}^{(1)},\\ldots,\\Ohat_{\\ell}^{(s)}$.\n", + "where $\\Ohat_{\\ell}(\\bfmu)$ is determined by interpolating $s$ matrices $\\Ohat_{\\ell}^{(1)},\\ldots,\\Ohat_{\\ell}^{(s)}$.\n", "In the context of Operator Inference, $s$ is the number of training parameter values." ] }, diff --git a/src/opinf/operators/_nonparametric.py b/src/opinf/operators/_nonparametric.py index 0fa7ac17..3d57c326 100644 --- a/src/opinf/operators/_nonparametric.py +++ b/src/opinf/operators/_nonparametric.py @@ -26,13 +26,13 @@ class ConstantOperator(OpInfOperator): Parameters ---------- entries : (r,) ndarray or None - Operator entries :math:`\chat`. + Operator vector :math:`\chat`. Examples -------- >>> import numpy as np >>> c = opinf.operators.ConstantOperator() - >>> entries = np.random.random(10) # Operator entries. + >>> entries = np.random.random(10) # Operator vector. 
>>> c.set_entries(np.random.random(10)) >>> c.shape (10,) @@ -45,13 +45,23 @@ class ConstantOperator(OpInfOperator): def _str(statestr=None, inputstr=None): return "c" + @property + def entries(self): + r"""Operator vector :math:`\chat`.""" + return OpInfOperator.entries.fget(self) + + @property + def shape(self): + r"""Shape :math:`(r,)` of the operator vector :math:`\chat`.""" + return OpInfOperator.shape.fget(self) + def set_entries(self, entries): - r"""Set the ``entries`` attribute. + r"""Set the operator vector :math:`\chat`. Parameters ---------- entries : (r,) ndarray - Operator entries :math:`\chat`. + Operator vector :math:`\chat`. """ if np.isscalar(entries): entries = np.atleast_1d(entries) @@ -152,7 +162,7 @@ def datablock(states, inputs=None): @staticmethod def operator_dimension(r=None, m=None): - r"""Column dimension of the operator entries (always 1). + r"""Column dimension of the operator vector (always 1). Parameters ---------- @@ -172,13 +182,13 @@ class LinearOperator(OpInfOperator): Parameters ---------- entries : (r, r) ndarray or None - Operator entries :math:`\Ahat`. + Operator matrix :math:`\Ahat`. Examples -------- >>> import numpy as np >>> A = opinf.operators.LinearOperator() - >>> entries = np.random.random((10, 10)) # Operator entries. + >>> entries = np.random.random((10, 10)) # Operator matrix. >>> A.set_entries(entries) >>> A.shape (10, 10) @@ -192,13 +202,23 @@ class LinearOperator(OpInfOperator): def _str(statestr, inputstr=None): return f"A{statestr}" + @property + def entries(self): + r"""Operator matrix :math:`\Ahat`.""" + return OpInfOperator.entries.fget(self) + + @property + def shape(self): + r"""Shape :math:`(r, r)` of the operator matrix :math:`\Ahat`.""" + return OpInfOperator.shape.fget(self) + def set_entries(self, entries): - r"""Set the ``entries`` attribute. + r"""Set the operator matrix :math:`\Ahat`. Parameters ---------- entries : (r, r) ndarray - Operator entries :math:`\Ahat`. + Operator matrix :math:`\Ahat`. 
""" if np.isscalar(entries) or np.shape(entries) == (1,): entries = np.atleast_2d(entries) @@ -309,7 +329,7 @@ def datablock(states, inputs=None): @staticmethod def operator_dimension(r, m=None): - """Column dimension :math:`r` of the operator entries. + r"""Column dimension :math:`r` of the operator matrix :math:`\Ahat`. Parameters ---------- @@ -326,20 +346,21 @@ class QuadraticOperator(OpInfOperator): :math:`\Ophat_{\ell}(\qhat,\u) = \Hhat[\qhat\otimes\qhat]` where :math:`\Hhat\in\RR^{r \times r^{2}}`. - Internally, the action of the operator is computed as the product of a - :math:`r \times r(r+1)/2` matrix and a compressed version of the Kronecker - product :math:`\qhat \otimes \qhat`. + Internally, the action of the operator is computed as the product of an + :math:`r \times r(r+1)/2` matrix :math:`\tilde{\H}` and a + compressed version of the Kronecker product :math:`\qhat \otimes \qhat`. Parameters ---------- entries : (r, r^2) or (r, r(r+1)/2) or (r, r, r) ndarray or None - Operator entries :math:`\Hhat`. + Operator matrix :math:`\Hhat`, its compressed representation + :math:`\tilde{\H}`, or the equivalent symmetric tensor. Examples -------- >>> import numpy as np >>> H = opinf.operators.QuadraticOperator() - >>> entries = np.random.random((10, 100)) # Operator entries. + >>> entries = np.random.random((10, 100)) # Operator matrix. >>> H.set_entries(entries) >>> H.shape # Compressed shape. (10, 55) @@ -367,13 +388,29 @@ def _precompute_jacobian_jit(self): Ht = self.expand_entries(self.entries).reshape((r, r, r)) self._prejac = Ht + Ht.transpose(0, 2, 1) + @property + def entries(self): + r"""Internal representation :math:`\tilde{\H}` of the operator + matrix :math:`\Hhat`. + """ + return OpInfOperator.entries.fget(self) + + @property + def shape(self): + r"""Shape :math:`(r, r(r+1)/2)` of the internal representation + :math:`\tilde{\H}` of the operator matrix :math:`\Hhat`. 
+ """ + return OpInfOperator.shape.fget(self) + def set_entries(self, entries): - r"""Set the ``entries`` attribute. + r"""Set the internal representation :math:`\tilde{\H}` of the operator + matrix :math:`\Hhat`. Parameters ---------- entries : (r, r^2) or (r, r(r+1)/2) or (r, r, r) ndarray - Operator entries :math:`\Hhat`. + Operator matrix :math:`\Hhat`, its compressed representation + :math:`\tilde{\H}`, or the equivalent symmetric tensor. """ if np.isscalar(entries) or np.shape(entries) == (1,): entries = np.atleast_2d(entries) @@ -489,16 +526,16 @@ def datablock(states, inputs=None): \end{array}\right] \in\RR^{r^2 \times k}. - Internally, a compressed Kronecker product :math:`\tilde{\otimes}` with + Internally, a compressed Kronecker product :math:`\hat{\otimes}` with :math:`r(r+1)/2 < r^{2}` degrees of freedom is used for efficiency, hence the data block is actually .. math:: \D\trp = \left[\begin{array}{ccc} - \qhat_0\tilde{\otimes}\qhat_0 + \qhat_0\,\hat{\otimes}\,\qhat_0 & \cdots & - \qhat_{k-1}\tilde{\otimes}\qhat_{k-1} + \qhat_{k-1}\,\hat{\otimes}\,\qhat_{k-1} \end{array}\right] \in\RR^{r(r+1)/2 \times k}. @@ -519,7 +556,8 @@ def datablock(states, inputs=None): @staticmethod def operator_dimension(r, m=None): - """Column dimension :math:`r(r+1)/2` of the operator entries. + r"""Column dimension :math:`r(r+1)/2` of the internal representation + :math:`\tilde{\H}` of the operator matrix :math:`\Hhat`. Parameters ---------- @@ -561,11 +599,11 @@ def ckron(state, checkdim=False): Cross terms :math:`\hat{q}_i \hat{q}_j` for :math:`i \neq j` appear twice in :math:`\qhat\otimes\qhat`. - The *compressed Kronecker product* :math:`\qhat\hat{\otimes}\qhat` + The *compressed Kronecker product* :math:`\qhat\,\hat{\otimes}\,\qhat` consists of the unique terms of :math:`\qhat\otimes\qhat`: .. 
math:: - \qhat\hat{\otimes}\qhat + \qhat\,\hat{\otimes}\,\qhat = \left[\begin{array}{c} \hat{q}_{1}^2 \\ @@ -604,9 +642,9 @@ def ckron(state, checkdim=False): \end{array}\right] = \left[\begin{array}{ccc} & & \\ - \qhat_0\hat{\otimes}\qhat_0 + \qhat_0\,\hat{\otimes}\,\qhat_0 & \cdots & - \qhat_{k-1}\hat{\otimes}\qhat_{k-1} + \qhat_{k-1}\,\hat{\otimes}\,\qhat_{k-1} \\ & & \end{array}\right] \in \RR^{r(r+1)/2 \times k}. @@ -665,7 +703,8 @@ def ckron_indices(r): def compress_entries(H): r"""Given :math:`\Hhat\in\RR^{a\times r^2}`, construct the matrix :math:`\tilde{\H}\in\RR^{a \times r(r+1)/2}` such that - :math:`\Hhat[\qhat\otimes\qhat] = \tilde{\H}[\qhat\hat{\otimes}\qhat]` + :math:`\Hhat[\qhat\otimes\qhat] + = \tilde{\H}[\qhat\,\hat{\otimes}\,\qhat]` for all :math:`\qhat\in\RR^{r}` where :math:`\hat{\otimes}` is the compressed Kronecker product (see :meth:`ckron`). @@ -719,7 +758,8 @@ def compress_entries(H): def expand_entries(Hc): r"""Given :math:`\tilde{\H}\in\RR^{a \times r(r+1)/2}`, construct the matrix :math:`\Hhat\in\RR^{a\times r^2}` such that - :math:`\Hhat[\qhat\otimes\qhat] = \tilde{\H}[\qhat\hat{\otimes}\qhat]` + :math:`\Hhat[\qhat\otimes\qhat] + = \tilde{\H}[\qhat\,\hat{\otimes}\,\qhat]` for all :math:`\qhat\in\RR^{r}` where :math:`\hat{\otimes}` is the compressed Kronecker product (see :meth:`ckron`). @@ -782,20 +822,22 @@ class CubicOperator(OpInfOperator): :math:`\Ophat_{\ell}(\qhat,\u) = \Ghat[\qhat\otimes\qhat\otimes\qhat]` where :math:`\Ghat\in\RR^{r \times r^{3}}`. - Internally, the action of the operator is computed as the product of a - :math:`r \times r(r+1)(r+2)/6` matrix and a compressed version of the - triple Kronecker product :math:`\qhat \otimes \qhat \otimes \qhat`. + Internally, the action of the operator is computed as the product of an + :math:`r \times r(r+1)(r+2)/6` matrix :math:`\tilde{\G}` and a compressed + version of the triple Kronecker product + :math:`\qhat \otimes \qhat \otimes \qhat`. 
Parameters ---------- entries : (r, r^3) or (r, r(r+1)(r+2)/6) or (r, r, r, r) ndarray or None - Operator entries :math:`\Ghat`. + Operator matrix :math:`\Ghat`, its compressed representation + :math:`\tilde{\G}`, or the equivalent symmetric 4-tensor. Examples -------- >>> import numpy as np >>> G = opinf.operators.CubicOperator() - >>> entries = np.random.random((10, 1000)) # Operator entries. + >>> entries = np.random.random((10, 1000)) # Operator matrix. >>> G.set_entries(entries) >>> G.shape # Compressed shape. (10, 220) @@ -823,13 +865,29 @@ def _precompute_jacobian_jit(self): Gt = self.expand_entries(self.entries).reshape((r, r, r, r)) self._prejac = Gt + Gt.transpose(0, 2, 1, 3) + Gt.transpose(0, 3, 1, 2) + @property + def entries(self): + r"""Internal representation :math:`\tilde{\G}` of the operator + matrix :math:`\Ghat`. + """ + return OpInfOperator.entries.fget(self) + + @property + def shape(self): + r"""Shape :math:`(r, r(r+1)(r+2)/6)` of the internal representation + :math:`\tilde{\G}` of the operator matrix :math:`\Ghat`. + """ + return OpInfOperator.shape.fget(self) + def set_entries(self, entries): - r"""Set the ``entries`` attribute. + r"""Set the internal representation :math:`\tilde{\G}` of the operator + matrix :math:`\Ghat`. Parameters ---------- entries : (r, r^3) or (r, r(r+1)(r+2)/6) or (r, r, r, r) ndarray - Operator entries :math:`\Ghat`. + Operator matrix :math:`\Ghat`, its compressed representation + :math:`\tilde{\G}`, or the equivalent symmetric 4-tensor. """ if np.isscalar(entries) or np.shape(entries) == (1,): entries = np.atleast_2d(entries) @@ -905,8 +963,7 @@ def jacobian(self, state, input_=None): @utils.requires("entries") def galerkin(self, Vr, Wr=None): r"""Return the Galerkin projection of the operator, - :math:`\widehat{\mathbf{G}} = - (\Wr\trp\Vr)^{-1}\Wr\trp\mathbf{G}[\Vr\otimes\Vr\otimes\Vr]`. + :math:`\Ghat = (\Wr\trp\Vr)^{-1}\Wr\trp\G[\Vr\otimes\Vr\otimes\Vr]`. 
Parameters ---------- @@ -958,9 +1015,9 @@ def datablock(states, inputs=None): .. math:: \D\trp = \left[\begin{array}{ccc} - \qhat_0\tilde{\otimes}\qhat_0\tilde{\otimes}\qhat_0 + \qhat_0\,\hat{\otimes}\,\qhat_0\,\hat{\otimes}\,\qhat_0 & \cdots & - \qhat_{k-1}\tilde{\otimes}\qhat_{k-1}\tilde{\otimes}\qhat_{k-1} + \qhat_{k-1}\,\hat{\otimes}\,\qhat_{k-1}\,\hat{\otimes}\,\qhat_{k-1} \end{array}\right] \in\RR^{r(r+1)(r+2)/6 \times k}. @@ -981,7 +1038,8 @@ def datablock(states, inputs=None): @staticmethod def operator_dimension(r, m=None): - """Column dimension :math:`r(r+1)(r+2)/6` of the operator entries. + r"""Column dimension :math:`r(r+1)(r+2)/6` of the internal + representation :math:`\tilde{\G}` of the operator matrix :math:`\Ghat`. Parameters ---------- @@ -1014,17 +1072,17 @@ def ckron(state): not all equal appear multiple times in :math:`\qhat\otimes\qhat\otimes\qhat`. The *compressed cubic Kronecker product* - :math:`\qhat\hat{\otimes}\qhat\hat{\otimes}\qhat` + :math:`\qhat\,\hat{\otimes}\,\qhat\,\hat{\otimes}\,\qhat` consists of the unique terms of :math:`\qhat\otimes\qhat\otimes\qhat`: .. math:: - \qhat\hat{\otimes}\qhat\hat{\otimes}\qhat + \qhat\,\hat{\otimes}\,\qhat\,\hat{\otimes}\,\qhat = \left[\begin{array}{c} \hat{q}_{1}^3 \\ - \hat{q}_{2}[\![\qhat\hat{\otimes}\qhat]\!]_{1:2} + \hat{q}_{2}[\![\qhat\,\hat{\otimes}\,\qhat]\!]_{1:2} \\ \vdots \\ - \hat{q}_{r}[\![\qhat\hat{\otimes}\qhat]\!]_{1:r} + \hat{q}_{r}[\![\qhat\,\hat{\otimes}\,\qhat]\!]_{1:r} \end{array}\right] \in \RR^{r(r+1)(r+2)/6}. 
@@ -1089,7 +1147,7 @@ def compress_entries(G): r"""Given :math:`\Ghat\in\RR^{a\times r^2}`, construct the matrix :math:`\tilde{\G}\in\RR^{a \times r(r+1)(r+2)/6}` such that :math:`\Ghat[\qhat\otimes\qhat\otimes\qhat] - = \tilde{\G}[\qhat\hat{\otimes}\qhat\hat{\otimes}\qhat]` + = \tilde{\G}[\qhat\,\hat{\otimes}\,\qhat\,\hat{\otimes}\,\qhat]` for all :math:`\qhat\in\RR^{r}` where :math:`\cdot\hat{\otimes}\cdot\hat{\otimes}\cdot` is the compressed cubic Kronecker product (see :meth:`ckron`). @@ -1147,7 +1205,7 @@ def expand_entries(Gc): r"""Given :math:`\tilde{\G}\in\RR^{a \times r(r+1)(r+2)/6}`, construct the matrix :math:`\Ghat\in\RR^{a\times r^3}` such that :math:`\Ghat[\qhat\otimes\qhat\otimes\qhat] - = \tilde{\G}[\qhat\hat{\otimes}\qhat\hat{\otimes}\qhat]` + = \tilde{\G}[\qhat\,\hat{\otimes}\,\qhat\,\hat{\otimes}\,\qhat]` for all :math:`\qhat\in\RR^{r}` where :math:`\cdot\hat{\otimes}\cdot\hat{\otimes}\cdot` is the compressed cubic Kronecker product (see :meth:`ckron`). @@ -1226,13 +1284,13 @@ class InputOperator(OpInfOperator, InputMixin): Parameters ---------- entries : (r, m) ndarray or None - Operator entries :math:`\Bhat`. + Operator matrix :math:`\Bhat`. Examples -------- >>> import numpy as np >>> B = opinf.operators.LinearOperator() - >>> entries = np.random.random((10, 3)) # Operator entries. + >>> entries = np.random.random((10, 3)) # Operator matrix. >>> B.set_entries(entries) >>> B.shape (10, 3) @@ -1244,20 +1302,32 @@ class InputOperator(OpInfOperator, InputMixin): @property def input_dimension(self): - r"""Dimension of the input :math:`\u` that the operator acts on.""" + r"""Dimension :math:`m` of the input :math:`\u` that the operator + acts on. 
+ """ return None if self.entries is None else self.entries.shape[1] @staticmethod def _str(statestr, inputstr): return f"B{inputstr}" + @property + def entries(self): + r"""Operator matrix :math:`\Bhat`.""" + return OpInfOperator.entries.fget(self) + + @property + def shape(self): + r"""Shape :math:`(r, m)` of the operator matrix :math:`\Bhat`.""" + return OpInfOperator.shape.fget(self) + def set_entries(self, entries): - r"""Set the ``entries`` attribute. + r"""Set the operator matrix :math:`\Bhat`. Parameters ---------- entries : (r, m) ndarray - Operator entries :math:`\Bhat`. + Operator matrix :math:`\Bhat`. """ if np.isscalar(entries) or np.shape(entries) == (1,): entries = np.atleast_2d(entries) @@ -1354,7 +1424,7 @@ def datablock(states, inputs): @staticmethod def operator_dimension(r, m): - """Column dimension :math:`m` of the operator entries. + r"""Column dimension :math:`m` of the operator matrix :math:`\Bhat`. Parameters ---------- @@ -1375,7 +1445,7 @@ class StateInputOperator(OpInfOperator, InputMixin): Parameters ---------- entries : (r, rm) ndarray or None - Operator entries :math:`\Nhat`. + Operator matrix :math:`\Nhat`. Examples -------- @@ -1394,7 +1464,9 @@ class StateInputOperator(OpInfOperator, InputMixin): @property def input_dimension(self): - r"""Dimension of the input :math:`\u` that the operator acts on.""" + r"""Dimension :math:`m` of the input :math:`\u` that the operator + acts on. + """ if self.entries is None: return None return self.entries.shape[1] // self.entries.shape[0] @@ -1403,13 +1475,23 @@ def input_dimension(self): def _str(statestr, inputstr): return f"N[{inputstr} ⊗ {statestr}]" + @property + def entries(self): + r"""Operator matrix :math:`\Nhat`.""" + return OpInfOperator.entries.fget(self) + + @property + def shape(self): + r"""Shape :math:`(r, rm)` of the operator matrix :math:`\Nhat`.""" + return OpInfOperator.shape.fget(self) + def set_entries(self, entries): - r"""Set the ``entries`` attribute. 
+ r"""Set the operator matrix :math:`\Nhat`. Parameters ---------- entries : (r, rm) ndarray - Operator entries :math:`\Nhat`. + Operator matrix :math:`\Nhat`. """ if np.isscalar(entries) or np.shape(entries) == (1,): entries = np.atleast_2d(entries) @@ -1491,8 +1573,7 @@ def jacobian(self, state, input_): @utils.requires("entries") def galerkin(self, Vr, Wr=None): r"""Return the Galerkin projection of the operator, - :math:`\widehat{\mathbf{N}} = - (\Wr\trp\Vr)^{-1}\Wr\trp\mathbf{N}[\I_{m}\otimes\Vr]`. + :math:`\Nhat = (\Wr\trp\Vr)^{-1}\Wr\trp\N[\I_{m}\otimes\Vr]`. Parameters ---------- @@ -1554,7 +1635,7 @@ def datablock(states, inputs): @staticmethod def operator_dimension(r, m): - """Column dimension :math:`rm` of the operator entries. + r"""Column dimension :math:`rm` of the operator matrix :math:`\Nhat`. Parameters ---------- From fec635b66fd734d887fdcaacc003c15b6ee67fd7 Mon Sep 17 00:00:00 2001 From: Shane Date: Wed, 14 Aug 2024 09:43:00 -0600 Subject: [PATCH 03/48] explanation of parametric opinf --- docs/source/api/operators.ipynb | 191 ++++++++++++++++++++++++++++---- 1 file changed, 169 insertions(+), 22 deletions(-) diff --git a/docs/source/api/operators.ipynb b/docs/source/api/operators.ipynb index ca71663f..0a0478a4 100644 --- a/docs/source/api/operators.ipynb +++ b/docs/source/api/operators.ipynb @@ -303,12 +303,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Operator Inference requires state, input, and [derivative](opinf.ddt) data $\\{(\\qhat_j,\\u_j,\\dot{\\qhat}_j)\\}_{j=0}^{k-1}$ that approximately satisfy the desired model dynamics.\n", + "Operator Inference requires state, input, and \"left-hand side\" data $\\{(\\qhat_j,\\u_j,\\z_j)\\}_{j=0}^{k-1}$ that approximately satisfy the desired model dynamics.\n", + "For [time-continuous models](opinf.models.ContinuousModel), $\\z_j$ is the [time derivative](opinf.ddt) of the state data; for [fully discrete models](opinf.models.DiscreteModel), $\\z_j$ is the ``next state,'' usually 
$\\qhat_{j+1}$.\n", "For {eq}`eq:operators:model`, and assuming each operator is an OpInf operator, the data should approximately satisfy\n", "\n", "$$\n", "\\begin{aligned}\n", - " \\dot{\\qhat}_j\n", + " \\z_j\n", " \\approx \\Ophat(\\qhat_j, \\u_j)\n", " = \\sum_{\\ell=1}^{n_\\textrm{terms}} \\Ophat_{\\ell}(\\qhat_j, \\u_j)\n", " = \\sum_{\\ell=1}^{n_\\textrm{terms}} \\Ohat_{\\ell}\\d_{\\ell}(\\qhat_j, \\u_j),\n", @@ -333,7 +334,7 @@ "\n", "$$\n", "\\begin{aligned}\n", - " \\dot{\\qhat}_j\n", + " \\z_j\n", " \\approx \\sum_{\\ell=1}^{n_\\textrm{terms}} \\Ohat_{\\ell}\\d_{\\ell}(\\qhat_j, \\u_j)\n", " = [~\\Ohat_{1}~~\\cdots~~\\Ohat_{n_\\textrm{terms}}~]\n", " \\left[\\begin{array}{c}\n", @@ -350,7 +351,7 @@ "\\begin{aligned}\n", " \\left[\\begin{array}{c|c|c}\n", " & & \\\\\n", - " \\dot{\\qhat}_0 & \\cdots & \\dot{\\qhat}_{k-1}\n", + " \\z_0 & \\cdots & \\z_{k-1}\n", " \\\\ & &\n", " \\end{array}\\right]\n", " \\approx\n", @@ -368,7 +369,7 @@ "$$\n", "\\begin{aligned}\n", " \\Z\n", - " &= [~\\dot{\\qhat}_0~~\\cdots~~\\dot{\\qhat}_{k-1}~] \\in \\RR^{r\\times k},\n", + " &= [~\\z_0~~\\cdots~~\\z_{k-1}~] \\in \\RR^{r\\times k},\n", " \\\\ & \\\\\n", " \\Ohat\n", " &= [~\\Ohat_{1}~~\\cdots~~\\Ohat_{n_\\textrm{terms}}~] \\in \\RR^{r \\times d},\n", @@ -442,7 +443,7 @@ "$$\n", "\\begin{aligned}\n", " \\Z\n", - " &= [~\\dot{\\qhat}_0~~\\cdots~~\\dot{\\qhat}_{k-1}~] \\in \\RR^{r\\times k},\n", + " &= [~\\z_0~~\\cdots~~\\z_{k-1}~] \\in \\RR^{r\\times k},\n", " \\\\ \\\\\n", " \\Ohat\n", " &= [~\\Ahat~~\\Bhat~] \\in \\RR^{r \\times (r + m)},\n", @@ -457,6 +458,7 @@ "\\end{aligned}\n", "$$\n", "\n", + "In this setting, $\\z_j = \\dot{\\qhat}_j$, the time derivative of $\\qhat_j$.\n", "Collecting the state snapshots in the matrix $\\Qhat = [~\\qhat_0~~\\cdots~~\\qhat_{k-1}~]\\in\\RR^{r\\times k}$ and the inputs in the matrix $\\U = [~\\u_0~~\\cdots~~\\u_{k-1}~]$, the full data matrix can be abbreviated as $\\D = [~\\Qhat\\trp~~\\U\\trp~]$.\n", "\n", "If the regression $\\Z 
\\approx \\Ohat\\D\\trp$ is treated as an [ordinary least-squares problem](opinf.lstsq.PlainSolver), the optimization problem to solve is given by\n", @@ -467,7 +469,7 @@ " \\D\\Ohat\\trp - \\Z\\trp\n", " \\right\\|_F^2\n", " = \\min_{\\Ahat,\\Bhat}\\sum_{j=0}^{k-1}\\left\\|\n", - " \\Ahat\\qhat_j + \\Bhat\\u_j - \\dot{\\qhat}_j\n", + " \\Ahat\\qhat_j + \\Bhat\\u_j - \\z_j\n", " \\right\\|_2^2.\n", "\\end{aligned}\n", "$$\n", @@ -501,7 +503,7 @@ "$$\n", "\\begin{aligned}\n", " \\Z\n", - " &= [~(\\dot{\\qhat}_0 - \\Bhat\\u_0)~~\\cdots~~(\\dot{\\qhat}_{k-1} - \\Bhat\\u_{k-1})~] \\in \\RR^{r\\times k},\n", + " &= [~(\\z_0 - \\Bhat\\u_0)~~\\cdots~~(\\z_{k-1} - \\Bhat\\u_{k-1})~] \\in \\RR^{r\\times k},\n", " \\\\\n", " \\Ohat\n", " &= \\Ahat \\in \\RR^{r \\times r},\n", @@ -516,7 +518,7 @@ "$$\n", "\\begin{aligned}\n", " &\\min_{\\Ahat}\\sum_{j=0}^{k-1}\\left\\|\n", - " \\Ahat\\qhat_j - (\\dot{\\qhat}_j - \\Bhat\\u_j)\n", + " \\Ahat\\qhat_j - (\\z_j - \\Bhat\\u_j)\n", " \\right\\|_2^2.\n", "\\end{aligned}\n", "$$\n", @@ -1013,8 +1015,9 @@ "Parametric OpInf operators have the form\n", "$\\Ophat_{\\ell}(\\qhat,\\u;\\bfmu) = \\Ohat_{\\ell}(\\bfmu)\\d_{\\ell}(\\qhat,\\u)$ defined by the matrix-valued function $\\Ohat_{\\ell}:\\RR^{p}\\to\\RR^{r\\times d_\\ell}$ and (as in the nonparametric case) the data vector $\\d_{\\ell}:\\RR^{r}\\times\\RR^{m}\\to\\RR^{d_\\ell}$.\n", "This module provides two options for the parameterization of $\\Ohat_{\\ell}(\\bfmu)$: [affine expansion](sec-operators-affine) and [elementwise interpolation](sec-operators-interpolated).\n", - "In each case, Operator Inference begins with $s$ training parameter values $\\bfmu_1,\\ldots,\\bfmu_s$ and corresponding state, input, and derivative data $\\{(\\qhat_{i,j},\\u_{i,j},\\dot{\\qhat}_{i,j})\\}_{j=0}^{k_{i}-1}$ for each training parameter value $\\bfmu_i$.\n", - "A regression of the form $\\Z \\approx \\Ohat\\D\\trp$ is formed as in the nonparametric case, with the structure of the matrices $\\Ohat$ 
and $\\D$ depending on the choice of parameterization for each $\\Ohat_{\\ell}(\\bfmu)$." + "In each case, Operator Inference begins with $s$ training parameter values $\\bfmu_{0},\\ldots,\\bfmu_{s-1}$ and corresponding state, input, and left-hand side data $\\{(\\qhat_{i,j},\\u_{i,j},\\z_{i,j})\\}_{j=0}^{k_{i}-1}$ for each training parameter value $\\bfmu_{i}$.\n", + "A regression of the form $\\Z \\approx \\Ohat\\D\\trp$ is formed as in the nonparametric case, with the structure of the matrices $\\Ohat$ and $\\D$ depending on the choice of parameterization for each $\\Ohat_{\\ell}(\\bfmu)$.\n", + "The matrix $\\D$ is formed by the static [`datablock()`](ParametricOpInfOperator.datablock) method, and the rest of the problem is constructed and solved by a parametric model class." ] }, { @@ -1023,16 +1026,16 @@ "source": [ ":::{admonition} Example\n", ":class: tip\n", - "Let $\\bfmu = [~\\mu_{1}~~\\mu_{2}~]\\trp$.\n", + "Let $\\bfmu = [~\\mu_{0}~~\\mu_{1}~]\\trp$.\n", "The operator\n", "\n", "$$\n", "\\begin{aligned}\n", - " \\Ophat_1(\\qhat,\\u;\\bfmu) = (\\mu_{1}\\Ahat_{1} + \\mu_{2}^2\\Ahat_{2})\\qhat\n", + " \\Ophat_1(\\qhat,\\u;\\bfmu) = (\\mu_{0}\\Ahat_{0} + \\mu_{1}^{2}\\Ahat_{1})\\qhat\n", "\\end{aligned}\n", "$$\n", "\n", - "is a parametric OpInf operator because it can be written as $\\Ophat_1(\\qhat,\\u;\\bfmu) = \\Ohat_1(\\bfmu)\\d_1(\\qhat,\\u)$ with $\\Ohat_1(\\bfmu) = \\mu_{1}\\Ahat_{1} + \\mu_{2}\\Ahat_{2}$ and $\\d_1(\\qhat,\\u) = \\qhat$.\n", + "is a parametric OpInf operator because it can be written as $\\Ophat_1(\\qhat,\\u;\\bfmu) = \\Ohat_1(\\bfmu)\\d_1(\\qhat,\\u)$ with $\\Ohat_1(\\bfmu) = \\mu_{0}\\Ahat_{0} + \\mu_{1}^{2}\\Ahat_{1}$ and $\\d_1(\\qhat,\\u) = \\qhat$.\n", "\n", "This operator can be represented with an {class}`AffineLinearOperator`.\n", "For a given parameter value, the [`evaluate()`](AffineLinearOperator.evaluate) method returns a {class}`LinearOperator` instance.\n", @@ -1056,12 +1059,12 @@ "$$\n", "\\begin{aligned}\n", " 
\\Ohat_{\\ell}(\\bfmu)\n", - " &= \\sum_{a=1}^{s_{\\ell}}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,\\Ohat_{\\ell}^{(a)},\n", + " &= \\sum_{a=0}^{A_{\\ell}-1}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,\\Ohat_{\\ell}^{(a)},\n", "\\end{aligned}\n", "$$\n", "\n", "where each $\\theta_{\\ell}^{(a)}:\\RR^{p}\\to\\RR$ is a scalar-valued function and each $\\Ohat_{\\ell}^{(a)}\\in\\RR^{r\\times d_\\ell}$ is constant.\n", - "Affine expansions are grouped such that the coefficient functions $\\theta_{\\ell}^{(1)},\\ldots,\\theta_{\\ell}^{(s_{\\ell})}$ are linearly independent." + "Affine expansions are grouped such that the coefficient functions $\\theta_{\\ell}^{(0)},\\ldots,\\theta_{\\ell}^{(A_{\\ell}-1)}$ are linearly independent." ] }, { @@ -1077,7 +1080,7 @@ "$$\n", "\\begin{aligned}\n", " \\Op_{\\ell}(\\q,\\u;\\bfmu)\n", - " = \\sum_{a=1}^{s_{\\ell}}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,\\Op_{\\ell}^{(a)}\\!(\\q, \\u).\n", + " = \\sum_{a=1}^{A_{\\ell}}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,\\Op_{\\ell}^{(a)}\\!(\\q, \\u).\n", "\\end{aligned}\n", "$$\n", "\n", @@ -1088,10 +1091,10 @@ " \\Ophat_{\\ell}(\\qhat, \\u; \\bfmu)\n", " &= (\\Wr\\trp\\Vr)^{-1}\\Wr\\trp\\Op_{\\ell}(\\Vr\\qhat, \\u; \\bfmu)\n", " \\\\\n", - " &= (\\Wr\\trp\\Vr)^{-1}\\Wr\\trp \\sum_{a=1}^{s_{\\ell}}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,\\Op_{\\ell}^{(a)}\\!(\\V\\qhat, \\u)\n", + " &= (\\Wr\\trp\\Vr)^{-1}\\Wr\\trp \\sum_{a=1}^{A_{\\ell}}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,\\Op_{\\ell}^{(a)}\\!(\\V\\qhat, \\u)\n", " \\\\\n", - " &= \\sum_{a=1}^{s_{\\ell}}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,(\\Wr\\trp\\Vr)^{-1}\\Wr\\trp\\Op_{\\ell}^{(a)}\\!(\\V\\qhat, \\u)\n", - " = \\sum_{a=1}^{s_{\\ell}}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,\\Ophat_{\\ell}^{(a)}\\!(\\qhat, \\u),\n", + " &= \\sum_{a=1}^{A_{\\ell}}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,(\\Wr\\trp\\Vr)^{-1}\\Wr\\trp\\Op_{\\ell}^{(a)}\\!(\\V\\qhat, \\u)\n", + " = \\sum_{a=1}^{A_{\\ell}}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,\\Ophat_{\\ell}^{(a)}\\!(\\qhat, \\u),\n", "\\end{aligned}\n", 
"$$\n", "\n", @@ -1126,7 +1129,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Affine parametric operators are instantiated with a list of the coefficient functions $\\theta_{\\ell}^{(1)},\\ldots,\\theta_{\\ell}^{(s_{\\ell})}$ and with or without the operator matrices $\\Ohat_{\\ell}^{(1)},\\ldots,\\Ohat_{\\ell}^{(s_{\\ell})}$." + "Affine parametric operators are instantiated with a list of the coefficient functions $\\theta_{\\ell}^{(1)},\\ldots,\\theta_{\\ell}^{(A_{\\ell})}$ and with or without the operator matrices $\\Ohat_{\\ell}^{(1)},\\ldots,\\Ohat_{\\ell}^{(A_{\\ell})}$." ] }, { @@ -1138,6 +1141,84 @@ ":::" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{dropdown} Operator Inference for Affine Operators\n", + "\n", + "Consider a model with a single affine operator,\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\z\n", + " = \\Ophat_{1}(\\qhat,\\u;\\bfmu)\n", + " &= \\left(\\sum_{a=0}^{A_{1}-1}\\theta_{1}^{(a)}\\!(\\bfmu)\\,\\Ohat_{1}^{(a)}\\right)\\d_1(\\qhat,\\u).\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "For the moment we will neglect the $\\ell = 1$ subscript.\n", + "With data $\\{(\\qhat_{i,j},\\u_{i,j},\\z_{i,j})\\}_{j=0}^{k_{i}-1}$ corresponding to $s$ training parameter values $\\bfmu_0,\\ldots,\\bfmu_{s-1}$, we seek the $A$ operator matrices $\\Ohat^{(0)},\\ldots,\\Ohat^{(A-1)}$ such that for each parameter index $i$ and time index $j$, we have\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\z_{i,j}\n", + " \\approx \\Ophat(\\qhat_{i,j},\\u_{i,j};\\bfmu_{i})\n", + " &= \\left(\\sum_{a=0}^{A-1}\\theta^{(a)}\\!(\\bfmu_{i})\\,\\Ohat^{(a)}\\right)\\d(\\qhat_{i,j},\\u_{i,j})\n", + " \\\\\n", + " &= \\left[\\begin{array}{ccc}\n", + " \\Ohat^{(0)} & \\cdots & \\Ohat^{(A-1)}\n", + " \\end{array}\\right]\n", + " \\underbrace{\\left[\\begin{array}{c}\n", + " \\theta^{(0)}\\!(\\bfmu_{i})\\,\\d(\\qhat_{i,j},\\u_{i,j})\n", + " \\\\ \\vdots \\\\\n", + " 
\\theta^{(A-1)}\\!(\\bfmu_{i})\\,\\d(\\qhat_{i,j},\\u_{i,j})\n",
+    "    \\end{array}\\right]}_{\\d_{i,j}\\in\\RR^{dA}},\n",
+    "\\end{aligned}\n",
+    "$$\n",
+    "\n",
+    "where $d$ is the output dimension of $\\d$.\n",
+    "Collecting these expressions for each time index $j = 0, \\ldots, k_i - 1$ (but keeping the parameter index $i$ fixed for the moment) results in\n",
+    "\n",
+    "$$\n",
+    "\\begin{aligned}\n",
+    "    \\underbrace{\\left[\\begin{array}{ccc}\n",
+    "        \\z_{i,0} & \\cdots & \\z_{i,k_i-1}\n",
+    "    \\end{array}\\right]}_{\\Z_i\\in\\RR^{r\\times k_i}}\n",
+    "    \\approx \\left[\\begin{array}{ccc}\n",
+    "        \\Ohat^{(0)} & \\cdots & \\Ohat^{(A-1)}\n",
+    "    \\end{array}\\right]\n",
+    "    \\underbrace{\\left[\\begin{array}{ccc}\n",
+    "        \\d_{i,0} & \\cdots & \\d_{i,k_i-1}\n",
+    "    \\end{array}\\right]}_{\\D_i\\trp\\in\\RR^{dA \\times k_i}}.\n",
+    "\\end{aligned}\n",
+    "$$\n",
+    "\n",
+    "Finally, we concatenate each of these expressions for each parameter index $i = 0,\\ldots, s-1$ to arrive at\n",
+    "\n",
+    "$$\n",
+    "\\begin{aligned}\n",
+    "    \\underbrace{\\left[\\begin{array}{ccc}\n",
+    "        \\Z_{0} & \\cdots & \\Z_{s-1}\n",
+    "    \\end{array}\\right]}_{\\Z\\in\\RR^{r\\times K}}\n",
+    "    \\approx \\left[\\begin{array}{ccc}\n",
+    "        \\Ohat^{(0)} & \\cdots & \\Ohat^{(A-1)}\n",
+    "    \\end{array}\\right]\n",
+    "    \\underbrace{\\left[\\begin{array}{ccc}\n",
+    "        \\D_{0}\\trp & \\cdots & \\D_{s-1}\\trp\n",
+    "    \\end{array}\\right]}_{\\D\\trp\\in\\RR^{dA \\times K}},\n",
+    "\\end{aligned}\n",
+    "$$\n",
+    "\n",
+    "where $K = \\sum_{i=0}^{s-1}k_i$, the total number of available data instances.\n",
+    "This is the familiar $\\Z \\approx \\Ohat\\D\\trp$ where $\\Ohat = [~\\Ohat^{(0)}~~\\cdots~~\\Ohat^{(A-1)}~]$, which can be solved for $\\Ohat$ using {mod}`opinf.lstsq`.\n",
+    "\n",
+    "The construction of $\\D$ is taken care of through the [`datablock()`](AffineLinearOperator.datablock) method of the affine operator classes.\n",
+    "\n",
+    "For models with multiple affine operators, the operator matrix $\\Ohat$ is further 
concatenated horizontally to accommodate the operator matrices from each affine expansion, and the data matrix $\\D\\trp$ gains additional block rows.\n", + ":::" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -1160,7 +1241,7 @@ "\\end{aligned}\n", "$$\n", "\n", - "where $\\Ohat_{\\ell}(\\bfmu)$ is determined by interpolating $s$ matrices $\\Ohat_{\\ell}^{(1)},\\ldots,\\Ohat_{\\ell}^{(s)}$.\n", + "where $\\Ohat_{\\ell}(\\bfmu)$ is determined by interpolating $s$ matrices $\\Ohat_{\\ell}^{(0)},\\ldots,\\Ohat_{\\ell}^{(s-1)}$.\n", "In the context of Operator Inference, $s$ is the number of training parameter values." ] }, @@ -1185,6 +1266,72 @@ " InterpolatedStateInputOperator\n", "```" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{dropdown} Operator Inference for Interpolated Operators\n", + "\n", + "Consider a model with a single affine operator,\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\z\n", + " = \\Ophat_{1}(\\qhat,\\u;\\bfmu)\n", + " = \\Ohat_{1}(\\bfmu)\\d_{1}(\\qhat,\\u).\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "For the moment we will neglect the $\\ell = 1$ subscript.\n", + "With data $\\{(\\qhat_{i,j},\\u_{i,j},\\z_{i,j})\\}_{j=0}^{k_{i}-1}$ corresponding to $s$ training parameter values $\\bfmu_0,\\ldots,\\bfmu_{s-1}$, we seek $s$ operator matrices $\\Ohat^{(0)},\\ldots,\\Ohat^{(s-1)}\\in\\RR^{r\\times d}$ such that for each parameter index $i$ and time index $j$, we have\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\z_{i,j}\n", + " \\approx \\Ophat(\\qhat_{i,j},\\u_{i,j};\\bfmu_{i})\n", + " = \\Ohat^{(i)}\\d(\\qhat_{i,j},\\u_{i,j}),\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "which comes from the interpolation condition $\\Ohat^{(i)} = \\Ohat_{1}(\\bfmu_{i})$ for $i = 0,\\ldots,s-1$.\n", + "Because only one operator matrix $\\Ohat^{(i)}$ defines the operator action at each parameter value for which we have data, we have $s$ independent nonparametric Operator Inference problems:\n", + "\n", + 
"$$\n", + "\\begin{aligned}\n", + " \\Z_i\n", + " &\\approx \\Ohat^{(i)}\\D_i\\trp,\n", + " \\\\\n", + " \\Z_i\n", + " &= \\left[\\begin{array}{ccc}\n", + " \\z_{i,0} & \\cdots & \\z_{i,k_{i}-1}\n", + " \\end{array}\\right]\\in\\RR^{r \\times k_{i}},\n", + " \\\\\n", + " \\D_i\\trp\n", + " &= \\left[\\begin{array}{ccc}\n", + " \\d(\\qhat_{i,0}, \\u_{i,0}) & \\cdots & \\d(\\qhat_{i,k_{i}-1}, \\u_{i,k_{i}-1})\n", + " \\end{array}\\right]\\in\\RR^{d\\times k_{i}}\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "The InterpolatedModel classes represent models comprised solely of interpolated operators.\n", + "If interpolated operators are mixed with other operators (nonparametric or affine parametric), the $\\Ohat\\D\\trp$ block of the problem for the interpolated operator is included as follows:\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\left[\\begin{array}{ccc}\n", + " \\Ohat^{(0)} & \\cdots & \\Ohat^{(s-1)}\n", + " \\end{array}\\right]\n", + " \\left[\\begin{array}{cccc}\n", + " \\D_0\\trp & \\0 & \\cdots & \\0 \\\\\n", + " \\0 & \\D_1\\trp & \\cdots & \\0 \\\\\n", + " \\vdots & \\vdots & \\ddots & \\vdots \\\\\n", + " \\0 & \\0 & \\cdots & \\D_{s-1}\\trp\n", + " \\end{array}\\right]\n", + "\\end{aligned}\n", + "$$\n", + ":::" + ] } ], "metadata": { From 8e04d28a14c5fcc0f5a093ff3e54afb691da17b6 Mon Sep 17 00:00:00 2001 From: Shane Date: Wed, 14 Aug 2024 14:11:05 -0600 Subject: [PATCH 04/48] move entries to ParametricOpInfOperator --- src/opinf/operators/_base.py | 274 +++++++++++++++++++------- src/opinf/operators/_interpolate.py | 139 ++++++------- src/opinf/operators/_nonparametric.py | 60 ++++++ tests/operators/test_base.py | 262 +++++++++++++----------- tests/operators/test_interpolate.py | 4 - tests/operators/test_nonparametric.py | 4 +- 6 files changed, 473 insertions(+), 270 deletions(-) diff --git a/src/opinf/operators/_base.py b/src/opinf/operators/_base.py index a51c5694..29eeb8c4 100644 --- a/src/opinf/operators/_base.py +++ 
b/src/opinf/operators/_base.py @@ -35,9 +35,11 @@ class InputMixin(abc.ABC): @property @abc.abstractmethod - def input_dimension(self) -> int: # pragma: no cover - r"""Dimension of the input :math:`\u` that the operator acts on.""" - raise NotImplementedError + def input_dimension(self) -> int: + r"""Dimension :math:`m` of the input :math:`\u` that the operator + acts on. + """ + raise NotImplementedError # pragma: no cover def has_inputs(obj) -> bool: @@ -77,9 +79,11 @@ class OperatorTemplate(abc.ABC): # Properties -------------------------------------------------------------- @property @abc.abstractmethod - def state_dimension(self) -> int: # pragma: no cover - r"""Dimension of the state :math:`\qhat` that the operator acts on.""" - raise NotImplementedError + def state_dimension(self) -> int: + r"""Dimension :math:`r` of the state :math:`\qhat` that the operator + acts on. + """ + raise NotImplementedError # pragma: no cover def __str__(self) -> str: """String representation: class name + dimensions.""" @@ -153,7 +157,7 @@ def jacobian(self, state: np.ndarray, input_=None) -> np.ndarray: jac : (r, r) ndarray State Jacobian. """ - raise NotImplementedError + raise NotImplementedError # pragma: no cover # Dimensionality reduction ------------------------------------------------ def galerkin(self, Vr: np.ndarray, Wr=None): @@ -175,9 +179,10 @@ def galerkin(self, Vr: np.ndarray, Wr=None): Parameters ---------- Vr : (n, r) ndarray - Basis for the trial space. + Basis for the trial space :math:`\Vr`. Wr : (n, r) ndarray or None - Basis for the test space. If ``None``, defaults to ``Vr``. + Basis for the test space :math:`\Wr`. + If ``None`` (default), use ``Vr`` as the test basis. Returns ------- @@ -187,7 +192,7 @@ def galerkin(self, Vr: np.ndarray, Wr=None): ``input_dimension`` attribute of the new operator should be ``self.input_dimension``. 
""" - raise NotImplementedError + raise NotImplementedError # pragma: no cover # Model persistence ------------------------------------------------------- def copy(self): @@ -205,10 +210,10 @@ def save(self, savefile: str, overwrite: bool = False) -> None: If ``True``, overwrite the file if it already exists. If ``False`` (default), raise a ``FileExistsError`` if the file already exists. """ - raise NotImplementedError + raise NotImplementedError # pragma: no cover @classmethod - def load(cls, loadfile: str): # pragma: no cover + def load(cls, loadfile: str): """Load an operator from an HDF5 file. Parameters @@ -216,7 +221,7 @@ def load(cls, loadfile: str): # pragma: no cover loadfile : str Path to the file where the operator was stored via :meth:`save()`. """ - raise NotImplementedError + raise NotImplementedError # pragma: no cover # Verification ------------------------------------------------------------ def verify( @@ -538,13 +543,13 @@ def _validate_entries(entries): elif np.any(np.isinf(entries)): raise ValueError("operator entries must not be Inf") - def set_entries(self, entries): + def set_entries(self, entries) -> None: """Set the :attr:`entries` attribute.""" self.__entries = entries # Properties -------------------------------------------------------------- @property - def entries(self): + def entries(self) -> np.ndarray: r"""Discrete representation of the operator, the matrix :math:`\Ohat`. """ @@ -561,13 +566,15 @@ def entries(self): self._clear() @property - def shape(self): - """Shape of the operator entries array.""" + def shape(self) -> tuple: + """Shape of the operator matrix.""" return None if self.entries is None else self.entries.shape @property - def state_dimension(self): - r"""Dimension of the state :math:`\qhat` that the operator acts on.""" + def state_dimension(self) -> int: + r"""Dimension :math:`r` of the state :math:`\qhat` that the operator + acts on. 
+ """ return None if self.entries is None else self.entries.shape[0] # Magic methods ----------------------------------------------------------- @@ -602,7 +609,7 @@ def __add__(self, other): # Evaluation -------------------------------------------------------------- @utils.requires("entries") - def jacobian(self, state, input_=None): # pragma: no cover + def jacobian(self, state, input_=None) -> np.ndarray: # pragma: no cover r"""Construct the state Jacobian of the operator. If :math:`[\![\q]\!]_{i}` denotes the :math:`i`-th entry of a vector @@ -680,7 +687,7 @@ def galerkin(self, Vr, Wr=None): # Operator inference ------------------------------------------------------ @staticmethod @abc.abstractmethod - def operator_dimension(r: int, m: int = None) -> int: # pragma: no cover + def operator_dimension(r: int, m: int = None) -> int: r"""Column dimension of the operator entries. Child classes should decorate this method with ``@staticmethod``. @@ -698,7 +705,7 @@ def operator_dimension(r: int, m: int = None) -> int: # pragma: no cover Number of columns in the operator entries matrix. This is also the number of rows in the data matrix block. """ - raise NotImplementedError + raise NotImplementedError # pragma: no cover @staticmethod @abc.abstractmethod @@ -900,30 +907,43 @@ def OperatorClass(self): # Properties -------------------------------------------------------------- @property @abc.abstractmethod - def state_dimension(self) -> int: # pragma: no cover - r"""Dimension of the state :math:`\qhat` that the operator acts on.""" - raise NotImplementedError + def state_dimension(self) -> int: + r"""Dimension :math:`r` of the state :math:`\qhat` that the operator + acts on. + """ + raise NotImplementedError # pragma: no cover @property @abc.abstractmethod - def parameter_dimension(self) -> int: # pragma: no cover - r"""Dimension of the parameters :math:`\bfmu` that the operator acts - on. 
+ def parameter_dimension(self) -> int: + r"""Dimension :math:`p` of the parameter vector :math:`\bfmu` that the + operator matrix depends on. """ - raise NotImplementedError + raise NotImplementedError # pragma: no cover def __str__(self) -> str: - """String representation: class name + dimensions.""" + """String representation: class name, dimensions, evaluation type.""" out = [self.__class__.__name__] out.append(f"state_dimension: {self.state_dimension}") if has_inputs(self): out.append(f"input_dimension: {self.input_dimension}") out.append(f"parameter_dimension: {self.parameter_dimension}") + out.append(f"evaluate(parameter) -> {self._OperatorClass.__name__}") return "\n ".join(out) + def __repr__(self) -> str: + return utils.str2repr(self) + # Evaluation -------------------------------------------------------------- + def _check_parametervalue_dimension(self, parameter): + """Ensure a new parameter value has the expected shape.""" + if (pdim := self.parameter_dimension) is None: + raise RuntimeError("parameter_dimension not set") + if np.atleast_1d(parameter).shape[0] != pdim: + raise ValueError(f"expected parameter of shape ({pdim:d},)") + @abc.abstractmethod - def evaluate(self, parameter): # pragma: no cover + def evaluate(self, parameter): r"""Evaluate the operator at the given parameter value, resulting in a nonparametric operator of type ``OperatorClass``. @@ -934,14 +954,14 @@ def evaluate(self, parameter): # pragma: no cover Returns ------- - evaluated_operator : nonparametric operator. + evaluated_operator : nonparametric operator Nonparametric operator corresponding to the parameter value. This should be an instance of :class:`OperatorTemplate` (or a class that inherits from it). 
""" - raise NotImplementedError + raise NotImplementedError # pragma: no cover - def apply(self, parameter, state, input_): + def apply(self, parameter, state, input_=None): r"""Apply the operator to the given state and input at the specified parameter value, :math:`\Ophat_\ell(\qhat,\u;\bfmu)`. @@ -1021,7 +1041,7 @@ def jacobian(self, parameter, state, input_=None): return self.evaluate(parameter).jacobian(state, input_) # Dimensionality reduction ------------------------------------------------ - def galerkin(self, Vr, Wr=None): # pragma: no cover + def galerkin(self, Vr, Wr=None): r"""Get the (Petrov-)Galerkin projection of this operator. Consider an operator :math:`\Op(\q,\u)`, where :math:`\q\in\RR^n` @@ -1052,12 +1072,12 @@ def galerkin(self, Vr, Wr=None): # pragma: no cover ``input_dimension`` attribute of the new operator should be ``self.input_dimension``. """ - raise NotImplementedError + raise NotImplementedError # pragma: no cover # Model persistence ------------------------------------------------------- def copy(self): """Return a copy of the operator.""" - return copy.deepcopy(self) + return copy.deepcopy(self) # pragma: no cover def save(self, savefile: str, overwrite: bool = False) -> None: """Save the operator to an HDF5 file. @@ -1070,10 +1090,10 @@ def save(self, savefile: str, overwrite: bool = False) -> None: If ``True``, overwrite the file if it already exists. If ``False`` (default), raise a ``FileExistsError`` if the file already exists. """ - raise NotImplementedError + raise NotImplementedError # pragma: no cover @classmethod - def load(cls, loadfile: str): # pragma: no cover + def load(cls, loadfile: str): """Load an operator from an HDF5 file. Parameters @@ -1081,7 +1101,70 @@ def load(cls, loadfile: str): # pragma: no cover loadfile : str Path to the file where the operator was stored via :meth:`save()`. 
""" - raise NotImplementedError + raise NotImplementedError # pragma: no cover + + # Verification ------------------------------------------------------------ + def verify(self, testparam=None): + """Verify dimension attributes and :meth:`evaluate()`. + + Parameters + ---------- + testparam : (p,) ndarray or None + Test parameter at which to evaluate the operator. + If ``None`` (default), draw test parameter entries from the + standard Normal distribution. + """ + # Verify dimensions exist and are valid. + if not isinstance((r := self.state_dimension), int) or r <= 0: + raise errors.VerificationError( + "state_dimension must be a positive integer " + f"(current value: {repr(r)}, of type '{type(r).__name__}')" + ) + + if hasinputs := has_inputs(self): + if not isinstance((m := self.input_dimension), int) or m <= 0: + raise errors.VerificationError( + "input_dimension must be a positive integer " + f"(current value: {repr(m)}, of type '{type(r).__name__}')" + ) + else: + m = 0 + + # Get a test parameter. + if testparam is None: + testparam = np.random.standard_normal(self.parameter_dimension) + if np.shape(testparam) != (self.parameter_dimension,): + raise ValueError("testparam.shape != (parameter_dimension,)") + + # Evaluate the operator at the test parameter. 
+ op_evaluated = self.evaluate(testparam) + if not isinstance(op_evaluated, self._OperatorClass): + raise errors.VerificationError( + "evaluate() must return instance of type OperatorClass" + ) + if not is_nonparametric(op_evaluated): + raise errors.VerificationError( + "OperatorClass must be a nonparametric operator type" + ) + + if op_evaluated.state_dimension != self.state_dimension: + raise errors.VerificationError( + "result of evaluate() does not retain the state_dimension" + ) + if hasinputs: + if not has_inputs(op_evaluated): + raise errors.VerificationError( + "result of evaluate() should depend on inputs" + ) + if op_evaluated.input_dimension != m: + raise errors.VerificationError( + "result of evaluate() does not retain the input_dimension" + ) + else: + if has_inputs(op_evaluated): + raise errors.VerificationError( + "result of evaluate() should not depend on inputs" + ) def is_parametric(obj) -> bool: @@ -1093,7 +1176,7 @@ class ParametricOpInfOperator(ParametricOperatorTemplate): r"""Base class for operators that depend on external parameters, i.e., :math:`\Ophat_\ell(\qhat,\u;\bfmu) = \Ohat_\ell(\bfmu)\d_\ell(\qhat,\u)`. - Evaluating a ``_ParametricOpertor`` at a specific parameter value + Evaluating a ``_ParametricOpInfOperator`` at a specific parameter value results in an object that inherits from :class:`opinf.operators.OpInfOperator`. @@ -1105,21 +1188,19 @@ class ParametricOpInfOperator(ParametricOperatorTemplate): True """ - # TODO: pull entries property back into this class as in OpInfOperator. 
- # Initialization ---------------------------------------------------------- def __init__(self): """Initialize the parameter_dimension.""" - self.__p = None + self._clear() - @abc.abstractmethod - def _clear(self) -> None: # pragma: no cover + def _clear(self) -> None: """Reset the operator to its post-constructor state.""" - raise NotImplementedError + self.__p = None + self.__entries = None - def _set_parameter_dimension_from_data(self, parameters) -> None: + def _set_parameter_dimension_from_values(self, parameters) -> None: """Extract and save the dimension of the parameter space from a set of - parameter values. + one or more parameter values. Parameters ---------- @@ -1142,6 +1223,13 @@ def _check_shape_consistency(iterable, prefix: str) -> None: raise ValueError(f"{prefix} shapes inconsistent") # Properties -------------------------------------------------------------- + @property + def state_dimension(self) -> int: + r"""Dimension :math:`r` of the state :math:`\qhat` that the operator + acts on. + """ + return None if self.entries is None else self.entries[0].shape[0] + @property def parameter_dimension(self) -> int: r"""Dimension of the parameters :math:`\bfmu` that the operator acts @@ -1150,44 +1238,88 @@ def parameter_dimension(self) -> int: return self.__p @property - @abc.abstractmethod - def shape(self) -> tuple: # pragma: no cover - """Shape of the operator entries matrix when evaluated + def shape(self) -> tuple: + """Shape of the operator matrix when evaluated at a parameter value. 
""" - raise NotImplementedError + return None if self.entries is None else self.entries[0].shape - # Evaluation -------------------------------------------------------------- - def _check_parametervalue_dimension(self, parameter): - """Ensure a new parameter value has the expected shape.""" - if (pdim := self.parameter_dimension) is None: - raise RuntimeError("parameter_dimension not set") - if np.atleast_1d(parameter).shape[0] != pdim: - raise ValueError(f"expected parameter of shape ({pdim:d},)") + @property + def entries(self): + r"""Arrays that define the operator matrix as a function of the + parameter vector. + """ + return self.__entries + + @entries.setter + def entries(self, entries): + """Set the arrays defining the operator matrix.""" + self.set_entries(entries) + + @entries.deleter + def entries(self): + """Reset the ``entries`` attribute.""" + self._clear() + + @abc.abstractmethod + def set_entries(self, entries, fromblock: bool = False) -> None: + r"""Set the arrays that define the operator matrix as a function of + the parameter vector. + + Parameters + ---------- + entries : list of s (r, d) ndarrays, or (r, sd) ndarray + Operator entries, either as a list of arrays + (``fromblock=False``, default) + or as a horizontal concatenatation of arrays (``fromblock=True``). + fromblock : bool + If ``True``, interpret ``entries`` as a horizontal concatenation + of arrays; if ``False`` (default), interpret ``entries`` as a list + of arrays. + """ + self.__entries = entries # Operator inference ------------------------------------------------------ @abc.abstractmethod - def datablock(self, states, inputs=None): # pragma: no cover + def operator_dimension(self, r: int, m: int = None) -> int: + r"""Number of columns in the total operator matrix. + + Parameters + ---------- + r : int + State dimension. + m : int or None + Input dimension. + + Returns + ------- + d : int + Number of columns in the total operator entries matrix. 
+ This is also the number of rows in the data matrix block. + """ + raise NotImplementedError # pragma: no cover + + @abc.abstractmethod + def datablock(self, states, inputs=None): r"""Return the data matrix block corresponding to the operator. Parameters ---------- - states : list of s (r, k) ndarrays - State snapshots for each of the `s` training parameter values. - inputs : list of s (m, k) ndarrays + states : list of s (r, k_i) ndarrays + State snapshots for each of the :math:`s` training parameter + values. + inputs : list of s (m, k_i) ndarrays Inputs corresponding to the state snapshots. Returns ------- - block : ndarray + block : (d, K) ndarray Data block for the parametric operator. + Here, :math:`d` is the total operator matrix dimension and + :math:`K = \sum_{i=0}^{s-1}k_i`, the total number of state + snapshots. """ - raise NotImplementedError - - @abc.abstractmethod - def operator_dimension(self, r, m): # pragma: no cover - """Number of columns in the operator matrix.""" - raise NotImplementedError + raise NotImplementedError # pragma: no cover def is_uncalibrated(obj) -> bool: diff --git a/src/opinf/operators/_interpolate.py b/src/opinf/operators/_interpolate.py index a3719b54..d09fb3db 100644 --- a/src/opinf/operators/_interpolate.py +++ b/src/opinf/operators/_interpolate.py @@ -34,39 +34,31 @@ class _InterpolatedOperator(ParametricOpInfOperator): r"""Base class for parametric operators where the parameter dependence is handled with element-wise interpolation. 
- For a set of training parameter values :math:`\{\bfmu_i\}_{i=1}^{s}`, + For a set of training parameter values :math:`\{\bfmu_i\}_{i=0}^{s-1}`, this type of operator is given by :math:`\Ophat_\ell(\qhat, \u, \bfmu) = \Ohat_\ell(\bfmu)\d(\qhat, \u)` - where :math:`\Ohat_{\ell}(\bfmu)` is calculated by interpolating - operator entries that correspond to each parameter value: + where :math:`\Ohat_{\ell}(\bfmu)` is calculated by interpolating the + operator matrix entries that correspond to each parameter value: .. math:: \Ohat_{\ell}(\bfmu) = \textrm{interpolate}( - (\bfmu_1,\Ohat_{\ell}^{(1)}),\ldots,(\Ohat_{\ell}^{(s)}\bfmu_s);\bfmu), + (\bfmu_0,\Ohat_{\ell}^{(0)}), + \ldots,(\Ohat_{\ell}^{(s-1)}\bfmu_{s-1});\bfmu), where :math:`\Ohat_\ell^{(i)} = \Ohat_\ell(\bfmu_i)` for each - :math:`i=1,\ldots,s`. + :math:`i=0,\ldots,s-1`. Parent class: :class:`opinf.operators.ParametricOpInfOperator` - Child classes: - - * :class:`opinf.operators.InterpolatedConstantOperator` - * :class:`opinf.operators.InterpolatedLinearOperator` - * :class:`opinf.operators.InterpolatedQuadraticOperator` - * :class:`opinf.operators.InterpolatedCubicOperator` - * :class:`opinf.operators.InterpolatedInputOperator` - * :class:`opinf.operators.InterpolatedStateInputOperator` - Parameters ---------- training_parameters : list of s scalars or (p,) 1D ndarrays - Parameter values for which the operator entries are known + Parameter values for which the operator matrix is known or will be inferred from data. If not provided in the constructor, use :meth:`set_training_parameters` later. entries : list of s ndarrays, or None - Operator entries corresponding to the ``training_parameters``. + Operator matrices corresponding to the ``training_parameters``. If not provided in the constructor, use :meth:`set_entries` later. InterpolatorClass : type or None Class for the elementwise interpolation. 
Must obey the syntax @@ -94,12 +86,11 @@ def __init__( fromblock=False, ): """Set attributes and, if training parameters and entries are given, - construct the elementwise operator interpolator. + construct the elementwise operator matrix interpolator. """ ParametricOpInfOperator.__init__(self) self.__parameters = None - self.__entries = None self.__interpolator = None self.__InterpolatorClass = InterpolatorClass @@ -146,13 +137,15 @@ def _from_operators( def _clear(self) -> None: """Reset the operator to its post-constructor state without entries.""" - self.__entries = None + ParametricOpInfOperator._clear(self) self.__interpolator = None # Properties -------------------------------------------------------------- @property def training_parameters(self): - """Parameter values for which the operator entries are known.""" + """Parameter values where the operator matrix is known + or will be inferred from data. + """ return self.__parameters @training_parameters.setter @@ -166,7 +159,7 @@ def set_training_parameters(self, training_parameters): Parameters ---------- training_parameters : list of s scalars or (p,) 1D ndarrays - Parameter values for which the operator entries are known + Parameter values for which the operator matrix is known or will be inferred from data. 
""" if self.__interpolator is not None: @@ -183,35 +176,34 @@ def set_training_parameters(self, training_parameters): parameters = np.array(training_parameters) if parameters.ndim not in (1, 2): raise ValueError("parameter values must be scalars or 1D arrays") - self._set_parameter_dimension_from_data(parameters) + self._set_parameter_dimension_from_values(parameters) self.__parameters = parameters @property - def entries(self): - """Operator entries corresponding to the training parameters values, - i.e., ``entries[i]`` are the operator entries corresponding to the + def entries(self) -> np.ndarray: + """Operator matrices corresponding to the training parameters values, + i.e., ``entries[i]`` is the operator matrix corresponding to the parameter value ``training_parameters[i]``. """ - return self.__entries + return ParametricOpInfOperator.entries.fget(self) @entries.setter def entries(self, entries): - """Set the operator entries.""" - self.set_entries(entries) + """Set the operator matrices.""" + ParametricOpInfOperator.entries.fset(self, entries) @entries.deleter def entries(self): """Reset the ``entries`` attribute.""" - self._clear() + ParametricOpInfOperator.entries.fdel(self) def set_entries(self, entries, fromblock: bool = False) -> None: - r"""Set the operator entries, the matrices - :math:`\Ohat_{\ell}^{(1)},\ldots,\Ohat_{\ell}^{(s)}`. + r"""Set the operator matrices at the training parameter values. Parameters ---------- entries : list of s (r, d) ndarrays, or (r, sd) ndarray - Operator entries, either as a list of arrays + Operator matrices, either as a list of arrays (``fromblock=False``, default) or as a horizontal concatenatation of arrays (``fromblock=True``). 
fromblock : bool @@ -242,23 +234,12 @@ def set_entries(self, entries, fromblock: bool = False) -> None: f"!= len(entries) = {n_arrays}" ) - self.__entries = np.array( - [self.OperatorClass(A).entries for A in entries] + ParametricOpInfOperator.set_entries( + self, + np.array([self.OperatorClass(A).entries for A in entries]), ) self.set_interpolator(self.__InterpolatorClass) - @property - def state_dimension(self) -> int: - r"""Dimension of the state :math:`\qhat` that the operator acts on.""" - return None if self.entries is None else self.entries[0].shape[0] - - @property - def shape(self) -> tuple: - """Shape of the operator entries matrix when evaluated - at a parameter value. - """ - return None if self.entries is None else self.entries[0].shape - # Interpolation ----------------------------------------------------------- @property def interpolator(self): @@ -268,7 +249,7 @@ def interpolator(self): return self.__interpolator def set_interpolator(self, InterpolatorClass): - """Construct the interpolator for the operator entries. + """Construct the interpolator for the operator matrix. Parameters ---------- @@ -296,14 +277,8 @@ def set_interpolator(self, InterpolatorClass): self.__InterpolatorClass = InterpolatorClass # Magic methods ----------------------------------------------------------- - def __len__(self) -> int: - """Length: number of training data points for the interpolation.""" - if self.training_parameters is None: - return 0 - return len(self.training_parameters) - def __eq__(self, other) -> bool: - """Test whether the training parameters and operator entries of two + """Test whether the training parameters and operator matrices of two _InterpolatedOperator objects are the same. """ if not isinstance(other, self.__class__): @@ -367,14 +342,14 @@ def galerkin(self, Vr, Wr=None): .. 
math:: \f_\ell(\q,\u;\bfmu) = \textrm{interpolate}( - (\bfmu_1,\f_{\ell}^{(1)}(\q,\u)),\ldots, - (\bfmu_s,\f_{\ell}^{(s)}(\q,\u)); \bfmu), + (\bfmu_0,\f_{\ell}^{(0)}(\q,\u)),\ldots, + (\bfmu_{s-1},\f_{\ell}^{(s-1)}(\q,\u)); \bfmu), where * :math:`\q\in\RR^n` is the full-order state, * :math:`\u\in\RR^m` is the input, - * :math:`\bfmu_1,\ldots,\bfmu_s\in\RR^p` + * :math:`\bfmu_0,\ldots,\bfmu_{s-1}\in\RR^p` are the (fixed) training parameter values, * :math:`\f_{\ell}^{(i)}(\q,\u) = \f_{\ell}(\q,\u;\bfmu_i)` is the operators evaluated at the :math:`i`-th training parameter @@ -389,8 +364,8 @@ def galerkin(self, Vr, Wr=None): .. math:: \fhat_{\ell}(\qhat,\u;\bfmu) = \textrm{interpolate}( - (\bfmu_1,\Wr\trp\f_{\ell}^{(1)}(\Vr\qhat,\u)),\ldots, - (\bfmu_s,\Wr\trp\f_{\ell}^{(s)}(\Vr\qhat,\u)); \bfmu), + (\bfmu_0,\Wr\trp\f_{\ell}^{(0)}(\Vr\qhat,\u)),\ldots, + (\bfmu_{s-1},\Wr\trp\f_{\ell}^{(s-1)}(\Vr\qhat,\u)); \bfmu), Here, :math:`\qhat\in\RR^r` is the reduced-order state, which enables the low-dimensional state approximation :math:`\q = \Vr\qhat`. @@ -421,7 +396,7 @@ def galerkin(self, Vr, Wr=None): # Operator inference ------------------------------------------------------ @classmethod - def datablock(cls, states, inputs=None): + def datablock(cls, states, inputs=None) -> np.ndarray: r"""Return the data matrix block corresponding to the operator. For interpolated operators, this is a block diagonal matrix where the @@ -454,8 +429,7 @@ def datablock(cls, states, inputs=None): @classmethod def operator_dimension(cls, s: int, r: int, m: int) -> int: - r"""Number of columns `sd` in the concatenated operator matrix - :math:`[~\Ohat_{\ell}^{(1)}~~\cdots~~\Ohat_{\ell}^{(s)}~]`. + r"""Number of columns in the concatenated operator matrix. Parameters ---------- @@ -481,6 +455,9 @@ def copy(self): def save(self, savefile: str, overwrite: bool = False) -> None: """Save the operator to an HDF5 file. 
+ If the :attr:`InterpolatorClass` is not from :mod:`scipy.interpolate`, + it must be passed to :meth:`load()` when recovering the operator. + Parameters ---------- savefile : str @@ -524,7 +501,7 @@ def load(cls, loadfile: str, InterpolatorClass: type = None): ---------- loadfile : str Path to the file where the operator was stored via :meth:`save()`. - InterpolatorClass : type + InterpolatorClass : type or None Class for the elementwise interpolation. Must obey the syntax >>> interpolator = InterpolatorClass(data_points, data_values) @@ -535,7 +512,7 @@ def load(cls, loadfile: str, InterpolatorClass: type = None): Returns ------- - op : _Operator + op : _InterpolatedOperator Initialized operator object. """ with utils.hdf5_loadhandle(loadfile) as hf: @@ -588,25 +565,25 @@ class InterpolatedConstantOperator(_InterpolatedOperator): .. math:: \chat(\bfmu) = \textrm{interpolate}( - (\bfmu_1,\chat^{(1)}),\ldots,(\bfmu_s,\chat^{(s)}); \bfmu) + (\bfmu_0,\chat^{(0)}),\ldots,(\bfmu_{s-1},\chat^{(s-1)}); \bfmu) Here, - * :math:`\bfmu_1,\ldots,\bfmu_s\in\RR^p` + * :math:`\bfmu_0,\ldots,\bfmu_{s-1}\in\RR^p` are the (fixed) training parameter values, and * :math:`\chat^{(i)} = \chat(\bfmu_i) \in \RR^r` - are the operator entries evaluated at the training parameter values. + is the operator vector evaluated at the training parameter values. See :class:`opinf.operators.ConstantOperator`. Parameters ---------- training_parameters : list of s scalars or (p,) 1D ndarrays - Parameter values for which the operator entries are known - or will be inferred from data. If not provided in the constructor, + Parameter values for which the operator vector is known or + will be inferred from data. If not provided in the constructor, use :meth:`set_training_parameters` later. entries : list of s ndarrays, or None - Operator entries corresponding to the ``training_parameters``. + Operator vectors corresponding to the ``training_parameters``. 
If not provided in the constructor, use :meth:`set_entries` later. InterpolatorClass : type or None Class for the elementwise interpolation. Must obey the syntax @@ -636,14 +613,14 @@ class InterpolatedLinearOperator(_InterpolatedOperator): .. math:: \Ahat(\bfmu) = \textrm{interpolate}( - (\bfmu_1,\Ahat^{(1)}),\ldots,(\bfmu_s,\Ahat^{(s)}); \bfmu) + (\bfmu_0,\Ahat^{(0)}),\ldots,(\bfmu_{s-1},\Ahat^{(s-1)}); \bfmu) Here, - * :math:`\bfmu_1,\ldots,\bfmu_s\in\RR^p` + * :math:`\bfmu_0,\ldots,\bfmu_{s-1}\in\RR^p` are the (fixed) training parameter values, and * :math:`\Ahat^{(i)} = \Ahat(\bfmu_i) \in \RR^{r \times r}` - are the operator entries evaluated at the training parameter values. + is the operator matrix for the :math:`i`th training parameter value. See :class:`opinf.operators.LinearOperator` @@ -684,11 +661,11 @@ class InterpolatedQuadraticOperator(_InterpolatedOperator): .. math:: \Hhat(\bfmu) = \textrm{interpolate}( - (\bfmu_1,\Hhat^{(1)}),\ldots,(\bfmu_s,\Hhat^{(s)}); \bfmu) + (\bfmu_0,\Hhat^{(0)}),\ldots,(\bfmu_{s-1},\Hhat^{(s-1)}); \bfmu) Here, - * :math:`\bfmu_1,\ldots,\bfmu_s\in\RR^p` + * :math:`\bfmu_0,\ldots,\bfmu_{s-1}\in\RR^p` are the (fixed) training parameter values, and * :math:`\Hhat^{(i)} = \Hhat(\bfmu_i) \in \RR^{r \times r^2}` are the operator entries evaluated at the training parameter values. @@ -733,11 +710,11 @@ class InterpolatedCubicOperator(_InterpolatedOperator): .. math:: \Ghat(\bfmu) = \textrm{interpolate}( - (\bfmu_1,\Ghat^{(1)}),\ldots,(\bfmu_s,\Ghat^{(s)}); \bfmu) + (\bfmu_0,\Ghat^{(0)}),\ldots,(\bfmu_{s-1},\Ghat^{(s-1)}); \bfmu) Here, - * :math:`\bfmu_1,\ldots,\bfmu_s\in\RR^p` + * :math:`\bfmu_0,\ldots,\bfmu_{s-1}\in\RR^p` are the (fixed) training parameter values, and * :math:`\Ghat^{(i)} = \Ghat(\bfmu_i) \in \RR^{r \times r^3}` are the operator entries evaluated at the training parameter values. @@ -781,11 +758,11 @@ class InterpolatedInputOperator(_InterpolatedOperator, InputMixin): .. 
math:: \Bhat(\bfmu) = \textrm{interpolate}( - (\bfmu_1,\Bhat^{(1)}),\ldots,(\bfmu_s,\Bhat^{(s)}); \bfmu) + (\bfmu_0,\Bhat^{(0)}),\ldots,(\bfmu_{s-1},\Bhat^{(s-1)}); \bfmu) Here, - * :math:`\bfmu_1,\ldots,\bfmu_s\in\RR^p` + * :math:`\bfmu_0,\ldots,\bfmu_{s-1}\in\RR^p` are the (fixed) training parameter values, and * :math:`\Bhat^{(i)} = \Bhat(\bfmu_i) \in \RR^{r \times m}` are the operator entries evaluated at the training parameter values. @@ -835,11 +812,11 @@ class InterpolatedStateInputOperator(_InterpolatedOperator, InputMixin): .. math:: \Nhat(\bfmu) = \textrm{interpolate}( - (\bfmu_1,\Nhat^{(1)}),\ldots,(\bfmu_s,\Nhat^{(s)}); \bfmu) + (\bfmu_0,\Nhat^{(0)}),\ldots,(\bfmu_{s-1},\Nhat^{(s-1)}); \bfmu) Here, - * :math:`\bfmu_1,\ldots,\bfmu_s\in\RR^p` + * :math:`\bfmu_0,\ldots,\bfmu_{s-1}\in\RR^p` are the (fixed) training parameter values, and * :math:`\Nhat^{(i)} = \Nhat(\bfmu_i) \in \RR^{r \times rm}` are the operator entries evaluated at the training parameter values. diff --git a/src/opinf/operators/_nonparametric.py b/src/opinf/operators/_nonparametric.py index 3d57c326..0e77bdf1 100644 --- a/src/opinf/operators/_nonparametric.py +++ b/src/opinf/operators/_nonparametric.py @@ -50,6 +50,16 @@ def entries(self): r"""Operator vector :math:`\chat`.""" return OpInfOperator.entries.fget(self) + @entries.setter + def entries(self, entries): + """Set the ``entries`` attribute.""" + OpInfOperator.entries.fset(self, entries) + + @entries.deleter + def entries(self): + """Reset the ``entries`` attribute.""" + OpInfOperator.entries.fdel(self) + @property def shape(self): r"""Shape :math:`(r,)` of the operator vector :math:`\chat`.""" @@ -207,6 +217,16 @@ def entries(self): r"""Operator matrix :math:`\Ahat`.""" return OpInfOperator.entries.fget(self) + @entries.setter + def entries(self, entries): + """Set the ``entries`` attribute.""" + OpInfOperator.entries.fset(self, entries) + + @entries.deleter + def entries(self): + """Reset the ``entries`` attribute.""" + 
OpInfOperator.entries.fdel(self) + @property def shape(self): r"""Shape :math:`(r, r)` of the operator matrix :math:`\Ahat`.""" @@ -395,6 +415,16 @@ def entries(self): """ return OpInfOperator.entries.fget(self) + @entries.setter + def entries(self, entries): + """Set the ``entries`` attribute.""" + OpInfOperator.entries.fset(self, entries) + + @entries.deleter + def entries(self): + """Reset the ``entries`` attribute.""" + OpInfOperator.entries.fdel(self) + @property def shape(self): r"""Shape :math:`(r, r(r+1)/2)` of the internal representation @@ -872,6 +902,16 @@ def entries(self): """ return OpInfOperator.entries.fget(self) + @entries.setter + def entries(self, entries): + """Set the ``entries`` attribute.""" + OpInfOperator.entries.fset(self, entries) + + @entries.deleter + def entries(self): + """Reset the ``entries`` attribute.""" + OpInfOperator.entries.fdel(self) + @property def shape(self): r"""Shape :math:`(r, r(r+1)(r+2)/6)` of the internal representation @@ -1316,6 +1356,16 @@ def entries(self): r"""Operator matrix :math:`\Bhat`.""" return OpInfOperator.entries.fget(self) + @entries.setter + def entries(self, entries): + """Set the ``entries`` attribute.""" + OpInfOperator.entries.fset(self, entries) + + @entries.deleter + def entries(self): + """Reset the ``entries`` attribute.""" + OpInfOperator.entries.fdel(self) + @property def shape(self): r"""Shape :math:`(r, m)` of the operator matrix :math:`\Bhat`.""" @@ -1480,6 +1530,16 @@ def entries(self): r"""Operator matrix :math:`\Nhat`.""" return OpInfOperator.entries.fget(self) + @entries.setter + def entries(self, entries): + """Set the ``entries`` attribute.""" + OpInfOperator.entries.fset(self, entries) + + @entries.deleter + def entries(self): + """Reset the ``entries`` attribute.""" + OpInfOperator.entries.fdel(self) + @property def shape(self): r"""Shape :math:`(r, rm)` of the operator matrix :math:`\Nhat`.""" diff --git a/tests/operators/test_base.py b/tests/operators/test_base.py index 
4bb0fff4..bacb795d 100644 --- a/tests/operators/test_base.py +++ b/tests/operators/test_base.py @@ -32,77 +32,56 @@ class TestOperatorTemplate: Operator = _module.OperatorTemplate - def test_str(self, r=11, m=3): - """Test __str__() and _str().""" + class Dummy(_module.OperatorTemplate): + """Instantiable version of OperatorTemplate.""" - class Dummy(self.Operator): - """Instantiable version of OperatorTemplate.""" + def __init__(self, state_dimension): + self.__r = state_dimension - def __init__(self, state_dimension=r): - self.__r = state_dimension + @property + def state_dimension(self): + return self.__r - @property - def state_dimension(self): - return self.__r + def apply(self, state, input_=None): + return state - def apply(self, state, input_=None): - return state + def jacobian(self, state, input_=None): + return np.eye(state.size) - class InputDummy(Dummy, _module.InputMixin): - """Instantiable version of OperatorTemplate with inputs.""" + class InputDummy(Dummy, _module.InputMixin): + """Instantiable version of OperatorTemplate with inputs.""" - def __init__(self, state_dimension=r, input_dimension=m): - Dummy.__init__(self, state_dimension) - self.__m = input_dimension + def __init__(self, state_dimension, input_dimension): + TestOperatorTemplate.Dummy.__init__(self, state_dimension) + self.__m = input_dimension - @property - def input_dimension(self): - return self.__m + @property + def input_dimension(self): + return self.__m - def _test(DummyClass): - dummystr = str(DummyClass()) + def test_str(self, r=11, m=3): + """Test __str__() and _str().""" + + def _test(DummyClass, args): + dummystr = str(DummyClass(*args)) assert dummystr.startswith(DummyClass.__name__) for line in (lines := dummystr.split("\n")[1:]): assert line.startswith(" ") assert lines[0].endswith(f"{r}") return lines - _test(Dummy) - assert _test(InputDummy)[-1].endswith(f"{m}") + _test(self.Dummy, [r]) + assert _test(self.InputDummy, [r, m])[-1].endswith(f"{m}") - assert 
Dummy._str("q", "u") == "f(q, u)" + assert self.Dummy._str("q", "u") == "f(q, u)" def test_verify(self, r=10, m=4): """Test verify().""" - class Dummy(self.Operator): - """Instantiable version of OperatorTemplate.""" - - def __init__(self, state_dimension=r): - self.__r = state_dimension - - @property - def state_dimension(self): - return self.__r - - def apply(self, state, input_=None): - return state - - class InputDummy(Dummy, _module.InputMixin): - """Instantiable version of OperatorTemplate with inputs.""" - - def __init__(self, state_dimension=r, input_dimension=m): - Dummy.__init__(self, state_dimension) - self.__m = input_dimension - - @property - def input_dimension(self): - return self.__m - - op = Dummy() + op = self.Dummy(r) op.verify() - op = InputDummy() + op = self.InputDummy(r, m) op.verify() def _single(DummyClass, message): @@ -112,6 +91,16 @@ def _single(DummyClass, message): assert ex.value.args[0] == message # Verification failures for apply(). + BaseDummy = self.Dummy + BaseInputDummy = self.InputDummy + + class Dummy(BaseDummy): + def __init__(self, rr=r): + BaseDummy.__init__(self, rr) + + class InputDummy(BaseInputDummy): + def __init__(self, rr=r, mm=m): + BaseInputDummy.__init__(self, rr, mm) class Dummy1(Dummy): def __init__(self): @@ -125,8 +114,9 @@ class Dummy2(Dummy): def apply(self, state, input_=None): return state[:-1] - class Dummy2I(Dummy2, InputDummy): - pass + class Dummy2I(InputDummy): + def apply(self, state, input_=None): + return state[:-1] class Dummy3(Dummy): def apply(self, state, input_=None): @@ -134,8 +124,11 @@ def apply(self, state, input_=None): return state return state[:, :-1] - class Dummy3I(Dummy3, InputDummy): - pass + class Dummy3I(InputDummy): + def apply(self, state, input_=None): + if state.ndim == 1: + return state + return state[:, :-1] _single( Dummy1, @@ -180,8 +173,9 @@ class Dummy4(Dummy): def jacobian(self, state, input_=None): return state - class Dummy4I(Dummy4, InputDummy): - pass + class 
Dummy4I(InputDummy): + def jacobian(self, state, input_=None): + return state _single( Dummy4, @@ -702,67 +696,126 @@ def test_is_nonparametric(): # Parametric operators ======================================================== -class TestParametricOpInfOperator: - """Test operators._base.ParametricOpInfOperator.""" +class TestParametricOperatorTemplate: + """Test operators._base.ParametricOperatorTemplate.""" - class Dummy(_module.ParametricOpInfOperator): - """Instantiable version of ParametricOpInfOperator.""" + Operator = _module.ParametricOperatorTemplate - _OperatorClass = TestOpInfOperator.Dummy + class Dummy(_module.ParametricOperatorTemplate): + """Instantiable version of ParametricOperatorTemplate.""" - def __init__(self): - _module.ParametricOpInfOperator.__init__(self) + _OperatorClass = TestOperatorTemplate.Dummy - def _clear(self): - pass + def __init__(self, state_dim, param_dim): + self.__r = state_dim + self.__p = param_dim - def state_dimension(self): - pass + @property + def state_dimension(self) -> int: + return self.__r - def shape(self): - pass + @property + def parameter_dimension(self) -> int: + return self.__p def evaluate(self, parameter): - op = self._OperatorClass() - op.set_entries(np.random.random((2, 2))) - return op + return self.OperatorClass(self.state_dimension) - def galerkin(self, *args, **kwargs): - pass + def test_check_parametervalue_dimension(self, r=8, p=3): + """Test _check_parametervalue_dimension().""" + op = self.Dummy(r, None) - def datablock(self, *args, **kwargs): - pass + with pytest.raises(RuntimeError) as ex: + op._check_parametervalue_dimension(10) + assert ex.value.args[0] == "parameter_dimension not set" - def operator_dimension(self, *args, **kwargs): - pass + op = self.Dummy(r, p) - def copy(self, *args, **kwargs): - pass + val = np.empty(p - 1) + with pytest.raises(ValueError) as ex: + op._check_parametervalue_dimension(val) + assert ex.value.args[0] == f"expected parameter of shape ({p:d},)" - def save(self, 
*args, **kwargs): - pass + op._check_parametervalue_dimension(np.empty(p)) - def load(self, *args, **kwargs): - pass + def test_evals(self, r=10, p=3): + """Test evaluate() and apply().""" + op = self.Dummy(r, p) + assert op.state_dimension == r + assert op.parameter_dimension == p + + param = np.random.random(p) + npop = op.evaluate(param) + assert isinstance(npop, self.Dummy._OperatorClass) + assert npop.state_dimension == r + + q = np.random.random(r) + npop_out = npop.apply(q) + op_out = op.apply(param, q) + assert np.all(op_out == npop_out) + + npop_jac = npop.jacobian(q) + op_jac = op.jacobian(param, q) + assert np.all(op_jac == npop_jac) + + op.verify() + + def test_str(self, r=7, p=2): + """Lightly test __str__() and __repr__().""" + repr(self.Dummy(r, p)) + + class InputDummy(self.Dummy, _module.InputMixin): + @property + def input_dimension(self): + return 10000 + + repr(InputDummy(r, p)) + + +class TestParametricOpInfOperator: + """Test operators._base.ParametricOpInfOperator.""" - def test_set_parameter_dimension_from_data(self): - """Test _set_parameter_dimension_from_data().""" + class Dummy(_module.ParametricOpInfOperator): + """Instantiable version of ParametricOpInfOperator.""" + + _OperatorClass = TestOpInfOperator.Dummy + + def set_entries(self, entries): + _module.ParametricOpInfOperator.set_entries(self, entries) + + def evaluate(self, parameter): + self._check_parametervalue_dimension(parameter) + op = self.OperatorClass() + op.set_entries(self.entries[0]) + return op + + def operator_dimension(self, r, m): + return 4 + + def datablock(self, states, inputs=None): + K = sum([Q.shape[-1] for Q in states]) + return np.random.random(4, K) + + def test_set_parameter_dimension_from_values(self): + """Test _set_parameter_dimension_from_values().""" op = self.Dummy() assert op.parameter_dimension is None # One-dimensional parameters. 
- op._set_parameter_dimension_from_data(np.arange(10)) + op._set_parameter_dimension_from_values(np.arange(10)) assert op.parameter_dimension == 1 - op._set_parameter_dimension_from_data(np.arange(5).reshape((-1, 1))) + op._set_parameter_dimension_from_values(np.arange(5).reshape((-1, 1))) assert op.parameter_dimension == 1 # n-dimensional parameters. n = np.random.randint(2, 20) - op._set_parameter_dimension_from_data(np.random.random((5, n))) + op._set_parameter_dimension_from_values(np.random.random((5, n))) assert op.parameter_dimension == n with pytest.raises(ValueError) as ex: - op._set_parameter_dimension_from_data(np.random.random((2, 2, 2))) + op._set_parameter_dimension_from_values( + np.random.random((2, 2, 2)) + ) assert ex.value.args[0] == ( "parameter values must be scalars or 1D arrays" ) @@ -777,30 +830,13 @@ def test_check_shape_consistency(self): arrays[1] = arrays[1].T self.Dummy._check_shape_consistency(arrays, "array") - def test_check_parametervalue_dimension(self, p=3): - """Test _check_parametervalue_dimension().""" + def test_entries(self, r=8, p=2): + """Test entries, shape, and set_entries().""" op = self.Dummy() - - with pytest.raises(RuntimeError) as ex: - op._check_parametervalue_dimension(10) - assert ex.value.args[0] == "parameter_dimension not set" - - op._set_parameter_dimension_from_data(np.empty((5, p))) - - val = np.empty(p - 1) - with pytest.raises(ValueError) as ex: - op._check_parametervalue_dimension(val) - assert ex.value.args[0] == f"expected parameter of shape ({p:d},)" - - op._check_parametervalue_dimension(np.empty(p)) - - def test_apply(self): - """Test apply().""" - assert self.Dummy().apply(None, None, None) == -1 - - def test_jacobian(self): - """Test jacobian().""" - assert self.Dummy().jacobian(None, None, None) == 0 + assert op.entries is None + assert op.shape is None + op.set_entries([np.random.random((r, r)) for _ in range(r)]) + assert op.shape == (r, r) def test_is_parametric(): diff --git 
a/tests/operators/test_interpolate.py b/tests/operators/test_interpolate.py index b42c443f..07e4a9c8 100644 --- a/tests/operators/test_interpolate.py +++ b/tests/operators/test_interpolate.py @@ -103,7 +103,6 @@ def test_set_training_parameters(self, s=10, p=2, r=4): assert op.training_parameters is None assert op.parameter_dimension is None assert op.state_dimension is None - assert len(op) == 0 mu_bad = np.empty((s, p, p)) with pytest.raises(ValueError) as ex: @@ -115,17 +114,14 @@ def test_set_training_parameters(self, s=10, p=2, r=4): mu = np.empty((s, p)) op.set_training_parameters(mu) assert np.all(op.training_parameters == mu) - assert len(op) == s assert op.state_dimension is None assert op.interpolator is None op.set_training_parameters(mu[:, 0]) assert np.all(op.training_parameters == mu[:, 0]) - assert len(op) == s entries = np.random.standard_normal((s, r, r)) op = self.Dummy(mu, entries) - assert len(op) == s with pytest.raises(AttributeError) as ex: op.set_training_parameters(mu) diff --git a/tests/operators/test_nonparametric.py b/tests/operators/test_nonparametric.py index 10aa3253..edcda7d8 100644 --- a/tests/operators/test_nonparametric.py +++ b/tests/operators/test_nonparametric.py @@ -45,8 +45,10 @@ def test_verify(self, shape=None): op = self.Operator() op.verify() - op.set_entries(np.random.random(shape)) + op.entries = np.random.random(shape) op.verify() + del op.entries + assert op.entries is None # No dependence on state or input ============================================= From 3461a8b0c87ac2edbe1ab1bb42acd8b3343487b9 Mon Sep 17 00:00:00 2001 From: Shane Date: Wed, 14 Aug 2024 14:31:51 -0600 Subject: [PATCH 05/48] update models for interpolation changes --- src/opinf/models/mono/_parametric.py | 4 +-- src/opinf/operators/_base.py | 12 ++++--- src/opinf/operators/_interpolate.py | 6 +++- tests/models/mono/test_parametric.py | 48 ++++++++++++---------------- tests/operators/test_interpolate.py | 2 +- 5 files changed, 37 insertions(+), 35 
deletions(-) diff --git a/src/opinf/models/mono/_parametric.py b/src/opinf/models/mono/_parametric.py index ee3ac0b2..6276d89e 100644 --- a/src/opinf/models/mono/_parametric.py +++ b/src/opinf/models/mono/_parametric.py @@ -178,7 +178,7 @@ def parameter_dimension(self, p): ) self.__p = p - def _set_parameter_dimension_from_data(self, parameters): + def _set_parameter_dimension_from_values(self, parameters): """Extract and save the dimension of the parameter space from a set of parameter values. @@ -205,7 +205,7 @@ def _process_fit_arguments(self, parameters, states, lhs, inputs): # Process parameters. parameters = np.array(parameters) - self._set_parameter_dimension_from_data(parameters) + self._set_parameter_dimension_from_values(parameters) n_datasets = len(parameters) def _check_valid_dimension0(dataset, label): diff --git a/src/opinf/operators/_base.py b/src/opinf/operators/_base.py index 29eeb8c4..39bd791e 100644 --- a/src/opinf/operators/_base.py +++ b/src/opinf/operators/_base.py @@ -1281,11 +1281,13 @@ def set_entries(self, entries, fromblock: bool = False) -> None: # Operator inference ------------------------------------------------------ @abc.abstractmethod - def operator_dimension(self, r: int, m: int = None) -> int: + def operator_dimension(self, s: int, r: int, m: int = None) -> int: r"""Number of columns in the total operator matrix. Parameters ---------- + s : int + Number of training parameter values. r : int State dimension. m : int or None @@ -1300,11 +1302,13 @@ def operator_dimension(self, r: int, m: int = None) -> int: raise NotImplementedError # pragma: no cover @abc.abstractmethod - def datablock(self, states, inputs=None): + def datablock(self, parameters, states, inputs=None): r"""Return the data matrix block corresponding to the operator. Parameters ---------- + parameters : (s, p) ndarray + Training parameter values :math:`\bfmu_{0},\ldots,\bfmu_{s-1}`. 
states : list of s (r, k_i) ndarrays State snapshots for each of the :math:`s` training parameter values. @@ -1313,9 +1317,9 @@ def datablock(self, states, inputs=None): Returns ------- - block : (d, K) ndarray + block : (D, K) ndarray Data block for the parametric operator. - Here, :math:`d` is the total operator matrix dimension and + Here, :math:`D` is the total operator matrix dimension and :math:`K = \sum_{i=0}^{s-1}k_i`, the total number of state snapshots. """ diff --git a/src/opinf/operators/_interpolate.py b/src/opinf/operators/_interpolate.py index d09fb3db..a0758fad 100644 --- a/src/opinf/operators/_interpolate.py +++ b/src/opinf/operators/_interpolate.py @@ -396,7 +396,7 @@ def galerkin(self, Vr, Wr=None): # Operator inference ------------------------------------------------------ @classmethod - def datablock(cls, states, inputs=None) -> np.ndarray: + def datablock(cls, parameters, states, inputs=None) -> np.ndarray: r"""Return the data matrix block corresponding to the operator. For interpolated operators, this is a block diagonal matrix where the @@ -406,6 +406,8 @@ def datablock(cls, states, inputs=None) -> np.ndarray: Parameters ---------- + parameters : (s, p) ndarray + Training parameter values :math:`\bfmu_{0},\ldots,\bfmu_{s-1}`. states : list of s (r, k) or (k,) ndarrays State snapshots for each of the `s` training parameter values. If each snapshot matrix is 1D, it is assumed that :math:`r = 1`. @@ -420,6 +422,8 @@ def datablock(cls, states, inputs=None) -> np.ndarray: of rows in the data block corresponding to a single training parameter value. 
""" + if not issubclass(cls, InputMixin): + inputs = [None] * len(parameters) return la.block_diag( *[ cls._OperatorClass.datablock(Q, U) diff --git a/tests/models/mono/test_parametric.py b/tests/models/mono/test_parametric.py index 79326d33..749963bb 100644 --- a/tests/models/mono/test_parametric.py +++ b/tests/models/mono/test_parametric.py @@ -41,18 +41,19 @@ class DummyOpInfOperator2(DummyOpInfOperator): class DummyParametricOperator(opinf.operators.ParametricOpInfOperator): - """Instantiable version of ParametricOperator.""" + """Instantiable version of ParametricOpInfOperator.""" _OperatorClass = DummyOpInfOperator def __init__(self, entries=None): - opinf.operators.ParametricOpInfOperator.__init__(self) - self.entries = entries + super().__init__() + if entries is not None: + self.set_entries(entries) - def _clear(*args, **kwargs): # pragma: no cover - pass + def set_entries(self, entries): + super().set_entries(entries) - def copy(*args, **kwargs): # pragma: no cover + def operator_dimension(*args, **kwargs): # pragma: no cover pass def datablock(*args, **kwargs): # pragma: no cover @@ -61,24 +62,17 @@ def datablock(*args, **kwargs): # pragma: no cover def evaluate(self, *args, **kwargs): # pragma: no cover return self._OperatorClass(self.entries) - def galerkin(*args, **kwargs): # pragma: no cover - pass + # def galerkin(*args, **kwargs): # pragma: no cover + # pass - def load(*args, **kwargs): # pragma: no cover - pass + # def copy(*args, **kwargs): # pragma: no cover + # pass - def operator_dimension(*args, **kwargs): # pragma: no cover - pass - - def save(*args, **kwargs): # pragma: no cover - pass - - def shape(*args, **kwargs): # pragma: no cover - pass + # def load(*args, **kwargs): # pragma: no cover + # pass - @property - def state_dimension(self): # pragma: no cover - return self.entries.shape[0] + # def save(*args, **kwargs): # pragma: no cover + # pass class DummyParametricOperator2(DummyParametricOperator): @@ -182,12 +176,12 @@ def 
test_check_parameter_dimension_consistency(self, s=3): assert p is None op1 = DummyParametricOperator() - op1._set_parameter_dimension_from_data(np.empty((s, 10))) + op1._set_parameter_dimension_from_values(np.empty((s, 10))) p = self.Dummy._check_parameter_dimension_consistency([op1]) assert p == 10 op2 = DummyParametricOperator2() - op2._set_parameter_dimension_from_data(np.empty((s, 20))) + op2._set_parameter_dimension_from_values(np.empty((s, 20))) with pytest.raises(opinf.errors.DimensionalityError) as ex: self.Dummy._check_parameter_dimension_consistency([op1, op2]) @@ -201,13 +195,13 @@ def test_parameter_dimension(self, s=3, p=4): op = DummyParametricOperator() model = self.Dummy([op, DummyOpInfOperator2()]) - model._set_parameter_dimension_from_data(np.empty((s, p))) + model._set_parameter_dimension_from_values(np.empty((s, p))) assert model.parameter_dimension == p model.parameter_dimension = 10 assert model.parameter_dimension == 10 - op._set_parameter_dimension_from_data(np.empty((s, 20))) + op._set_parameter_dimension_from_values(np.empty((s, 20))) with pytest.raises(AttributeError) as ex: model.parameter_dimension = 15 @@ -219,11 +213,11 @@ def test_parameter_dimension(self, s=3, p=4): assert model.parameter_dimension == 20 model = self.Dummy(DummyParametricOperator()) - model._set_parameter_dimension_from_data(np.empty(s)) + model._set_parameter_dimension_from_values(np.empty(s)) assert model.parameter_dimension == 1 with pytest.raises(ValueError) as ex: - model._set_parameter_dimension_from_data(np.empty((s, s, s))) + model._set_parameter_dimension_from_values(np.empty((s, s, s))) assert ex.value.args[0] == ( "parameter values must be scalars or 1D arrays" ) diff --git a/tests/operators/test_interpolate.py b/tests/operators/test_interpolate.py index 07e4a9c8..638d4621 100644 --- a/tests/operators/test_interpolate.py +++ b/tests/operators/test_interpolate.py @@ -275,7 +275,7 @@ def test_datablock(self, s=4, p=2, r=2, k=3): mu = np.random.random((s, 
p)) states = np.random.random((s, r, k)) op = self.Dummy(mu, InterpolatorClass=_DummyInterpolator) - block = op.datablock(states, states) + block = op.datablock(mu, states, states) assert block.shape == (s * _Dblock.shape[0], s * _Dblock.shape[1]) assert np.all(block == la.block_diag(*[_Dblock for _ in range(s)])) From b6606413850da939a0c4d18fc35763647ff67b4e Mon Sep 17 00:00:00 2001 From: Shane Date: Wed, 14 Aug 2024 14:33:48 -0600 Subject: [PATCH 06/48] first draft of affine operators and API documentation --- docs/source/api/missing.rst | 6 + docs/source/api/operators.ipynb | 68 +++- src/opinf/operators/__init__.py | 1 + src/opinf/operators/_affine.py | 602 ++++++++++++++++++++++++++++++++ 4 files changed, 673 insertions(+), 4 deletions(-) create mode 100644 src/opinf/operators/_affine.py diff --git a/docs/source/api/missing.rst b/docs/source/api/missing.rst index fdb89aec..7713747b 100644 --- a/docs/source/api/missing.rst +++ b/docs/source/api/missing.rst @@ -107,6 +107,12 @@ operators.ipynb StateInputOperator ParametricOperatorTemplate ParametricOpInfOperator + AffineConstantOperator + AffineLinearOperator + AffineQuadraticOperator + AffineCubicOperator + AffineInputOperator + AffineStateInputOperator InterpolatedConstantOperator InterpolatedLinearOperator InterpolatedQuadraticOperator diff --git a/docs/source/api/operators.ipynb b/docs/source/api/operators.ipynb index 0a0478a4..a535786a 100644 --- a/docs/source/api/operators.ipynb +++ b/docs/source/api/operators.ipynb @@ -40,6 +40,12 @@ "\n", " ParametricOperatorTemplate\n", " ParametricOpInfOperator\n", + " AffineConstantOperator\n", + " AffineLinearOperator\n", + " AffineQuadraticOperator\n", + " AffineCubicOperator\n", + " AffineInputOperator\n", + " AffineStateInputOperator\n", " InterpolatedConstantOperator\n", " InterpolatedLinearOperator\n", " InterpolatedQuadraticOperator\n", @@ -1080,7 +1086,7 @@ "$$\n", "\\begin{aligned}\n", " \\Op_{\\ell}(\\q,\\u;\\bfmu)\n", - " = 
\\sum_{a=1}^{A_{\\ell}}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,\\Op_{\\ell}^{(a)}\\!(\\q, \\u).\n", + " = \\sum_{a=0}^{A_{\\ell}-1}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,\\Op_{\\ell}^{(a)}\\!(\\q, \\u).\n", "\\end{aligned}\n", "$$\n", "\n", @@ -1091,10 +1097,10 @@ " \\Ophat_{\\ell}(\\qhat, \\u; \\bfmu)\n", " &= (\\Wr\\trp\\Vr)^{-1}\\Wr\\trp\\Op_{\\ell}(\\Vr\\qhat, \\u; \\bfmu)\n", " \\\\\n", - " &= (\\Wr\\trp\\Vr)^{-1}\\Wr\\trp \\sum_{a=1}^{A_{\\ell}}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,\\Op_{\\ell}^{(a)}\\!(\\V\\qhat, \\u)\n", + " &= (\\Wr\\trp\\Vr)^{-1}\\Wr\\trp \\sum_{a=0}^{A_{\\ell}-1}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,\\Op_{\\ell}^{(a)}\\!(\\V\\qhat, \\u)\n", " \\\\\n", - " &= \\sum_{a=1}^{A_{\\ell}}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,(\\Wr\\trp\\Vr)^{-1}\\Wr\\trp\\Op_{\\ell}^{(a)}\\!(\\V\\qhat, \\u)\n", - " = \\sum_{a=1}^{A_{\\ell}}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,\\Ophat_{\\ell}^{(a)}\\!(\\qhat, \\u),\n", + " &= \\sum_{a=0}^{A_{\\ell}-1}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,(\\Wr\\trp\\Vr)^{-1}\\Wr\\trp\\Op_{\\ell}^{(a)}\\!(\\V\\qhat, \\u)\n", + " = \\sum_{a=0}^{A_{\\ell}-1}\\theta_{\\ell}^{(a)}\\!(\\bfmu)\\,\\Ophat_{\\ell}^{(a)}\\!(\\qhat, \\u),\n", "\\end{aligned}\n", "$$\n", "\n", @@ -1332,6 +1338,60 @@ "$$\n", ":::" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{dropdown} Mixing Nonparametric and Parametric Operators\n", + "\n", + "Consider a system of ODEs with a mix of parametric and nonparametric operators,\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\ddt\\qhat(t)\n", + " = \\left(\\mu_{0}\\Ahat^{(0)} + \\cos(\\mu_{1})\\Ahat^{(1)}\\right)\\qhat(t) + \\Hhat[\\qhat(t) \\otimes \\qhat(t)] + \\Bhat(\\bfmu)\\u(t).\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "This model can be written in the general form {eq}`eq:operators:model` with three operators:\n", + "\n", + "- $\\Ophat_1(\\qhat,\\u;\\bfmu) = \\left(\\theta^{(0)}\\!(\\bfmu)\\,\\Ahat^{(0)} + \\theta^{(1)}\\!(\\bfmu)\\,\\Ahat^{(1)}\\right)\\qhat(t)$, an affine-parametric linear 
operator where $\\theta^{(0)}\\!(\\bfmu) = \\mu_{0}$ and $\\theta^{(1)}\\!(\\bfmu) = \\cos(\\mu_{1})$;\n", + "- $\\Ophat_2(\\qhat,\\u) = \\Hhat[\\qhat(t) \\otimes \\qhat(t)]$, a nonparametric quadratic operator; and\n", + "- $\\Ophat_3(\\qhat,\\u;\\bfmu) = \\Bhat(\\bfmu)\\u(t)$, a parametric input operator without a specified parametric structure.\n", + "\n", + "If $\\Bhat(\\bfmu)$ is parameterized with interpolation, the Operator Inference problem to learn the operator matrices can be written as $\\Z \\approx \\Ohat\\D\\trp$ in the following way.\n", + "Let $\\Qhat_i\\in\\RR^{r\\times k_i}$ and $\\U_i\\in\\RR^{m \\times k_i}$ collect the state and input data for training parameter value $\\bfmu_i$, with corresponding state time derivative data $\\Z_i = \\dot{\\Qhat}_i\\in\\RR^{r\\times k_i}$ for $i = 0,\\ldots, s-1$.\n", + "We then have\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\Z\n", + " &= \\left[\\begin{array}{ccc}\n", + " \\Z_0 & \\cdots & \\Z_{s-1}\n", + " \\end{array}\\right]\\in\\RR^{r\\times K}\n", + " \\\\\n", + " \\Ohat\n", + " &= \\left[\\begin{array}{cc|c|ccc}\n", + " \\Ahat^{(0)} & \\Ahat^{(1)} & \\Hhat & \\Bhat^{(0)} & \\cdots & \\Bhat^{(s-1)}\n", + " \\end{array}\\right]\\in\\RR^{r \\times d}\n", + " \\\\\n", + " \\D\\trp\n", + " &= \\left[\\begin{array}{}\n", + " \\theta^{(0)}\\!(\\bfmu_0)\\Qhat_{0} & \\cdots & \\theta^{(0)}\\!(\\bfmu_s)\\Qhat_{s} \\\\\n", + " \\theta^{(1)}\\!(\\bfmu_0)\\Qhat_{0} & \\cdots & \\theta^{(1)}\\!(\\bfmu_s)\\Qhat_{s} \\\\ \\hline\n", + " \\Qhat_{0}\\odot\\Qhat_{0} & \\cdots & \\Qhat_{s}\\odot\\Qhat_{s} \\\\ \\hline\n", + " \\U_{0} & \\cdots & \\0 \\\\\n", + " \\vdots & \\ddots & \\0 \\\\\n", + " \\0 & \\cdots & \\U_{s-1}\n", + " \\end{array}\\right]\\in\\RR^{d \\times K},\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "where $K = \\sum_{i=0}^{s-1}k_i$ is the total number of data snapshots and $d = 2r + r(r+1)/2 + sm$ is the total operator dimension.\n", + "Note that the operator and data matrices have blocks 
corresponding to each of the three operators in the model.\n", + ":::" + ] + } + ], + "metadata": { diff --git a/src/opinf/operators/__init__.py b/src/opinf/operators/__init__.py index 8db86f97..17901a97 100644 --- a/src/opinf/operators/__init__.py +++ b/src/opinf/operators/__init__.py @@ -3,4 +3,5 @@ from ._base import * from ._nonparametric import * +from ._affine import * from ._interpolate import * diff --git a/src/opinf/operators/_affine.py b/src/opinf/operators/_affine.py new file mode 100644 index 00000000..e2a37e98 --- /dev/null +++ b/src/opinf/operators/_affine.py @@ -0,0 +1,602 @@ +# operators/_affine.py +"""Classes for parametric OpInf operators where the parametric dependence is +expressed as an affine expansion. +""" + +__all__ = [ + "AffineConstantOperator", + "AffineLinearOperator", + "AffineQuadraticOperator", + "AffineCubicOperator", + "AffineInputOperator", + "AffineStateInputOperator", +] + +import numpy as np + +from .. import utils +from ._base import ParametricOpInfOperator, InputMixin +from ._nonparametric import ( + ConstantOperator, + LinearOperator, + QuadraticOperator, + CubicOperator, + InputOperator, + StateInputOperator, +) + + +# Base class ================================================================== +class _AffineOperator(ParametricOpInfOperator): + r"""Base class for parametric operators where the parameter dependence + can be written as an affine expansion with known scalar coefficients + which are a function of the parameter vector. + + This type of operator can be written as + + .. math:: + \Ophat_{\ell}(\qhat,\u;\bfmu) = \left(\sum_{a=0}^{A_{\ell}-1} + \theta_{\ell}^{(a)}\!(\bfmu)\Ohat_{\ell}^{(a)} + \right)\d_{\ell}(\qhat, \u) + + where each :math:`\theta_{\ell}^{(a)}:\RR^{p}\to\RR` is a scalar-valued + function of the parameter vector, each + :math:`\Ohat_{\ell}^{(a)}\in\RR^{r\times d}` is a constant matrix, and + :math:`\d:\RR^{r}\times\RR^{m}\to\RR^{d}`.
+ + Parent class: :class:`opinf.operators.ParametricOpInfOperator` + + Parameters + ---------- + coefficient_functions : iterable of callables + Scalar-valued coefficient functions for each term of the affine + expansion, i.e., + :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}`. + entries : list of ndarrays, or None + Operator matrices for each term of the affine expansion, i.e., + :math:`\Ohat_{\ell}^{(0)},\ldots,\Ohat_{\ell}^{(A_{\ell}-1)}`. + If not provided in the constructor, use :meth:`set_entries` later. + fromblock : bool + If ``True``, interpret ``entries`` as a horizontal concatenation + of arrays; if ``False`` (default), interpret ``entries`` as a list + of arrays. + """ + + # Initialization ---------------------------------------------------------- + def __init__( + self, + coefficient_functions, + entries=None, + fromblock=False, + ): + """Set coefficient functions and (if given) operator matrices.""" + ParametricOpInfOperator.__init__(self) + + # Ensure that the coefficient functions are callable. + if any(not callable(theta) for theta in coefficient_functions): + raise TypeError( + "coefficient_functions must be collection of callables" + ) + self.__thetas = tuple(coefficient_functions) + + if entries is not None: + self.set_entries(entries, fromblock=fromblock) + + # Properties -------------------------------------------------------------- + @property + def coefficient_functions(self) -> tuple: + r"""Scalar-valued coefficient functions for each term of the affine + expansion, i.e., + :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}`. + """ + return self.__thetas + + @property + def entries(self) -> np.ndarray: + r"""Operator matrices for each term of the affine expansion, i.e., + :math:`\Ohat_{\ell}^{(0)},\ldots,\Ohat_{\ell}^{(A_{\ell}-1)}`. 
+ """ + return ParametricOpInfOperator.entries.fget(self) + + @entries.setter + def entries(self, entries): + ParametricOpInfOperator.entries.fset(self, entries) + + @entries.deleter + def entries(self): + ParametricOpInfOperator.entries.fdel(self) + + @property + def nterms(self): + """Number of terms :math:`A` in the affine expansion.""" + return len(self.coefficient_functions) + + def set_entries(self, entries, fromblock: bool = False) -> None: + r"""Set the operator matrices for each term of the affine expansion. + + Parameters + ---------- + entries : list of s (r, d) ndarrays, or (r, sd) ndarray + Operator matrices, either as a list of arrays + (``fromblock=False``, default) + or as a horizontal concatenatation of arrays (``fromblock=True``). + fromblock : bool + If ``True``, interpret ``entries`` as a horizontal concatenation + of arrays; if ``False`` (default), interpret ``entries`` as a list + of arrays. + """ + # Extract / verify the entries. + nterms = self.nterms + if fromblock: + if entries.ndim not in (1, 2): + raise ValueError( + "entries must be a 1- or 2-dimensional ndarray " + "when fromblock=True" + ) + entries = np.split(entries, nterms, axis=-1) + if np.ndim(entries) > 1: + self._check_shape_consistency(entries, "entries") + if (n_arrays := len(entries)) != nterms: + raise ValueError( + f"{nterms} = len(coefficient_functions) " + f"!= len(entries) = {n_arrays}" + ) + + ParametricOpInfOperator.set_entries( + self, + np.array([self.OperatorClass(A).entries for A in entries]), + ) + + # Evaluation -------------------------------------------------------------- + @utils.requires("entries") + def evaluate(self, parameter): + r"""Evaluate the operator at the given parameter value. + + Parameters + ---------- + parameter : (p,) ndarray or float + Parameter value :math:`\bfmu` at which to evalute the operator. + + Returns + ------- + op : :mod:`opinf.operators` operator of type ``OperatorClass``. 
+ Nonparametric operator corresponding to the parameter value. + """ + self._check_parametervalue_dimension(parameter) + entries = np.sum( + [ + theta(parameter) * A + for theta, A in zip(self.coefficient_functions, self.entries) + ], + axis=0, + ) + return self.OperatorClass(entries) + + # Dimensionality reduction ------------------------------------------------ + @utils.requires("entries") + def galerkin(self, Vr, Wr=None): + r"""Project this operator to a low-dimensional linear space. + + Consider an affine operator + + .. math:: + \Op_{\ell}(\q,\u;\bfmu) + = \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\, + \Op_{\ell}^{(a)}\!(\q, \u) + + where + + * :math:`\q\in\RR^n` is the full-order state, + * :math:`\u\in\RR^m` is the input, + * :math:`\bfmu\in\RR^p` is the parameter vector, and + * each :math:`\Op_{\ell}^{(a)}\!(\q,\u) is a nonparametric operator. + + Given a *trial basis* :math:`\Vr\in\RR^{n\times r}` and a *test basis* + :math:`\Wr\in\RR^{n\times r}`, the corresponding *intrusive projection* + of :math:`\f` is the affine operator + + .. math:: + \fhat_{\ell}(\qhat,\u;\bfmu) + = \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\, + (\Wr\trp\Vr)^{-1}\Wr\trp\Op_{\ell}^{(a)}\!(\V\qhat, \u) + = \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\, + \Ophat_{\ell}^{(a)}\!(\qhat, \u), + + where :math:`\Ophat_{\ell}^{(a)}\!(\qhat, \u) + = (\Wr\trp\Vr)^{-1}\Wr\trp\Op_{\ell}^{(a)}\!(\V\qhat, \u)` + is the intrusive projection of :math:`\Op_{\ell}^{(a)}`. + Here, :math:`\qhat\in\RR^r` is the reduced-order state, which enables + the low-dimensional state approximation :math:`\q = \Vr\qhat`. + If :math:`\Wr = \Vr`, the result is called a *Galerkin projection*. + If :math:`\Wr \neq \Vr`, it is called a *Petrov-Galerkin projection*. + + Parameters + ---------- + Vr : (n, r) ndarray + Basis for the trial space. + Wr : (n, r) ndarray or None + Basis for the test space. If ``None``, defaults to ``Vr``. 
+ + Returns + ------- + op : operator + New object of the same class as ``self``. + """ + return self.__class__( + coefficient_functions=self.coefficient_functions, + entries=[ + self.OperatorClass(A).galerkin(Vr, Wr).entries + for A in self.entries + ], + fromblock=False, + ) + + # Operator inference ------------------------------------------------------ + def operator_dimension(self, s: int, r: int, m: int) -> int: + """Number of columns in the concatenated operator matrix. + + Parameters + ---------- + s : int + Number of training parameter values. + r : int + State dimension. + m : int or None + Input dimension. + """ + return self.nterms * self.OperatorClass.operator_dimension(r, m) + + def datablock(self, parameters, states, inputs=None) -> np.ndarray: + r"""Return the data matrix block corresponding to the operator. + + For affine operators + :math:`\Ophat(\qhat,\u;\bfmu) = \Ohat(\bfmu)\d(\qhat,\u)` with + :math:`\Ohat(\bfmu)\in\RR^{r\times d}` and :math:`\d(\qhat,\u)\in\RR^{d}`, + this is the block matrix + + .. math:: + \D\trp + = \left[\begin{array}{ccc} + \theta_{\ell}^{(0)}\!(\bfmu_{0})\d(\Qhat_{0},\U_{0}) + & \cdots & + \theta_{\ell}^{(0)}\!(\bfmu_{s-1})\d(\Qhat_{s-1},\U_{s-1}) + \\ \vdots & & \vdots \\ + \theta_{\ell}^{(A_{\ell}-1)}\!(\bfmu_{0})\d(\Qhat_{0},\U_{0}) + & \cdots & + \theta_{\ell}^{(A_{\ell}-1)}\!(\bfmu_{s-1})\d(\Qhat_{s-1},\U_{s-1}) + \end{array}\right] + \in \RR^{A_{\ell}d \times \sum_{i=0}^{s-1}k_i} + + where :math:`\Qhat_{i}\in\RR^{r \times k_i}` is the collection of state + snapshots for training parameter :math:`\bfmu_i\in\RR^{p}` and + :math:`\U_{i}\in\RR^{m \times k_i}` are the corresponding inputs, + :math:`i = 0, \ldots, s-1`, where :math:`s` is the number of training + parameter values. The notation :math:`\d(\Qhat_{i},\U_{i})` is + shorthand for the matrix + + .. 
math:: + \d(\Qhat_{i},\U_{i}) + = \left[\begin{array}{ccc} + \d(\qhat_{i,0},\u_{i,0}) + & \cdots & + \d(\qhat_{i,k_i-1},\u_{i,k_i-1}) + \end{array}\right] + \in \RR^{d \times k_i}, + + where + + .. math:: + \Qhat_{i} + &= \left[\begin{array}{ccc} + \qhat_{i,0} & \cdots & \qhat_{i,k_i-1} + \end{array}\right] + \in \RR^{r \times k_i}, + \\ + \U_{i} + &= \left[\begin{array}{ccc} + \u_{i,0} & \cdots & \u_{i,k_i-1} + \end{array}\right] + \in \RR^{m \times k_i}. + + Parameters + ---------- + parameters : (s, p) ndarray + Training parameter values :math:`\bfmu_{0},\ldots,\bfmu_{s-1}`. + states : list of s (r, k) ndarrays + State snapshots for each of the :math:`s` training parameter + values, i.e., :math:`\Qhat_{0},\ldots,\Qhat_{s-1}`. + inputs : list of s (m, k)-or-(k,) ndarrays or None + Inputs corresponding to the state snapshots, i.e., + :math:`\U_{0},\ldots,\U_{s-1}`. + If each input matrix is 1D, it is assumed that :math:`m = 1`. + + Returns + ------- + block : (sd, sk) ndarray + Data block for the affine operator. Here, `d` is the number + of rows in the data block corresponding to a single training + parameter value. + """ + if not isinstance(self, InputMixin): + inputs = [None] * len(parameters) + blockcolumns = [] + for mu, Q, U in zip(parameters, states, inputs): + Di = self.OperatorClass.datablock(Q, U) + blockcolumns.append( + np.vstack( + [theta(mu) * Di for theta in self.coefficient_functions] + ) + ) + return np.hstack(blockcolumns) + + # Model persistence ------------------------------------------------------- + def copy(self): + """Return a copy of the operator. Only the operator matrices are + copied, not the coefficient functions. + """ + return self.__class__( + coefficient_functions=self.coefficient_functions, + entries=self.entries.copy() if self.entries is not None else None, + fromblock=False, + ) + + def save(self, savefile: str, overwrite: bool = False) -> None: + """Save the operator to an HDF5 file.
+ + Since the :attr:`coefficient_functions` are callables, they cannot be + serialized, and are therefore an argument to :meth:`load()`. + + Parameters + ---------- + savefile : str + Path of the file to save the basis in. + overwrite : bool + If ``True``, overwrite the file if it already exists. If ``False`` + (default), raise a ``FileExistsError`` if the file already exists. + """ + with utils.hdf5_savehandle(savefile, overwrite) as hf: + meta = hf.create_dataset("meta", shape=(0,)) + meta.attrs["class"] = self.__class__.__name__ + if self.entries is not None: + hf.create_dataset("entries", data=self.entries) + + @classmethod + def load(cls, loadfile: str, coefficient_functions): + """Load an affine parametric operator from an HDF5 file. + + Parameters + ---------- + loadfile : str + Path to the file where the operator was stored via :meth:`save()`. + coefficient_functions : iterable of callables + Scalar-valued coefficient functions for each term of the affine + expansion. + Returns + ------- + op : _AffineOperator + Initialized operator object. + """ + with utils.hdf5_loadhandle(loadfile) as hf: + ClassName = hf["meta"].attrs["class"] + if ClassName != cls.__name__: + raise TypeError( + f"file '{loadfile}' contains '{ClassName}' " + f"object, use '{ClassName}.load()'" + ) + + return cls( + coefficient_functions=coefficient_functions, + entries=(hf["entries"][:] if "entries" in hf else None), + fromblock=False, + ) + + +# Public affine operator classes ============================================== +class AffineConstantOperator(_AffineOperator): + r"""Affine-parametric constant operator + :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) + = \chat_{\ell}(\bfmu) + = \sum_{a=0}^{A_{\ell}-1}\theta_\ell^{(a)}\!(\bfmu)\chat_{\ell}^{(a)}`. + + Here, each :math:`\chat_{\ell}^{(a)} \in \RR^r` is a constant vector, + see :class:`opinf.operators.ConstantOperator`. 
+ + Parameters + ---------- + coefficient_functions : list of callables + Scalar-valued coefficient functions for each term of the affine + expansion, i.e., + :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}`. + entries : list of ndarrays, or None + Operator matrices for each term of the affine expansion, i.e., + :math:`\Ohat_{\ell}^{(0)},\ldots,\Ohat_{\ell}^{(A_{\ell}-1)}`. + If not provided in the constructor, use :meth:`set_entries` later. + fromblock : bool + If ``True``, interpret ``entries`` as a horizontal concatenation + of arrays; if ``False`` (default), interpret ``entries`` as a list + of arrays. + """ + + _OperatorClass = ConstantOperator + + +class AffineLinearOperator(_AffineOperator): + r"""Affine-parametric linear operator + :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) + = \Ahat_{\ell}(\bfmu)\qhat = \left( + \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\Ahat_{\ell}^{(a)} + \right)\qhat`. + + Here, each :math:`\Ahat_{\ell}^{(a)} \in \RR^{r\times r}` is a constant + matrix, see :class:`opinf.operators.LinearOperator`. + + Parameters + ---------- + coefficient_functions : list of callables + Scalar-valued coefficient functions for each term of the affine + expansion, i.e., + :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}`. + entries : list of ndarrays, or None + Operator matrices for each term of the affine expansion, i.e., + :math:`\Ahat_{\ell}^{(0)},\ldots,\Ahat_{\ell}^{(A_{\ell}-1)}`. + If not provided in the constructor, use :meth:`set_entries` later. + fromblock : bool + If ``True``, interpret ``entries`` as a horizontal concatenation + of arrays; if ``False`` (default), interpret ``entries`` as a list + of arrays. 
+ """ + + _OperatorClass = LinearOperator + + +class AffineQuadraticOperator(_AffineOperator): + r"""Affine-parametric quadratic operator + :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) + = \Hhat_{\ell}(\bfmu)[\qhat\otimes\qhat] = \left( + \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\Hhat_{\ell}^{(a)} + \right)[\qhat\otimes\qhat]`. + + Here, each :math:`\Hhat_{\ell}^{(a)} \in \RR^{r\times r^2}` is a constant + matrix, see :class:`opinf.operators.QuadraticOperator`. + + Parameters + ---------- + coefficient_functions : list of callables + Scalar-valued coefficient functions for each term of the affine + expansion, i.e., + :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}`. + entries : list of ndarrays, or None + Operator matrices for each term of the affine expansion, i.e., + :math:`\Hhat_{\ell}^{(0)},\ldots,\Hhat_{\ell}^{(A_{\ell}-1)}`. + If not provided in the constructor, use :meth:`set_entries` later. + fromblock : bool + If ``True``, interpret ``entries`` as a horizontal concatenation + of arrays; if ``False`` (default), interpret ``entries`` as a list + of arrays. + """ + + _OperatorClass = QuadraticOperator + + +class AffineCubicOperator(_AffineOperator): + r"""Affine-parametric cubic operator + :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) + = \Ghat_{\ell}(\bfmu)[\qhat\otimes\qhat\otimes\qhat] = \left( + \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\Ghat_{\ell}^{(a)} + \right)[\qhat\otimes\qhat\otimes\qhat]`. + + Here, each :math:`\Ghat_{\ell}^{(a)} \in \RR^{r\times r^3}` is a constant + matrix, see :class:`opinf.operators.CubicOperator`. + + Parameters + ---------- + coefficient_functions : list of callables + Scalar-valued coefficient functions for each term of the affine + expansion, i.e., + :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}`. + entries : list of ndarrays, or None + Operator matrices for each term of the affine expansion, i.e., + :math:`\Ghat_{\ell}^{(0)},\ldots,\Ghat_{\ell}^{(A_{\ell}-1)}`. 
+ If not provided in the constructor, use :meth:`set_entries` later. + fromblock : bool + If ``True``, interpret ``entries`` as a horizontal concatenation + of arrays; if ``False`` (default), interpret ``entries`` as a list + of arrays. + """ + + _OperatorClass = CubicOperator + + +class AffineInputOperator(_AffineOperator, InputMixin): + r"""Affine-parametric input operator + :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) + = \Bhat_{\ell}(\bfmu)\u = \left( + \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\Bhat_{\ell}^{(a)} + \right)\u`. + + Here, each :math:`\Bhat_{\ell}^{(a)} \in \RR^{r\times m}` is a constant + matrix, see :class:`opinf.operators.InputOperator`. + + Parameters + ---------- + coefficient_functions : list of callables + Scalar-valued coefficient functions for each term of the affine + expansion, i.e., + :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}`. + entries : list of ndarrays, or None + Operator matrices for each term of the affine expansion, i.e., + :math:`\Bhat_{\ell}^{(0)},\ldots,\Bhat_{\ell}^{(A_{\ell}-1)}`. + If not provided in the constructor, use :meth:`set_entries` later. + fromblock : bool + If ``True``, interpret ``entries`` as a horizontal concatenation + of arrays; if ``False`` (default), interpret ``entries`` as a list + of arrays. + """ + + _OperatorClass = InputOperator + + @property + def input_dimension(self): + r"""Dimension of the input :math:`\u` that the operator acts on.""" + return None if self.entries is None else self.shape[1] + + +class AffineStateInputOperator(_AffineOperator, InputMixin): + r"""Affine-parametric state-input operator + :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) + = \Nhat_{\ell}(\bfmu)[\u\otimes\qhat] = \left( + \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\Nhat_{\ell}^{(a)} + \right)[\u\otimes\qhat]`. + + Here, each :math:`\Nhat_{\ell}^{(a)} \in \RR^{r\times rm}` is a constant + matrix, see :class:`opinf.operators.StateInputOperator`.
+ + Parameters + ---------- + coefficient_functions : list of callables + Scalar-valued coefficient functions for each term of the affine + expansion, i.e., + :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}`. + entries : list of ndarrays, or None + Operator matrices for each term of the affine expansion, i.e., + :math:`\Nhat_{\ell}^{(0)},\ldots,\Nhat_{\ell}^{(A_{\ell}-1)}`. + If not provided in the constructor, use :meth:`set_entries` later. + fromblock : bool + If ``True``, interpret ``entries`` as a horizontal concatenation + of arrays; if ``False`` (default), interpret ``entries`` as a list + of arrays. + """ + + _OperatorClass = StateInputOperator + + @property + def input_dimension(self): + r"""Dimension of the input :math:`\u` that the operator acts on.""" + if self.entries is None: + return None + r, rm = self.shape + return rm // r + + +# Utilities =================================================================== +def is_affine(obj) -> bool: + """Return ``True`` if ``obj`` is an affine operator object.""" + return isinstance(obj, _AffineOperator) + + +def nonparametric_to_affine(OpClass: type) -> type: + """Get the affine operator class corresponding to a nonparametric + operator class.
+ + """ + for AffineClassName in __all__: + AffineClass = eval(AffineClassName) + if not isinstance(AffineClass, type) or not issubclass( + AffineClass, _AffineOperator + ): # pragma: no cover + continue + if AffineClass._OperatorClass is OpClass: + return AffineClass + raise TypeError( + f"_AffineOperator for class '{OpClass.__name__}' not found" + ) From 6c4f8e4ddd71d440926bb8c05c495987e4a5c25f Mon Sep 17 00:00:00 2001 From: Shane Date: Wed, 14 Aug 2024 16:29:15 -0600 Subject: [PATCH 07/48] affine doc fixes, bug fixes --- docs/source/api/operators.ipynb | 130 ++++++++++++++++++-- src/opinf/lstsq/_tsvd.py | 2 +- src/opinf/operators/_affine.py | 176 +++++++++++++++------------- src/opinf/operators/_base.py | 37 ++---- src/opinf/operators/_interpolate.py | 37 ++++-- 5 files changed, 259 insertions(+), 123 deletions(-) diff --git a/docs/source/api/operators.ipynb b/docs/source/api/operators.ipynb index a535786a..5df4eb83 100644 --- a/docs/source/api/operators.ipynb +++ b/docs/source/api/operators.ipynb @@ -1009,7 +1009,7 @@ "metadata": {}, "source": [ "An operator is called _parametric_ if it depends on an independent parameter vector\n", - "$\\bfmu\\in\\RR^{p}$, i.e., $\\Ophat = \\Ophat_{\\ell}(\\qhat,\\u;\\bfmu)$\n", + "$\\bfmu\\in\\RR^{p}$, i.e., $\\Ophat_{\\ell} = \\Ophat_{\\ell}(\\qhat,\\u;\\bfmu)$\n", "When the parameter vector is fixed, a parametric operator becomes nonparametric.\n", "In particular, a parametric operator's [`evaluate()`](ParametricOperatorTemplate.evaluate) method accepts a parameter vector $\\bfmu$ and returns an instance of a nonparametric operator whose type is given by the parametric operator's [`OperatorClass`](ParametricOperatorTemplate.OperatorClass) property." 
] @@ -1022,8 +1022,7 @@ "$\\Ophat_{\\ell}(\\qhat,\\u;\\bfmu) = \\Ohat_{\\ell}(\\bfmu)\\d_{\\ell}(\\qhat,\\u)$ defined by the matrix-valued function $\\Ohat_{\\ell}:\\RR^{p}\\to\\RR^{r\\times d_\\ell}$ and (as in the nonparametric case) the data vector $\\d_{\\ell}:\\RR^{r}\\times\\RR^{m}\\to\\RR^{d_\\ell}$.\n", "This module provides two options for the parameterization of $\\Ohat_{\\ell}(\\bfmu)$: [affine expansion](sec-operators-affine) and [elementwise interpolation](sec-operators-interpolated).\n", "In each case, Operator Inference begins with $s$ training parameter values $\\bfmu_{0},\\ldots,\\bfmu_{s-1}$ and corresponding state, input, and left-hand side data $\\{(\\qhat_{i,j},\\u_{i,j},\\z_{i,j})\\}_{j=0}^{k_{i}-1}$ for each training parameter value $\\bfmu_{i}$.\n", - "A regression of the form $\\Z \\approx \\Ohat\\D\\trp$ is formed as in the nonparametric case, with the structure of the matrices $\\Ohat$ and $\\D$ depending on the choice of parameterization for each $\\Ohat_{\\ell}(\\bfmu)$.\n", - "The matrix $\\D$ is formed by the static [`datablock()`](ParametricOpInfOperator.datablock) method, and the rest of the problem is constructed and solved by a parametric model class." + "A regression of the form $\\Z \\approx \\Ohat\\D\\trp$ is formed as in the nonparametric case, with the structure of the matrices $\\Ohat$ and $\\D$ depending on the choice of parameterization for each $\\Ohat_{\\ell}(\\bfmu)$." 
] }, { @@ -1139,12 +1138,72 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - ":::{admonition} TODO\n", - "Demonstration.\n", - ":::" + "thetas = (\n", + " lambda mu: mu[0],\n", + " lambda mu: mu[1] ** 2,\n", + ")\n", + "\n", + "A = opinf.operators.AffineLinearOperator(coefficient_functions=thetas)\n", + "print(A)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Set the constant operator matrices in the affine expansion.\n", + "r = 5\n", + "Ahats = [np.ones((r, r)), np.eye(r)]\n", + "A.set_entries(Ahats, fromblock=False)\n", + "\n", + "print(A)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "A.entries" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Evaluate the parametric operator at a fixed parameter value,\n", + "# resulting in a nonparametric operator.\n", + "A_nonparametric = A.evaluate([2, 4])\n", + "print(A_nonparametric)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "A_nonparametric.entries" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# The parameter dimension p = 2 was recorded when A was evaluated.\n", + "print(A)" ] }, { @@ -1237,7 +1296,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Interpolated operators define the parametric dependence on $\\bfmu$ through elementwise interpolation.\n", + "Interpolated parametric OpInf operators define the parametric dependence of the operator matrix on $\\bfmu$ through elementwise interpolation.\n", "That is,\n", "\n", "$$\n", @@ -1273,6 +1332,61 @@ "```" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Interpolated operators can be instantiated with no 
arguments." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "B = opinf.operators.InterpolatedInputOperator()\n", + "print(B)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "s = 9 # Number of training parameters\n", + "p = 1 # Dimension of the training parameters.\n", + "r = 4 # Dimension of the states.\n", + "m = 2 # Dimension of the inputs.\n", + "\n", + "training_parameters = np.random.standard_normal((s, p))\n", + "operator_matrices = [np.random.random((r, m)) for _ in range(s)]\n", + "\n", + "B.set_training_parameters(training_parameters)\n", + "print(B)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "B.set_entries(operator_matrices, fromblock=False)\n", + "print(B)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "B_nonparametric = B.evaluate(np.random.standard_normal(p))\n", + "print(B_nonparametric)" + ] + }, { "cell_type": "markdown", "metadata": {}, diff --git a/src/opinf/lstsq/_tsvd.py b/src/opinf/lstsq/_tsvd.py index a3e912c9..24328003 100644 --- a/src/opinf/lstsq/_tsvd.py +++ b/src/opinf/lstsq/_tsvd.py @@ -33,7 +33,7 @@ class TruncatedSVDSolver(SolverTemplate): \operatorname{rank}(\D') = d'. If :math:`\D = \bfPhi\bfSigma\bfPsi\trp` is the singular value - decomposition of :math:\D`, then defining + decomposition of :math:`\D`, then defining .. 
math:: \bfPhi' = \bfPhi_{:d', :} diff --git a/src/opinf/operators/_affine.py b/src/opinf/operators/_affine.py index e2a37e98..5f3fcd6d 100644 --- a/src/opinf/operators/_affine.py +++ b/src/opinf/operators/_affine.py @@ -42,7 +42,7 @@ class _AffineOperator(ParametricOpInfOperator): where each :math:`\theta_{\ell}^{(a)}:\RR^{p}\to\RR` is a scalar-valued function of the parameter vector, each :math:`\Ohat_{\ell}^{(a)}\in\RR^{r\times d}` is a constant matrix, and - :math:`\d:\RR^{r}\times\RR^{m}\to\RR^{d}`. + :math:`\d:\RR^{r}\times\RR^{m}\to\RR^{d}.` Parent class: :class:`opinf.operators.ParametricOpInfOperator` @@ -51,10 +51,10 @@ class _AffineOperator(ParametricOpInfOperator): coefficient_functions : iterable of callables Scalar-valued coefficient functions for each term of the affine expansion, i.e., - :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}`. + :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}.` entries : list of ndarrays, or None Operator matrices for each term of the affine expansion, i.e., - :math:`\Ohat_{\ell}^{(0)},\ldots,\Ohat_{\ell}^{(A_{\ell}-1)}`. + :math:`\Ohat_{\ell}^{(0)},\ldots,\Ohat_{\ell}^{(A_{\ell}-1)}.` If not provided in the constructor, use :meth:`set_entries` later. fromblock : bool If ``True``, interpret ``entries`` as a horizontal concatenation @@ -87,14 +87,14 @@ def __init__( def coefficient_functions(self) -> tuple: r"""Scalar-valued coefficient functions for each term of the affine expansion, i.e., - :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}`. + :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}.` """ return self.__thetas @property def entries(self) -> np.ndarray: r"""Operator matrices for each term of the affine expansion, i.e., - :math:`\Ohat_{\ell}^{(0)},\ldots,\Ohat_{\ell}^{(A_{\ell}-1)}`. 
+ :math:`\Ohat_{\ell}^{(0)},\ldots,\Ohat_{\ell}^{(A_{\ell}-1)}.` """ return ParametricOpInfOperator.entries.fget(self) @@ -108,7 +108,7 @@ def entries(self): @property def nterms(self): - """Number of terms :math:`A` in the affine expansion.""" + r"""Number of terms :math:`A_{\ell}` in the affine expansion.""" return len(self.coefficient_functions) def set_entries(self, entries, fromblock: bool = False) -> None: @@ -159,9 +159,11 @@ def evaluate(self, parameter): Returns ------- - op : :mod:`opinf.operators` operator of type ``OperatorClass``. + op : :mod:`opinf.operators` operator of type :attr:`OperatorClass` Nonparametric operator corresponding to the parameter value. """ + if self.parameter_dimension is None: + self._set_parameter_dimension_from_values([parameter]) self._check_parametervalue_dimension(parameter) entries = np.sum( [ @@ -189,7 +191,7 @@ def galerkin(self, Vr, Wr=None): * :math:`\q\in\RR^n` is the full-order state, * :math:`\u\in\RR^m` is the input, * :math:`\bfmu\in\RR^p` is the parameter vector, and - * each :math:`\Op_{\ell}^{(a)}\!(\q,\u) is a nonparametric operator. + * each :math:`\Op_{\ell}^{(a)}\!(\q,\u)` is a nonparametric operator. Given a *trial basis* :math:`\Vr\in\RR^{n\times r}` and a *test basis* :math:`\Wr\in\RR^{n\times r}`, the corresponding *intrusive projection* @@ -204,9 +206,9 @@ def galerkin(self, Vr, Wr=None): where :math:`\Ophat_{\ell}^{(a)}\!(\qhat, \u) = (\Wr\trp\Vr)^{-1}\Wr\trp\Op_{\ell}^{(a)}\!(\V\qhat, \u)` - is the intrusive projection of :math:`\Op_{\ell}^{(a)}`. + is the intrusive projection of :math:`\Op_{\ell}^{(a)}.` Here, :math:`\qhat\in\RR^r` is the reduced-order state, which enables - the low-dimensional state approximation :math:`\q = \Vr\qhat`. + the low-dimensional state approximation :math:`\q = \Vr\qhat.` If :math:`\Wr = \Vr`, the result is called a *Galerkin projection*. If :math:`\Wr \neq \Vr`, it is called a *Petrov-Galerkin projection*. 
@@ -233,7 +235,12 @@ def galerkin(self, Vr, Wr=None): # Operator inference ------------------------------------------------------ def operator_dimension(self, s: int, r: int, m: int) -> int: - """Number of columns in the concatenated operator matrix. + r"""Number of columns in the concatenated operator matrix. + + For affine operators, this is :math:`A_{\ell}\cdot d(r,m)`, + where :math:`A_{\ell}` is the number of terms in the affine expansion + and :math:`d(r,m)` is the dimension of the function + :math:`\d(\qhat,\u)`. Parameters ---------- @@ -249,73 +256,64 @@ def operator_dimension(self, s: int, r: int, m: int) -> int: def datablock(self, parameters, states, inputs=None) -> np.ndarray: r"""Return the data matrix block corresponding to the operator. - For affine operators - :math:`\Ophat(\qhat,\u;\bfmu) = \Ohat(\bfmu)\d(\qhat,\u)` with - :math:`\Ohat(\bfmu)\in\RR^{r\times d}` and `\d(\qhat,\u)\in\RR^{r}`, - this is the block matrix + For affine operators :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) + = \Ohat_{\ell}(\bfmu)\d_{\ell}(\qhat,\u)` with + :math:`\Ohat_{\ell}(\bfmu)\in\RR^{r\times d}` and + :math:`\d_{\ell}(\qhat,\u)\in\RR^{d}`, this is the block matrix .. 
math:: - \D\trp + \D_{\ell}\trp = \left[\begin{array}{ccc} - \theta_{\ell}^{(0)}\!(\bfmu_{0})\d(\Qhat_{0},\U_{0}) + \theta_{\ell}^{(0)}\!(\bfmu_{0})\, + \d_{\ell}(\Qhat_{0},\U_{0}) & \cdots & - \theta_{\ell}^{(0)}\!(\bfmu_{s-1})\d(\Qhat_{0},\U_{s-1}) + \theta_{\ell}^{(0)}\!(\bfmu_{s-1})\, + \d_{\ell}(\Qhat_{s-1},\U_{s-1}) \\ \vdots & & \vdots \\ - \theta_{\ell}^{(A_{\ell})}\!(\bfmu_{0})\d(\Qhat_{0},\U_{0}) + \theta_{\ell}^{(A_{\ell})}\!(\bfmu_{0})\, + \d_{\ell}(\Qhat_{0},\U_{0}) & \cdots & - \theta_{\ell}^{(A_{\ell})}\!(\bfmu_{s-1})\d(\Qhat_{0},\U_{s-1}) + \theta_{\ell}^{(A_{\ell})}\!(\bfmu_{s-1})\, + \d_{\ell}(\Qhat_{s-1},\U_{s-1}) \end{array}\right] \in \RR^{A_{\ell}d \times \sum_{i=0}^{s-1}k_i} - where :math:`\Qhat_{i}\in\RR^{r \times k_i}` is the collection of state - snapshots for training parameter :math:`\bfmu_i\in\RR^{p}` and - :math:`\U_{i}\in\RR^{m \times k_i}` are the corresponding inputs, - :math:`i = 0, \ldots, s-1`, where :math:`s` is the number of training - parameter values. The notation :math:`\d(\Qhat_{i},\U_{i})` is - shorthand for the matrix + where :math:`\Qhat_{i} = + [~\qhat_{i,0}~~\cdots~~\qhat_{i,k_i-1}] \in \RR^{r \times k_i}` + and :math:`\U_{i} = + [~\u_{i,0}~~\cdots~~\u_{i,k_i-1}] \in \RR^{m\times k_i}` + are the state snapshots and inputs corresponding to training parameter + value :math:`\bfmu_i\in\RR^{p}`, :math:`i = 0, \ldots, s-1`, where + :math:`s` is the number of training parameter values. The notation + :math:`\d_{\ell}(\Qhat_{i},\U_{i})` is shorthand for the matrix .. math:: \d(\Qhat_{i},\U_{i}) - = \left[\begin{array}{ccc}\ - \d(\qhat_{i,0},\u_{i,0}) + = \left[\begin{array}{ccc} + \d_{\ell}(\qhat_{i,0},\u_{i,0}) & \cdots & - \d(\qhat_{i,k_i-1},\u_{i,k_i-1}) - end{array}\right] - \in \RR^{d \times k_i}, - - where - - .. 
math::
-            \Qhat_{i}
-            &= \left[\begin{array}{ccc}\
-                \qhat_{i,0} & \cdots & \qhat_{i,k_i-1}
-            end{array}\right]
-            \in \RR^{r \times k_i},
-            \\
-            \U_{i}
-            &= \left[\begin{array}{ccc}\
-                \u_{i,0} & \cdots & \u_{i,k_i-1}
-            end{array}\right]
-            \in \RR^{m \times k_i}.
+                \d_{\ell}(\qhat_{i,k_i-1},\u_{i,k_i-1})
+            \end{array}\right]
+            \in \RR^{d \times k_i}.
 
         Parameters
         ----------
         parameters : (s, p) ndarray
-            Traning parameter values :math:`\bfmu_{0},\ldots,\bfmu_{s-1}`.
+            Training parameter values :math:`\bfmu_{0},\ldots,\bfmu_{s-1}.`
         states : list of s (r, k) ndarrays
             State snapshots for each of the :math:`s` training parameter
-            values, i.e., :math:`\Qhat_{0},\ldots,\Qhat_{s-1}`.
+            values, i.e., :math:`\Qhat_{0},\ldots,\Qhat_{s-1}.`
         inputs : list of s (m, k)-or-(k,) ndarrays or None
             Inputs corresponding to the state snapshots, i.e.,
-            :math:`\U_{0},\ldots,\U_{s-1}`.
-            If each input matrix is 1D, it is assumed that :math:`m = 1`.
+            :math:`\U_{0},\ldots,\U_{s-1}.`
+            If each input matrix is 1D, it is assumed that :math:`m = 1.`
 
         Returns
         -------
-        block : (sd, sk) ndarray
-            Data block for the interpolated operator. Here, `d` is the number
-            of rows in the data block corresponding to a single training
-            parameter value.
+        block : (D, K) ndarray
+            Data block for the affine operator. Here,
+            :math:`D = A_{\ell}d(r,m)` and :math:`K = \sum_{i=0}^{s-1}k_i`
+            is the total number of snapshots.
         """
         if not isinstance(self, InputMixin):
             inputs = [None] * len(parameters)
@@ -396,9 +394,11 @@ class AffineConstantOperator(_AffineOperator):
     r"""Affine-parametric constant operator
     :math:`\Ophat_{\ell}(\qhat,\u;\bfmu)
     = \chat_{\ell}(\bfmu)
-    = \sum_{a=0}^{A_{\ell}-1}\theta_\ell^{(a)}\!(\bfmu)\chat_{\ell}^{(a)}`.
+ = \sum_{a=0}^{A_{\ell}-1}\theta_\ell^{(a)}\!(\bfmu)\,\chat_{\ell}^{(a)}.` - Here, each :math:`\chat_{\ell}^{(a)} \in \RR^r` is a constant vector, + Here, each :math:`\theta_\ell^{(a)}:\RR^{p}\to\RR` is a scalar-valued + function of the parameter vector + and each :math:`\chat_{\ell}^{(a)} \in \RR^r` is a constant vector, see :class:`opinf.operators.ConstantOperator`. Parameters @@ -406,10 +406,10 @@ class AffineConstantOperator(_AffineOperator): coefficient_functions : list of callables Scalar-valued coefficient functions for each term of the affine expansion, i.e., - :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}`. + :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}.` entries : list of ndarrays, or None Operator matrices for each term of the affine expansion, i.e., - :math:`\Ohat_{\ell}^{(0)},\ldots,\Ohat_{\ell}^{(A_{\ell}-1)}`. + :math:`\Ohat_{\ell}^{(0)},\ldots,\Ohat_{\ell}^{(A_{\ell}-1)}.` If not provided in the constructor, use :meth:`set_entries` later. fromblock : bool If ``True``, interpret ``entries`` as a horizontal concatenation @@ -424,10 +424,12 @@ class AffineLinearOperator(_AffineOperator): r"""Affine-parametric linear operator :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) = \Ahat_{\ell}(\bfmu)\qhat = \left( - \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\Ahat_{\ell}^{(a)} - \right)\qhat`. + \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\,\Ahat_{\ell}^{(a)} + \right)\qhat.` - Here, each :math:`\Ahat_{\ell}^{(a)} \in \RR^{r\times r}` is a constant + Here, each :math:`\theta_\ell^{(a)}:\RR^{p}\to\RR` is a scalar-valued + function of the parameter vector + and each :math:`\Ahat_{\ell}^{(a)} \in \RR^{r\times r}` is a constant matrix, see :class:`opinf.operators.LinearOperator`. 
Parameters @@ -435,10 +437,10 @@ class AffineLinearOperator(_AffineOperator): coefficient_functions : list of callables Scalar-valued coefficient functions for each term of the affine expansion, i.e., - :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}`. + :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}.` entries : list of ndarrays, or None Operator matrices for each term of the affine expansion, i.e., - :math:`\Ahat_{\ell}^{(0)},\ldots,\Ahat_{\ell}^{(A_{\ell}-1)}`. + :math:`\Ahat_{\ell}^{(0)},\ldots,\Ahat_{\ell}^{(A_{\ell}-1)}.` If not provided in the constructor, use :meth:`set_entries` later. fromblock : bool If ``True``, interpret ``entries`` as a horizontal concatenation @@ -453,10 +455,12 @@ class AffineQuadraticOperator(_AffineOperator): r"""Affine-parametric quadratic operator :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) = \Hhat_{\ell}(\bfmu)[\qhat\otimes\qhat] = \left( - \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\Hhat_{\ell}^{(a)} - \right)[\qhat\otimes\qhat]`. + \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\,\Hhat_{\ell}^{(a)} + \right)[\qhat\otimes\qhat].` - Here, each :math:`\Hhat_{\ell}^{(a)} \in \RR^{r\times r^2}` is a constant + Here, each :math:`\theta_\ell^{(a)}:\RR^{p}\to\RR` is a scalar-valued + function of the parameter vector + and each :math:`\Hhat_{\ell}^{(a)} \in \RR^{r\times r^2}` is a constant matrix, see :class:`opinf.operators.QuadraticOperator`. Parameters @@ -464,10 +468,10 @@ class AffineQuadraticOperator(_AffineOperator): coefficient_functions : list of callables Scalar-valued coefficient functions for each term of the affine expansion, i.e., - :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}`. + :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}.` entries : list of ndarrays, or None Operator matrices for each term of the affine expansion, i.e., - :math:`\Hhat_{\ell}^{(0)},\ldots,\Hhat_{\ell}^{(A_{\ell}-1)}`. 
+ :math:`\Hhat_{\ell}^{(0)},\ldots,\Hhat_{\ell}^{(A_{\ell}-1)}.` If not provided in the constructor, use :meth:`set_entries` later. fromblock : bool If ``True``, interpret ``entries`` as a horizontal concatenation @@ -482,10 +486,12 @@ class AffineCubicOperator(_AffineOperator): r"""Affine-parametric cubic operator :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) = \Ghat_{\ell}(\bfmu)[\qhat\otimes\qhat\otimes\qhat] = \left( - \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\Ghat_{\ell}^{(a)} - \right)[\qhat\otimes\qhat\otimes\qhat]`. + \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\,\Ghat_{\ell}^{(a)} + \right)[\qhat\otimes\qhat\otimes\qhat].` - Here, each :math:`\Ghat_{\ell}^{(a)} \in \RR^{r\times r^3}` is a constant + Here, each :math:`\theta_\ell^{(a)}:\RR^{p}\to\RR` is a scalar-valued + function of the parameter vector + and each :math:`\Ghat_{\ell}^{(a)} \in \RR^{r\times r^3}` is a constant matrix, see :class:`opinf.operators.CubicOperator`. Parameters @@ -493,10 +499,10 @@ class AffineCubicOperator(_AffineOperator): coefficient_functions : list of callables Scalar-valued coefficient functions for each term of the affine expansion, i.e., - :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}`. + :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}.` entries : list of ndarrays, or None Operator matrices for each term of the affine expansion, i.e., - :math:`\Ghat_{\ell}^{(0)},\ldots,\Ghat_{\ell}^{(A_{\ell}-1)}`. + :math:`\Ghat_{\ell}^{(0)},\ldots,\Ghat_{\ell}^{(A_{\ell}-1)}.` If not provided in the constructor, use :meth:`set_entries` later. fromblock : bool If ``True``, interpret ``entries`` as a horizontal concatenation @@ -511,10 +517,12 @@ class AffineInputOperator(_AffineOperator, InputMixin): r"""Affine-parametric input operator :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) = \Bhat_{\ell}(\bfmu)\u = \left( - \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\Bhat_{\ell}^{(a)} - \right)\u`. 
+ \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\,\Bhat_{\ell}^{(a)} + \right)\u.` - Here, each :math:`\Bhat_{\ell}^{(a)} \in \RR^{r\times m}` is a constant + Here, each :math:`\theta_\ell^{(a)}:\RR^{p}\to\RR` is a scalar-valued + function of the parameter vector + and each :math:`\Bhat_{\ell}^{(a)} \in \RR^{r\times m}` is a constant matrix, see :class:`opinf.operators.InputOperator`. Parameters @@ -522,10 +530,10 @@ class AffineInputOperator(_AffineOperator, InputMixin): coefficient_functions : list of callables Scalar-valued coefficient functions for each term of the affine expansion, i.e., - :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}`. + :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}.` entries : list of ndarrays, or None Operator matrices for each term of the affine expansion, i.e., - :math:`\Bhat_{\ell}^{(0)},\ldots,\Bhat_{\ell}^{(A_{\ell}-1)}`. + :math:`\Bhat_{\ell}^{(0)},\ldots,\Bhat_{\ell}^{(A_{\ell}-1)}.` If not provided in the constructor, use :meth:`set_entries` later. fromblock : bool If ``True``, interpret ``entries`` as a horizontal concatenation @@ -545,10 +553,12 @@ class AffineStateInputOperator(_AffineOperator, InputMixin): r"""Affine-parametric state-input operator :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) = \Nhat_{\ell}(\bfmu)\qhat = \left( - \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\Nhat_{\ell}^{(a)} - \right)[\u\otimes\qhat]`. + \sum_{a=0}^{A_{\ell}-1}\theta_{\ell}^{(a)}\!(\bfmu)\,\Nhat_{\ell}^{(a)} + \right)[\u\otimes\qhat].` - Here, each :math:`\Nhat_{\ell}^{(a)} \in \RR^{r\times rm}` is a constant + Here, each :math:`\theta_\ell^{(a)}:\RR^{p}\to\RR` is a scalar-valued + function of the parameter vector + and each :math:`\Nhat_{\ell}^{(a)} \in \RR^{r\times rm}` is a constant matrix, see :class:`opinf.operators.StateInputOperator`. 
Parameters @@ -556,10 +566,10 @@ class AffineStateInputOperator(_AffineOperator, InputMixin): coefficient_functions : list of callables Scalar-valued coefficient functions for each term of the affine expansion, i.e., - :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}`. + :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}.` entries : list of ndarrays, or None Operator matrices for each term of the affine expansion, i.e., - :math:`\Nhat_{\ell}^{(0)},\ldots,\Nhat_{\ell}^{(A_{\ell}-1)}`. + :math:`\Nhat_{\ell}^{(0)},\ldots,\Nhat_{\ell}^{(A_{\ell}-1)}.` If not provided in the constructor, use :meth:`set_entries` later. fromblock : bool If ``True``, interpret ``entries`` as a horizontal concatenation diff --git a/src/opinf/operators/_base.py b/src/opinf/operators/_base.py index 39bd791e..1d9a2bd9 100644 --- a/src/opinf/operators/_base.py +++ b/src/opinf/operators/_base.py @@ -862,21 +862,21 @@ def verify( # Parametric operators ======================================================== class ParametricOperatorTemplate(abc.ABC): - r"""Template for general operators that depend on external parameters, + r"""Template for operators that depend on external parameters, :math:`\Ophat_{\ell}(\qhat,\u;\bfmu).` In this package, a parametric "operator" is a function - :math:`\Ophat_{\ell}: \RR^n \times \RR^m \times \RR^p \to \RR^n` that acts - on a state vector :math:`\qhat\in\RR^n`, an (optional) input vector + :math:`\Ophat_{\ell}: \RR^r \times \RR^m \times \RR^p \to \RR^r` that acts + on a state vector :math:`\qhat\in\RR^r`, an (optional) input vector :math:`\u\in\RR^m`, and a parameter vector :math:`\bfmu\in\RR^p`. - Models are defined as the sum of several operators, - for example, an :class:`opinf.models.ContinuousModel` object represents a - system of ordinary differential equations: + Parametric models are defined as the sum of several operators, at least + one of which is parametric. + For example, a system of ODEs: .. 
math::
-        \ddt\qhat(t)
-        = \sum_{\ell=1}^{n_\textrm{terms}}\Ophat_{\ell}(\qhat(t),\u(t)).
+        \ddt\qhat(t;\bfmu)
+        = \sum_{\ell=1}^{n_\textrm{terms}}\Ophat_{\ell}(\qhat(t),\u(t);\bfmu).
 
     Notes
     -----
@@ -885,7 +885,6 @@ class ParametricOperatorTemplate(abc.ABC):
     For nonparametric model terms, see :class:`OperatorTemplate`.
     For model terms that can be learned with Operator Inference, see
     :class:`OpInfOperator` or :class:`ParametricOpInfOperator`.
-
     """
 
     # Meta properties ---------------------------------------------------------
@@ -945,7 +944,7 @@ def _check_parametervalue_dimension(self, parameter):
     @abc.abstractmethod
     def evaluate(self, parameter):
         r"""Evaluate the operator at the given parameter value,
-        resulting in a nonparametric operator of type ``OperatorClass``.
+        resulting in a nonparametric operator of type :attr:`OperatorClass`.
 
         Parameters
         ----------
@@ -1173,19 +1172,9 @@ def is_parametric(obj) -> bool:
 
 
 class ParametricOpInfOperator(ParametricOperatorTemplate):
-    r"""Base class for operators that depend on external parameters, i.e.,
+    r"""Template for operators that depend on external parameters, and which
+    can be calibrated through operator inference, i.e.,
    :math:`\Ophat_\ell(\qhat,\u;\bfmu) = \Ohat_\ell(\bfmu)\d_\ell(\qhat,\u)`.
-
-    Evaluating a ``_ParametricOpInfOperator`` at a specific parameter value
-    results in an object that inherits from
-    :class:`opinf.operators.OpInfOperator`.
-
-    Examples
-    --------
-    >>> parametric_operator = MyParametricOperator(init_args)
-    >>> nonparametric_operator = parametric_operator.evaluate(parameter_value)
-    >>> isinstance(nonparametric_operator, OpInfOperator)
-    True
     """
 
     # Initialization ----------------------------------------------------------
@@ -1232,8 +1221,8 @@ def state_dimension(self) -> int:
 
     @property
     def parameter_dimension(self) -> int:
-        r"""Dimension of the parameters :math:`\bfmu` that the operator acts
-        on.
+ r"""Dimension :math:`p` of the parameter vector :math:`\bfmu` that the + operator matrix depends on. """ return self.__p diff --git a/src/opinf/operators/_interpolate.py b/src/opinf/operators/_interpolate.py index a0758fad..774b24a6 100644 --- a/src/opinf/operators/_interpolate.py +++ b/src/opinf/operators/_interpolate.py @@ -177,6 +177,8 @@ def set_training_parameters(self, training_parameters): if parameters.ndim not in (1, 2): raise ValueError("parameter values must be scalars or 1D arrays") self._set_parameter_dimension_from_values(parameters) + if parameters.ndim == 2 and parameters.shape[-1] == 1: + parameters = parameters.ravel() self.__parameters = parameters @property @@ -262,17 +264,21 @@ def set_interpolator(self, InterpolatorClass): This can be, e.g., a class from :mod:`scipy.interpolate`. """ if self.entries is not None: + params = self.training_parameters + entries = self.entries + # Default interpolator classes. if InterpolatorClass is None: - if (dim := self.training_parameters.ndim) == 1: + if (dim := params.ndim) == 1: InterpolatorClass = spinterp.CubicSpline + paramsort = np.argsort(params) + params = params[paramsort] + entries = self.entries[paramsort] elif dim == 2: InterpolatorClass = spinterp.LinearNDInterpolator - self.__interpolator = InterpolatorClass( - self.training_parameters, - self.entries, - ) + # Do the interpolation. 
+ self.__interpolator = InterpolatorClass(params, entries) self.__InterpolatorClass = InterpolatorClass @@ -313,6 +319,21 @@ def __eq__(self, other) -> bool: return np.allclose(self.entries, other.entries) return True + def __str__(self): + lines = ParametricOpInfOperator.__str__(self).split("\n") + + nparams = "None" + if (params := self.training_parameters) is not None: + nparams = len(params) + lines.insert(-1, f" training parameters: {nparams}") + + ICname = "None" + if (IC := self.__InterpolatorClass) is not None: + ICname = IC.__name__ + lines.insert(-1, f" type(interpolator): {ICname}") + + return "\n".join(lines) + # Evaluation -------------------------------------------------------------- @utils.requires("entries") def evaluate(self, parameter): @@ -330,6 +351,8 @@ def evaluate(self, parameter): Nonparametric operator corresponding to the parameter value. """ self._check_parametervalue_dimension(parameter) + if self.parameter_dimension == 1 and not np.isscalar(parameter): + parameter = parameter[0] return self.OperatorClass(self.interpolator(parameter)) # Dimensionality reduction ------------------------------------------------ @@ -459,7 +482,7 @@ def copy(self): def save(self, savefile: str, overwrite: bool = False) -> None: """Save the operator to an HDF5 file. - If the :attr:`InterpolatorClass` is not from :mod:`scipy.interpolate`, + If the :attr:`interpolator` is not from :mod:`scipy.interpolate`, it must be passed to :meth:`load()` when recovering the operator. Parameters @@ -624,7 +647,7 @@ class InterpolatedLinearOperator(_InterpolatedOperator): * :math:`\bfmu_0,\ldots,\bfmu_{s-1}\in\RR^p` are the (fixed) training parameter values, and * :math:`\Ahat^{(i)} = \Ahat(\bfmu_i) \in \RR^{r \times r}` - is the operator matrix for the :math:`i`th training parameter value. + is the operator matrix for training parameter value :math:`\bfmu_i`. 
See :class:`opinf.operators.LinearOperator` From c00468db883bbd5426bf63e0452936f2002a628e Mon Sep 17 00:00:00 2001 From: Shane Date: Wed, 14 Aug 2024 17:05:13 -0600 Subject: [PATCH 08/48] fix operators doc sidebar --- docs/source/api/operators.ipynb | 3 --- 1 file changed, 3 deletions(-) diff --git a/docs/source/api/operators.ipynb b/docs/source/api/operators.ipynb index 5df4eb83..3729ca14 100644 --- a/docs/source/api/operators.ipynb +++ b/docs/source/api/operators.ipynb @@ -197,7 +197,6 @@ "source": [ "```{eval-rst}\n", ".. autosummary::\n", - " :toctree: _autosummaries\n", " :nosignatures:\n", "\n", " ConstantOperator\n", @@ -1118,7 +1117,6 @@ ".. currentmodule:: opinf.operators\n", "\n", ".. autosummary::\n", - " :toctree: _autosummaries\n", " :nosignatures:\n", "\n", " AffineConstantOperator\n", @@ -1320,7 +1318,6 @@ ".. currentmodule:: opinf.operators\n", "\n", ".. autosummary::\n", - " :toctree: _autosummaries\n", " :nosignatures:\n", "\n", " InterpolatedConstantOperator\n", From 02cd96ff856b1577be8fd65e31d6c8133d9fa6e8 Mon Sep 17 00:00:00 2001 From: Shane Date: Wed, 14 Aug 2024 18:16:36 -0600 Subject: [PATCH 09/48] first draft of noninterpolatory parametric models --- docs/source/api/models.md | 34 +++++++++++++++++++-- src/opinf/models/mono/_parametric.py | 45 ++++++++++++++++++++++------ 2 files changed, 68 insertions(+), 11 deletions(-) diff --git a/docs/source/api/models.md b/docs/source/api/models.md index 92d705ce..9a0f70bb 100644 --- a/docs/source/api/models.md +++ b/docs/source/api/models.md @@ -2,6 +2,28 @@ ```{eval-rst} .. automodule:: opinf.models + +.. currentmodule:: opinf.models + +**Nonparametric Models** + +.. autosummary:: + :toctree: _autosummaries + :nosignatures: + + ContinuousModel + DiscreteModel + +**Parametric Models** + +.. 
autosummary:: + :toctree: _autosummaries + :nosignatures: + + AffineContinuousModel + AffineDiscreteModel + InterpolatedContinuousModel + InterpolatedDiscreteModel ``` :::{admonition} Overview @@ -55,7 +77,6 @@ A _nonparametric_ model is comprised exclusively of [nonparametric operators](se .. currentmodule:: opinf.models .. autosummary:: - :toctree: _autosummaries :nosignatures: ContinuousModel @@ -123,6 +144,16 @@ A _parametric model_ is a model with at least one [parametric operator](sec-oper Parametric models are similar to nonparametric models: they are initialized with a list of operators, use `fit()` to calibrate operator entries, and `predict()` to solve the model. In addition, parametric models have an `evaluate()` method that returns a nonparametric model at a fixed parameter value. +```{eval-rst} +.. currentmodule:: opinf.models + +.. autosummary:: + :nosignatures: + + ParametricContinuousModel + ParametricDiscreteModel +``` + ### Interpolated Models Interpolated models consist exclusively of [interpolated operators](sec-operators-interpolated). @@ -131,7 +162,6 @@ Interpolated models consist exclusively of [interpolated operators](sec-operator .. currentmodule:: opinf.models .. autosummary:: - :toctree: _autosummaries :nosignatures: InterpolatedContinuousModel diff --git a/src/opinf/models/mono/_parametric.py b/src/opinf/models/mono/_parametric.py index 6276d89e..1487d2c1 100644 --- a/src/opinf/models/mono/_parametric.py +++ b/src/opinf/models/mono/_parametric.py @@ -2,8 +2,8 @@ """Parametric monolithic dynamical systems models.""" __all__ = [ - # "ParametricDiscreteModel", - # "ParametricContinuousModel", + "ParametricDiscreteModel", + "ParametricContinuousModel", "InterpolatedDiscreteModel", "InterpolatedContinuousModel", ] @@ -264,16 +264,26 @@ def _check_valid_dimension2(dataset, label): # Subtract known operator evaluations from the LHS. 
for ell in self._indices_of_known_operators: + op = self.operators[ell] + _isparametric = _operators.is_parametric(op) for i, lhsi in enumerate(lhs): - lhs[i] = lhsi - self.operators[ell].apply( - parameters[i], states[i], inputs[i] - ) + _args = [states[i], inputs[i]] + if _isparametric: + _args.insert(0, parameters[i]) + lhs[i] = lhsi - op.apply(*_args) return parameters, states, lhs, inputs def _assemble_data_matrix(self, parameters, states, inputs): """Assemble the data matrix for operator inference.""" - raise NotImplementedError("future release") + blocks = [] + for i in self._indices_of_operators_to_infer: + op = self.operators[i] + if not _operators.is_parametric(op): + blocks.append(np.hstack(states).T) + else: + blocks.append(op.datablock(parameters, states, inputs).T) + return np.hstack(blocks) def _fit_solver(self, parameters, states, lhs, inputs=None): """Construct a solver for the operator inference least-squares @@ -288,10 +298,24 @@ def _fit_solver(self, parameters, states, lhs, inputs=None): # Set up non-intrusive learning. D = self._assemble_data_matrix(parameters_, states_, inputs_) self.solver.fit(D, np.hstack(lhs_)) + self.__s = len(parameters_) def _extract_operators(self, Ohat): """Unpack the operator matrix and populate operator entries.""" - raise NotImplementedError("future release") + index = 0 + for i in self._indices_of_operators_to_infer: + op = self.operators[i] + if _operators.is_parametric(op): + endex = index + op.operator_dimension( + self.__s, self.state_dimension, self.input_dimension + ) + op.set_entries(Ohat[:, index:endex], fromblock=True) + else: + endex = index + op.operator_dimension( + self.state_dimension, self.input_dimension + ) + op.set_entries(Ohat[:, index:endex]) + index = endex def refit(self): """Solve the Operator Inference regression using the data from the @@ -398,7 +422,10 @@ def evaluate(self, parameter): Nonparametric model of type ``ModelClass``. 
""" return self.ModelClass( - [op.evaluate(parameter) for op in self.operators] + [ + op.evaluate(parameter) if _operators.is_parametric(op) else op + for op in self.operators + ] ) def rhs(self, parameter, *args, **kwargs): @@ -973,7 +1000,7 @@ class ParametricContinuousModel(_ParametricContinuousMixin, _ParametricModel): pass -# Special case: fully interpolation-based models ============================== +# Special case: completely interpolation-based models ========================= class _InterpolatedModel(_ParametricModel): """Base class for parametric monolithic models where all operators MUST be interpolation-based parametric operators. In this special case, the From e7604f821ed994611f224b557b36a9ffb3ada871 Mon Sep 17 00:00:00 2001 From: Shane Date: Tue, 20 Aug 2024 10:42:04 -0600 Subject: [PATCH 10/48] allow sparse matrices for operator entries --- src/opinf/operators/_affine.py | 7 +++---- src/opinf/operators/_base.py | 4 +++- tests/operators/test_base.py | 6 ++++++ tests/operators/test_nonparametric.py | 10 ++++++++++ 4 files changed, 22 insertions(+), 5 deletions(-) diff --git a/src/opinf/operators/_affine.py b/src/opinf/operators/_affine.py index 5f3fcd6d..ca200a06 100644 --- a/src/opinf/operators/_affine.py +++ b/src/opinf/operators/_affine.py @@ -144,7 +144,7 @@ def set_entries(self, entries, fromblock: bool = False) -> None: ParametricOpInfOperator.set_entries( self, - np.array([self.OperatorClass(A).entries for A in entries]), + [self.OperatorClass(A).entries for A in entries], ) # Evaluation -------------------------------------------------------------- @@ -165,12 +165,11 @@ def evaluate(self, parameter): if self.parameter_dimension is None: self._set_parameter_dimension_from_values([parameter]) self._check_parametervalue_dimension(parameter) - entries = np.sum( + entries = sum( [ theta(parameter) * A for theta, A in zip(self.coefficient_functions, self.entries) - ], - axis=0, + ] ) return self.OperatorClass(entries) diff --git 
a/src/opinf/operators/_base.py b/src/opinf/operators/_base.py index 1d9a2bd9..58c69683 100644 --- a/src/opinf/operators/_base.py +++ b/src/opinf/operators/_base.py @@ -534,7 +534,9 @@ def _clear(self): @staticmethod def _validate_entries(entries): """Ensure argument is a NumPy array and screen for NaN, Inf entries.""" - if not (isinstance(entries, np.ndarray) or sparse.issparse(entries)): + if sparse.issparse(entries): + return + if not isinstance(entries, np.ndarray): raise TypeError( "operator entries must be NumPy or scipy.sparse array" ) diff --git a/tests/operators/test_base.py b/tests/operators/test_base.py index bacb795d..8ac137b4 100644 --- a/tests/operators/test_base.py +++ b/tests/operators/test_base.py @@ -6,6 +6,7 @@ import pytest import numpy as np import scipy.linalg as la +import scipy.sparse as sparse import matplotlib.pyplot as plt import opinf @@ -438,6 +439,11 @@ def test_validate_entries(self): "operator entries must be NumPy or scipy.sparse array" ) + A = sparse.dok_array((3, 4), dtype=np.float64) + A[1, 2] = 2 + A[0, 1] = -1 + func(A) + A = np.arange(12, dtype=float).reshape((4, 3)).T A[0, 0] = np.nan with pytest.raises(ValueError) as ex: diff --git a/tests/operators/test_nonparametric.py b/tests/operators/test_nonparametric.py index edcda7d8..fa273122 100644 --- a/tests/operators/test_nonparametric.py +++ b/tests/operators/test_nonparametric.py @@ -4,6 +4,7 @@ import pytest import numpy as np import scipy.linalg as la +import scipy.sparse as sparse import opinf @@ -210,6 +211,15 @@ def test_set_entries(self): assert op.state_dimension == 1 assert op[0, 0] == a + # Sparse matrix. 
+ A = np.random.random((100, 100)) + A[A < 0.95] = 0 + A = sparse.csr_matrix(A) + op.set_entries(A) + assert op.state_dimension == 100 + assert op.shape == (100, 100) + assert sparse.issparse(op.entries) + def test_apply(self, k=20): """Test apply()/__call__().""" op = self.Operator() From ea22ea5f8b7336f92b2d8fd18a6a6462bfa80394 Mon Sep 17 00:00:00 2001 From: Shane Date: Tue, 20 Aug 2024 17:26:39 -0600 Subject: [PATCH 11/48] start affine tests, late binding warning and fix --- docs/source/api/models.md | 4 +- src/opinf/operators/_affine.py | 282 +++++++++++++++++++++++++++++++-- src/opinf/operators/_base.py | 9 ++ tests/operators/test_affine.py | 133 ++++++++++++++++ 4 files changed, 417 insertions(+), 11 deletions(-) create mode 100644 tests/operators/test_affine.py diff --git a/docs/source/api/models.md b/docs/source/api/models.md index 9a0f70bb..10d7475b 100644 --- a/docs/source/api/models.md +++ b/docs/source/api/models.md @@ -20,8 +20,8 @@ :toctree: _autosummaries :nosignatures: - AffineContinuousModel - AffineDiscreteModel + ParametricContinuousModel + ParametricDiscreteModel InterpolatedContinuousModel InterpolatedDiscreteModel ``` diff --git a/src/opinf/operators/_affine.py b/src/opinf/operators/_affine.py index ca200a06..a221a3bb 100644 --- a/src/opinf/operators/_affine.py +++ b/src/opinf/operators/_affine.py @@ -48,10 +48,12 @@ class _AffineOperator(ParametricOpInfOperator): Parameters ---------- - coefficient_functions : iterable of callables + coefficient_functions : (iterable of callables) or int Scalar-valued coefficient functions for each term of the affine expansion, i.e., :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}.` + If an integer :math:`p` is provided, set :math:`A_{\ell} = p` and + define :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. 
entries : list of ndarrays, or None Operator matrices for each term of the affine expansion, i.e., :math:`\Ohat_{\ell}^{(0)},\ldots,\Ohat_{\ell}^{(A_{\ell}-1)}.` @@ -60,6 +62,40 @@ class _AffineOperator(ParametricOpInfOperator): If ``True``, interpret ``entries`` as a horizontal concatenation of arrays; if ``False`` (default), interpret ``entries`` as a list of arrays. + + Warnings + -------- + A common choice for the ``coefficient_functions`` is for the :math:`i`-th + coefficient function to return the :math:`i`-th component of the parameter + vector, i.e., :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_{i}`. The following + implementation for this choice results in a subtle but serious error: + + .. code-block:: python + + coefficient_functions = [lambda mu: mu[i] for i in range(nterms)] + + Due to the late binding behavior of closures in Python, the ``lambda`` + functions do not capture the value of the variable ``i`` at each iteration. + When any of the ``lambda`` functions are called, they use the value of + ``i`` at the time of the call, which, after the loop, is always + ``nterms - 1``. To avoid this issue, use ``coefficient_functions=p`` in the + constructor, where ``p`` is the dimension of the parameter vector. For + related scenarios, avoid this pitfall by writing `lambda` function with + the index given explicitly, or by using a function factory. + + .. code-block:: python + + coefficient_functions = [ + lambda mu: mu[0], + lambda mu: mu[1], + lambda mu: mu[2], + # ... + ] + + # Alternatively, define a function factory. 
+ def coeff_function(i : int): + return lambda mu: mu[i] + coefficient_functions = [coeff_function(i) for i in range(nterms)] """ # Initialization ---------------------------------------------------------- @@ -67,11 +103,23 @@ def __init__( self, coefficient_functions, entries=None, - fromblock=False, + fromblock: bool = False, ): """Set coefficient functions and (if given) operator matrices.""" ParametricOpInfOperator.__init__(self) + # Shortcut: theta[i](mu) = mu[i]. + if isinstance(coefficient_functions, int): + self.parameter_dimension = coefficient_functions + + def componentgetter(i: int): + """Make a function that returns the i-th value of its input.""" + return lambda mu: mu[i] + + coefficient_functions = [ + componentgetter(i) for i in range(self.parameter_dimension) + ] + # Ensure that the coefficient functions are callable. if any(not callable(theta) for theta in coefficient_functions): raise TypeError( @@ -92,7 +140,7 @@ def coefficient_functions(self) -> tuple: return self.__thetas @property - def entries(self) -> np.ndarray: + def entries(self) -> list: r"""Operator matrices for each term of the affine expansion, i.e., :math:`\Ohat_{\ell}^{(0)},\ldots,\Ohat_{\ell}^{(A_{\ell}-1)}.` """ @@ -402,10 +450,12 @@ class AffineConstantOperator(_AffineOperator): Parameters ---------- - coefficient_functions : list of callables + coefficient_functions : (iterable of callables) or int Scalar-valued coefficient functions for each term of the affine expansion, i.e., :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}.` + If an integer :math:`p` is provided, set :math:`A_{\ell} = p` and + define :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. 
entries : list of ndarrays, or None Operator matrices for each term of the affine expansion, i.e., :math:`\Ohat_{\ell}^{(0)},\ldots,\Ohat_{\ell}^{(A_{\ell}-1)}.` @@ -414,6 +464,40 @@ class AffineConstantOperator(_AffineOperator): If ``True``, interpret ``entries`` as a horizontal concatenation of arrays; if ``False`` (default), interpret ``entries`` as a list of arrays. + + Warnings + -------- + A common choice for the ``coefficient_functions`` is for the :math:`i`-th + coefficient function to return the :math:`i`-th component of the parameter + vector, i.e., :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_{i}`. The following + implementation for this choice results in a subtle but serious error: + + .. code-block:: python + + coefficient_functions = [lambda mu: mu[i] for i in range(nterms)] + + Due to the late binding behavior of closures in Python, the ``lambda`` + functions do not capture the value of the variable ``i`` at each iteration. + When any of the ``lambda`` functions are called, they use the value of + ``i`` at the time of the call, which, after the loop, is always + ``nterms - 1``. To avoid this issue, use ``coefficient_functions=p`` in the + constructor, where ``p`` is the dimension of the parameter vector. For + related scenarios, avoid this pitfall by writing `lambda` function with + the index given explicitly, or by using a function factory. + + .. code-block:: python + + coefficient_functions = [ + lambda mu: mu[0], + lambda mu: mu[1], + lambda mu: mu[2], + # ... + ] + + # Alternatively, define a function factory. 
+ def coeff_function(i : int): + return lambda mu: mu[i] + coefficient_functions = [coeff_function(i) for i in range(nterms)] """ _OperatorClass = ConstantOperator @@ -433,10 +517,12 @@ class AffineLinearOperator(_AffineOperator): Parameters ---------- - coefficient_functions : list of callables + coefficient_functions : (iterable of callables) or int Scalar-valued coefficient functions for each term of the affine expansion, i.e., :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}.` + If an integer :math:`p` is provided, set :math:`A_{\ell} = p` and + define :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. entries : list of ndarrays, or None Operator matrices for each term of the affine expansion, i.e., :math:`\Ahat_{\ell}^{(0)},\ldots,\Ahat_{\ell}^{(A_{\ell}-1)}.` @@ -445,6 +531,40 @@ class AffineLinearOperator(_AffineOperator): If ``True``, interpret ``entries`` as a horizontal concatenation of arrays; if ``False`` (default), interpret ``entries`` as a list of arrays. + + Warnings + -------- + A common choice for the ``coefficient_functions`` is for the :math:`i`-th + coefficient function to return the :math:`i`-th component of the parameter + vector, i.e., :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_{i}`. The following + implementation for this choice results in a subtle but serious error: + + .. code-block:: python + + coefficient_functions = [lambda mu: mu[i] for i in range(nterms)] + + Due to the late binding behavior of closures in Python, the ``lambda`` + functions do not capture the value of the variable ``i`` at each iteration. + When any of the ``lambda`` functions are called, they use the value of + ``i`` at the time of the call, which, after the loop, is always + ``nterms - 1``. To avoid this issue, use ``coefficient_functions=p`` in the + constructor, where ``p`` is the dimension of the parameter vector. For + related scenarios, avoid this pitfall by writing `lambda` function with + the index given explicitly, or by using a function factory. 
+ + .. code-block:: python + + coefficient_functions = [ + lambda mu: mu[0], + lambda mu: mu[1], + lambda mu: mu[2], + # ... + ] + + # Alternatively, define a function factory. + def coeff_function(i : int): + return lambda mu: mu[i] + coefficient_functions = [coeff_function(i) for i in range(nterms)] """ _OperatorClass = LinearOperator @@ -464,10 +584,12 @@ class AffineQuadraticOperator(_AffineOperator): Parameters ---------- - coefficient_functions : list of callables + coefficient_functions : (iterable of callables) or int Scalar-valued coefficient functions for each term of the affine expansion, i.e., :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}.` + If an integer :math:`p` is provided, set :math:`A_{\ell} = p` and + define :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. entries : list of ndarrays, or None Operator matrices for each term of the affine expansion, i.e., :math:`\Hhat_{\ell}^{(0)},\ldots,\Hhat_{\ell}^{(A_{\ell}-1)}.` @@ -476,6 +598,40 @@ class AffineQuadraticOperator(_AffineOperator): If ``True``, interpret ``entries`` as a horizontal concatenation of arrays; if ``False`` (default), interpret ``entries`` as a list of arrays. + + Warnings + -------- + A common choice for the ``coefficient_functions`` is for the :math:`i`-th + coefficient function to return the :math:`i`-th component of the parameter + vector, i.e., :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_{i}`. The following + implementation for this choice results in a subtle but serious error: + + .. code-block:: python + + coefficient_functions = [lambda mu: mu[i] for i in range(nterms)] + + Due to the late binding behavior of closures in Python, the ``lambda`` + functions do not capture the value of the variable ``i`` at each iteration. + When any of the ``lambda`` functions are called, they use the value of + ``i`` at the time of the call, which, after the loop, is always + ``nterms - 1``. 
To avoid this issue, use ``coefficient_functions=p`` in the + constructor, where ``p`` is the dimension of the parameter vector. For + related scenarios, avoid this pitfall by writing `lambda` function with + the index given explicitly, or by using a function factory. + + .. code-block:: python + + coefficient_functions = [ + lambda mu: mu[0], + lambda mu: mu[1], + lambda mu: mu[2], + # ... + ] + + # Alternatively, define a function factory. + def coeff_function(i : int): + return lambda mu: mu[i] + coefficient_functions = [coeff_function(i) for i in range(nterms)] """ _OperatorClass = QuadraticOperator @@ -495,10 +651,12 @@ class AffineCubicOperator(_AffineOperator): Parameters ---------- - coefficient_functions : list of callables + coefficient_functions : (iterable of callables) or int Scalar-valued coefficient functions for each term of the affine expansion, i.e., :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}.` + If an integer :math:`p` is provided, set :math:`A_{\ell} = p` and + define :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. entries : list of ndarrays, or None Operator matrices for each term of the affine expansion, i.e., :math:`\Ghat_{\ell}^{(0)},\ldots,\Ghat_{\ell}^{(A_{\ell}-1)}.` @@ -507,6 +665,40 @@ class AffineCubicOperator(_AffineOperator): If ``True``, interpret ``entries`` as a horizontal concatenation of arrays; if ``False`` (default), interpret ``entries`` as a list of arrays. + + Warnings + -------- + A common choice for the ``coefficient_functions`` is for the :math:`i`-th + coefficient function to return the :math:`i`-th component of the parameter + vector, i.e., :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_{i}`. The following + implementation for this choice results in a subtle but serious error: + + .. 
code-block:: python + + coefficient_functions = [lambda mu: mu[i] for i in range(nterms)] + + Due to the late binding behavior of closures in Python, the ``lambda`` + functions do not capture the value of the variable ``i`` at each iteration. + When any of the ``lambda`` functions are called, they use the value of + ``i`` at the time of the call, which, after the loop, is always + ``nterms - 1``. To avoid this issue, use ``coefficient_functions=p`` in the + constructor, where ``p`` is the dimension of the parameter vector. For + related scenarios, avoid this pitfall by writing `lambda` function with + the index given explicitly, or by using a function factory. + + .. code-block:: python + + coefficient_functions = [ + lambda mu: mu[0], + lambda mu: mu[1], + lambda mu: mu[2], + # ... + ] + + # Alternatively, define a function factory. + def coeff_function(i : int): + return lambda mu: mu[i] + coefficient_functions = [coeff_function(i) for i in range(nterms)] """ _OperatorClass = CubicOperator @@ -526,10 +718,12 @@ class AffineInputOperator(_AffineOperator, InputMixin): Parameters ---------- - coefficient_functions : list of callables + coefficient_functions : (iterable of callables) or int Scalar-valued coefficient functions for each term of the affine expansion, i.e., :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}.` + If an integer :math:`p` is provided, set :math:`A_{\ell} = p` and + define :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. entries : list of ndarrays, or None Operator matrices for each term of the affine expansion, i.e., :math:`\Bhat_{\ell}^{(0)},\ldots,\Bhat_{\ell}^{(A_{\ell}-1)}.` @@ -538,6 +732,40 @@ class AffineInputOperator(_AffineOperator, InputMixin): If ``True``, interpret ``entries`` as a horizontal concatenation of arrays; if ``False`` (default), interpret ``entries`` as a list of arrays. 
+ + Warnings + -------- + A common choice for the ``coefficient_functions`` is for the :math:`i`-th + coefficient function to return the :math:`i`-th component of the parameter + vector, i.e., :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_{i}`. The following + implementation for this choice results in a subtle but serious error: + + .. code-block:: python + + coefficient_functions = [lambda mu: mu[i] for i in range(nterms)] + + Due to the late binding behavior of closures in Python, the ``lambda`` + functions do not capture the value of the variable ``i`` at each iteration. + When any of the ``lambda`` functions are called, they use the value of + ``i`` at the time of the call, which, after the loop, is always + ``nterms - 1``. To avoid this issue, use ``coefficient_functions=p`` in the + constructor, where ``p`` is the dimension of the parameter vector. For + related scenarios, avoid this pitfall by writing `lambda` function with + the index given explicitly, or by using a function factory. + + .. code-block:: python + + coefficient_functions = [ + lambda mu: mu[0], + lambda mu: mu[1], + lambda mu: mu[2], + # ... + ] + + # Alternatively, define a function factory. + def coeff_function(i : int): + return lambda mu: mu[i] + coefficient_functions = [coeff_function(i) for i in range(nterms)] """ _OperatorClass = InputOperator @@ -562,10 +790,12 @@ class AffineStateInputOperator(_AffineOperator, InputMixin): Parameters ---------- - coefficient_functions : list of callables + coefficient_functions : (iterable of callables) or int Scalar-valued coefficient functions for each term of the affine expansion, i.e., :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}.` + If an integer :math:`p` is provided, set :math:`A_{\ell} = p` and + define :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. 
entries : list of ndarrays, or None Operator matrices for each term of the affine expansion, i.e., :math:`\Nhat_{\ell}^{(0)},\ldots,\Nhat_{\ell}^{(A_{\ell}-1)}.` @@ -574,6 +804,40 @@ class AffineStateInputOperator(_AffineOperator, InputMixin): If ``True``, interpret ``entries`` as a horizontal concatenation of arrays; if ``False`` (default), interpret ``entries`` as a list of arrays. + + Warnings + -------- + A common choice for the ``coefficient_functions`` is for the :math:`i`-th + coefficient function to return the :math:`i`-th component of the parameter + vector, i.e., :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_{i}`. The following + implementation for this choice results in a subtle but serious error: + + .. code-block:: python + + coefficient_functions = [lambda mu: mu[i] for i in range(nterms)] + + Due to the late binding behavior of closures in Python, the ``lambda`` + functions do not capture the value of the variable ``i`` at each iteration. + When any of the ``lambda`` functions are called, they use the value of + ``i`` at the time of the call, which, after the loop, is always + ``nterms - 1``. To avoid this issue, use ``coefficient_functions=p`` in the + constructor, where ``p`` is the dimension of the parameter vector. For + related scenarios, avoid this pitfall by writing `lambda` function with + the index given explicitly, or by using a function factory. + + .. code-block:: python + + coefficient_functions = [ + lambda mu: mu[0], + lambda mu: mu[1], + lambda mu: mu[2], + # ... + ] + + # Alternatively, define a function factory. 
+ def coeff_function(i : int): + return lambda mu: mu[i] + coefficient_functions = [coeff_function(i) for i in range(nterms)] """ _OperatorClass = StateInputOperator diff --git a/src/opinf/operators/_base.py b/src/opinf/operators/_base.py index 58c69683..739a34c6 100644 --- a/src/opinf/operators/_base.py +++ b/src/opinf/operators/_base.py @@ -1228,6 +1228,15 @@ def parameter_dimension(self) -> int: """ return self.__p + @parameter_dimension.setter + def parameter_dimension(self, p): + """Set :attr:`parameter_dimension`. + Only allowed if :attr:`parameter_dimension` is currently ``None``. + """ + if self.__p is not None: + raise AttributeError("can't set property 'parameter_dimension'") + self.__p = int(p) + @property def shape(self) -> tuple: """Shape of the operator matrix when evaluated diff --git a/tests/operators/test_affine.py b/tests/operators/test_affine.py new file mode 100644 index 00000000..14aa8727 --- /dev/null +++ b/tests/operators/test_affine.py @@ -0,0 +1,133 @@ +# operators/test_affine.py +"""Tests for operators._affine.""" + +import abc +import pytest +import numpy as np + +import opinf + +_module = opinf.operators._affine + + +class _TestAffineOperator: + """Test operators._affine._AffineOperator.""" + + OpClass = NotImplemented + + thetas = [ + (lambda mu: mu[0]), + (lambda mu: mu[1]), + (lambda mu: mu[2]), + (lambda mu: mu[1] * mu[2] ** 2), + ] + + @abc.abstractmethod + def entries_shape(self, r, m): + raise NotImplementedError + + def test_init(self, p=6): + """Test __init__() and properties.""" + + bad_thetas = [1, 2, 3] + with pytest.raises(TypeError) as ex: + self.OpClass(bad_thetas) + assert ex.value.args[0] == ( + "coefficient_functions must be collection of callables" + ) + ncoeffs = len(self.thetas) + + op = self.OpClass(self.thetas) + assert op.parameter_dimension is None + assert op.entries is None + assert len(op.coefficient_functions) == ncoeffs + assert op.nterms == ncoeffs + mu = np.random.random(ncoeffs) + for i in range(ncoeffs): 
+ opimu = op.coefficient_functions[i](mu) + truth = self.thetas[i](mu) + assert opimu == truth + + # Shortcut: coefficient_functions as an integer. + op = self.OpClass(p) + assert op.parameter_dimension == p + assert op.entries is None + assert op.nterms == p + mu = np.random.random(p) + for i in range(p): + assert op.coefficient_functions[i](mu) == mu[i] + + def test_entries(self, r=10, m=3): + """Test set_entries() and entries property.""" + ncoeffs = len(self.thetas) + shape = self.entries_shape(r, m) + arrays = [np.random.random(shape) for _ in range(ncoeffs)] + + op = self.OpClass(self.thetas) + assert op.entries is None + op.set_entries(arrays) + for i in range(ncoeffs): + assert np.all(op.entries[i] == arrays[i]) + + op = self.OpClass(self.thetas, arrays) + for i in range(ncoeffs): + assert np.all(op.entries[i] == arrays[i]) + + +# Test public classes ========================================================= +class TestAffineConstantOperator(_TestAffineOperator): + """Test AffineConstantOperator.""" + + OpClass = _module.AffineConstantOperator + + @staticmethod + def entries_shape(r, m): + return (r,) + + +class TestAffineLinearOperator(_TestAffineOperator): + """Test AffineLinearOperator.""" + + OpClass = _module.AffineLinearOperator + + @staticmethod + def entries_shape(r, m): + return (r, r) + + +class TestAffineQuadraticOperator(_TestAffineOperator): + """Test AffineQuadraticOperator.""" + + OpClass = _module.AffineQuadraticOperator + + @staticmethod + def entries_shape(r, m): + return (r, int(r * (r + 1) / 2)) + + +class TestAffineCubicOperator(_TestAffineOperator): + """Test AffineCubicOperator.""" + + OpClass = _module.AffineCubicOperator + + @staticmethod + def entries_shape(r, m): + return (r, int(r * (r + 1) * (r + 2) / 6)) + + +class TestAffineInputOperator(_TestAffineOperator): + """Test AffineInputOperator.""" + + OpClass = _module.AffineInputOperator + + @staticmethod + def entries_shape(r, m): + return (r, m) + + +class 
TestAffineStateInputOperator(_TestAffineOperator): + OpClass = _module.AffineStateInputOperator + + @staticmethod + def entries_shape(r, m): + return (r, r * m) From cabec307af65e8fa0ff3b6d9abf721d21035df15 Mon Sep 17 00:00:00 2001 From: Shane Date: Wed, 21 Aug 2024 17:01:02 -0600 Subject: [PATCH 12/48] remove entries setter/deleter from parametric operators --- src/opinf/operators/_affine.py | 12 +++------- src/opinf/operators/_base.py | 10 -------- src/opinf/operators/_interpolate.py | 10 -------- tests/operators/test_affine.py | 37 +++++++++++++++++++++++++++++ tests/operators/test_interpolate.py | 4 ++-- 5 files changed, 42 insertions(+), 31 deletions(-) diff --git a/src/opinf/operators/_affine.py b/src/opinf/operators/_affine.py index a221a3bb..e131d400 100644 --- a/src/opinf/operators/_affine.py +++ b/src/opinf/operators/_affine.py @@ -146,14 +146,6 @@ def entries(self) -> list: """ return ParametricOpInfOperator.entries.fget(self) - @entries.setter - def entries(self, entries): - ParametricOpInfOperator.entries.fset(self, entries) - - @entries.deleter - def entries(self): - ParametricOpInfOperator.entries.fdel(self) - @property def nterms(self): r"""Number of terms :math:`A_{\ell}` in the affine expansion.""" @@ -176,7 +168,9 @@ def set_entries(self, entries, fromblock: bool = False) -> None: # Extract / verify the entries. 
nterms = self.nterms if fromblock: - if entries.ndim not in (1, 2): + if not isinstance(entries, np.ndarray) or ( + entries.ndim not in (1, 2) + ): raise ValueError( "entries must be a 1- or 2-dimensional ndarray " "when fromblock=True" diff --git a/src/opinf/operators/_base.py b/src/opinf/operators/_base.py index 739a34c6..b9c9260d 100644 --- a/src/opinf/operators/_base.py +++ b/src/opinf/operators/_base.py @@ -1251,16 +1251,6 @@ def entries(self): """ return self.__entries - @entries.setter - def entries(self, entries): - """Set the arrays defining the operator matrix.""" - self.set_entries(entries) - - @entries.deleter - def entries(self): - """Reset the ``entries`` attribute.""" - self._clear() - @abc.abstractmethod def set_entries(self, entries, fromblock: bool = False) -> None: r"""Set the arrays that define the operator matrix as a function of diff --git a/src/opinf/operators/_interpolate.py b/src/opinf/operators/_interpolate.py index 774b24a6..0fc99ed0 100644 --- a/src/opinf/operators/_interpolate.py +++ b/src/opinf/operators/_interpolate.py @@ -189,16 +189,6 @@ def entries(self) -> np.ndarray: """ return ParametricOpInfOperator.entries.fget(self) - @entries.setter - def entries(self, entries): - """Set the operator matrices.""" - ParametricOpInfOperator.entries.fset(self, entries) - - @entries.deleter - def entries(self): - """Reset the ``entries`` attribute.""" - ParametricOpInfOperator.entries.fdel(self) - def set_entries(self, entries, fromblock: bool = False) -> None: r"""Set the operator matrices at the training parameter values. 
diff --git a/tests/operators/test_affine.py b/tests/operators/test_affine.py index 14aa8727..69a89daf 100644 --- a/tests/operators/test_affine.py +++ b/tests/operators/test_affine.py @@ -22,6 +22,8 @@ class _TestAffineOperator: (lambda mu: mu[1] * mu[2] ** 2), ] + p = 3 + @abc.abstractmethod def entries_shape(self, r, m): raise NotImplementedError @@ -63,6 +65,20 @@ def test_entries(self, r=10, m=3): shape = self.entries_shape(r, m) arrays = [np.random.random(shape) for _ in range(ncoeffs)] + op = self.OpClass(self.thetas) + with pytest.raises(ValueError) as ex: + op.set_entries(np.random.random((2, 3, 2)), fromblock=True) + assert ex.value.args[0] == ( + "entries must be a 1- or 2-dimensional ndarray " + "when fromblock=True" + ) + with pytest.raises(ValueError) as ex: + op.set_entries(arrays[:-1]) + assert ex.value.args[0] == ( + f"{ncoeffs} = len(coefficient_functions) " + f"!= len(entries) = {ncoeffs - 1}" + ) + op = self.OpClass(self.thetas) assert op.entries is None op.set_entries(arrays) @@ -73,6 +89,27 @@ def test_entries(self, r=10, m=3): for i in range(ncoeffs): assert np.all(op.entries[i] == arrays[i]) + op = self.OpClass(self.thetas, np.hstack(arrays), fromblock=True) + for i in range(ncoeffs): + assert np.all(op.entries[i] == arrays[i]) + + def test_evaluate(self, r=9, m=4): + """Test evaluate().""" + ncoeffs = len(self.thetas) + shape = self.entries_shape(r, m) + arrays = [np.random.random(shape) for _ in range(ncoeffs)] + op = self.OpClass(self.thetas, arrays) + + mu = np.random.random(self.p) + op_mu = op.evaluate(mu) + assert isinstance(op_mu, op.OperatorClass) + assert op_mu.entries.shape == arrays[0].shape + Amu = np.sum( + [theta(mu) * A for theta, A in zip(self.thetas, arrays)], + axis=0, + ) + assert np.allclose(op_mu.entries, Amu) + # Test public classes ========================================================= class TestAffineConstantOperator(_TestAffineOperator): diff --git a/tests/operators/test_interpolate.py 
b/tests/operators/test_interpolate.py index 638d4621..98e245f5 100644 --- a/tests/operators/test_interpolate.py +++ b/tests/operators/test_interpolate.py @@ -139,7 +139,7 @@ def test_set_entries(self, s=5, p=3, r=4): # Try without training_parameters set. op = self.Dummy() with pytest.raises(AttributeError) as ex: - op.entries = entries + op.set_entries(entries) assert ex.value.args[0] == ( "training_parameters have not been set, " "call set_training_parameters() first" @@ -182,7 +182,7 @@ def test_set_entries(self, s=5, p=3, r=4): ) # Test deletion. - del op.entries + op._clear() assert op.entries is None assert op.interpolator is None assert op.shape is None From 84ae6929baf4e0cdfebab993c2a7cfd223ffaac0 Mon Sep 17 00:00:00 2001 From: Shane Date: Thu, 22 Aug 2024 13:28:12 -0600 Subject: [PATCH 13/48] hdf5 scipy.sparse utilities --- src/opinf/utils/_hdf5.py | 116 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 116 insertions(+) diff --git a/src/opinf/utils/_hdf5.py b/src/opinf/utils/_hdf5.py index 64000bd7..7234918f 100644 --- a/src/opinf/utils/_hdf5.py +++ b/src/opinf/utils/_hdf5.py @@ -4,15 +4,19 @@ __all__ = [ "hdf5_savehandle", "hdf5_loadhandle", + "save_sparray", + "load_sparray", ] import os import h5py import warnings +import scipy.sparse as sparse from .. import errors +# File handle classes ========================================================= class _hdf5_filehandle: """Get a handle to an open HDF5 file to read or write to. @@ -122,3 +126,115 @@ def __exit__(self, exc_type, exc_value, exc_traceback): raise except Exception as ex: raise errors.LoadfileFormatError(ex.args[0]) from ex + + +# Other tools ================================================================= +def save_sparray(group: h5py.Group, arr: sparse.sparray) -> None: + """Save a :mod:`scipy.sparse` matrix efficiently in an HDF5 group. + + This method mimics the behavior of :meth:`scipy.sparse.save_npz()` but + for an open HDF5 file. See :func:`load_sparray()`. 
+
+    Parameters
+    ----------
+    group : h5py.Group
+        HDF5 group to save the sparse array to.
+    arr : scipy.sparse.sparray
+        Sparse SciPy array, in any sparse format.
+
+    Examples
+    --------
+    >>> import h5py
+    >>> import scipy.sparse as sparse
+    >>> from opinf.utils import save_sparray, load_sparray
+
+    # Create a sparse array.
+    >>> A = sparse.dok_array((100, 100), dtype=float)
+    >>> A[0, 5] = 12
+    >>> A[4, 1] = 123.456
+    >>> A
+    <100x100 sparse array of type ''
+        with 2 stored elements in Dictionary Of Keys format>
+    >>> print(A)
+    (np.int32(0), np.int32(5)) 12.0
+    (np.int32(4), np.int32(1)) 123.456
+
+    # Save the sparse array to an HDF5 file.
+    >>> with h5py.File("myfile.h5", "w") as hf:
+    ...     save_sparray(hf.create_group("sparsearray"), A)
+
+    # Load the sparse array from the file.
+    >>> with h5py.File("myfile.h5", "r") as hf:
+    ...     B = load_sparray(hf["sparsearray"])
+    >>> B
+    <100x100 sparse array of type ''
+        with 2 stored elements in Dictionary Of Keys format>
+    >>> print(B)
+    (np.int32(0), np.int32(5)) 12.0
+    (np.int32(4), np.int32(1)) 123.456
+    """
+    if not sparse.issparse(arr):
+        raise TypeError("first arg must be a scipy.sparse array")
+
+    # Convert to COO format and save data attributes.
+    A = arr.tocoo()
+    group.create_dataset("data", data=A.data)
+    group.create_dataset("row", data=A.row)
+    group.create_dataset("col", data=A.col)
+    group.attrs["shape"] = A.shape
+    group.attrs["arrtype"] = type(arr).__name__[:3]
+
+
+def load_sparray(group: h5py.Group) -> sparse.sparray:
+    """Load a :mod:`scipy.sparse` matrix efficiently from an HDF5 group.
+
+    This method mimics the behavior of :meth:`scipy.sparse.load_npz()` but
+    for an open HDF5 file. See :func:`save_sparray()`.
+
+    Parameters
+    ----------
+    group : h5py.Group
+        HDF5 group to load the sparse array from.
+
+    Returns
+    -------
+    arr : scipy.sparse.sparray
+        Sparse SciPy array, in the sparse format it was in before saving.
+ + Examples + -------- + >>> import h5py + >>> import scipy.sparse as sparse + >>> from opinf.utils import save_sparray, load_sparray + + # Create a sparse array. + >>> A = sparse.dok_array((100, 100), dtype=float) + >>> A[0, 5] = 12 + >>> A[4, 1] = 123.456 + >>> A + <100x100 sparse array of type '' + with 2 stored elements in Dictionary Of Keys format> + >>> print(A) + (np.int32(0), np.int32(5)) 12.0 + (np.int32(4), np.int32(1)) 123.456 + + # Save the sparse array to an HDF5 file. + >>> with h5py.File("myfile.h5", "w") as hf: + ... save_sparray(hf.create_group("sparsearray"), A) + + # Load the sparse array from the file. + >>> with h5py.File("myfile.h5", "r") as hf: + ... B = load_sparray(hf["sparsearray"]) + >>> B + <100x100 sparse array of type '' + with 2 stored elements in Dictionary Of Keys format> + >>> print(B) + (np.int32(0), np.int32(5)) 12.0 + (np.int32(4), np.int32(1)) 123.456 + """ + A = sparse.coo_matrix( + (group["data"], (group["row"], group["col"])), + group.attrs["shape"], + ) + arrtype = str(group.attrs["arrtype"]) + return getattr(A, f"to{arrtype}")() From b200b6f172792bf97e7dc9618df0c69a9ba22fab Mon Sep 17 00:00:00 2001 From: Shane Date: Thu, 22 Aug 2024 13:29:13 -0600 Subject: [PATCH 14/48] sparse arrays for constant, linear operators --- src/opinf/operators/_nonparametric.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/opinf/operators/_nonparametric.py b/src/opinf/operators/_nonparametric.py index 0e77bdf1..c9a4fd70 100644 --- a/src/opinf/operators/_nonparametric.py +++ b/src/opinf/operators/_nonparametric.py @@ -13,6 +13,7 @@ import itertools import numpy as np import scipy.linalg as la +import scipy.sparse as sparse import scipy.special as special from .. import utils @@ -73,7 +74,9 @@ def set_entries(self, entries): entries : (r,) ndarray Operator vector :math:`\chat`. 
""" - if np.isscalar(entries): + if sparse.issparse(entries): + entries = entries.toarray() + elif np.isscalar(entries): entries = np.atleast_1d(entries) self._validate_entries(entries) @@ -240,7 +243,10 @@ def set_entries(self, entries): entries : (r, r) ndarray Operator matrix :math:`\Ahat`. """ - if np.isscalar(entries) or np.shape(entries) == (1,): + if sparse.issparse(entries): + if not isinstance(entries, sparse.csr_array): + entries = entries.tocsr() + elif np.isscalar(entries) or np.shape(entries) == (1,): entries = np.atleast_2d(entries) self._validate_entries(entries) From fcc86b4c680b9c8e3b8ac43c85bfe708bc9297e7 Mon Sep 17 00:00:00 2001 From: Shane Date: Thu, 22 Aug 2024 13:30:09 -0600 Subject: [PATCH 15/48] test affine copy()/save()/load() --- src/opinf/operators/_affine.py | 46 +++++++++++++-- tests/operators/test_affine.py | 105 +++++++++++++++++++++++++++++++++ 2 files changed, 146 insertions(+), 5 deletions(-) diff --git a/src/opinf/operators/_affine.py b/src/opinf/operators/_affine.py index e131d400..a594d36a 100644 --- a/src/opinf/operators/_affine.py +++ b/src/opinf/operators/_affine.py @@ -12,7 +12,9 @@ "AffineStateInputOperator", ] +import h5py import numpy as np +import scipy.sparse as sparse from .. import utils from ._base import ParametricOpInfOperator, InputMixin @@ -291,6 +293,11 @@ def operator_dimension(self, s: int, r: int, m: int) -> int: State dimension. m : int or None Input dimension. + + Returns + ------- + d : int + Number of columns in the concatenated operator matrix. """ return self.nterms * self.OperatorClass.operator_dimension(r, m) @@ -373,11 +380,17 @@ def copy(self): """Return a copy of the operator. Only the operator matrices are copied, not the coefficient functions. 
""" - return self.__class__( + As = None + if self.entries is not None: + As = [A.copy() for A in self.entries] + op = self.__class__( coefficient_functions=self.coefficient_functions, - entries=self.entries.copy() if self.entries is not None else None, + entries=As, fromblock=False, ) + if self.parameter_dimension is not None: + op.parameter_dimension = self.parameter_dimension + return op def save(self, savefile: str, overwrite: bool = False) -> None: """Save the operator to an HDF5 file. @@ -396,8 +409,16 @@ def save(self, savefile: str, overwrite: bool = False) -> None: with utils.hdf5_savehandle(savefile, overwrite) as hf: meta = hf.create_dataset("meta", shape=(0,)) meta.attrs["class"] = self.__class__.__name__ + if (p := self.parameter_dimension) is not None: + meta.attrs["parameter_dimension"] = p if self.entries is not None: - hf.create_dataset("entries", data=self.entries) + group = hf.create_group("entries") + for i, Ai in enumerate(self.entries): + name = f"A{i:d}" + if sparse.issparse(Ai): + utils.save_sparray(group.create_group(name), Ai) + else: + group.create_dataset(name, data=Ai) @classmethod def load(cls, loadfile: str, coefficient_functions): @@ -423,12 +444,27 @@ def load(cls, loadfile: str, coefficient_functions): f"object, use '{ClassName}.load()'" ) - return cls( + entries = None + if "entries" in hf: + entries = [] + group = hf["entries"] + for i in range(len(group)): + obj = group[f"A{i:d}"] + if isinstance(obj, h5py.Dataset): + entries.append(obj[:]) + else: + entries.append(utils.load_sparray(obj)) + + op = cls( coefficient_functions=coefficient_functions, - entries=(hf["entries"][:] if "entries" in hf else None), + entries=entries, fromblock=False, ) + if (key := "parameter_dimension") in hf["meta"].attrs: + op.parameter_dimension = int(hf["meta"].attrs[key]) + return op + # Public affine operator classes ============================================== class AffineConstantOperator(_AffineOperator): diff --git 
a/tests/operators/test_affine.py b/tests/operators/test_affine.py index 69a89daf..89c42be7 100644 --- a/tests/operators/test_affine.py +++ b/tests/operators/test_affine.py @@ -1,9 +1,12 @@ # operators/test_affine.py """Tests for operators._affine.""" +import os import abc import pytest import numpy as np +import scipy.linalg as la +import scipy.sparse as sparse import opinf @@ -110,6 +113,108 @@ def test_evaluate(self, r=9, m=4): ) assert np.allclose(op_mu.entries, Amu) + def test_galerkin(self, r=9, m=4): + """Test galerkin().""" + ncoeffs = len(self.thetas) + shape = self.entries_shape(r, m) + arrays = [np.random.random(shape) for _ in range(ncoeffs)] + op = self.OpClass(self.thetas, arrays) + + Vr = la.qr(np.random.random((r, r // 2)), mode="economic")[0] + Wr = la.qr(np.random.random((r, r // 2)), mode="economic")[0] + for testbasis in (None, Wr): + newop = op.galerkin(Vr, testbasis) + assert isinstance(newop, self.OpClass) + assert newop.state_dimension == r // 2 + + def test_opinf(self, s=10, k=15, r=11, m=3): + """Test operator_dimension() and datablock().""" + ncoeffs = len(self.thetas) + shape = self.entries_shape(r, m) + arrays = [np.random.random(shape) for _ in range(ncoeffs)] + op = self.OpClass(self.thetas, arrays) + + parameters = [np.random.random(self.p) for _ in range(s)] + states = np.random.random((s, r, k)) + inputs = np.random.random((s, m, k)) + + block = op.datablock(parameters, states, inputs) + dim = op.operator_dimension(s, r, m) + assert block.shape[0] == dim + assert block.shape[1] == s * k + + def test_copysaveload(self, r=10, m=2, target="_affinesavetest.h5"): + """Test copy(), save(), and load().""" + ncoeffs = len(self.thetas) + shape = self.entries_shape(r, m) + arrays = [np.random.random(shape) for _ in range(ncoeffs)] + + def sparsearray(A): + B = A.copy() + B[B < 0.9] = 0 + B = np.atleast_2d(B) + if B.shape[0] == 1: + B = B.T + return sparse.csr_array(B) + + sparrays = [sparsearray(A) for A in arrays] + + def 
_checksame(original, copied): + assert copied is not original + assert isinstance(copied, self.OpClass) + if original.entries is None: + assert copied.entries is None + elif isinstance(original.entries[0], np.ndarray): + for i, Ai in enumerate(copied.entries): + assert isinstance(Ai, np.ndarray) + assert np.all(Ai == original.entries[i]) + elif sparse.issparse(original.entries[0]): + for i, Ai in enumerate(copied.entries): + assert sparse.issparse(Ai) + assert (Ai - original.entries[i]).sum() == 0 + if (p := original.parameter_dimension) is not None: + assert copied.parameter_dimension == p + + # Test copy() without entries set. + op = self.OpClass(self.thetas) + _checksame(op, op.copy()) + + op.parameter_dimension = self.p + _checksame(op, op.copy()) + + # Test copy() with entries set. + op.set_entries(arrays) + _checksame(op, op.copy()) + + op.set_entries(sparrays) + _checksame(op, op.copy()) + + # Test save() and load() together. + + def _checkload(original): + if os.path.isfile(target): + os.remove(target) + original.save(target) + copied = self.OpClass.load(target, original.coefficient_functions) + return _checksame(original, copied) + + # Test save()/load() without entries set. + op = self.OpClass(self.thetas) + _checkload(op) + + op.parameter_dimension = self.p + _checkload(op) + + # Test save()/load() with entries set. 
+ op.set_entries(arrays) + _checkload(op) + + op.set_entries(sparrays) + _checkload(op) + + if os.path.isfile(target): + os.remove(target) + # Test public classes ========================================================= class TestAffineConstantOperator(_TestAffineOperator): From 3e5c69c7828edf1ba32be327d60dd4592d303e32 Mon Sep 17 00:00:00 2001 From: Shane Date: Thu, 22 Aug 2024 15:36:11 -0600 Subject: [PATCH 16/48] tests for save/load_sparray() --- src/opinf/utils/_hdf5.py | 4 ++-- tests/utils/test_hdf5.py | 27 +++++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/src/opinf/utils/_hdf5.py b/src/opinf/utils/_hdf5.py index 7234918f..5de10ff7 100644 --- a/src/opinf/utils/_hdf5.py +++ b/src/opinf/utils/_hdf5.py @@ -174,7 +174,7 @@ def save_sparray(group: h5py.Group, arr: sparse.sparray) -> None: (np.int32(4), np.int32(1)) 123.456 """ if not sparse.issparse(arr): - raise TypeError("first arg must be a scipy.sparse array") + raise TypeError("second arg must be a scipy.sparse array") # Convert to COO format and save data attributes. 
A = arr.tocoo() @@ -234,7 +234,7 @@ def load_sparray(group: h5py.Group) -> sparse.sparray: """ A = sparse.coo_matrix( (group["data"], (group["row"], group["col"])), - group.attrs["shape"], + shape=group.attrs["shape"], ) arrtype = str(group.attrs["arrtype"]) return getattr(A, f"to{arrtype}")() diff --git a/tests/utils/test_hdf5.py b/tests/utils/test_hdf5.py index 3a1a5f23..cb44de97 100644 --- a/tests/utils/test_hdf5.py +++ b/tests/utils/test_hdf5.py @@ -5,6 +5,8 @@ import h5py import pytest import warnings +import numpy as np +import scipy.sparse as sparse import opinf @@ -184,3 +186,28 @@ class DummyWarning(Warning): with subject(target): pass assert ex.value.args[0] == target + + +def test_saveload_sparray(n=100, target="_saveloadsparraytest.h5"): + """Test save_sparray() and load_sparray().""" + + with pytest.raises(TypeError) as ex: + opinf.utils.save_sparray(None, None) + assert ex.value.args[0] == "second arg must be a scipy.sparse array" + + A = sparse.dok_array((n, n), dtype=float) + for _ in range(n // 10): + i, j = np.random.randint(0, n, size=2) + A[i, j] = np.random.random() + + if os.path.isfile(target): + os.remove(target) + + with h5py.File(target, "w") as hf: + opinf.utils.save_sparray(hf.create_group("sparsearray"), A) + + with h5py.File(target, "r") as hf: + B = opinf.utils.load_sparray(hf["sparsearray"]) + + diff = np.abs((A - B).data) + assert np.allclose(diff, 0) From ed57381da9b7faadc6228fa7667112f2ad64b1de Mon Sep 17 00:00:00 2001 From: Shane Date: Thu, 22 Aug 2024 17:13:30 -0600 Subject: [PATCH 17/48] tests and fixes for timed_block --- src/opinf/utils/_timer.py | 141 +++++++++++++++++++++++++++++--------- tests/utils/test_hdf5.py | 2 + tests/utils/test_timer.py | 83 ++++++++++++++++++++++ 3 files changed, 192 insertions(+), 34 deletions(-) create mode 100644 tests/utils/test_timer.py diff --git a/src/opinf/utils/_timer.py b/src/opinf/utils/_timer.py index 0eab2d4c..614cc958 100644 --- a/src/opinf/utils/_timer.py +++ 
b/src/opinf/utils/_timer.py @@ -1,14 +1,18 @@ # utils/_timer.py """Context manager for timing blocks of code.""" +__all__ = [ + "timed_block", +] +import os import time import signal import logging class timed_block: - """Context manager for timing a block of code and reporting the timing. + r"""Context manager for timing a block of code and reporting the timing. **WARNING**: this context manager may only function on Linux/Unix machines (Windows is not supported). @@ -17,68 +21,110 @@ class timed_block: ---------- message : str Message to log / print. - timelimit : float + timelimit : int Number of seconds to wait before raising an error. + Floats are rounded down to the nearest integer. Examples -------- - >>> with timed_block("This is a test"): + >>> import time + >>> import opinf + + >>> with opinf.utils.timed_block("This is a test"): ... # Code to be timed ... time.sleep(2) - ... This is a test...done in 2.00 s. - >>> with timed_block("Another test", timelimit=3): + >>> with opinf.utils.timed_block("Another test", timelimit=3): ... # Code to be timed and halted within the specified time limit. ... i = 0 ... while True: ... i += 1 - Another test...TIMED OUT after 3.00 s. + Another test... + TimeoutError: TIMED OUT after 3.00s. + + # Set up a logfile to record messages to. + >>> opinf.utils.timed_block.setup_logfile("log.log") + Logging to '/path/to/current/folder/log.log' + + # timed_block() will now write to the log file as well as print to screen. + >>> with opinf.utils.timed_block("logfile test"): + ... time.sleep(1) + logfile test...done in 1.00 s. + + >>> with open("log.log", "r") as infile: + ... print(infile.read().strip()) + INFO: logfile test...done in 1.001150 s. + + # Turn off print statements (but keep logging). + >>> opinf.utils.timed_block.verbose = False + >>> with opinf.utils.timed_block("not printed to the screen"): + ... time.sleep(1) + >>> with open("log.log", "r") as infile: + ... 
print(infile.read().strip()) + INFO: logfile test...done in 1.001150 s. + INFO: not printed to the screen...done in 1.002232 s. + + # Capture the time elapsed for later use. + >>> with opinf.utils.timed_block("how long?") as timer: + ... time.sleep(2) + >>> timer.elapsed + 2.002866268157959 """ verbose = True - @staticmethod - def _signal_handler(signum, frame): - raise TimeoutError("timed out!") + formatter = logging.Formatter( + fmt="%(asctime)s %(levelname)s:\t%(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + ) + + def __init__(self, message: str, timelimit: int = None): + """Store print/log message.""" + self.__front = "\n" if message.endswith("\n") else "" + self.message = message.rstrip() + self.__back = "\n" if "\r" not in message else "" + if timelimit is not None: + timelimit = max(int(timelimit), 1) + self.__timelimit = timelimit + self.__elapsed = None @property def timelimit(self): """Time limit (in seconds) for the block to complete.""" - return self._timelimit + return self.__timelimit - def __init__(self, message, timelimit=None): - """Store print/log message.""" - self._frontend = "\n" if message.endswith("\n") else "" - self.message = message.rstrip() - self._backend = "\n" if "\r" not in message else "" - self._timelimit = timelimit + @property + def elapsed(self): + """Actual time (in seconds) the block took to complete.""" + return self.__elapsed + + @staticmethod + def _signal_handler(signum, frame): + raise TimeoutError("timed out!") def __enter__(self): """Print the message and record the current time.""" if self.verbose: - print(f"{self.message}...", end=self._frontend, flush=True) + print(f"{self.message}...", end=self.__front, flush=True) self._tic = time.time() - if self._timelimit is not None: + if self.timelimit is not None: signal.signal(signal.SIGALRM, self._signal_handler) - signal.alarm(self._timelimit) + signal.alarm(self.timelimit) return self def __exit__(self, exc_type, exc_value, exc_traceback): """Calculate and report the elapsed 
time.""" self._toc = time.time() - if self._timelimit is not None: + if self.timelimit is not None: signal.alarm(0) elapsed = self._toc - self._tic if exc_type: # Report an exception if present. - if self._timelimit is not None and exc_type is TimeoutError: - print( - f"TIMED OUT after {elapsed:.2f} s.", - flush=True, - end=self._backend, - ) - logging.info(f"TIMED OUT after {elapsed:.2f} s.") - raise + if self.timelimit is not None and exc_type is TimeoutError: + print(flush=True) + report = f"TIMED OUT after {elapsed:.2f} s." + logging.info(f"{self.message}...{report}") + raise TimeoutError(report) print(f"{exc_type.__name__}: {exc_value}") logging.info(self.message) logging.error( @@ -88,11 +134,38 @@ def __exit__(self, exc_type, exc_value, exc_traceback): raise else: # If no exception, report execution time. if self.verbose: - print( - f"done in {elapsed:.2f} s.", - flush=True, - end=self._backend, - ) + print(f"done in {elapsed:.2f} s.", flush=True, end=self.__back) logging.info(f"{self.message}...done in {elapsed:.6f} s.") - self.elapsed = elapsed + self.__elapsed = elapsed return + + @classmethod + def add_logfile(cls, logfile: str = "log.log") -> None: + """Instruct :class:`timed_block` to log messages to the ``logfile``. + + Parameters + ---------- + logfile : str + File to log to. + """ + logger = logging.getLogger() + logpath = os.path.abspath(logfile) + + # Check that we aren't already logging to this file. + for handler in logger.handlers: + if ( + isinstance(handler, logging.FileHandler) + and os.path.abspath(handler.baseFilename) == logpath + ): + if cls.verbose: + print(f"Already logging to {logpath}") + return + + # Add a new handler for this file. 
+ newhandler = logging.FileHandler(logpath, "a") + newhandler.setFormatter(cls.formatter) + newhandler.setLevel(logging.INFO) + logger.setLevel(logging.INFO) + logger.addHandler(newhandler) + if cls.verbose: + print(f"Logging to '{os.path.abspath(logfile)}'") diff --git a/tests/utils/test_hdf5.py b/tests/utils/test_hdf5.py index cb44de97..21558b71 100644 --- a/tests/utils/test_hdf5.py +++ b/tests/utils/test_hdf5.py @@ -211,3 +211,5 @@ def test_saveload_sparray(n=100, target="_saveloadsparraytest.h5"): diff = np.abs((A - B).data) assert np.allclose(diff, 0) + + os.remove(target) diff --git a/tests/utils/test_timer.py b/tests/utils/test_timer.py new file mode 100644 index 00000000..7057e2cd --- /dev/null +++ b/tests/utils/test_timer.py @@ -0,0 +1,83 @@ +# utils/test_timer.py +"""Tests for utils._timer.""" + +import os +import time +import pytest + +import opinf + + +def test_timed_block(message="timed_block test", target="_timedblocktest.log"): + """Test timed_block context manager.""" + Timer = opinf.utils.timed_block + if os.path.isfile(target): + os.remove(target) + + with Timer(message) as obj: + pass + assert obj.message == message + assert obj.timelimit is None + assert isinstance(obj.elapsed, float) + + with Timer(message, timelimit=100) as obj: + pass + assert obj.message == message + + with pytest.raises(TimeoutError) as ex: + with Timer(message, timelimit=1): + time.sleep(10) + assert ex.value.args[0].startswith("TIMED OUT after ") + + class MyException(Exception): + pass + + with pytest.raises(MyException) as ex: + with Timer(message): + raise MyException("failure in the block") + assert ex.value.args[0] == "failure in the block" + + # Set up a log file. + Timer.add_logfile(target) + + # See if we write to the log file. 
+ with Timer(message, timelimit=100) as obj: + pass + + assert os.path.isfile(target) + with open(target, "r") as infile: + text = infile.read().strip() + assert text.count(message) == 1 + + with pytest.raises(TimeoutError) as ex: + with Timer(message, timelimit=1): + time.sleep(10) + assert ex.value.args[0].startswith("TIMED OUT after ") + + with open(target, "r") as infile: + text = infile.read().strip() + assert text.count(message) == 2 + assert text.count("TIMED OUT after ") == 1 + + with pytest.raises(MyException) as ex: + with Timer(message): + raise MyException("failure in the block") + assert ex.value.args[0] == "failure in the block" + + # Log to the same file. + newmessage = f"{message} AGAIN!" + Timer.add_logfile(target) + + # Log to another file. + newtarget = f"_{target}" + if os.path.isfile(newtarget): + os.remove(newtarget) + + Timer.add_logfile(newtarget) + with Timer(newmessage) as obj: + pass + for tfile in target, newtarget: + with open(tfile, "r") as infile: + text = infile.read().strip() + assert text.count(newmessage) == 1 + os.remove(tfile) From 2c15a6be639110558758c245ab3790a79b4c02d8 Mon Sep 17 00:00:00 2001 From: Shane Date: Thu, 29 Aug 2024 15:52:41 -0600 Subject: [PATCH 18/48] affine operators take in single callable, test coverage --- src/opinf/operators/_affine.py | 533 +++++++++++++-------------------- tests/operators/test_affine.py | 162 ++++++++-- 2 files changed, 332 insertions(+), 363 deletions(-) diff --git a/src/opinf/operators/_affine.py b/src/opinf/operators/_affine.py index a594d36a..6eec9698 100644 --- a/src/opinf/operators/_affine.py +++ b/src/opinf/operators/_affine.py @@ -13,10 +13,11 @@ ] import h5py +import warnings import numpy as np import scipy.sparse as sparse -from .. import utils +from .. 
import errors, utils from ._base import ParametricOpInfOperator, InputMixin from ._nonparametric import ( ConstantOperator, @@ -28,6 +29,32 @@ ) +# Helper functions ============================================================ +def _identity(x): + """Identity function.""" + return x + + +def _is_iterable(obj): + """Return True if obj is iterable, False, else.""" + try: + iter(obj) + return True + except TypeError: + return False + + +def _vectorizer(functions): + """Translate a tuple of functions into a ndarray-valued function.""" + if any(not callable(func) for func in functions): + raise TypeError("if 'coeffs' is iterable each entry must be callable") + + def _vectorized(parameter): + return np.array([func(parameter) for func in functions]) + + return _vectorized + + # Base class ================================================================== class _AffineOperator(ParametricOpInfOperator): r"""Base class for parametric operators where the parameter dependence @@ -50,96 +77,105 @@ class _AffineOperator(ParametricOpInfOperator): Parameters ---------- - coefficient_functions : (iterable of callables) or int - Scalar-valued coefficient functions for each term of the affine - expansion, i.e., - :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}.` - If an integer :math:`p` is provided, set :math:`A_{\ell} = p` and - define :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. - entries : list of ndarrays, or None + coeffs : callable, (iterable of callables), or int + Coefficient functions for the terms of the affine expansion. + + * If callable, it should receive a parameter vector + :math:`\bfmu` and return the vector of affine coefficients, + :math:`[~\theta_{\ell}^{(0)}(\bfmu) + ~~\cdots~~\theta_{\ell}^{(A_{\ell}-1)}(\bfmu)~]\trp`. + In this case, ``nterms`` is a required argument. + * If an iterable, each entry should be a callable representing a + single affine coefficient function :math:`\theta_{\ell}^{(a)}`. 
+ * If an integer :math:`p`, set :math:`A_{\ell} = p` and define + :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. This is equivalent to + using ``coeffs=lambda mu: mu``, except the parameter dimension is + also captured and ``nterms`` is not required. + nterms : int or None + Number of terms :math:`A_{\ell}` in the affine expansion. + Only required if ``coeffs`` is provided as a callable. + entries : (list of ndarrays), ndarray, or None Operator matrices for each term of the affine expansion, i.e., :math:`\Ohat_{\ell}^{(0)},\ldots,\Ohat_{\ell}^{(A_{\ell}-1)}.` - If not provided in the constructor, use :meth:`set_entries` later. + If not provided in the constructor, use :meth:`set_entries()` later. fromblock : bool If ``True``, interpret ``entries`` as a horizontal concatenation of arrays; if ``False`` (default), interpret ``entries`` as a list of arrays. - - Warnings - -------- - A common choice for the ``coefficient_functions`` is for the :math:`i`-th - coefficient function to return the :math:`i`-th component of the parameter - vector, i.e., :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_{i}`. The following - implementation for this choice results in a subtle but serious error: - - .. code-block:: python - - coefficient_functions = [lambda mu: mu[i] for i in range(nterms)] - - Due to the late binding behavior of closures in Python, the ``lambda`` - functions do not capture the value of the variable ``i`` at each iteration. - When any of the ``lambda`` functions are called, they use the value of - ``i`` at the time of the call, which, after the loop, is always - ``nterms - 1``. To avoid this issue, use ``coefficient_functions=p`` in the - constructor, where ``p`` is the dimension of the parameter vector. For - related scenarios, avoid this pitfall by writing `lambda` function with - the index given explicitly, or by using a function factory. - - .. code-block:: python - - coefficient_functions = [ - lambda mu: mu[0], - lambda mu: mu[1], - lambda mu: mu[2], - # ... 
- ] - - # Alternatively, define a function factory. - def coeff_function(i : int): - return lambda mu: mu[i] - coefficient_functions = [coeff_function(i) for i in range(nterms)] """ # Initialization ---------------------------------------------------------- def __init__( self, - coefficient_functions, + coeffs, + nterms: int = None, entries=None, fromblock: bool = False, ): """Set coefficient functions and (if given) operator matrices.""" ParametricOpInfOperator.__init__(self) - - # Shortcut: theta[i](mu) = mu[i]. - if isinstance(coefficient_functions, int): - self.parameter_dimension = coefficient_functions - - def componentgetter(i: int): - """Make a function that returns the i-th value of its input.""" - return lambda mu: mu[i] - - coefficient_functions = [ - componentgetter(i) for i in range(self.parameter_dimension) - ] - - # Ensure that the coefficient functions are callable. - if any(not callable(theta) for theta in coefficient_functions): + if nterms is not None and (not isinstance(nterms, int) or nterms < 1): raise TypeError( - "coefficient_functions must be collection of callables" + "when provided, argument 'nterms' must be a positive integer" ) - self.__thetas = tuple(coefficient_functions) + self.__nterms = nterms + + # Parse the coefficient functions. 
+ if isinstance(coeffs, int) and coeffs > 0: + if nterms is not None and nterms != coeffs: + warnings.warn( + f"{coeffs} = coeffs != nterms = {nterms}, ignoring " + f"argument 'nterms' and setting nterms = {coeffs}", + errors.OpInfWarning, + ) + self.__nterms = coeffs + self.parameter_dimension = coeffs + coeffs = _identity + if not callable(coeffs): + if not _is_iterable(coeffs): + raise TypeError( + "argument 'coeffs' must be " + "callable, iterable, or a positive int" + ) + A_ell = len(coeffs) + if nterms is not None and nterms != A_ell: + warnings.warn( + f"{A_ell} = len(coeffs) != nterms = {nterms}, ignoring " + f"argument 'nterms' and setting nterms = {A_ell}", + errors.OpInfWarning, + ) + self.__nterms = A_ell + coeffs = _vectorizer(coeffs) + if self.__nterms is None: + raise ValueError( + "argument 'nterms' required when argument 'coeffs' is callable" + ) + self.__thetas = coeffs if entries is not None: self.set_entries(entries, fromblock=fromblock) # Properties -------------------------------------------------------------- - @property - def coefficient_functions(self) -> tuple: - r"""Scalar-valued coefficient functions for each term of the affine - expansion, i.e., - :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}.` + def coeffs(self, parameter): + r"""Evaluate the coefficient functions for each term of the affine + expansion for a given parameter vector. + + This method represents the vector-valued function + :math:`\boldsymbol{\theta}_{\ell} : \RR^{p} \to \RR^{A_{\ell}}` + given by :math:`\boldsymbol{\theta}_{\ell}(\bmfu) = [~ + \theta_{\ell}^{(0)}~~\cdots~~\theta_{\ell}^{(A_{\ell}-1)}~]\trp.` + + Parameters + ---------- + parameter : (p,) ndarray + Parameter vector to evaluate. + + Returns + ------- + coefficients : (nterms,) ndarray + Coefficients of the affine expansion at the given ``parameter``. 
""" - return self.__thetas + return self.__thetas(parameter) @property def entries(self) -> list: @@ -151,7 +187,7 @@ def entries(self) -> list: @property def nterms(self): r"""Number of terms :math:`A_{\ell}` in the affine expansion.""" - return len(self.coefficient_functions) + return self.__nterms def set_entries(self, entries, fromblock: bool = False) -> None: r"""Set the operator matrices for each term of the affine expansion. @@ -182,7 +218,7 @@ def set_entries(self, entries, fromblock: bool = False) -> None: self._check_shape_consistency(entries, "entries") if (n_arrays := len(entries)) != nterms: raise ValueError( - f"{nterms} = len(coefficient_functions) " + f"{nterms} = number of affine expansion terms " f"!= len(entries) = {n_arrays}" ) @@ -209,12 +245,8 @@ def evaluate(self, parameter): if self.parameter_dimension is None: self._set_parameter_dimension_from_values([parameter]) self._check_parametervalue_dimension(parameter) - entries = sum( - [ - theta(parameter) * A - for theta, A in zip(self.coefficient_functions, self.entries) - ] - ) + thetamus = self.coeffs(parameter) + entries = sum([tm * A for tm, A in zip(thetamus, self.entries)]) return self.OperatorClass(entries) # Dimensionality reduction ------------------------------------------------ @@ -268,7 +300,8 @@ def galerkin(self, Vr, Wr=None): New object of the same class as ``self``. 
""" return self.__class__( - coefficient_functions=self.coefficient_functions, + coeffs=self.coeffs, + nterms=self.nterms, entries=[ self.OperatorClass(A).galerkin(Vr, Wr).entries for A in self.entries @@ -368,11 +401,8 @@ def datablock(self, parameters, states, inputs=None) -> np.ndarray: blockcolumns = [] for mu, Q, U in zip(parameters, states, inputs): Di = self.OperatorClass.datablock(Q, U) - blockcolumns.append( - np.vstack( - [theta(mu) * Di for theta in self.coefficient_functions] - ) - ) + theta_mus = self.coeffs(mu) + blockcolumns.append(np.vstack([theta * Di for theta in theta_mus])) return np.hstack(blockcolumns) # Model persistence ------------------------------------------------------- @@ -384,7 +414,8 @@ def copy(self): if self.entries is not None: As = [A.copy() for A in self.entries] op = self.__class__( - coefficient_functions=self.coefficient_functions, + coeffs=self.__thetas, + nterms=self.nterms, entries=As, fromblock=False, ) @@ -395,7 +426,7 @@ def copy(self): def save(self, savefile: str, overwrite: bool = False) -> None: """Save the operator to an HDF5 file. - Since the :attr:`coefficient_functions` are callables, they cannot be + Since the :attr:`coeffs` are callables, they cannot be serialized, and are therefore an argument to :meth:`load()`. Parameters @@ -411,6 +442,7 @@ def save(self, savefile: str, overwrite: bool = False) -> None: meta.attrs["class"] = self.__class__.__name__ if (p := self.parameter_dimension) is not None: meta.attrs["parameter_dimension"] = p + meta.attrs["nterms"] = self.nterms if self.entries is not None: group = hf.create_group("entries") for i, Ai in enumerate(self.entries): @@ -421,14 +453,14 @@ def save(self, savefile: str, overwrite: bool = False) -> None: group.create_dataset(name, data=Ai) @classmethod - def load(cls, loadfile: str, coefficient_functions): + def load(cls, loadfile: str, coeffs): """Load an affine parametric operator from an HDF5 file. 
Parameters ---------- loadfile : str Path to the file where the operator was stored via :meth:`save()`. - coefficient_functions : iterable of callables + coeffs : iterable of callables Scalar-valued coefficient functions for each term of the affine expansion. Returns @@ -443,6 +475,7 @@ def load(cls, loadfile: str, coefficient_functions): f"file '{loadfile}' contains '{ClassName}' " f"object, use '{ClassName}.load()'" ) + nterms = int(hf["meta"].attrs["nterms"]) entries = None if "entries" in hf: @@ -455,11 +488,7 @@ def load(cls, loadfile: str, coefficient_functions): else: entries.append(utils.load_sparray(obj)) - op = cls( - coefficient_functions=coefficient_functions, - entries=entries, - fromblock=False, - ) + op = cls(coeffs, nterms=nterms, entries=entries, fromblock=False) if (key := "parameter_dimension") in hf["meta"].attrs: op.parameter_dimension = int(hf["meta"].attrs[key]) @@ -480,12 +509,20 @@ class AffineConstantOperator(_AffineOperator): Parameters ---------- - coefficient_functions : (iterable of callables) or int - Scalar-valued coefficient functions for each term of the affine - expansion, i.e., - :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}.` - If an integer :math:`p` is provided, set :math:`A_{\ell} = p` and - define :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. + coeffs : callable, (iterable of callables), or int + Coefficient functions for the terms of the affine expansion. + + * If callable, it should receive a parameter vector + :math:`\bfmu` and return the vector of affine coefficients, + :math:`[~\theta_{\ell}^{(0)}(\bfmu) + ~~\cdots~~\theta_{\ell}^{(A_{\ell}-1)}(\bfmu)~]\trp`. + In this case, ``nterms`` is a required argument. + * If an iterable, each entry should be a callable representing a + single affine coefficient function :math:`\theta_{\ell}^{(a)}`. + * If an integer :math:`p`, set :math:`A_{\ell} = p` and define + :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. 
This is equivalent to + using ``coeffs=lambda mu: mu``, except the parameter dimension is + also captured and ``nterms`` is not required. entries : list of ndarrays, or None Operator matrices for each term of the affine expansion, i.e., :math:`\Ohat_{\ell}^{(0)},\ldots,\Ohat_{\ell}^{(A_{\ell}-1)}.` @@ -494,40 +531,6 @@ class AffineConstantOperator(_AffineOperator): If ``True``, interpret ``entries`` as a horizontal concatenation of arrays; if ``False`` (default), interpret ``entries`` as a list of arrays. - - Warnings - -------- - A common choice for the ``coefficient_functions`` is for the :math:`i`-th - coefficient function to return the :math:`i`-th component of the parameter - vector, i.e., :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_{i}`. The following - implementation for this choice results in a subtle but serious error: - - .. code-block:: python - - coefficient_functions = [lambda mu: mu[i] for i in range(nterms)] - - Due to the late binding behavior of closures in Python, the ``lambda`` - functions do not capture the value of the variable ``i`` at each iteration. - When any of the ``lambda`` functions are called, they use the value of - ``i`` at the time of the call, which, after the loop, is always - ``nterms - 1``. To avoid this issue, use ``coefficient_functions=p`` in the - constructor, where ``p`` is the dimension of the parameter vector. For - related scenarios, avoid this pitfall by writing `lambda` function with - the index given explicitly, or by using a function factory. - - .. code-block:: python - - coefficient_functions = [ - lambda mu: mu[0], - lambda mu: mu[1], - lambda mu: mu[2], - # ... - ] - - # Alternatively, define a function factory. 
- def coeff_function(i : int): - return lambda mu: mu[i] - coefficient_functions = [coeff_function(i) for i in range(nterms)] """ _OperatorClass = ConstantOperator @@ -547,12 +550,20 @@ class AffineLinearOperator(_AffineOperator): Parameters ---------- - coefficient_functions : (iterable of callables) or int - Scalar-valued coefficient functions for each term of the affine - expansion, i.e., - :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}.` - If an integer :math:`p` is provided, set :math:`A_{\ell} = p` and - define :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. + coeffs : callable, (iterable of callables), or int + Coefficient functions for the terms of the affine expansion. + + * If callable, it should receive a parameter vector + :math:`\bfmu` and return the vector of affine coefficients, + :math:`[~\theta_{\ell}^{(0)}(\bfmu) + ~~\cdots~~\theta_{\ell}^{(A_{\ell}-1)}(\bfmu)~]\trp`. + In this case, ``nterms`` is a required argument. + * If an iterable, each entry should be a callable representing a + single affine coefficient function :math:`\theta_{\ell}^{(a)}`. + * If an integer :math:`p`, set :math:`A_{\ell} = p` and define + :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. This is equivalent to + using ``coeffs=lambda mu: mu``, except the parameter dimension is + also captured and ``nterms`` is not required. entries : list of ndarrays, or None Operator matrices for each term of the affine expansion, i.e., :math:`\Ahat_{\ell}^{(0)},\ldots,\Ahat_{\ell}^{(A_{\ell}-1)}.` @@ -561,40 +572,6 @@ class AffineLinearOperator(_AffineOperator): If ``True``, interpret ``entries`` as a horizontal concatenation of arrays; if ``False`` (default), interpret ``entries`` as a list of arrays. - - Warnings - -------- - A common choice for the ``coefficient_functions`` is for the :math:`i`-th - coefficient function to return the :math:`i`-th component of the parameter - vector, i.e., :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_{i}`. 
The following - implementation for this choice results in a subtle but serious error: - - .. code-block:: python - - coefficient_functions = [lambda mu: mu[i] for i in range(nterms)] - - Due to the late binding behavior of closures in Python, the ``lambda`` - functions do not capture the value of the variable ``i`` at each iteration. - When any of the ``lambda`` functions are called, they use the value of - ``i`` at the time of the call, which, after the loop, is always - ``nterms - 1``. To avoid this issue, use ``coefficient_functions=p`` in the - constructor, where ``p`` is the dimension of the parameter vector. For - related scenarios, avoid this pitfall by writing `lambda` function with - the index given explicitly, or by using a function factory. - - .. code-block:: python - - coefficient_functions = [ - lambda mu: mu[0], - lambda mu: mu[1], - lambda mu: mu[2], - # ... - ] - - # Alternatively, define a function factory. - def coeff_function(i : int): - return lambda mu: mu[i] - coefficient_functions = [coeff_function(i) for i in range(nterms)] """ _OperatorClass = LinearOperator @@ -614,12 +591,20 @@ class AffineQuadraticOperator(_AffineOperator): Parameters ---------- - coefficient_functions : (iterable of callables) or int - Scalar-valued coefficient functions for each term of the affine - expansion, i.e., - :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}.` - If an integer :math:`p` is provided, set :math:`A_{\ell} = p` and - define :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. + coeffs : callable, (iterable of callables), or int + Coefficient functions for the terms of the affine expansion. + + * If callable, it should receive a parameter vector + :math:`\bfmu` and return the vector of affine coefficients, + :math:`[~\theta_{\ell}^{(0)}(\bfmu) + ~~\cdots~~\theta_{\ell}^{(A_{\ell}-1)}(\bfmu)~]\trp`. + In this case, ``nterms`` is a required argument. 
+ * If an iterable, each entry should be a callable representing a + single affine coefficient function :math:`\theta_{\ell}^{(a)}`. + * If an integer :math:`p`, set :math:`A_{\ell} = p` and define + :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. This is equivalent to + using ``coeffs=lambda mu: mu``, except the parameter dimension is + also captured and ``nterms`` is not required. entries : list of ndarrays, or None Operator matrices for each term of the affine expansion, i.e., :math:`\Hhat_{\ell}^{(0)},\ldots,\Hhat_{\ell}^{(A_{\ell}-1)}.` @@ -628,40 +613,6 @@ class AffineQuadraticOperator(_AffineOperator): If ``True``, interpret ``entries`` as a horizontal concatenation of arrays; if ``False`` (default), interpret ``entries`` as a list of arrays. - - Warnings - -------- - A common choice for the ``coefficient_functions`` is for the :math:`i`-th - coefficient function to return the :math:`i`-th component of the parameter - vector, i.e., :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_{i}`. The following - implementation for this choice results in a subtle but serious error: - - .. code-block:: python - - coefficient_functions = [lambda mu: mu[i] for i in range(nterms)] - - Due to the late binding behavior of closures in Python, the ``lambda`` - functions do not capture the value of the variable ``i`` at each iteration. - When any of the ``lambda`` functions are called, they use the value of - ``i`` at the time of the call, which, after the loop, is always - ``nterms - 1``. To avoid this issue, use ``coefficient_functions=p`` in the - constructor, where ``p`` is the dimension of the parameter vector. For - related scenarios, avoid this pitfall by writing `lambda` function with - the index given explicitly, or by using a function factory. - - .. code-block:: python - - coefficient_functions = [ - lambda mu: mu[0], - lambda mu: mu[1], - lambda mu: mu[2], - # ... - ] - - # Alternatively, define a function factory. 
- def coeff_function(i : int): - return lambda mu: mu[i] - coefficient_functions = [coeff_function(i) for i in range(nterms)] """ _OperatorClass = QuadraticOperator @@ -681,12 +632,20 @@ class AffineCubicOperator(_AffineOperator): Parameters ---------- - coefficient_functions : (iterable of callables) or int - Scalar-valued coefficient functions for each term of the affine - expansion, i.e., - :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}.` - If an integer :math:`p` is provided, set :math:`A_{\ell} = p` and - define :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. + coeffs : callable, (iterable of callables), or int + Coefficient functions for the terms of the affine expansion. + + * If callable, it should receive a parameter vector + :math:`\bfmu` and return the vector of affine coefficients, + :math:`[~\theta_{\ell}^{(0)}(\bfmu) + ~~\cdots~~\theta_{\ell}^{(A_{\ell}-1)}(\bfmu)~]\trp`. + In this case, ``nterms`` is a required argument. + * If an iterable, each entry should be a callable representing a + single affine coefficient function :math:`\theta_{\ell}^{(a)}`. + * If an integer :math:`p`, set :math:`A_{\ell} = p` and define + :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. This is equivalent to + using ``coeffs=lambda mu: mu``, except the parameter dimension is + also captured and ``nterms`` is not required. entries : list of ndarrays, or None Operator matrices for each term of the affine expansion, i.e., :math:`\Ghat_{\ell}^{(0)},\ldots,\Ghat_{\ell}^{(A_{\ell}-1)}.` @@ -695,40 +654,6 @@ class AffineCubicOperator(_AffineOperator): If ``True``, interpret ``entries`` as a horizontal concatenation of arrays; if ``False`` (default), interpret ``entries`` as a list of arrays. - - Warnings - -------- - A common choice for the ``coefficient_functions`` is for the :math:`i`-th - coefficient function to return the :math:`i`-th component of the parameter - vector, i.e., :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_{i}`. 
The following - implementation for this choice results in a subtle but serious error: - - .. code-block:: python - - coefficient_functions = [lambda mu: mu[i] for i in range(nterms)] - - Due to the late binding behavior of closures in Python, the ``lambda`` - functions do not capture the value of the variable ``i`` at each iteration. - When any of the ``lambda`` functions are called, they use the value of - ``i`` at the time of the call, which, after the loop, is always - ``nterms - 1``. To avoid this issue, use ``coefficient_functions=p`` in the - constructor, where ``p`` is the dimension of the parameter vector. For - related scenarios, avoid this pitfall by writing `lambda` function with - the index given explicitly, or by using a function factory. - - .. code-block:: python - - coefficient_functions = [ - lambda mu: mu[0], - lambda mu: mu[1], - lambda mu: mu[2], - # ... - ] - - # Alternatively, define a function factory. - def coeff_function(i : int): - return lambda mu: mu[i] - coefficient_functions = [coeff_function(i) for i in range(nterms)] """ _OperatorClass = CubicOperator @@ -748,12 +673,20 @@ class AffineInputOperator(_AffineOperator, InputMixin): Parameters ---------- - coefficient_functions : (iterable of callables) or int - Scalar-valued coefficient functions for each term of the affine - expansion, i.e., - :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}.` - If an integer :math:`p` is provided, set :math:`A_{\ell} = p` and - define :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. + coeffs : callable, (iterable of callables), or int + Coefficient functions for the terms of the affine expansion. + + * If callable, it should receive a parameter vector + :math:`\bfmu` and return the vector of affine coefficients, + :math:`[~\theta_{\ell}^{(0)}(\bfmu) + ~~\cdots~~\theta_{\ell}^{(A_{\ell}-1)}(\bfmu)~]\trp`. + In this case, ``nterms`` is a required argument. 
+ * If an iterable, each entry should be a callable representing a + single affine coefficient function :math:`\theta_{\ell}^{(a)}`. + * If an integer :math:`p`, set :math:`A_{\ell} = p` and define + :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. This is equivalent to + using ``coeffs=lambda mu: mu``, except the parameter dimension is + also captured and ``nterms`` is not required. entries : list of ndarrays, or None Operator matrices for each term of the affine expansion, i.e., :math:`\Bhat_{\ell}^{(0)},\ldots,\Bhat_{\ell}^{(A_{\ell}-1)}.` @@ -762,40 +695,6 @@ class AffineInputOperator(_AffineOperator, InputMixin): If ``True``, interpret ``entries`` as a horizontal concatenation of arrays; if ``False`` (default), interpret ``entries`` as a list of arrays. - - Warnings - -------- - A common choice for the ``coefficient_functions`` is for the :math:`i`-th - coefficient function to return the :math:`i`-th component of the parameter - vector, i.e., :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_{i}`. The following - implementation for this choice results in a subtle but serious error: - - .. code-block:: python - - coefficient_functions = [lambda mu: mu[i] for i in range(nterms)] - - Due to the late binding behavior of closures in Python, the ``lambda`` - functions do not capture the value of the variable ``i`` at each iteration. - When any of the ``lambda`` functions are called, they use the value of - ``i`` at the time of the call, which, after the loop, is always - ``nterms - 1``. To avoid this issue, use ``coefficient_functions=p`` in the - constructor, where ``p`` is the dimension of the parameter vector. For - related scenarios, avoid this pitfall by writing `lambda` function with - the index given explicitly, or by using a function factory. - - .. code-block:: python - - coefficient_functions = [ - lambda mu: mu[0], - lambda mu: mu[1], - lambda mu: mu[2], - # ... - ] - - # Alternatively, define a function factory. 
- def coeff_function(i : int): - return lambda mu: mu[i] - coefficient_functions = [coeff_function(i) for i in range(nterms)] """ _OperatorClass = InputOperator @@ -820,12 +719,20 @@ class AffineStateInputOperator(_AffineOperator, InputMixin): Parameters ---------- - coefficient_functions : (iterable of callables) or int - Scalar-valued coefficient functions for each term of the affine - expansion, i.e., - :math:`\theta_{\ell}^{(0)},\ldots,\theta_{\ell}^{(A_{\ell}-1)}.` - If an integer :math:`p` is provided, set :math:`A_{\ell} = p` and - define :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. + coeffs : callable, (iterable of callables), or int + Coefficient functions for the terms of the affine expansion. + + * If callable, it should receive a parameter vector + :math:`\bfmu` and return the vector of affine coefficients, + :math:`[~\theta_{\ell}^{(0)}(\bfmu) + ~~\cdots~~\theta_{\ell}^{(A_{\ell}-1)}(\bfmu)~]\trp`. + In this case, ``nterms`` is a required argument. + * If an iterable, each entry should be a callable representing a + single affine coefficient function :math:`\theta_{\ell}^{(a)}`. + * If an integer :math:`p`, set :math:`A_{\ell} = p` and define + :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_i`. This is equivalent to + using ``coeffs=lambda mu: mu``, except the parameter dimension is + also captured and ``nterms`` is not required. entries : list of ndarrays, or None Operator matrices for each term of the affine expansion, i.e., :math:`\Nhat_{\ell}^{(0)},\ldots,\Nhat_{\ell}^{(A_{\ell}-1)}.` @@ -834,40 +741,6 @@ class AffineStateInputOperator(_AffineOperator, InputMixin): If ``True``, interpret ``entries`` as a horizontal concatenation of arrays; if ``False`` (default), interpret ``entries`` as a list of arrays. - - Warnings - -------- - A common choice for the ``coefficient_functions`` is for the :math:`i`-th - coefficient function to return the :math:`i`-th component of the parameter - vector, i.e., :math:`\theta_{\ell}^{(i)}\!(\bfmu) = \mu_{i}`. 
The following - implementation for this choice results in a subtle but serious error: - - .. code-block:: python - - coefficient_functions = [lambda mu: mu[i] for i in range(nterms)] - - Due to the late binding behavior of closures in Python, the ``lambda`` - functions do not capture the value of the variable ``i`` at each iteration. - When any of the ``lambda`` functions are called, they use the value of - ``i`` at the time of the call, which, after the loop, is always - ``nterms - 1``. To avoid this issue, use ``coefficient_functions=p`` in the - constructor, where ``p`` is the dimension of the parameter vector. For - related scenarios, avoid this pitfall by writing `lambda` function with - the index given explicitly, or by using a function factory. - - .. code-block:: python - - coefficient_functions = [ - lambda mu: mu[0], - lambda mu: mu[1], - lambda mu: mu[2], - # ... - ] - - # Alternatively, define a function factory. - def coeff_function(i : int): - return lambda mu: mu[i] - coefficient_functions = [coeff_function(i) for i in range(nterms)] """ _OperatorClass = StateInputOperator diff --git a/tests/operators/test_affine.py b/tests/operators/test_affine.py index 89c42be7..13824c4f 100644 --- a/tests/operators/test_affine.py +++ b/tests/operators/test_affine.py @@ -10,7 +10,9 @@ import opinf -_module = opinf.operators._affine + +_module = opinf.operators +_submodule = _module._affine class _TestAffineOperator: @@ -18,13 +20,17 @@ class _TestAffineOperator: OpClass = NotImplemented - thetas = [ + thetas1 = [ (lambda mu: mu[0]), (lambda mu: mu[1]), (lambda mu: mu[2]), (lambda mu: mu[1] * mu[2] ** 2), ] + @staticmethod + def thetas2(mu): + return np.array([mu[0], mu[1], mu[2], mu[1] * mu[2] ** 2]) + p = 3 @abc.abstractmethod @@ -34,41 +40,77 @@ def entries_shape(self, r, m): def test_init(self, p=6): """Test __init__() and properties.""" + # Bad input for coeffs. 
+ bad_thetas = 3.14159265358979 + with pytest.raises(TypeError) as ex: + self.OpClass(bad_thetas) + assert ex.value.args[0] == ( + "argument 'coeffs' must be callable, iterable, or a positive int" + ) bad_thetas = [1, 2, 3] with pytest.raises(TypeError) as ex: self.OpClass(bad_thetas) assert ex.value.args[0] == ( - "coefficient_functions must be collection of callables" + "if 'coeffs' is iterable each entry must be callable" + ) + ncoeffs = len(self.thetas1) + + # Bad input for nterms. + with pytest.raises(TypeError) as ex: + self.OpClass(None, -10) + assert ex.value.args[0] == ( + "when provided, argument 'nterms' must be a positive integer" ) - ncoeffs = len(self.thetas) - op = self.OpClass(self.thetas) + # coeffs as an iterable of callables. + with pytest.warns(opinf.errors.OpInfWarning) as wn: + op = self.OpClass(self.thetas1, 100) + assert len(wn) == 1 + assert wn[0].message.args[0] == ( + f"{ncoeffs} = len(coeffs) != nterms = 100, ignoring " + f"argument 'nterms' and setting nterms = {ncoeffs}" + ) + assert op.nterms == ncoeffs assert op.parameter_dimension is None assert op.entries is None - assert len(op.coefficient_functions) == ncoeffs - assert op.nterms == ncoeffs mu = np.random.random(ncoeffs) - for i in range(ncoeffs): - opimu = op.coefficient_functions[i](mu) - truth = self.thetas[i](mu) - assert opimu == truth + opmu = op.coeffs(mu) + assert all(opmu[i] == thta(mu) for i, thta in enumerate(self.thetas1)) - # Shortcut: coefficient_functions as an integer. - op = self.OpClass(p) - assert op.parameter_dimension == p + # coeffs as a single callable. 
+ with pytest.raises(ValueError) as ex: + self.OpClass(self.thetas2) + assert ex.value.args[0] == ( + "argument 'nterms' required when argument 'coeffs' is callable" + ) + op = self.OpClass(self.thetas2, nterms=ncoeffs) + assert op.parameter_dimension is None assert op.entries is None + mu = np.random.random(ncoeffs) + opmu = op.coeffs(mu) + assert all(opmu[i] == thta(mu) for i, thta in enumerate(self.thetas1)) + + # coeffs as an integer. + with pytest.warns(opinf.errors.OpInfWarning) as wn: + op = self.OpClass(p, p + 1) + assert len(wn) == 1 + assert wn[0].message.args[0] == ( + f"{p} = coeffs != nterms = {p + 1}, ignoring " + f"argument 'nterms' and setting nterms = {p}" + ) assert op.nterms == p + assert op.parameter_dimension == p + assert op.entries is None mu = np.random.random(p) - for i in range(p): - assert op.coefficient_functions[i](mu) == mu[i] + assert np.array_equal(op.coeffs(mu), mu) def test_entries(self, r=10, m=3): """Test set_entries() and entries property.""" - ncoeffs = len(self.thetas) + ncoeffs = len(self.thetas1) shape = self.entries_shape(r, m) arrays = [np.random.random(shape) for _ in range(ncoeffs)] - op = self.OpClass(self.thetas) + op = self.OpClass(self.thetas1) with pytest.raises(ValueError) as ex: op.set_entries(np.random.random((2, 3, 2)), fromblock=True) assert ex.value.args[0] == ( @@ -78,47 +120,52 @@ def test_entries(self, r=10, m=3): with pytest.raises(ValueError) as ex: op.set_entries(arrays[:-1]) assert ex.value.args[0] == ( - f"{ncoeffs} = len(coefficient_functions) " + f"{ncoeffs} = number of affine expansion terms " f"!= len(entries) = {ncoeffs - 1}" ) - op = self.OpClass(self.thetas) + op = self.OpClass(self.thetas2, ncoeffs) assert op.entries is None op.set_entries(arrays) for i in range(ncoeffs): assert np.all(op.entries[i] == arrays[i]) - op = self.OpClass(self.thetas, arrays) + op = self.OpClass(self.thetas1, entries=arrays) for i in range(ncoeffs): assert np.all(op.entries[i] == arrays[i]) - op = 
self.OpClass(self.thetas, np.hstack(arrays), fromblock=True) + op = self.OpClass( + self.thetas2, + ncoeffs, + entries=np.hstack(arrays), + fromblock=True, + ) for i in range(ncoeffs): assert np.all(op.entries[i] == arrays[i]) def test_evaluate(self, r=9, m=4): """Test evaluate().""" - ncoeffs = len(self.thetas) + ncoeffs = len(self.thetas1) shape = self.entries_shape(r, m) arrays = [np.random.random(shape) for _ in range(ncoeffs)] - op = self.OpClass(self.thetas, arrays) + op = self.OpClass(self.thetas1, entries=arrays) mu = np.random.random(self.p) op_mu = op.evaluate(mu) assert isinstance(op_mu, op.OperatorClass) assert op_mu.entries.shape == arrays[0].shape Amu = np.sum( - [theta(mu) * A for theta, A in zip(self.thetas, arrays)], + [theta(mu) * A for theta, A in zip(self.thetas1, arrays)], axis=0, ) assert np.allclose(op_mu.entries, Amu) def test_galerkin(self, r=9, m=4): """Test galerkin().""" - ncoeffs = len(self.thetas) + ncoeffs = len(self.thetas1) shape = self.entries_shape(r, m) arrays = [np.random.random(shape) for _ in range(ncoeffs)] - op = self.OpClass(self.thetas, arrays) + op = self.OpClass(self.thetas1, entries=arrays) Vr = la.qr(np.random.random((r, r // 2)), mode="economic")[0] Wr = la.qr(np.random.random((r, r // 2)), mode="economic")[0] @@ -129,10 +176,10 @@ def test_galerkin(self, r=9, m=4): def test_opinf(self, s=10, k=15, r=11, m=3): """Test operator_dimension() and datablock().""" - ncoeffs = len(self.thetas) + ncoeffs = len(self.thetas1) shape = self.entries_shape(r, m) arrays = [np.random.random(shape) for _ in range(ncoeffs)] - op = self.OpClass(self.thetas, arrays) + op = self.OpClass(self.thetas1, entries=arrays) parameters = [np.random.random(self.p) for _ in range(s)] states = np.random.random((s, r, k)) @@ -145,7 +192,7 @@ def test_opinf(self, s=10, k=15, r=11, m=3): def test_copysaveload(self, r=10, m=2, target="_affinesavetest.h5"): """Test copy(), save(), and load().""" - ncoeffs = len(self.thetas) + ncoeffs = len(self.thetas1) 
shape = self.entries_shape(r, m) arrays = [np.random.random(shape) for _ in range(ncoeffs)] @@ -176,7 +223,7 @@ def _checksame(original, copied): assert copied.parameter_dimension == p # Test copy() without entries set. - op = self.OpClass(self.thetas) + op = self.OpClass(self.thetas1) _checksame(op, op.copy()) op.parameter_dimension = self.p @@ -191,15 +238,26 @@ def _checksame(original, copied): # Test save() and load() together. + class Dummy(self.OpClass): + pass + + op = Dummy(self.thetas2, nterms=ncoeffs) + op.save(target, overwrite=True) + with pytest.raises(opinf.errors.LoadfileFormatError) as ex: + self.OpClass.load(target, self.thetas2) + assert ex.value.args[0] == ( + f"file '{target}' contains 'Dummy' object, use 'Dummy.load()'" + ) + def _checkload(original): if os.path.isfile(target): os.remove(target) original.save(target) - copied = self.OpClass.load(target, original.coefficient_functions) + copied = self.OpClass.load(target, original.coeffs) return _checksame(original, copied) # Test save()/load() without entries set. 
- op = self.OpClass(self.thetas) + op = self.OpClass(self.thetas1) _checkload(op) op.parameter_dimension = self.p @@ -266,6 +324,14 @@ class TestAffineInputOperator(_TestAffineOperator): def entries_shape(r, m): return (r, m) + def test_input_dimension(self, r=8, m=3, p=3): + """Test input_dimension.""" + Bs = [np.random.random((r, m)) for _ in range(p)] + op = self.OpClass(p) + assert op.input_dimension is None + op.set_entries(Bs) + assert op.input_dimension == m + class TestAffineStateInputOperator(_TestAffineOperator): OpClass = _module.AffineStateInputOperator @@ -273,3 +339,33 @@ class TestAffineStateInputOperator(_TestAffineOperator): @staticmethod def entries_shape(r, m): return (r, r * m) + + def test_input_dimension(self, r=7, m=4, p=5): + """Test input_dimension.""" + Ns = [np.random.random((r, r * m)) for _ in range(p)] + op = self.OpClass(p) + assert op.input_dimension is None + op.set_entries(Ns) + assert op.input_dimension == m + + +def test_is_affine(): + """Test operators._affine.is_interpolated().""" + + class Dummy(_submodule._AffineOperator): + pass + + op = Dummy(_TestAffineOperator.thetas1) + assert _submodule.is_affine(op) + assert not _submodule.is_affine(-2) + + +def test_nonparametric_to_affine(): + """Test operators._affine.nonparametric_to_affine().""" + + with pytest.raises(TypeError) as ex: + _submodule.nonparametric_to_affine(list) + assert ex.value.args[0] == "_AffineOperator for class 'list' not found" + + OpClass = _submodule.nonparametric_to_affine(opinf.operators.CubicOperator) + assert OpClass is opinf.operators.AffineCubicOperator From aa43e698aeb727ed278f5d7e258dfb03efb3c5d7 Mon Sep 17 00:00:00 2001 From: Shane Date: Thu, 29 Aug 2024 16:27:38 -0600 Subject: [PATCH 19/48] _AffineOperator.__str__(), doc typo fixes --- src/opinf/operators/_affine.py | 11 ++++++++--- tests/operators/test_affine.py | 2 ++ 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/src/opinf/operators/_affine.py b/src/opinf/operators/_affine.py 
index 6eec9698..09d00e5f 100644 --- a/src/opinf/operators/_affine.py +++ b/src/opinf/operators/_affine.py @@ -162,7 +162,7 @@ def coeffs(self, parameter): This method represents the vector-valued function :math:`\boldsymbol{\theta}_{\ell} : \RR^{p} \to \RR^{A_{\ell}}` - given by :math:`\boldsymbol{\theta}_{\ell}(\bmfu) = [~ + given by :math:`\boldsymbol{\theta}_{\ell}(\bfmu) = [~ \theta_{\ell}^{(0)}~~\cdots~~\theta_{\ell}^{(A_{\ell}-1)}~]\trp.` Parameters @@ -227,6 +227,11 @@ def set_entries(self, entries, fromblock: bool = False) -> None: [self.OperatorClass(A).entries for A in entries], ) + def __str__(self): + lines = ParametricOpInfOperator.__str__(self).split("\n") + lines.insert(-1, f" expansion terms: {self.nterms}") + return "\n".join(lines) + # Evaluation -------------------------------------------------------------- @utils.requires("entries") def evaluate(self, parameter): @@ -524,8 +529,8 @@ class AffineConstantOperator(_AffineOperator): using ``coeffs=lambda mu: mu``, except the parameter dimension is also captured and ``nterms`` is not required. entries : list of ndarrays, or None - Operator matrices for each term of the affine expansion, i.e., - :math:`\Ohat_{\ell}^{(0)},\ldots,\Ohat_{\ell}^{(A_{\ell}-1)}.` + Operator vectors for each term of the affine expansion, i.e., + :math:`\chat_{\ell}^{(0)},\ldots,\chat_{\ell}^{(A_{\ell}-1)}.` If not provided in the constructor, use :meth:`set_entries` later. 
fromblock : bool If ``True``, interpret ``entries`` as a horizontal concatenation diff --git a/tests/operators/test_affine.py b/tests/operators/test_affine.py index 13824c4f..4b3c4978 100644 --- a/tests/operators/test_affine.py +++ b/tests/operators/test_affine.py @@ -104,6 +104,8 @@ def test_init(self, p=6): mu = np.random.random(p) assert np.array_equal(op.coeffs(mu), mu) + assert repr(op).count(f"expansion terms: {p}") == 1 + def test_entries(self, r=10, m=3): """Test set_entries() and entries property.""" ncoeffs = len(self.thetas1) From 2e99935e130666a96668e074acb34b985e8500ab Mon Sep 17 00:00:00 2001 From: Shane Date: Thu, 29 Aug 2024 16:29:21 -0600 Subject: [PATCH 20/48] [Interpolated -> Interp]Operators big rename --- docs/source/api/missing.rst | 12 +- docs/source/api/operators.ipynb | 50 ++++---- src/opinf/models/mono/_parametric.py | 12 +- src/opinf/operators/_interpolate.py | 185 +++++++++++++++++++++++---- tests/models/mono/test_parametric.py | 72 +++++------ tests/operators/test_interpolate.py | 79 +++++++----- 6 files changed, 277 insertions(+), 133 deletions(-) diff --git a/docs/source/api/missing.rst b/docs/source/api/missing.rst index 7713747b..7aa0e3cd 100644 --- a/docs/source/api/missing.rst +++ b/docs/source/api/missing.rst @@ -113,12 +113,12 @@ operators.ipynb AffineCubicOperator AffineInputOperator AffineStateInputOperator - InterpolatedConstantOperator - InterpolatedLinearOperator - InterpolatedQuadraticOperator - InterpolatedCubicOperator - InterpolatedInputOperator - InterpolatedStateInputOperator + InterpConstantOperator + InterpLinearOperator + InterpQuadraticOperator + InterpCubicOperator + InterpInputOperator + InterpStateInputOperator has_inputs is_nonparametric is_parametric diff --git a/docs/source/api/operators.ipynb b/docs/source/api/operators.ipynb index 3729ca14..c3f0bcba 100644 --- a/docs/source/api/operators.ipynb +++ b/docs/source/api/operators.ipynb @@ -46,12 +46,12 @@ " AffineCubicOperator\n", " AffineInputOperator\n", " 
AffineStateInputOperator\n", - " InterpolatedConstantOperator\n", - " InterpolatedLinearOperator\n", - " InterpolatedQuadraticOperator\n", - " InterpolatedCubicOperator\n", - " InterpolatedInputOperator\n", - " InterpolatedStateInputOperator\n", + " InterpConstantOperator\n", + " InterpLinearOperator\n", + " InterpQuadraticOperator\n", + " InterpCubicOperator\n", + " InterpInputOperator\n", + " InterpStateInputOperator\n", "\n", "**Utilities**\n", "\n", @@ -1132,7 +1132,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Affine parametric operators are instantiated with a list of the coefficient functions $\\theta_{\\ell}^{(1)},\\ldots,\\theta_{\\ell}^{(A_{\\ell})}$ and with or without the operator matrices $\\Ohat_{\\ell}^{(1)},\\ldots,\\Ohat_{\\ell}^{(A_{\\ell})}$." + "Affine parametric operators are instantiated with a function $\\boldsymbol{theta}_{\\ell}(\\mu) = [~\\theta_{ell}^{(0)}(\\bfmu)~~\\cdots~~\\theta_{ell}^{(A_{\\ell}-1)}(\\bfmu)~]\\trp$ for the affine expansion coefficients, the number of terms $A_{\\ell}$ in the expansion, and with or without the operator matrices $\\Ohat_{\\ell}^{(1)},\\ldots,\\Ohat_{\\ell}^{(A_{\\ell})}$." 
] }, { @@ -1141,12 +1141,8 @@ "metadata": {}, "outputs": [], "source": [ - "thetas = (\n", - " lambda mu: mu[0],\n", - " lambda mu: mu[1] ** 2,\n", - ")\n", - "\n", - "A = opinf.operators.AffineLinearOperator(coefficient_functions=thetas)\n", + "thetas = lambda mu: np.array([mu[0], mu[1] ** 2])\n", + "A = opinf.operators.AffineLinearOperator(thetas, nterms=2)\n", "print(A)" ] }, @@ -1287,14 +1283,14 @@ "metadata": {}, "source": [ "(sec-operators-interpolated)=\n", - "### Interpolated Operators" + "### Interpolatory Operators" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Interpolated parametric OpInf operators define the parametric dependence of the operator matrix on $\\bfmu$ through elementwise interpolation.\n", + "Interpolatory parametric OpInf operators define the parametric dependence of the operator matrix on $\\bfmu$ through elementwise interpolation.\n", "That is,\n", "\n", "$$\n", @@ -1312,7 +1308,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Available interpolated operators are listed below.\n", + "Available interpolatory operators are listed below.\n", "\n", "```{eval-rst}\n", ".. currentmodule:: opinf.operators\n", @@ -1320,12 +1316,12 @@ ".. autosummary::\n", " :nosignatures:\n", "\n", - " InterpolatedConstantOperator\n", - " InterpolatedLinearOperator\n", - " InterpolatedQuadraticOperator\n", - " InterpolatedCubicOperator\n", - " InterpolatedInputOperator\n", - " InterpolatedStateInputOperator\n", + " InterpConstantOperator\n", + " InterpLinearOperator\n", + " InterpQuadraticOperator\n", + " InterpCubicOperator\n", + " InterpInputOperator\n", + " InterpStateInputOperator\n", "```" ] }, @@ -1333,7 +1329,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Interpolated operators can be instantiated with no arguments." + "Interpolatory operators can be instantiated with no arguments." 
] }, { @@ -1342,7 +1338,7 @@ "metadata": {}, "outputs": [], "source": [ - "B = opinf.operators.InterpolatedInputOperator()\n", + "B = opinf.operators.InterpInputOperator()\n", "print(B)" ] }, @@ -1388,7 +1384,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - ":::{dropdown} Operator Inference for Interpolated Operators\n", + ":::{dropdown} Operator Inference for Interpolatory Operators\n", "\n", "Consider a model with a single affine operator,\n", "\n", @@ -1431,8 +1427,8 @@ "\\end{aligned}\n", "$$\n", "\n", - "The InterpolatedModel classes represent models comprised solely of interpolated operators.\n", - "If interpolated operators are mixed with other operators (nonparametric or affine parametric), the $\\Ohat\\D\\trp$ block of the problem for the interpolated operator is included as follows:\n", + "The InterpolatedModel classes represent models comprised solely of interpolatory operators.\n", + "If interpolatory operators are mixed with other operators (nonparametric or affine parametric), the $\\Ohat\\D\\trp$ block of the problem for the interpolatory operator is included as follows:\n", "\n", "$$\n", "\\begin{aligned}\n", diff --git a/src/opinf/models/mono/_parametric.py b/src/opinf/models/mono/_parametric.py index 1487d2c1..f8845dbd 100644 --- a/src/opinf/models/mono/_parametric.py +++ b/src/opinf/models/mono/_parametric.py @@ -1124,12 +1124,12 @@ def set_interpolator(self, InterpolatorClass): # Properties: operators --------------------------------------------------- _operator_abbreviations = { - "c": _operators.InterpolatedConstantOperator, - "A": _operators.InterpolatedLinearOperator, - "H": _operators.InterpolatedQuadraticOperator, - "G": _operators.InterpolatedCubicOperator, - "B": _operators.InterpolatedInputOperator, - "N": _operators.InterpolatedStateInputOperator, + "c": _operators.InterpConstantOperator, + "A": _operators.InterpLinearOperator, + "H": _operators.InterpQuadraticOperator, + "G": _operators.InterpCubicOperator, + "B": 
_operators.InterpInputOperator, + "N": _operators.InterpStateInputOperator, } def _isvalidoperator(self, op): diff --git a/src/opinf/operators/_interpolate.py b/src/opinf/operators/_interpolate.py index 0fc99ed0..2b6d5a86 100644 --- a/src/opinf/operators/_interpolate.py +++ b/src/opinf/operators/_interpolate.py @@ -4,12 +4,12 @@ """ __all__ = [ - "InterpolatedConstantOperator", - "InterpolatedLinearOperator", - "InterpolatedQuadraticOperator", - "InterpolatedCubicOperator", - "InterpolatedInputOperator", - "InterpolatedStateInputOperator", + "InterpConstantOperator", + "InterpLinearOperator", + "InterpQuadraticOperator", + "InterpCubicOperator", + "InterpInputOperator", + "InterpStateInputOperator", ] import warnings @@ -30,7 +30,7 @@ # Base class ================================================================== -class _InterpolatedOperator(ParametricOpInfOperator): +class _InterpOperator(ParametricOpInfOperator): r"""Base class for parametric operators where the parameter dependence is handled with element-wise interpolation. @@ -275,7 +275,7 @@ def set_interpolator(self, InterpolatorClass): # Magic methods ----------------------------------------------------------- def __eq__(self, other) -> bool: """Test whether the training parameters and operator matrices of two - _InterpolatedOperator objects are the same. + _InterpOperator objects are the same. """ if not isinstance(other, self.__class__): return False @@ -529,7 +529,7 @@ def load(cls, loadfile: str, InterpolatorClass: type = None): Returns ------- - op : _InterpolatedOperator + op : _InterpOperator Initialized operator object. 
""" with utils.hdf5_loadhandle(loadfile) as hf: @@ -575,7 +575,7 @@ def load(cls, loadfile: str, InterpolatorClass: type = None): # Public interpolated operator classes ======================================== -class InterpolatedConstantOperator(_InterpolatedOperator): +class InterpConstantOperator(_InterpOperator): r"""Parametric constant operator :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) = \chat(\bfmu) \in \RR^r` where the parametric dependence is handled with elementwise interpolation. @@ -622,7 +622,7 @@ class InterpolatedConstantOperator(_InterpolatedOperator): _OperatorClass = ConstantOperator -class InterpolatedLinearOperator(_InterpolatedOperator): +class InterpLinearOperator(_InterpOperator): r"""Parametric linear operator :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) = \Ahat(\bfmu)\qhat` where :math:`\Ahat(\bfmu) \in \RR^{r \times r}` and @@ -670,7 +670,7 @@ class InterpolatedLinearOperator(_InterpolatedOperator): _OperatorClass = LinearOperator -class InterpolatedQuadraticOperator(_InterpolatedOperator): +class InterpQuadraticOperator(_InterpOperator): r"""Parametric quadratic operator :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) = \Hhat(\bfmu)[\qhat\otimes\qhat]` where :math:`\Ahat(\bfmu) \in \RR^{r \times r^2}` and @@ -718,7 +718,7 @@ class InterpolatedQuadraticOperator(_InterpolatedOperator): _OperatorClass = QuadraticOperator -class InterpolatedCubicOperator(_InterpolatedOperator): +class InterpCubicOperator(_InterpOperator): r"""Parametric cubic operator :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) = \Ghat(\bfmu)[\qhat\otimes\qhat\otimes\qhat]` @@ -767,7 +767,7 @@ class InterpolatedCubicOperator(_InterpolatedOperator): _OperatorClass = CubicOperator -class InterpolatedInputOperator(_InterpolatedOperator, InputMixin): +class InterpInputOperator(_InterpOperator, InputMixin): r"""Parametric input operator :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) = \Bhat(\bfmu)\u` where :math:`\Bhat(\bfmu) \in \RR^{r \times m}` and @@ -821,7 +821,7 @@ def input_dimension(self): return None if 
self.entries is None else self.shape[1] -class InterpolatedStateInputOperator(_InterpolatedOperator, InputMixin): +class InterpStateInputOperator(_InterpOperator, InputMixin): r"""Parametric state-input operator :math:`\Ophat_{\ell}(\qhat,\u;\bfmu) = \Nhat(\bfmu)[\u\otimes\qhat]` where :math:`\Nhat(\bfmu) \in \RR^{r \times rm}` and @@ -880,7 +880,7 @@ def input_dimension(self): # Utilities =================================================================== def is_interpolated(obj) -> bool: """Return ``True`` if ``obj`` is a interpolated operator object.""" - return isinstance(obj, _InterpolatedOperator) + return isinstance(obj, _InterpOperator) def nonparametric_to_interpolated(OpClass: type) -> type: @@ -888,14 +888,153 @@ def nonparametric_to_interpolated(OpClass: type) -> type: operator class. """ - for InterpolatedClassName in __all__: - InterpolatedClass = eval(InterpolatedClassName) - if not isinstance(InterpolatedClass, type) or not issubclass( - InterpolatedClass, _InterpolatedOperator + for InterpClassName in __all__: + InterpClass = eval(InterpClassName) + if not isinstance(InterpClass, type) or not issubclass( + InterpClass, _InterpOperator ): # pragma: no cover continue - if InterpolatedClass._OperatorClass is OpClass: - return InterpolatedClass + if InterpClass._OperatorClass is OpClass: + return InterpClass raise TypeError( - f"_InterpolatedOperator for class '{OpClass.__name__}' not found" + f"_InterpOperator for class '{OpClass.__name__}' not found" ) + + +# Deprecations ================================================================ +class InterpolatedConstantOperator(InterpConstantOperator): + def __init__( + self, + training_parameters=None, + entries=None, + InterpolatorClass: type = None, + fromblock=False, + ): + warnings.warn( + "InterpolatedConstantOperator has been renamed " + "and will be removed in an upcoming release, use " + "InterpConstantOperator", + DeprecationWarning, + ) + InterpConstantOperator.__init__( + self, + 
training_parameters=training_parameters, + entries=entries, + InterpolatorClass=InterpolatorClass, + fromblock=fromblock, + ) + + +class InterpolatedLinearOperator(InterpLinearOperator): + def __init__( + self, + training_parameters=None, + entries=None, + InterpolatorClass: type = None, + fromblock=False, + ): + warnings.warn( + "InterpolatedLinearOperator has been renamed " + "and will be removed in an upcoming release, use " + "InterpLinearOperator", + DeprecationWarning, + ) + InterpLinearOperator.__init__( + self, + training_parameters=training_parameters, + entries=entries, + InterpolatorClass=InterpolatorClass, + fromblock=fromblock, + ) + + +class InterpolatedQuadraticOperator(InterpQuadraticOperator): + def __init__( + self, + training_parameters=None, + entries=None, + InterpolatorClass: type = None, + fromblock=False, + ): + warnings.warn( + "InterpolatedQuadraticOperator has been renamed " + "and will be removed in an upcoming release, use " + "InterpQuadraticOperator", + DeprecationWarning, + ) + InterpQuadraticOperator.__init__( + self, + training_parameters=training_parameters, + entries=entries, + InterpolatorClass=InterpolatorClass, + fromblock=fromblock, + ) + + +class InterpolatedCubicOperator(InterpCubicOperator): + def __init__( + self, + training_parameters=None, + entries=None, + InterpolatorClass: type = None, + fromblock=False, + ): + warnings.warn( + "InterpolatedCubicOperator has been renamed " + "and will be removed in an upcoming release, use " + "InterpCubicOperator", + DeprecationWarning, + ) + InterpCubicOperator.__init__( + self, + training_parameters=training_parameters, + entries=entries, + InterpolatorClass=InterpolatorClass, + fromblock=fromblock, + ) + + +class InterpolatedInputOperator(InterpInputOperator): + def __init__( + self, + training_parameters=None, + entries=None, + InterpolatorClass: type = None, + fromblock=False, + ): + warnings.warn( + "InterpolatedInputOperator has been renamed " + "and will be removed in an 
upcoming release, use " + "InterpInputOperator", + DeprecationWarning, + ) + InterpInputOperator.__init__( + self, + training_parameters=training_parameters, + entries=entries, + InterpolatorClass=InterpolatorClass, + fromblock=fromblock, + ) + + +class InterpolatedStateInputOperator(InterpStateInputOperator): + def __init__( + self, + training_parameters=None, + entries=None, + InterpolatorClass: type = None, + fromblock=False, + ): + warnings.warn( + "InterpolatedStateInputOperator has been renamed " + "and will be removed in an upcoming release, use " + "InterpStateInputOperator", + DeprecationWarning, + ) + InterpStateInputOperator.__init__( + self, + training_parameters=training_parameters, + entries=entries, + InterpolatorClass=InterpolatorClass, + fromblock=fromblock, + ) diff --git a/tests/models/mono/test_parametric.py b/tests/models/mono/test_parametric.py index 749963bb..3cfbfe45 100644 --- a/tests/models/mono/test_parametric.py +++ b/tests/models/mono/test_parametric.py @@ -81,9 +81,7 @@ class DummyParametricOperator2(DummyParametricOperator): _OperatorClass = DummyOpInfOperator2 -class DummyInterpolatedOperator( - opinf.operators._interpolate._InterpolatedOperator -): +class DummyInterpOperator(opinf.operators._interpolate._InterpOperator): pass @@ -141,7 +139,7 @@ def test_set_operators(self): "consider using a nonparametric model class" ) - operators = [DummyInterpolatedOperator()] + operators = [DummyInterpOperator()] with pytest.warns(opinf.errors.OpInfWarning) as wn: self.Dummy(operators) @@ -320,7 +318,7 @@ def test_from_models(self, r=4): ) # Wrong type of model. 
- model2 = self.Dummy([opinf.operators.InterpolatedCubicOperator()]) + model2 = self.Dummy([opinf.operators.InterpCubicOperator()]) with pytest.raises(TypeError) as ex: self.Dummy._from_models(mu, [model2, model1]) assert ex.value.args[0] == ( @@ -356,7 +354,7 @@ def test_from_models(self, r=4): assert len(model.operators) == 1 assert isinstance( model.operators[0], - opinf.operators.InterpolatedConstantOperator, + opinf.operators.InterpConstantOperator, ) def test_set_interpolator(self, s=10, p=2, r=2): @@ -364,12 +362,12 @@ def test_set_interpolator(self, s=10, p=2, r=2): mu = np.random.random((s, p)) operators = [ - opinf.operators.InterpolatedConstantOperator( + opinf.operators.InterpConstantOperator( training_parameters=mu, entries=np.random.random((s, r)), InterpolatorClass=interp.NearestNDInterpolator, ), - opinf.operators.InterpolatedLinearOperator( + opinf.operators.InterpLinearOperator( training_parameters=mu, entries=np.random.random((s, r, r)), InterpolatorClass=interp.NearestNDInterpolator, @@ -398,8 +396,8 @@ def test_set_interpolator(self, s=10, p=2, r=2): def test_fit_solver(self, s=10, r=3, k=20): """Test _InterpolatedModel._fit_solver().""" operators = [ - opinf.operators.InterpolatedConstantOperator(), - opinf.operators.InterpolatedLinearOperator(), + opinf.operators.InterpConstantOperator(), + opinf.operators.InterpLinearOperator(), ] params = np.sort(np.random.random(s)) states = np.random.random((s, r, k)) @@ -428,8 +426,8 @@ def test_fit_solver(self, s=10, r=3, k=20): def test_refit(self, s=10, r=3, k=15): """Test _InterpolatedModel.refit().""" operators = [ - opinf.operators.InterpolatedConstantOperator(), - opinf.operators.InterpolatedLinearOperator(), + opinf.operators.InterpConstantOperator(), + opinf.operators.InterpLinearOperator(), ] params = np.sort(np.random.random(s)) states = np.random.random((s, r, k)) @@ -459,8 +457,8 @@ def test_save(self, target="_interpmodelsavetest.h5"): model = self.Dummy( [ - 
opinf.operators.InterpolatedConstantOperator(), - opinf.operators.InterpolatedLinearOperator(), + opinf.operators.InterpConstantOperator(), + opinf.operators.InterpLinearOperator(), ] ) model.save(target) @@ -489,8 +487,8 @@ def test_load(self, target="_interpmodelloadtest.h5"): os.remove(target) operators = [ - opinf.operators.InterpolatedConstantOperator(), - opinf.operators.InterpolatedLinearOperator(), + opinf.operators.InterpConstantOperator(), + opinf.operators.InterpLinearOperator(), ] model = self.Dummy(operators, InterpolatorClass=float) @@ -540,18 +538,18 @@ def test_copy(self, s=10, p=2, r=3): model1 = self.Dummy( [ - opinf.operators.InterpolatedConstantOperator(), - opinf.operators.InterpolatedLinearOperator(), + opinf.operators.InterpConstantOperator(), + opinf.operators.InterpLinearOperator(), ] ) mu = np.random.random((s, p)) model2 = self.Dummy( [ - opinf.operators.InterpolatedConstantOperator( + opinf.operators.InterpConstantOperator( mu, entries=np.random.random((s, r)) ), - opinf.operators.InterpolatedLinearOperator( + opinf.operators.InterpLinearOperator( mu, entries=np.random.random((s, r, r)) ), ], @@ -590,7 +588,7 @@ def test_rhs(self, s=10, r=3, m=2): params = np.sort(np.random.random(s)) state = np.random.random(r) model = self.ModelClass( - opinf.operators.InterpolatedLinearOperator( + opinf.operators.InterpLinearOperator( params, np.random.random((s, r, r)) ) ) @@ -600,7 +598,7 @@ def test_rhs(self, s=10, r=3, m=2): input_ = np.random.random(m) model = self.ModelClass( - opinf.operators.InterpolatedInputOperator( + opinf.operators.InterpInputOperator( params, np.random.random((s, r, m)) ) ) @@ -613,7 +611,7 @@ def test_jacobian(self, s=9, r=2, m=3): params = np.sort(np.random.random(s)) state = np.random.random(r) model = self.ModelClass( - opinf.operators.InterpolatedLinearOperator( + opinf.operators.InterpLinearOperator( params, np.random.random((s, r, r)) ) ) @@ -623,7 +621,7 @@ def test_jacobian(self, s=9, r=2, m=3): input_ = 
np.random.random(m) model = self.ModelClass( - opinf.operators.InterpolatedInputOperator( + opinf.operators.InterpInputOperator( params, np.random.random((s, r, m)) ) ) @@ -636,9 +634,7 @@ def test_predict(self, s=11, r=4, m=2, niters=10): params = np.sort(np.random.random(s)) state0 = np.random.random(r) model = self.ModelClass( - opinf.operators.InterpolatedLinearOperator( - params, np.zeros((s, r, r)) - ) + opinf.operators.InterpLinearOperator(params, np.zeros((s, r, r))) ) out = model.predict(params[2], state0, niters) assert isinstance(out, np.ndarray) @@ -648,9 +644,7 @@ def test_predict(self, s=11, r=4, m=2, niters=10): inputs = np.random.random((m, niters)) model = self.ModelClass( - opinf.operators.InterpolatedInputOperator( - params, np.zeros((s, r, m)) - ) + opinf.operators.InterpInputOperator(params, np.zeros((s, r, m))) ) out = model.predict(params[-2], state0, niters, inputs) assert isinstance(out, np.ndarray) @@ -684,7 +678,7 @@ def test_rhs(self, s=10, r=3, m=2): params = np.sort(np.random.random(s)) state = np.random.random(r) model = self.ModelClass( - opinf.operators.InterpolatedLinearOperator( + opinf.operators.InterpLinearOperator( params, np.random.random((s, r, r)) ) ) @@ -696,7 +690,7 @@ def input_func(t): return np.random.random(m) model = self.ModelClass( - opinf.operators.InterpolatedInputOperator( + opinf.operators.InterpInputOperator( params, np.random.random((s, r, m)) ) ) @@ -709,7 +703,7 @@ def test_jacobian(self, s=9, r=2, m=3): params = np.sort(np.random.random(s)) state = np.random.random(r) model = self.ModelClass( - opinf.operators.InterpolatedLinearOperator( + opinf.operators.InterpLinearOperator( params, np.random.random((s, r, r)) ) ) @@ -721,7 +715,7 @@ def input_func(t): return np.random.random(m) model = self.ModelClass( - opinf.operators.InterpolatedInputOperator( + opinf.operators.InterpInputOperator( params, np.random.random((s, r, m)) ) ) @@ -735,9 +729,7 @@ def test_predict(self, s=11, r=4, m=2, k=40): state0 = 
np.random.random(r) t = np.linspace(0, 1, k) model = self.ModelClass( - opinf.operators.InterpolatedLinearOperator( - params, np.zeros((s, r, r)) - ) + opinf.operators.InterpLinearOperator(params, np.zeros((s, r, r))) ) out = model.predict(params[2], state0, t) assert isinstance(out, np.ndarray) @@ -749,9 +741,7 @@ def input_func(t): return np.random.random(m) model = self.ModelClass( - opinf.operators.InterpolatedInputOperator( - params, np.zeros((s, r, m)) - ) + opinf.operators.InterpInputOperator(params, np.zeros((s, r, m))) ) out = model.predict(params[-2], state0, t, input_func) assert isinstance(out, np.ndarray) @@ -762,7 +752,7 @@ def input_func(t): def test_publics(): """Ensure all public ParametricModel classes can be instantiated.""" - operators = [opinf.operators.InterpolatedConstantOperator()] + operators = [opinf.operators.InterpConstantOperator()] for ModelClassName in _module.__all__: ModelClass = getattr(_module, ModelClassName) if not isinstance(ModelClass, type) or not issubclass( diff --git a/tests/operators/test_interpolate.py b/tests/operators/test_interpolate.py index 98e245f5..67388755 100644 --- a/tests/operators/test_interpolate.py +++ b/tests/operators/test_interpolate.py @@ -56,16 +56,16 @@ class _DummyInterpolator2(_DummyInterpolator): pass -class TestInterpolatedOperator: - """Test operators._interpolate._InterpolatedOperator.""" +class TestInterpOperator: + """Test operators._interpolate._InterpOperator.""" - class Dummy(_module._InterpolatedOperator): - """Instantiable version of _InterpolatedOperator.""" + class Dummy(_module._InterpOperator): + """Instantiable version of _InterpOperator.""" _OperatorClass = _DummyOperator def test_from_operators(self, s=7, p=2, r=5): - """Test _InterpolatedOperator._from_operators().""" + """Test _InterpOperator._from_operators().""" mu = np.random.random((s, p)) with pytest.raises(TypeError) as ex: @@ -96,7 +96,7 @@ def test_from_operators(self, s=7, p=2, r=5): assert isinstance(op.interpolator, 
_DummyInterpolator) def test_set_training_parameters(self, s=10, p=2, r=4): - """Test _InterpolatedOperator.set_training_parameters(), + """Test _InterpOperator.set_training_parameters(), the training_parameter property, and __len__(). """ op = self.Dummy() @@ -116,9 +116,15 @@ def test_set_training_parameters(self, s=10, p=2, r=4): assert np.all(op.training_parameters == mu) assert op.state_dimension is None assert op.interpolator is None + assert op.parameter_dimension == p op.set_training_parameters(mu[:, 0]) assert np.all(op.training_parameters == mu[:, 0]) + assert op.parameter_dimension == 1 + + op.set_training_parameters(mu[:, 0].reshape((-1, 1))) + assert np.all(op.training_parameters == mu[:, 0]) + assert op.parameter_dimension == 1 entries = np.random.standard_normal((s, r, r)) op = self.Dummy(mu, entries) @@ -130,7 +136,7 @@ def test_set_training_parameters(self, s=10, p=2, r=4): ) def test_set_entries(self, s=5, p=3, r=4): - """Test _InterpolatedOperator.set_entries(), _clear(), and the + """Test _InterpOperator.set_entries(), _clear(), and the the entries and shape properties. """ mu = np.random.random((s, p)) @@ -189,7 +195,7 @@ def test_set_entries(self, s=5, p=3, r=4): assert op.state_dimension is None def test_set_interpolator(self, s=4, p=2, r=5): - """Test _InterpolatedOperator.set_interpolator() and the + """Test _InterpOperator.set_interpolator() and the interpolator property. 
""" op = self.Dummy() @@ -208,7 +214,7 @@ def test_set_interpolator(self, s=4, p=2, r=5): assert isinstance(op.interpolator, _DummyInterpolator2) def test_eq(self, s=4, p=3, r=2): - """Test _InterpolatedOperator.__eq__().""" + """Test _InterpOperator.__eq__().""" op1 = self.Dummy() op2 = self.Dummy() assert op1 == op2 @@ -245,7 +251,7 @@ def test_eq(self, s=4, p=3, r=2): assert op1 == op2 def test_evaluate(self, s=3, p=5, r=4): - """Test _InterpolatedOperator.evaluate().""" + """Test _InterpOperator.evaluate().""" mu = np.random.random((s, p)) op = self.Dummy(mu, InterpolatorClass=_DummyInterpolator) @@ -260,7 +266,7 @@ def test_evaluate(self, s=3, p=5, r=4): assert np.all(op_evaluated.entries == entries[0]) def test_galerkin(self, s=5, p=2, n=10, r=4): - """Test _InterpolatedOperator.galerkin().""" + """Test _InterpOperator.galerkin().""" Vr = np.empty((n, r)) mu = np.random.random((s, p)) entries = np.random.random((s, n, n)) @@ -271,7 +277,7 @@ def test_galerkin(self, s=5, p=2, n=10, r=4): assert np.all(op_reduced.entries == entries) def test_datablock(self, s=4, p=2, r=2, k=3): - """Test _InterpolatedOperator.datablock().""" + """Test _InterpOperator.datablock().""" mu = np.random.random((s, p)) states = np.random.random((s, r, k)) op = self.Dummy(mu, InterpolatorClass=_DummyInterpolator) @@ -280,11 +286,11 @@ def test_datablock(self, s=4, p=2, r=2, k=3): assert np.all(block == la.block_diag(*[_Dblock for _ in range(s)])) def test_operator_dimension(self, s=3): - """Test _InterpolatedOperator.operator_dimension().""" + """Test _InterpOperator.operator_dimension().""" assert self.Dummy.operator_dimension(s, None, None) == _d * s def test_copy(self, s=4, p=2, r=5): - """Test _InterpolatedOperator.copy().""" + """Test _InterpOperator.copy().""" op1 = self.Dummy() op2 = op1.copy() assert op2 is not op1 @@ -313,7 +319,7 @@ def test_copy(self, s=4, p=2, r=5): assert isinstance(op2.interpolator, _DummyInterpolator2) def test_save(self, s=5, p=2, r=3, 
target="_interpolatedopsavetest.h5"): - """Lightly test _InterpolatedOperator.save().""" + """Lightly test _InterpOperator.save().""" if os.path.isfile(target): # pragma: no cover os.remove(target) @@ -336,7 +342,7 @@ def test_save(self, s=5, p=2, r=3, target="_interpolatedopsavetest.h5"): os.remove(target) def test_load(self, s=15, p=3, r=3, target="_interpolatedoploadtest.h5"): - """Test _InterpolatedOperator.load().""" + """Test _InterpOperator.load().""" if os.path.isfile(target): os.remove(target) @@ -390,13 +396,13 @@ def test_load(self, s=15, p=3, r=3, target="_interpolatedoploadtest.h5"): def test_publics(): - """Ensure all public InterpolatedOperator classes can be instantiated + """Ensure all public InterpOperator classes can be instantiated without arguments. """ for OpClassName in _module.__all__: OpClass = getattr(_module, OpClassName) if not isinstance(OpClass, type) or not issubclass( - OpClass, _module._InterpolatedOperator + OpClass, _module._InterpOperator ): continue op = OpClass() @@ -407,7 +413,7 @@ def test_publics(): def test_1Doperators(r=10, m=3, s=5): - """Test InterpolatedOperator classes with using all 1D interpolators + """Test InterpOperator classes with using all 1D interpolators from scipy.interpolate. 
""" InterpolatorClass = interp.CubicSpline @@ -420,12 +426,12 @@ def test_1Doperators(r=10, m=3, s=5): mu_new = 0.314159 for OpClass, Ohat in [ - (_module.InterpolatedConstantOperator, c), - (_module.InterpolatedLinearOperator, A), - (_module.InterpolatedQuadraticOperator, H), - (_module.InterpolatedCubicOperator, G), - (_module.InterpolatedInputOperator, B), - (_module.InterpolatedStateInputOperator, N), + (_module.InterpConstantOperator, c), + (_module.InterpLinearOperator, A), + (_module.InterpQuadraticOperator, H), + (_module.InterpCubicOperator, G), + (_module.InterpInputOperator, B), + (_module.InterpStateInputOperator, N), ]: entries = [ Ohat + p**2 + np.random.standard_normal(Ohat.shape) / 20 @@ -461,7 +467,7 @@ def test_1Doperators(r=10, m=3, s=5): def test_is_interpolated(): """Test operators._interpolate.is_interpolated().""" - op = TestInterpolatedOperator.Dummy() + op = TestInterpOperator.Dummy() assert _module.is_interpolated(op) assert not _module.is_interpolated(-1) @@ -471,11 +477,24 @@ def test_nonparametric_to_interpolated(): with pytest.raises(TypeError) as ex: _module.nonparametric_to_interpolated(float) - assert ex.value.args[0] == ( - "_InterpolatedOperator for class 'float' not found" - ) + assert ex.value.args[0] == ("_InterpOperator for class 'float' not found") OpClass = _module.nonparametric_to_interpolated( opinf.operators.QuadraticOperator ) - assert OpClass is opinf.operators.InterpolatedQuadraticOperator + assert OpClass is opinf.operators.InterpQuadraticOperator + + +def test_deprecations(): + """Ensure deprecated classes still work.""" + for OpClass in [ + _module.InterpolatedConstantOperator, + _module.InterpolatedLinearOperator, + _module.InterpolatedQuadraticOperator, + _module.InterpolatedCubicOperator, + _module.InterpolatedInputOperator, + _module.InterpolatedStateInputOperator, + ]: + with pytest.warns(DeprecationWarning) as wn: + OpClass() + assert len(wn) == 1 From 4ef277c85ce5609d62fdf00e876694bb2ad7fa81 Mon Sep 17 
00:00:00 2001 From: Shane Date: Thu, 29 Aug 2024 17:11:31 -0600 Subject: [PATCH 21/48] OperatorClass -> _OperatorClass, operator utils --- src/opinf/models/mono/_base.py | 40 +++++++------ src/opinf/models/mono/_nonparametric.py | 39 ++++++++++--- src/opinf/models/mono/_parametric.py | 78 +++++++++++++++---------- src/opinf/operators/_affine.py | 12 ++-- src/opinf/operators/_base.py | 32 ++++------ src/opinf/operators/_interpolate.py | 16 +++-- src/opinf/operators/_utils.py | 17 ++++++ tests/operators/test_affine.py | 17 +++++- tests/operators/test_base.py | 14 ++--- tests/operators/test_interpolate.py | 2 +- 10 files changed, 166 insertions(+), 101 deletions(-) create mode 100644 src/opinf/operators/_utils.py diff --git a/src/opinf/models/mono/_base.py b/src/opinf/models/mono/_base.py index 4df920d6..da45d234 100644 --- a/src/opinf/models/mono/_base.py +++ b/src/opinf/models/mono/_base.py @@ -8,7 +8,15 @@ import numpy as np from ... import errors, lstsq -from ... import operators as _operators +from ...operators import ( + ConstantOperator, + LinearOperator, + QuadraticOperator, + CubicOperator, + InputOperator, + StateInputOperator, + _utils as oputils, +) class _Model(abc.ABC): @@ -94,11 +102,11 @@ def operators(self, ops): raise TypeError( f"invalid operator of type '{op.__class__.__name__}'" ) - if _operators.is_uncalibrated(op): + if oputils.is_uncalibrated(op): toinfer.append(i) else: known.append(i) - if _operators.has_inputs(op): + if oputils.has_inputs(op): self._has_inputs = True self._check_operator_types_unique([ops[i] for i in toinfer]) @@ -127,32 +135,32 @@ def __iter__(self): @property def c_(self): """:class:`opinf.operators.ConstantOperator` (or ``None``).""" - return self._get_operator_of_type(_operators.ConstantOperator) + return self._get_operator_of_type(ConstantOperator) @property def A_(self): """:class:`opinf.operators.LinearOperator` (or ``None``).""" - return self._get_operator_of_type(_operators.LinearOperator) + return 
self._get_operator_of_type(LinearOperator) @property def H_(self): """:class:`opinf.operators.QuadraticOperator` (or ``None``).""" - return self._get_operator_of_type(_operators.QuadraticOperator) + return self._get_operator_of_type(QuadraticOperator) @property def G_(self): """:class:`opinf.operators.CubicOperator` (or ``None``).""" - return self._get_operator_of_type(_operators.CubicOperator) + return self._get_operator_of_type(CubicOperator) @property def B_(self): """:class:`opinf.operators.InputOperator` (or ``None``).""" - return self._get_operator_of_type(_operators.InputOperator) + return self._get_operator_of_type(InputOperator) @property def N_(self): """:class:`opinf.operators.StateInputOperator` (or ``None``).""" - return self._get_operator_of_type(_operators.StateInputOperator) + return self._get_operator_of_type(StateInputOperator) # Properties: dimensions -------------------------------------------------- @staticmethod @@ -161,9 +169,7 @@ def _check_state_dimension_consistency(ops): inferrable operators whose entries have not been set. """ rs = { - op.state_dimension - for op in ops - if not _operators.is_uncalibrated(op) + op.state_dimension for op in ops if not oputils.is_uncalibrated(op) } if len(rs) > 1: raise errors.DimensionalityError( @@ -196,13 +202,13 @@ def _check_input_dimension_consistency(ops): """Ensure all *input* operators with initialized entries have the same ``input dimension``. 
""" - inputops = [op for op in ops if _operators.has_inputs(op)] + inputops = [op for op in ops if oputils.has_inputs(op)] if len(inputops) == 0: return 0 ms = { op.input_dimension for op in inputops - if not _operators.is_uncalibrated(op) + if not oputils.is_uncalibrated(op) } if len(ms) > 1: raise errors.DimensionalityError( @@ -227,8 +233,8 @@ def input_dimension(self, m): if self.__operators is not None: for op in self.operators: if ( - _operators.has_inputs(op) - and not _operators.is_uncalibrated(op) + oputils.has_inputs(op) + and not oputils.is_uncalibrated(op) and op.input_dimension != m ): raise AttributeError( @@ -330,7 +336,7 @@ def galerkin(self, Vr, Wr=None): [ ( old_op.copy() - if _operators.is_uncalibrated(old_op) + if oputils.is_uncalibrated(old_op) else old_op.galerkin(Vr, Wr) ) for old_op in self.operators diff --git a/src/opinf/models/mono/_nonparametric.py b/src/opinf/models/mono/_nonparametric.py index 5fc6e8c2..02ca5f2c 100644 --- a/src/opinf/models/mono/_nonparametric.py +++ b/src/opinf/models/mono/_nonparametric.py @@ -15,7 +15,28 @@ from ._base import _Model from ... import errors, utils -from ... 
import operators as _operators +from ...operators import ( + ConstantOperator, + LinearOperator, + QuadraticOperator, + CubicOperator, + InputOperator, + StateInputOperator, + _utils as oputils, +) + + +_operator_name2class = { + OpClass.__name__: OpClass + for OpClass in ( + ConstantOperator, + LinearOperator, + QuadraticOperator, + CubicOperator, + InputOperator, + StateInputOperator, + ) +} # Base class ================================================================== @@ -37,12 +58,12 @@ class _NonparametricModel(_Model): # Properties: operators --------------------------------------------------- _operator_abbreviations = { - "c": _operators.ConstantOperator, - "A": _operators.LinearOperator, - "H": _operators.QuadraticOperator, - "G": _operators.CubicOperator, - "B": _operators.InputOperator, - "N": _operators.StateInputOperator, + "c": ConstantOperator, + "A": LinearOperator, + "H": QuadraticOperator, + "G": CubicOperator, + "B": InputOperator, + "N": StateInputOperator, } @staticmethod @@ -50,7 +71,7 @@ def _isvalidoperator(op): """Return True if and only if ``op`` is a valid operator object for this class of model. """ - return _operators.is_nonparametric(op) + return oputils.is_nonparametric(op) @staticmethod def _check_operator_types_unique(ops): @@ -437,7 +458,7 @@ def load(cls, loadfile: str): for i in range(num_operators): gp = hf[f"operator_{i}"] OpClassName = gp["meta"].attrs["class"] - ops.append(getattr(_operators, OpClassName).load(gp)) + ops.append(_operator_name2class[OpClassName].load(gp)) # Construct the model. model = cls(ops) diff --git a/src/opinf/models/mono/_parametric.py b/src/opinf/models/mono/_parametric.py index f8845dbd..aa2da697 100644 --- a/src/opinf/models/mono/_parametric.py +++ b/src/opinf/models/mono/_parametric.py @@ -18,7 +18,30 @@ _FrozenContinuousModel, ) from ... import errors, utils -from ... 
import operators as _operators +from ...operators import ( + OperatorTemplate, + ParametricOperatorTemplate, + InterpConstantOperator, + InterpLinearOperator, + InterpQuadraticOperator, + InterpCubicOperator, + InterpInputOperator, + InterpStateInputOperator, + _utils as oputils, +) + + +_operator_name2class = { + OpClass.__name__: OpClass + for OpClass in ( + InterpConstantOperator, + InterpLinearOperator, + InterpQuadraticOperator, + InterpCubicOperator, + InterpInputOperator, + InterpStateInputOperator, + ) +} # Base classes ================================================================ @@ -70,8 +93,8 @@ def _isvalidoperator(self, op): return isinstance( op, ( - _operators.OperatorTemplate, - _operators.ParametricOperatorTemplate, + OperatorTemplate, + ParametricOperatorTemplate, ), ) @@ -81,7 +104,7 @@ def _check_operator_types_unique(ops): of operation (e.g., two constant operators). """ OpClasses = { - (op.OperatorClass if _operators.is_parametric(op) else type(op)) + (op._OperatorClass if oputils.is_parametric(op) else type(op)) for op in ops } if len(OpClasses) != len(ops): @@ -92,9 +115,9 @@ def _get_operator_of_type(self, OpClass): operator class ``OpClass``. """ for op in self.operators: - if ( - _operators.is_parametric(op) and op.OperatorClass is OpClass - ) or (_operators.is_nonparametric(op) and isinstance(op, OpClass)): + if oputils.is_parametric(op) and op._OperatorClass is OpClass: + return op + if oputils.is_nonparametric(op) and isinstance(op, OpClass): return op @property @@ -109,7 +132,7 @@ def operators(self, ops): # Check at least one operator is parametric. parametric_operators = [ - op for op in self.operators if _operators.is_parametric(op) + op for op in self.operators if oputils.is_parametric(op) ] if len(parametric_operators) == 0: warnings.warn( @@ -121,9 +144,7 @@ def operators(self, ops): # Check that not every operator is interpolated. 
if not isinstance(self, _InterpolatedModel): interpolated_operators = [ - op - for op in self.operators - if _operators._interpolate.is_interpolated(op) + op for op in self.operators if oputils.is_interpolated(op) ] if len(interpolated_operators) == len(self.operators): warnings.warn( @@ -147,8 +168,7 @@ def _check_parameter_dimension_consistency(ops): ps = { op.parameter_dimension for op in ops - if _operators.is_parametric(op) - and op.parameter_dimension is not None + if oputils.is_parametric(op) and op.parameter_dimension is not None } if len(ps) > 1: raise errors.DimensionalityError( @@ -169,7 +189,7 @@ def parameter_dimension(self, p): """ if self.operators is not None: for op in self.operators: - if _operators.is_nonparametric(op): + if oputils.is_nonparametric(op): continue if (opp := op.parameter_dimension) is not None and opp != p: raise AttributeError( @@ -265,7 +285,7 @@ def _check_valid_dimension2(dataset, label): # Subtract known operator evaluations from the LHS. for ell in self._indices_of_known_operators: op = self.operators[ell] - _isparametric = _operators.is_parametric(op) + _isparametric = oputils.is_parametric(op) for i, lhsi in enumerate(lhs): _args = [states[i], inputs[i]] if _isparametric: @@ -279,7 +299,7 @@ def _assemble_data_matrix(self, parameters, states, inputs): blocks = [] for i in self._indices_of_operators_to_infer: op = self.operators[i] - if not _operators.is_parametric(op): + if not oputils.is_parametric(op): blocks.append(np.hstack(states).T) else: blocks.append(op.datablock(parameters, states, inputs).T) @@ -305,7 +325,7 @@ def _extract_operators(self, Ohat): index = 0 for i in self._indices_of_operators_to_infer: op = self.operators[i] - if _operators.is_parametric(op): + if oputils.is_parametric(op): endex = index + op.operator_dimension( self.__s, self.state_dimension, self.input_dimension ) @@ -423,7 +443,7 @@ def evaluate(self, parameter): """ return self.ModelClass( [ - op.evaluate(parameter) if 
_operators.is_parametric(op) else op + op.evaluate(parameter) if oputils.is_parametric(op) else op for op in self.operators ] ) @@ -1089,9 +1109,7 @@ def _from_models(cls, parameters, models, InterpolatorClass: type = None): # Extract the operators from the individual models. return cls( operators=[ - _operators._interpolate.nonparametric_to_interpolated( - OpClass - )._from_operators( + oputils.nonparametric_to_interpolated(OpClass)._from_operators( training_parameters=parameters, operators=[mdl.operators[ell] for mdl in models], InterpolatorClass=InterpolatorClass, @@ -1124,17 +1142,17 @@ def set_interpolator(self, InterpolatorClass): # Properties: operators --------------------------------------------------- _operator_abbreviations = { - "c": _operators.InterpConstantOperator, - "A": _operators.InterpLinearOperator, - "H": _operators.InterpQuadraticOperator, - "G": _operators.InterpCubicOperator, - "B": _operators.InterpInputOperator, - "N": _operators.InterpStateInputOperator, + "c": InterpConstantOperator, + "A": InterpLinearOperator, + "H": InterpQuadraticOperator, + "G": InterpCubicOperator, + "B": InterpInputOperator, + "N": InterpStateInputOperator, } def _isvalidoperator(self, op): """Only interpolated parametric operators are allowed.""" - return _operators._interpolate.is_interpolated(op) + return oputils.is_interpolated(op) # Fitting ----------------------------------------------------------------- def _assemble_data_matrix(self, *args, **kwargs): # pragma: no cover @@ -1166,7 +1184,7 @@ def _fit_solver(self, parameters, states, lhs, inputs=None): for i in range(n_datasets): model_i = self._ModelFitClass( operators=[ - op.OperatorClass( + op._OperatorClass( op.entries[i] if op.entries is not None else None ) for op in self.operators @@ -1320,7 +1338,7 @@ def load(cls, loadfile: str, InterpolatorClass: type = None): gp = hf[f"operator_{i}"] OpClassName = gp["meta"].attrs["class"] ops.append( - getattr(_operators, OpClassName).load( + 
_operator_name2class[OpClassName].load( gp, InterpolatorClass ) ) diff --git a/src/opinf/operators/_affine.py b/src/opinf/operators/_affine.py index 09d00e5f..3f28dc27 100644 --- a/src/opinf/operators/_affine.py +++ b/src/opinf/operators/_affine.py @@ -224,7 +224,7 @@ def set_entries(self, entries, fromblock: bool = False) -> None: ParametricOpInfOperator.set_entries( self, - [self.OperatorClass(A).entries for A in entries], + [self._OperatorClass(A).entries for A in entries], ) def __str__(self): @@ -244,7 +244,7 @@ def evaluate(self, parameter): Returns ------- - op : :mod:`opinf.operators` operator of type :attr:`OperatorClass` + op : nonparametric :mod:`opinf.operators` operator Nonparametric operator corresponding to the parameter value. """ if self.parameter_dimension is None: @@ -252,7 +252,7 @@ def evaluate(self, parameter): self._check_parametervalue_dimension(parameter) thetamus = self.coeffs(parameter) entries = sum([tm * A for tm, A in zip(thetamus, self.entries)]) - return self.OperatorClass(entries) + return self._OperatorClass(entries) # Dimensionality reduction ------------------------------------------------ @utils.requires("entries") @@ -308,7 +308,7 @@ def galerkin(self, Vr, Wr=None): coeffs=self.coeffs, nterms=self.nterms, entries=[ - self.OperatorClass(A).galerkin(Vr, Wr).entries + self._OperatorClass(A).galerkin(Vr, Wr).entries for A in self.entries ], fromblock=False, @@ -337,7 +337,7 @@ def operator_dimension(self, s: int, r: int, m: int) -> int: d : int Number of columns in the concatenated operator matrix. """ - return self.nterms * self.OperatorClass.operator_dimension(r, m) + return self.nterms * self._OperatorClass.operator_dimension(r, m) def datablock(self, parameters, states, inputs=None) -> np.ndarray: r"""Return the data matrix block corresponding to the operator. 
@@ -405,7 +405,7 @@ def datablock(self, parameters, states, inputs=None) -> np.ndarray: inputs = [None] * len(parameters) blockcolumns = [] for mu, Q, U in zip(parameters, states, inputs): - Di = self.OperatorClass.datablock(Q, U) + Di = self._OperatorClass.datablock(Q, U) theta_mus = self.coeffs(mu) blockcolumns.append(np.vstack([theta * Di for theta in theta_mus])) return np.hstack(blockcolumns) diff --git a/src/opinf/operators/_base.py b/src/opinf/operators/_base.py index b9c9260d..a83264d5 100644 --- a/src/opinf/operators/_base.py +++ b/src/opinf/operators/_base.py @@ -5,12 +5,9 @@ "InputMixin", "has_inputs", "OperatorTemplate", - "is_nonparametric", "OpInfOperator", "ParametricOperatorTemplate", - "is_parametric", "ParametricOpInfOperator", - "is_uncalibrated", ] import os @@ -889,22 +886,9 @@ class ParametricOperatorTemplate(abc.ABC): :class:`OpInfOperator` or :class:`ParametricOpInfOperator`. """ - # Meta properties --------------------------------------------------------- + # Nonparametric operator class that this parametric operator evaluates to. _OperatorClass = NotImplemented - @property - def OperatorClass(self): - """Nonparametric :mod:`opinf.operators` class that represents - this parametric operator evaluated at a particular parameter value. - - Examples - -------- - >>> Op = MyParametricOperator(init_args).evaluate(parameter_value) - >>> isinstance(Op, MyParametricOperator.OperatorClass) - True - """ - return self._OperatorClass - # Properties -------------------------------------------------------------- @property @abc.abstractmethod @@ -946,7 +930,7 @@ def _check_parametervalue_dimension(self, parameter): @abc.abstractmethod def evaluate(self, parameter): r"""Evaluate the operator at the given parameter value, - resulting in a nonparametric operator of type :attr`OperatorClass`. + resulting in a nonparametric operator. 
Parameters ---------- @@ -955,7 +939,7 @@ def evaluate(self, parameter): Returns ------- - evaluated_operator : nonparametric operator + op : nonparametric :mod:`opinf.operators` operator Nonparametric operator corresponding to the parameter value. This should be an instance of :class:`OperatorTemplate` (or a class that inherits from it). @@ -1115,6 +1099,12 @@ def verify(self, testparam=None): If ``None`` (default), draw test parameter entries from the standard Normal distribution. """ + # Check the _OperatorClass. + if not issubclass(self._OperatorClass, OperatorTemplate): + raise errors.VerificationError( + "_OperatorClass must be a nonparametric operator type" + ) + # Verify dimensions exist and are valid. if not isinstance((r := self.state_dimension), int) or r <= 0: raise errors.VerificationError( @@ -1141,11 +1131,11 @@ def verify(self, testparam=None): op_evaluated = self.evaluate(testparam) if not isinstance(op_evaluated, self._OperatorClass): raise errors.VerificationError( - "evaluate() must return instance of type OperatorClass" + "evaluate() must return instance of type _OperatorClass" ) if not is_nonparametric(op_evaluated): raise errors.VerificationError( - "OperatorClass must be a nonparametric operator type" + "_OperatorClass must be a nonparametric operator type" ) if op_evaluated.state_dimension != self.state_dimension: diff --git a/src/opinf/operators/_interpolate.py b/src/opinf/operators/_interpolate.py index 2b6d5a86..9aadd969 100644 --- a/src/opinf/operators/_interpolate.py +++ b/src/opinf/operators/_interpolate.py @@ -111,9 +111,8 @@ def _from_operators( Parameters ---------- - operators : list of :mod:`opinf.operators` objects - Operators to interpolate. Must be of class ``OperatorClass`` - and have ``entries`` set. + operators : list of nonparametric :mod:`opinf.operators` operators + Operators to interpolate with ``entries`` already set. """ # Check everything is initialized. 
for op in operators: @@ -228,7 +227,7 @@ def set_entries(self, entries, fromblock: bool = False) -> None: ParametricOpInfOperator.set_entries( self, - np.array([self.OperatorClass(A).entries for A in entries]), + np.array([self._OperatorClass(A).entries for A in entries]), ) self.set_interpolator(self.__InterpolatorClass) @@ -327,8 +326,7 @@ def __str__(self): # Evaluation -------------------------------------------------------------- @utils.requires("entries") def evaluate(self, parameter): - r"""Evaluate the operator at the given parameter value, - :math:`\Ophat_{\ell}(\cdot,\cdot;\bfmu)`. + r"""Evaluate the operator at the given parameter value. Parameters ---------- @@ -337,13 +335,13 @@ def evaluate(self, parameter): Returns ------- - op : :mod:`opinf.operators` operator of type ``OperatorClass``. + op : nonparametric :mod:`opinf.operators` operator Nonparametric operator corresponding to the parameter value. """ self._check_parametervalue_dimension(parameter) if self.parameter_dimension == 1 and not np.isscalar(parameter): parameter = parameter[0] - return self.OperatorClass(self.interpolator(parameter)) + return self._OperatorClass(self.interpolator(parameter)) # Dimensionality reduction ------------------------------------------------ @utils.requires("entries") @@ -400,7 +398,7 @@ def galerkin(self, Vr, Wr=None): return self.__class__( training_parameters=self.training_parameters, entries=[ - self.OperatorClass(A).galerkin(Vr, Wr).entries + self._OperatorClass(A).galerkin(Vr, Wr).entries for A in self.entries ], InterpolatorClass=self.__InterpolatorClass, diff --git a/src/opinf/operators/_utils.py b/src/opinf/operators/_utils.py new file mode 100644 index 00000000..c6657418 --- /dev/null +++ b/src/opinf/operators/_utils.py @@ -0,0 +1,17 @@ +# operators/_utils.py +"""Private utility functions for working with Operator classes.""" + +from ._base import has_inputs, is_nonparametric, is_parametric, is_uncalibrated +from ._affine import is_affine, 
nonparametric_to_affine +from ._interpolate import is_interpolated, nonparametric_to_interpolated + +__all__ = [ + "has_inputs", + "is_nonparametric", + "is_parametric", + "is_uncalibrated", + "is_affine", + "is_interpolated", + "nonparametric_to_affine", + "nonparametric_to_interpolated", +] diff --git a/tests/operators/test_affine.py b/tests/operators/test_affine.py index 4b3c4978..0ce8f6dc 100644 --- a/tests/operators/test_affine.py +++ b/tests/operators/test_affine.py @@ -154,7 +154,7 @@ def test_evaluate(self, r=9, m=4): mu = np.random.random(self.p) op_mu = op.evaluate(mu) - assert isinstance(op_mu, op.OperatorClass) + assert isinstance(op_mu, op._OperatorClass) assert op_mu.entries.shape == arrays[0].shape Amu = np.sum( [theta(mu) * A for theta, A in zip(self.thetas1, arrays)], @@ -351,6 +351,21 @@ def test_input_dimension(self, r=7, m=4, p=5): assert op.input_dimension == m +def test_publics(): + """Ensure all public AffineOperator classes can be instantiated.""" + for OpClassName in _submodule.__all__: + OpClass = getattr(_module, OpClassName) + if not isinstance(OpClass, type) or not issubclass( + OpClass, _submodule._AffineOperator + ): + continue + op = OpClass(_TestAffineOperator.thetas1) + assert issubclass( + op._OperatorClass, + opinf.operators.OpInfOperator, + ) + + def test_is_affine(): """Test operators._affine.is_affine().""" diff --git a/tests/operators/test_base.py b/tests/operators/test_base.py index 8ac137b4..a806bcf9 100644 --- a/tests/operators/test_base.py +++ b/tests/operators/test_base.py @@ -697,8 +697,8 @@ def test_is_nonparametric(): """Test operators._base.is_nonparametric().""" op = TestOpInfOperator.Dummy() - assert opinf.operators.is_nonparametric(op) - assert not opinf.operators.is_nonparametric(10) + assert _module.is_nonparametric(op) + assert not _module.is_nonparametric(10) # Parametric operators ======================================================== @@ -725,7 +725,7 @@ def parameter_dimension(self) -> int: return 
self.__p def evaluate(self, parameter): - return self.OperatorClass(self.state_dimension) + return self._OperatorClass(self.state_dimension) def test_check_parametervalue_dimension(self, r=8, p=3): """Test _check_parametervalue_dimension().""" @@ -791,7 +791,7 @@ def set_entries(self, entries): def evaluate(self, parameter): self._check_parametervalue_dimension(parameter) - op = self.OperatorClass() + op = self._OperatorClass() op.set_entries(self.entries[0]) return op @@ -848,14 +848,14 @@ def test_entries(self, r=8, p=2): def test_is_parametric(): """Test operators._base.is_parametric().""" op = TestParametricOpInfOperator.Dummy() - assert opinf.operators.is_parametric(op) - assert not opinf.operators.is_nonparametric(-1) + assert _module.is_parametric(op) + assert not _module.is_parametric(100) def test_is_uncalibrated(): """Test operators._base.is_uncalibrated().""" - func = opinf.operators.is_uncalibrated + func = _module.is_uncalibrated class Dummy(opinf.operators.OperatorTemplate): """Instantiable version of OperatorTemplate.""" diff --git a/tests/operators/test_interpolate.py b/tests/operators/test_interpolate.py index 67388755..56975d74 100644 --- a/tests/operators/test_interpolate.py +++ b/tests/operators/test_interpolate.py @@ -407,7 +407,7 @@ def test_publics(): continue op = OpClass() assert issubclass( - op.OperatorClass, + op._OperatorClass, opinf.operators.OpInfOperator, ) From fc08ffd1b07b26d4e8537a05a50487fa4f416675 Mon Sep 17 00:00:00 2001 From: Shane Date: Thu, 29 Aug 2024 17:41:49 -0600 Subject: [PATCH 22/48] remove operator utils from public docs --- docs/source/api/missing.rst | 4 ---- docs/source/api/models.md | 18 +++++++++--------- docs/source/api/operators.ipynb | 12 +----------- 3 files changed, 10 insertions(+), 24 deletions(-) diff --git a/docs/source/api/missing.rst b/docs/source/api/missing.rst index 7aa0e3cd..764de50e 100644 --- a/docs/source/api/missing.rst +++ b/docs/source/api/missing.rst @@ -119,10 +119,6 @@ operators.ipynb 
InterpCubicOperator InterpInputOperator InterpStateInputOperator - has_inputs - is_nonparametric - is_parametric - is_uncalibrated lstsq.ipynb ----------- diff --git a/docs/source/api/models.md b/docs/source/api/models.md index 10d7475b..e7a21e5f 100644 --- a/docs/source/api/models.md +++ b/docs/source/api/models.md @@ -156,7 +156,7 @@ In addition, parametric models have an `evaluate()` method that returns a nonpar ### Interpolated Models -Interpolated models consist exclusively of [interpolated operators](sec-operators-interpolated). +Interpolated models consist exclusively of [interpolatory operators](sec-operators-interpolated). ```{eval-rst} .. currentmodule:: opinf.models @@ -173,12 +173,12 @@ The `operators` constructor argument for these classes can also be a string that | Character | {mod}`opinf.operators` class | | :-------- | :------------------------------- | -| `'c'` | {class}`opinf.operators.InterpolatedConstantOperator` | -| `'A'` | {class}`opinf.operators.InterpolatedLinearOperator` | -| `'H'` | {class}`opinf.operators.InterpolatedQuadraticOperator` | -| `'G'` | {class}`opinf.operators.InterpolatedCubicOperator` | -| `'B'` | {class}`opinf.operators.InterpolatedInputOperator` | -| `'N'` | {class}`opinf.operators.InterpolatedStateInputOperator` | +| `'c'` | {class}`opinf.operators.InterpConstantOperator` | +| `'A'` | {class}`opinf.operators.InterpLinearOperator` | +| `'H'` | {class}`opinf.operators.InterpQuadraticOperator` | +| `'G'` | {class}`opinf.operators.InterpCubicOperator` | +| `'B'` | {class}`opinf.operators.InterpInputOperator` | +| `'N'` | {class}`opinf.operators.InterpStateInputOperator` | ```python import opinf @@ -186,8 +186,8 @@ import opinf # Initialize the model with a list of operator objects. 
model = opinf.models.InterpolatedContinuousModel( operators=[ - opinf.operators.InterpolatedCubicOperator(), - opinf.operators.InterpolatedStateInputOperator(), + opinf.operators.InterpCubicOperator(), + opinf.operators.InterpStateInputOperator(), ] ) diff --git a/docs/source/api/operators.ipynb b/docs/source/api/operators.ipynb index c3f0bcba..0512cd8a 100644 --- a/docs/source/api/operators.ipynb +++ b/docs/source/api/operators.ipynb @@ -52,16 +52,6 @@ " InterpCubicOperator\n", " InterpInputOperator\n", " InterpStateInputOperator\n", - "\n", - "**Utilities**\n", - "\n", - ".. autosummary::\n", - " :toctree: _autosummaries\n", - "\n", - " has_inputs\n", - " is_nonparametric\n", - " is_parametric\n", - " is_uncalibrated\n", "```" ] }, @@ -1010,7 +1000,7 @@ "An operator is called _parametric_ if it depends on an independent parameter vector\n", "$\\bfmu\\in\\RR^{p}$, i.e., $\\Ophat_{\\ell} = \\Ophat_{\\ell}(\\qhat,\\u;\\bfmu)$\n", "When the parameter vector is fixed, a parametric operator becomes nonparametric.\n", - "In particular, a parametric operator's [`evaluate()`](ParametricOperatorTemplate.evaluate) method accepts a parameter vector $\\bfmu$ and returns an instance of a nonparametric operator whose type is given by the parametric operator's [`OperatorClass`](ParametricOperatorTemplate.OperatorClass) property." + "In particular, a parametric operator's [`evaluate()`](ParametricOperatorTemplate.evaluate) method accepts a parameter vector $\\bfmu$ and returns an instance of a nonparametric operator." 
] }, { From d24069442399731354aa9f699c3ab223d506d83e Mon Sep 17 00:00:00 2001 From: Shane Date: Thu, 29 Aug 2024 17:42:40 -0600 Subject: [PATCH 23/48] remove operators.has_inputs() from public, fix tests --- src/opinf/operators/_base.py | 9 ++++++--- tests/operators/test_base.py | 23 +++++++++++++++++++++-- tests/operators/test_interpolate.py | 19 +++++++++++++++++-- 3 files changed, 44 insertions(+), 7 deletions(-) diff --git a/src/opinf/operators/_base.py b/src/opinf/operators/_base.py index a83264d5..3295da01 100644 --- a/src/opinf/operators/_base.py +++ b/src/opinf/operators/_base.py @@ -3,7 +3,6 @@ __all__ = [ "InputMixin", - "has_inputs", "OperatorTemplate", "OpInfOperator", "ParametricOperatorTemplate", @@ -1224,8 +1223,12 @@ def parameter_dimension(self, p): Only allowed if :attr:`parameter_dimension` is currently ``None``. """ if self.__p is not None: - raise AttributeError("can't set property 'parameter_dimension'") - self.__p = int(p) + raise AttributeError( + "can't set property 'parameter_dimension' twice" + ) + if not isinstance(p, int) or p < 1: + raise ValueError("parameter_dimension must be a positive integer") + self.__p = p @property def shape(self) -> tuple: diff --git a/tests/operators/test_base.py b/tests/operators/test_base.py index a806bcf9..15056753 100644 --- a/tests/operators/test_base.py +++ b/tests/operators/test_base.py @@ -17,14 +17,15 @@ def test_has_inputs(): """Test operators._base.has_inputs().""" + has_inputs = opinf.operators._base.has_inputs class Dummy(_module.InputMixin): def input_dimension(self): return -1 op = Dummy() - assert opinf.operators.has_inputs(op) - assert not opinf.operators.has_inputs(5) + assert has_inputs(op) + assert not has_inputs(5) # Nonparametric operators ===================================================== @@ -802,6 +803,24 @@ def datablock(self, states, inputs=None): K = sum([Q.shape[-1] for Q in states]) return np.random.random(4, K) + def test_parameter_dimension(self): + """Test 
parameter_dimension and its setter.""" + op = self.Dummy() + + with pytest.raises(ValueError) as ex: + op.parameter_dimension = -40 + assert ex.value.args[0] == ( + "parameter_dimension must be a positive integer" + ) + + op.parameter_dimension = 100 + + with pytest.raises(AttributeError) as ex: + op.parameter_dimension = 10 + assert ex.value.args[0] == ( + "can't set property 'parameter_dimension' twice" + ) + def test_set_parameter_dimension_from_values(self): """Test _set_parameter_dimension_from_values().""" op = self.Dummy() diff --git a/tests/operators/test_interpolate.py b/tests/operators/test_interpolate.py index 56975d74..a2286136 100644 --- a/tests/operators/test_interpolate.py +++ b/tests/operators/test_interpolate.py @@ -9,6 +9,7 @@ import scipy.interpolate as interp import opinf +import opinf.operators._utils as oputils from . import _get_operator_entries @@ -213,6 +214,8 @@ def test_set_interpolator(self, s=4, p=2, r=5): op.set_interpolator(_DummyInterpolator2) assert isinstance(op.interpolator, _DummyInterpolator2) + assert isinstance(repr(op), str) + def test_eq(self, s=4, p=3, r=2): """Test _InterpOperator.__eq__().""" op1 = self.Dummy() @@ -265,6 +268,18 @@ def test_evaluate(self, s=3, p=5, r=4): assert op_evaluated.entries.shape == (r, r) assert np.all(op_evaluated.entries == entries[0]) + # Scalar parameters. 
+ op = self.Dummy( + mu[:, 0], + entries=entries, + InterpolatorClass=_DummyInterpolator, + fromblock=False, + ) + op_evaluated = op.evaluate(np.array([[mu[0, 0]]])) + assert isinstance(op_evaluated, self.Dummy._OperatorClass) + assert op_evaluated.entries.shape == (r, r) + assert np.all(op_evaluated.entries == entries[0]) + def test_galerkin(self, s=5, p=2, n=10, r=4): """Test _InterpOperator.galerkin().""" Vr = np.empty((n, r)) @@ -445,10 +460,10 @@ def test_1Doperators(r=10, m=3, s=5): interp.PchipInterpolator, ]: op = OpClass(params, InterpolatorClass=InterpolatorClass) - if opinf.operators.has_inputs(op): + if oputils.has_inputs(op): assert op.input_dimension is None op.set_entries(entries) - if opinf.operators.has_inputs(op): + if oputils.has_inputs(op): assert op.input_dimension == m op_evaluated = op.evaluate(mu_new) assert isinstance(op_evaluated, OpClass._OperatorClass) From 4ca47ab45b48a212e27b3259bc7ed02cf76c56da Mon Sep 17 00:00:00 2001 From: Shane Date: Thu, 29 Aug 2024 17:58:29 -0600 Subject: [PATCH 24/48] [Interpolated -> Interp]DerivativeEstimator, add PchipInterpolator option --- docs/source/api/ddt.ipynb | 6 +++--- docs/source/api/missing.rst | 2 +- src/opinf/ddt/_interpolation.py | 10 +++++++--- tests/ddt/test_interpolation.py | 6 +++--- 4 files changed, 14 insertions(+), 10 deletions(-) diff --git a/docs/source/api/ddt.ipynb b/docs/source/api/ddt.ipynb index 41a4bda4..7cc04834 100644 --- a/docs/source/api/ddt.ipynb +++ b/docs/source/api/ddt.ipynb @@ -25,7 +25,7 @@ " DerivativeEstimatorTemplate\n", " UniformFiniteDifferencer\n", " NonuniformFiniteDifferencer\n", - " InterpolationDerivativeEstimator\n", + " InterpDerivativeEstimator\n", "\n", "**Finite Difference Schemes for Uniformly Spaced Data**\n", "\n", @@ -352,7 +352,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The {class}`InterpolationDerivativeEstimator` interpolates the state data using classes from {mod}`scipy.interpolate` and evaluates the derivative of the 
interpolant." + "The {class}`InterpDerivativeEstimator` interpolates the state data using classes from {mod}`scipy.interpolate` and evaluates the derivative of the interpolant." ] }, { "cell_type": "code", "metadata": {}, "outputs": [], "source": [ - "estimator = opinf.ddt.InterpolationDerivativeEstimator(t, \"cubic\")\n", + "estimator = opinf.ddt.InterpDerivativeEstimator(t, \"pchip\")\n", "print(estimator)" ] }, diff --git a/docs/source/api/missing.rst b/docs/source/api/missing.rst index 764de50e..58dbbbf8 100644 --- a/docs/source/api/missing.rst +++ b/docs/source/api/missing.rst @@ -64,7 +64,7 @@ ddt.ipynb DerivativeEstimatorTemplate UniformFiniteDifferencer NonuniformFiniteDifferencer - InterpolationDerivativeEstimator + InterpDerivativeEstimator fwd1 fwd2 fwd3 diff --git a/src/opinf/ddt/_interpolation.py b/src/opinf/ddt/_interpolation.py index 75099e1c..85198e3d 100644 --- a/src/opinf/ddt/_interpolation.py +++ b/src/opinf/ddt/_interpolation.py @@ -2,7 +2,7 @@ """Time derivative estimators based on interpolation.""" __all__ = [ - "InterpolationDerivativeEstimator", + "InterpDerivativeEstimator", ] @@ -15,7 +15,7 @@ from ._base import DerivativeEstimatorTemplate -class InterpolationDerivativeEstimator(DerivativeEstimatorTemplate): +class InterpDerivativeEstimator(DerivativeEstimatorTemplate): r"""Time derivative estimator based on interpolation. For a set of (compressed) snapshots @@ -41,6 +41,9 @@ class InterpDerivativeEstimator(DerivativeEstimatorTemplate): This is a local interpolation method and is more resistant to outliers than :class:`scipy.interpolate.CubicSpline`. However, it is not recommended if the time points are not uniformly spaced. + * ``"pchip"``: use :class:`scipy.interpolate.PchipInterpolator`. + The interpolator preserves monotonicity in the interpolation data + and does not overshoot if the data is not smooth. 
new_time_domain : (k',) ndarray or None If given, evaluate the interpolator at these points to generate new state snapshots and corresponding time derivatives. If input snapshots @@ -54,8 +57,9 @@ class InterpolationDerivativeEstimator(DerivativeEstimatorTemplate): _interpolators = types.MappingProxyType( { - "cubic": interp.CubicSpline, "akima": interp.Akima1DInterpolator, + "cubic": interp.CubicSpline, + "pchip": interp.PchipInterpolator, } ) diff --git a/tests/ddt/test_interpolation.py b/tests/ddt/test_interpolation.py index c8165003..3cb1862b 100644 --- a/tests/ddt/test_interpolation.py +++ b/tests/ddt/test_interpolation.py @@ -11,10 +11,10 @@ _module = opinf.ddt._interpolation -class TestInterpolationDerivativeEstimator: - """Test opinf.ddt.InterpolationDerivativeEstimator.""" +class TestInterpDerivativeEstimator: + """Test opinf.ddt.InterpDerivativeEstimator.""" - Estimator = _module.InterpolationDerivativeEstimator + Estimator = _module.InterpDerivativeEstimator def test_init(self, k=100): """Test __init__() and properties.""" From 315f8c6950899a7d3c253812c2d758cdb85b5439 Mon Sep 17 00:00:00 2001 From: Shane Date: Fri, 30 Aug 2024 14:56:18 -0600 Subject: [PATCH 25/48] test coverage, bug fixes for ParametricModels --- src/opinf/models/mono/_parametric.py | 136 ++-- tests/models/mono/test_parametric.py | 900 ++++++++++++++++----------- tests/operators/test_affine.py | 6 + 3 files changed, 586 insertions(+), 456 deletions(-) diff --git a/src/opinf/models/mono/_parametric.py b/src/opinf/models/mono/_parametric.py index aa2da697..5fb62618 100644 --- a/src/opinf/models/mono/_parametric.py +++ b/src/opinf/models/mono/_parametric.py @@ -71,20 +71,6 @@ def _INPUT_LABEL(self): # pragma: no cover """String representation of input, e.g., "u(t)".""" return self._ModelClass._INPUT_LABEL - @property - def ModelClass(self): - """Nonparametric model class that represents this parametric model - when evaluated at a particular parameter value. 
- - Examples - -------- - >>> model = MyParametricModel(init_args).fit(fit_args) - >>> model_evaluated = model.evaluate(parameter_value) - >>> type(model_evaluated) is MyParametricModel.ModelClass - True - """ - return self._ModelClass - # Properties: operators --------------------------------------------------- _operator_abbreviations = dict() @@ -152,22 +138,20 @@ def operators(self, ops): "consider using an InterpolatedModel class", errors.OpInfWarning, ) - self.__p = self._check_parameter_dimension_consistency(self.operators) - - def _clear(self): - """Reset the entries of the non-intrusive operators and the - state, input, and parameter dimensions. - """ - _Model._clear(self) - self.__p = self._check_parameter_dimension_consistency(self.operators) + self._synchronize_parameter_dimensions() # Properties: dimensions -------------------------------------------------- - @staticmethod - def _check_parameter_dimension_consistency(ops): - """Ensure all operators have the same parameter dimension.""" + @property + def parameter_dimension(self): + r"""Dimension :math:`p` of a parameter vector :math:`\bfmu`.""" + return self.__p + + def _synchronize_parameter_dimensions(self, newdim=None): + """Synchronize the parameter_dimension attribute for each operator.""" + # Get any non-None parameter dimensions and check for uniqueness. ps = { op.parameter_dimension - for op in ops + for op in self.operators if oputils.is_parametric(op) and op.parameter_dimension is not None } if len(ps) > 1: @@ -175,44 +159,29 @@ def _check_parameter_dimension_consistency(ops): "operators not aligned " "(parameter_dimension must be the same for all operators)" ) - return ps.pop() if len(ps) == 1 else None + p = ps.pop() if len(ps) == 1 else None - @property - def parameter_dimension(self): - """Dimension :math:`p` of the parameters.""" - return self.__p + # Check operator parameter_dimension matches new parameter_dimension. 
+ if newdim is not None: + if p is None: + p = newdim + if p != newdim: + raise errors.DimensionalityError( + f"{p} = each operator.parameter_dimension != " + f"parameter dimension = {newdim}" + ) - @parameter_dimension.setter - def parameter_dimension(self, p): - """Set the parameter dimension. Not allowed if any - existing operators have ``parameter_dimension != p``. - """ - if self.operators is not None: + # Ensure all parametric operators have the same parameter_dimension. + if p is not None: for op in self.operators: - if oputils.is_nonparametric(op): - continue - if (opp := op.parameter_dimension) is not None and opp != p: - raise AttributeError( - "can't set attribute " - f"(existing operators have p = {self.__p})" - ) - self.__p = p - - def _set_parameter_dimension_from_values(self, parameters): - """Extract and save the dimension of the parameter space from a set of - parameter values. + if ( + oputils.is_parametric(op) + and op.parameter_dimension is None + ): + op.parameter_dimension = p - Parameters - ---------- - parameters : (s, p) or (p,) ndarray - Parameter value(s). - """ - if (dim := len(shape := np.shape(parameters))) == 1: - self.parameter_dimension = 1 - elif dim == 2: - self.parameter_dimension = shape[1] - else: - raise ValueError("parameter values must be scalars or 1D arrays") + # Set the model's parameter_dimension to the same as the operators. + self.__p = p # Fitting ----------------------------------------------------------------- def _process_fit_arguments(self, parameters, states, lhs, inputs): @@ -224,8 +193,15 @@ def _process_fit_arguments(self, parameters, states, lhs, inputs): self._clear() # Process parameters. 
- parameters = np.array(parameters) - self._set_parameter_dimension_from_values(parameters) + if (dim := len(shape := np.shape(parameters))) == 1: + p = 1 + elif dim == 2: + p = shape[1] + else: + raise errors.DimensionalityError( + "'parameters' must be a sequence of scalars or 1D arrays" + ) + self._synchronize_parameter_dimensions(p) n_datasets = len(parameters) def _check_valid_dimension0(dataset, label): @@ -300,9 +276,12 @@ def _assemble_data_matrix(self, parameters, states, inputs): for i in self._indices_of_operators_to_infer: op = self.operators[i] if not oputils.is_parametric(op): - blocks.append(np.hstack(states).T) + block = np.hstack( + [op.datablock(Q, U) for Q, U in zip(states, inputs)] + ) else: - blocks.append(op.datablock(parameters, states, inputs).T) + block = op.datablock(parameters, states, inputs) + blocks.append(block.T) return np.hstack(blocks) def _fit_solver(self, parameters, states, lhs, inputs=None): @@ -315,6 +294,11 @@ def _fit_solver(self, parameters, states, lhs, inputs=None): inputs_, ) = self._process_fit_arguments(parameters, states, lhs, inputs) + # Set training_parameters for interpolatory operators. + for op in self.operators: + if oputils.is_interpolated(op): + op.set_training_parameters(parameters_) + # Set up non-intrusive learning. D = self._assemble_data_matrix(parameters_, states_, inputs_) self.solver.fit(D, np.hstack(lhs_)) @@ -357,6 +341,7 @@ def refit(self): # Execute non-intrusive learning. self._extract_operators(self.solver.solve()) + return self def fit(self, parameters, states, lhs, inputs=None): r"""Learn the model operators from data. @@ -392,7 +377,7 @@ def fit(self, parameters, states, lhs, inputs=None): Parameters ---------- - parameters : list of s scalars or (p,) 1D ndarrays + parameters : list of s (floats or (p,) ndarrays) Parameter values for which training data are available. states : list of s (r, k) ndarrays Snapshot training data. 
Each array ``states[i]`` is the data @@ -424,8 +409,7 @@ def fit(self, parameters, states, lhs, inputs=None): return self self._fit_solver(parameters, states, lhs, inputs) - self.refit() - return self + return self.refit() # Parametric evaluation --------------------------------------------------- def evaluate(self, parameter): @@ -441,7 +425,7 @@ def evaluate(self, parameter): model : _NonparametricModel Nonparametric model of type ``ModelClass``. """ - return self.ModelClass( + return self._ModelClass( [ op.evaluate(parameter) if oputils.is_parametric(op) else op for op in self.operators @@ -1054,7 +1038,7 @@ class _InterpolatedModel(_ParametricModel): @property def _ModelFitClass(self): """Parent of ModelClass that has a callable ``fit()`` method.""" - return self.ModelClass.__bases__[-1] + return self._ModelClass.__bases__[-1] def __init__(self, operators, solver=None, InterpolatorClass=None): """Define the model structure and set the interpolator class.""" @@ -1084,15 +1068,9 @@ def _from_models(cls, parameters, models, InterpolatorClass: type = None): for one-dimensional parameters and :class:`scipy.interpolate.LinearNDInterpolator` otherwise. """ - # Check for consistency in the models. + # Check for consistency in the model operators. opclasses = [type(op) for op in models[0].operators] - ModelFitClass = cls._ModelClass.__bases__[-1] for mdl in models: - # Model class. - if not isinstance(mdl, ModelFitClass): - raise TypeError( - f"expected models of type '{ModelFitClass.__name__}'" - ) # Operator count and type. if len(mdl.operators) != len(opclasses): raise ValueError( @@ -1209,8 +1187,8 @@ def refit(self): # Solve each independent subproblem. # TODO: parallelize? - for model_i in self._submodels: - model_i.refit() + for submodel in self._submodels: + submodel.refit() # Interpolate the resulting operators. 
for ell, op in enumerate(self.operators): @@ -1220,8 +1198,6 @@ def refit(self): [mdl.operators[ell].entries for mdl in self._submodels] ) - # self.__InterpolatorClass = type(self.operators[0].interpolator) - return self # Model persistence ------------------------------------------------------- diff --git a/tests/models/mono/test_parametric.py b/tests/models/mono/test_parametric.py index 3cfbfe45..220838dd 100644 --- a/tests/models/mono/test_parametric.py +++ b/tests/models/mono/test_parametric.py @@ -10,224 +10,161 @@ import opinf -_module = opinf.models.mono._parametric -_applyvalue = 7 -_jacvalue = 11 -_predictvalue = 13 +_module = opinf.models -# Dummy classes =============================================================== -class DummyOpInfOperator(opinf.operators.OpInfOperator): - """Instantiable version of OpInfOperator.""" - - def apply(*args, **kwargs): # pragma: no cover - return _applyvalue - - def jacobian(*args, **kwargs): - return _jacvalue - - def datablock(*args, **kwargs): # pragma: no cover - pass - - def galerkin(*args, **kwargs): # pragma: no cover - pass - - def operator_dimension(*args, **kwargs): # pragma: no cover - pass - - -class DummyOpInfOperator2(DummyOpInfOperator): - """Another OpInfOperator (since duplicates not allowed).""" - - -class DummyParametricOperator(opinf.operators.ParametricOpInfOperator): - """Instantiable version of ParametricOpInfOperator.""" - - _OperatorClass = DummyOpInfOperator - - def __init__(self, entries=None): - super().__init__() - if entries is not None: - self.set_entries(entries) - - def set_entries(self, entries): - super().set_entries(entries) - - def operator_dimension(*args, **kwargs): # pragma: no cover - pass - - def datablock(*args, **kwargs): # pragma: no cover - pass - - def evaluate(self, *args, **kwargs): # pragma: no cover - return self._OperatorClass(self.entries) - - # def galerkin(*args, **kwargs): # pragma: no cover - # pass - - # def copy(*args, **kwargs): # pragma: no cover - # pass - - # 
def load(*args, **kwargs): # pragma: no cover - # pass - - # def save(*args, **kwargs): # pragma: no cover - # pass - - -class DummyParametricOperator2(DummyParametricOperator): - """Another ParametricOperator with a different OperatorClass.""" - - _OperatorClass = DummyOpInfOperator2 - - -class DummyInterpOperator(opinf.operators._interpolate._InterpOperator): - pass - - -class DummyNonparametricModel( - opinf.models.mono._nonparametric._NonparametricModel -): - """Instantiable version of _NonparametricModel.""" - - _LHS_ARGNAME = "mylhs" - - def predict(*args, **kwargs): - return _predictvalue - - -class DummyNonparametricModel2(DummyNonparametricModel): - pass - - -# Tests ======================================================================= -class TestParametricModel: +# Parametric models =========================================================== +class _TestParametricModel: """Test models.mono._parametric._ParametricModel.""" - class Dummy(_module._ParametricModel): - _ModelClass = DummyNonparametricModel + Model = NotImplemented + _iscontinuous = NotImplemented - def test_check_operator_types_unique(self): - """Test _ParametricModel._check_operator_types_unique().""" - operators = [DummyParametricOperator(), DummyOpInfOperator()] + def _get_single_operator(self, p=4): + """Get a single uncalibrated operator.""" + return opinf.operators.AffineLinearOperator(p) - with pytest.raises(ValueError) as ex: - self.Dummy._check_operator_types_unique(operators) - assert ex.value.args[0] == ( - "duplicate type in list of operators to infer" + def _get_parametric_operators(self, p, r, m=0): + """Get calibrated constant + linear + input affine operators.""" + op1 = opinf.operators.AffineConstantOperator( + coeffs=p, + entries=[np.random.random(r) for _ in range(p)], + ) + op2 = opinf.operators.AffineLinearOperator( + coeffs=p, + entries=[np.random.random((r, r)) for _ in range(p)], ) + operators = [op1, op2] + if m > 0: + op3 = opinf.operators.AffineInputOperator( + 
coeffs=p, + entries=[np.random.random((r, m)) for _ in range(p)], + ) + operators.append(op3) + return operators, np.random.random(p) - operators = [DummyParametricOperator(), DummyOpInfOperator()] + def test_check_operator_types_unique(self, p=2): + """Test _check_operator_types_unique().""" + operators = [ + opinf.operators.AffineLinearOperator(p), + opinf.operators.LinearOperator(), + ] with pytest.raises(ValueError) as ex: - self.Dummy._check_operator_types_unique(operators) + self.Model._check_operator_types_unique(operators) assert ex.value.args[0] == ( "duplicate type in list of operators to infer" ) - operators = [DummyParametricOperator(), DummyParametricOperator2()] - self.Dummy._check_operator_types_unique(operators) + operators[1] = opinf.operators.ConstantOperator() + self.Model._check_operator_types_unique(operators) - def test_set_operators(self): - """Test _ParametricModel.operators.fset().""" - operators = [DummyOpInfOperator()] + def test_set_operators(self, p=3): + """Test operators.fset().""" + operators = [opinf.operators.LinearOperator()] with pytest.warns(opinf.errors.OpInfWarning) as wn: - self.Dummy(operators) + self.Model(operators) assert wn[0].message.args[0] == ( "no parametric operators detected, " "consider using a nonparametric model class" ) - operators = [DummyInterpOperator()] - + operators = [opinf.operators.InterpLinearOperator()] with pytest.warns(opinf.errors.OpInfWarning) as wn: - self.Dummy(operators) + self.Model(operators) assert wn[0].message.args[0] == ( "all operators interpolatory, " "consider using an InterpolatedModel class" ) - operators = [DummyParametricOperator(), DummyParametricOperator2()] - model = self.Dummy(operators) - assert model.parameter_dimension is None + # Several operators provided. 
+ operators = [ + opinf.operators.ConstantOperator(), + opinf.operators.AffineLinearOperator(p), + ] + model = self.Model(operators) + assert len(model.operators) == 2 + for modelop, op in zip(model.operators, operators): + assert modelop is op - def test_get_operator_of_type(self): - """Test _ParametricModel._get_operator_of_type().""" - op1 = DummyParametricOperator() - op2 = DummyParametricOperator2() - model = self.Dummy([op1, op2]) + # Single operator provided + model = self.Model(operators[1]) + assert len(model.operators) == 1 + assert model.operators[0] is operators[1] + + def test_get_operator_of_type(self, p=2): + """Test _get_operator_of_type().""" + operators = [ + opinf.operators.ConstantOperator(), + opinf.operators.AffineLinearOperator(p), + ] + model = self.Model(operators) - op = model._get_operator_of_type(DummyOpInfOperator) - assert op is op1 + op = model._get_operator_of_type(opinf.operators.ConstantOperator) + assert op is operators[0] - op = model._get_operator_of_type(DummyOpInfOperator2) - assert op is op2 + op = model._get_operator_of_type(opinf.operators.LinearOperator) + assert op is operators[1] op = model._get_operator_of_type(float) assert op is None - def test_check_parameter_dimension_consistency(self, s=3): - """Test _check_parameter_dimension_consistency().""" - op = DummyOpInfOperator() - p = self.Dummy._check_parameter_dimension_consistency([op]) - assert p is None + def test_parameter_dimension(self, p=4): + """Test parameter_dimension and _synchronize_parameter_dimensions().""" + op0 = opinf.operators.ConstantOperator() + op1 = opinf.operators.AffineLinearOperator(np.sin, nterms=p) + model = self.Model([op0, op1]) + assert model.parameter_dimension is None - op1 = DummyParametricOperator() - op1._set_parameter_dimension_from_values(np.empty((s, 10))) - p = self.Dummy._check_parameter_dimension_consistency([op1]) - assert p == 10 + op1.parameter_dimension = p + model._synchronize_parameter_dimensions() + assert 
model.parameter_dimension == p - op2 = DummyParametricOperator2() - op2._set_parameter_dimension_from_values(np.empty((s, 20))) + op1 = opinf.operators.AffineLinearOperator(np.sin, nterms=p) + op2 = opinf.operators.AffineInputOperator(p) + assert op1.parameter_dimension is None + model = self.Model([op0, op1, op2]) + assert op1.parameter_dimension == p + assert model.parameter_dimension == p with pytest.raises(opinf.errors.DimensionalityError) as ex: - self.Dummy._check_parameter_dimension_consistency([op1, op2]) + model._synchronize_parameter_dimensions(p + 2) assert ex.value.args[0] == ( - "operators not aligned " - "(parameter_dimension must be the same for all operators)" + f"{p} = each operator.parameter_dimension " + f"!= parameter dimension = {p + 2}" ) - - def test_parameter_dimension(self, s=3, p=4): - """Test _ParametricModel.parameter_dimension.""" - op = DummyParametricOperator() - model = self.Dummy([op, DummyOpInfOperator2()]) - - model._set_parameter_dimension_from_values(np.empty((s, p))) assert model.parameter_dimension == p + assert op1.parameter_dimension == p + assert op2.parameter_dimension == p - model.parameter_dimension = 10 - assert model.parameter_dimension == 10 - - op._set_parameter_dimension_from_values(np.empty((s, 20))) - - with pytest.raises(AttributeError) as ex: - model.parameter_dimension = 15 + op1 = opinf.operators.AffineLinearOperator(p) + op2 = opinf.operators.AffineInputOperator(p + 1) + with pytest.raises(opinf.errors.DimensionalityError) as ex: + self.Model([op0, op1, op2]) assert ex.value.args[0] == ( - "can't set attribute (existing operators have p = 10)" + "operators not aligned " + "(parameter_dimension must be the same for all operators)" ) - model.parameter_dimension = 20 - assert model.parameter_dimension == 20 + def test_process_fit_arguments(self, s=10, p=2, m=4, r=3, k=10): + """Test _process_fit_arguments().""" + params = np.random.random((s, p)) + states = [np.ones((r, k)) for _ in range(s)] + lhs = 
[np.ones((r, k)) for _ in range(s)] - model = self.Dummy(DummyParametricOperator()) - model._set_parameter_dimension_from_values(np.empty(s)) - assert model.parameter_dimension == 1 + op = opinf.operators.AffineLinearOperator(p) + if isinstance(self, _TestInterpolatedModel): + op = opinf.operators.InterpLinearOperator() + model = self.Model([op]) - with pytest.raises(ValueError) as ex: - model._set_parameter_dimension_from_values(np.empty((s, s, s))) + # Invalid parameters. + with pytest.raises(opinf.errors.DimensionalityError) as ex: + model._process_fit_arguments(np.empty((3, 3, 3)), None, None, None) assert ex.value.args[0] == ( - "parameter values must be scalars or 1D arrays" + "'parameters' must be a sequence of scalars or 1D arrays" ) - def test_process_fit_arguments(self, s=5, p=2, m=4, r=3, k=10): - """Test _ParametricModel._process_fit_arguments().""" - op = DummyParametricOperator() - model = self.Dummy([op]) - params = np.empty((s, p)) - states = [np.empty((r, k)) for _ in range(s)] - lhs = [np.empty((r, k)) for _ in range(s)] - # Inconsistent number of parameter values. with pytest.raises(opinf.errors.DimensionalityError) as ex: model._process_fit_arguments(params, states[1:], None, None) @@ -246,7 +183,8 @@ def test_process_fit_arguments(self, s=5, p=2, m=4, r=3, k=10): with pytest.raises(opinf.errors.DimensionalityError) as ex: model._process_fit_arguments(params, states, lhs, None) assert ex.value.args[0] == ( - f"mylhs[1].shape[-1] = {k} != {k-1} = states[1].shape[-1]" + f"{model._LHS_ARGNAME}[1].shape[-1] = {k} " + f"!= {k-1} = states[1].shape[-1]" ) # Inconsistent input dimension. 
@@ -259,104 +197,418 @@ def test_process_fit_arguments(self, s=5, p=2, m=4, r=3, k=10): assert ex.value.args[0] == f"inputs[1].shape[0] = {m-1} != {m} = m" # Correct usage, partially intrusive - op2 = DummyParametricOperator2(np.random.random((r, r))) - model = self.Dummy([op, op2]) + op2 = opinf.operators.AffineConstantOperator( + p, + entries=[np.random.random(r) for _ in range(p)], + ) + if isinstance(self, _TestInterpolatedModel): + op2 = opinf.operators.InterpConstantOperator( + training_parameters=params, + entries=[np.zeros(r) for _ in range(s)], + ) + + model = self.Model([op, op2]) model._process_fit_arguments(params, states, lhs, None) model._has_inputs = True inputs[1] = np.empty((m, k)) model._process_fit_arguments(params, states, lhs, inputs) - def test_evaluate(self, r=4): - """Test _ParametricModel.evaluate().""" - op1 = DummyParametricOperator(np.random.random((r, r))) - op2 = DummyParametricOperator2(np.random.random((r, r))) - model = self.Dummy([op1, op2]) - model_evaluated = model.evaluate(None) - assert isinstance(model_evaluated, DummyNonparametricModel) - assert len(model_evaluated.operators) == 2 - assert isinstance(model_evaluated.operators[0], DummyOpInfOperator) - assert isinstance(model_evaluated.operators[1], DummyOpInfOperator2) - assert model_evaluated.state_dimension == r - - def test_rhs(self, r=2): - """Test _ParametricModel.rhs().""" - op1 = DummyParametricOperator(np.random.random((r, r))) - op2 = DummyParametricOperator2(np.random.random((r, r))) - model = self.Dummy([op1, op2]) - assert model.state_dimension == r - assert model.rhs(np.empty(r), None, None) == 2 * _applyvalue - - def test_jacobian(self, r=3): - """Test _ParametricModel.jacobian().""" - op1 = DummyParametricOperator(np.random.random((r, r))) - op2 = DummyParametricOperator2(np.random.random((r, r))) - model = self.Dummy([op1, op2]) - assert model.state_dimension == r - assert np.all(model.jacobian(np.empty(r), None, None) == 2 * _jacvalue) - - def 
test_predict(self, r=4): - """Test _ParametricModel.predict().""" - op1 = DummyParametricOperator(np.random.random((r, r))) - op2 = DummyParametricOperator2(np.random.random((r, r))) - model = self.Dummy([op1, op2]) - assert model.state_dimension == r - assert model.predict(None) == _predictvalue - - -class TestInterpolatedModel: - """Test models.mono._parametric._InterpolatedModel.""" + def test_fit(self, s=10, p=3, m=2, r=4, k=20): + """Test fit() and refit() (but not all intermediate steps).""" + params = np.random.random((s, p)) + states = [np.ones((r, k)) for _ in range(s)] + lhs = [np.ones((r, k)) for _ in range(s)] + inputs = [np.ones((m, k)) for _ in range(s)] - class Dummy(_module._InterpolatedModel): - _ModelClass = DummyNonparametricModel2 + operators, _ = self._get_parametric_operators(p, r, m) - def test_from_models(self, r=4): - """Test _InterpolatedModel._from_models().""" - mu = np.sort(np.random.random(2)) - model1 = DummyNonparametricModel( - [DummyOpInfOperator2(np.random.random(r))] + # Fully intrusive case. + model = self.Model(operators) + with pytest.warns(opinf.errors.OpInfWarning) as wn: + out = model.fit(params, states, lhs, inputs) + assert len(wn) == 1 + assert wn[0].message.args[0] == ( + "all operators initialized explicitly, nothing to learn" + ) + assert out is model + + with pytest.warns(opinf.errors.OpInfWarning) as wn: + out = model.refit() + assert len(wn) == 1 + assert wn[0].message.args[0] == ( + "all operators initialized explicitly, nothing to learn" + ) + assert out is model + + # One affine operator. + model = self.Model([opinf.operators.AffineLinearOperator(p)]) + out = model.fit(params, states, lhs) + assert out is model + for op in model.operators: + assert op.parameter_dimension == p + assert op.entries is not None + + # Multiple affine operators. 
+ model = self.Model( + [ + opinf.operators.AffineLinearOperator(p), + opinf.operators.AffineInputOperator(p), + ] ) + out = model.fit(params, states, lhs, inputs) # BUG + assert out is model + for op in model.operators: + assert op.parameter_dimension == p + assert op.entries is not None + + # Mix of affine and interpolatory operators. + model = self.Model( + [ + opinf.operators.AffineLinearOperator(p), + opinf.operators.InterpInputOperator(), + ] + ) + out = model.fit(params, states, lhs, inputs) + assert out is model + for op in model.operators: + assert op.parameter_dimension == p + assert op.entries is not None + + # Mix of nonparametric, affine, and interpolatory operators. + model = self.Model( + [ + opinf.operators.ConstantOperator(), + opinf.operators.AffineLinearOperator(p), + opinf.operators.InterpInputOperator(), + ] + ) + out = model.fit(params, states, lhs, inputs) + assert out is model + assert model.operators[0].entries is not None + for op in model.operators[1:]: + assert op.parameter_dimension == p + assert op.entries is not None + + def test_evaluate(self, p=8, r=4, m=2): + """Test evaluate().""" + operators, testparam = self._get_parametric_operators(p, r, m) + + # Some operators not populated. + model = self.Model([self._get_single_operator()]) + with pytest.raises(AttributeError): + model.evaluate(testparam) + + # Test with and without input operators. 
+ for ops in operators[:-1], operators: + model = self.Model(ops) + model_evaluated = model.evaluate(testparam) + assert isinstance(model_evaluated, self.Model._ModelClass) + assert len(model_evaluated.operators) == len(model.operators) + assert model_evaluated.state_dimension == r + for pop, op in zip(model.operators, model_evaluated.operators): + pop_evaluated = pop.evaluate(testparam) + assert isinstance(op, pop_evaluated.__class__) + assert np.array_equal(op.entries, pop_evaluated.entries) + assert model_evaluated.input_dimension == model.input_dimension + + def test_rhs(self, p=7, r=2, m=4): + """Lightly test rhs().""" + operators, testparam = self._get_parametric_operators(p, r, m) + teststate = np.random.random(r) + args = [testparam, teststate] + if self._iscontinuous: + args.insert(0, np.random.random()) # time argument + + def testinput(t): + return np.random.random(m) + + else: + testinput = np.random.random(m) + + # Some operators not populated. + model = self.Model([self._get_single_operator()]) + with pytest.raises(AttributeError): + model.rhs(*args) + + # Without inputs. + model = self.Model(operators[:-1]) + out = model.rhs(*args) + assert isinstance(out, np.ndarray) + assert out.shape == (r,) + + # With inputs. + args.append(testinput) + model = self.Model(operators) + out = model.rhs(*args) + assert isinstance(out, np.ndarray) + assert out.shape == (r,) + + def test_jacobian(self, p=9, r=3, m=2): + """Lightly test jacobian().""" + operators, testparam = self._get_parametric_operators(p, r, m) + teststate = np.random.random(r) + args = [testparam, teststate] + if self._iscontinuous: + args.insert(0, np.random.random()) # time argument + + def testinput(t): + return np.random.random(m) + + else: + testinput = np.random.random(m) + + # Some operators not populated. + model = self.Model([self._get_single_operator()]) + with pytest.raises(AttributeError): + model.jacobian(*args) + + # Without inputs. 
+ model = self.Model(operators[:-1]) + out = model.jacobian(*args) + assert isinstance(out, np.ndarray) + assert out.shape == (r, r) + + # With inputs. + args.append(testinput) + model = self.Model(operators) + out = model.jacobian(*args) + assert isinstance(out, np.ndarray) + assert out.shape == (r, r) + + +class TestParametricDiscreteModel(_TestParametricModel): + """Test opinf.models.ParametricDiscreteModel.""" + + Model = _module.ParametricDiscreteModel + _iscontinuous = False + + def test_predict(self, p=5, r=3, m=2, niters=10): + """Lightly test InterpolatedDiscreteModel.predict().""" + testparam = np.random.random(p) + state0 = np.random.random(r) + + model = self.Model( + opinf.operators.AffineLinearOperator( + p, + entries=np.zeros((p, r, r)), + ) + ) + out = model.predict(testparam, state0, niters) + assert isinstance(out, np.ndarray) + assert out.shape == (r, niters) + assert np.all(out[:, 0] == state0) + assert np.all(out[:, 1:] == 0) + + inputs = np.random.random((m, niters)) + model = self.Model( + opinf.operators.AffineInputOperator( + p, + entries=np.zeros((p, r, m)), + ) + ) + out = model.predict(testparam, state0, niters, inputs) + assert isinstance(out, np.ndarray) + assert out.shape == (r, niters) + assert np.all(out[:, 0] == state0) + assert np.all(out[:, 1:] == 0) + + +class TestParametricContinuousModel(_TestParametricModel): + """Test opinf.models.ParametricContinuousModel.""" + + Model = _module.ParametricContinuousModel + _iscontinuous = True + + def test_predict(self, p=4, r=4, m=2, k=40): + """Lightly test predict().""" + testparam = np.random.random(p) + state0 = np.random.random(r) + t = np.linspace(0, 1, k) + + model = self.Model( + opinf.operators.AffineLinearOperator( + p, + entries=np.zeros((p, r, r)), + ) + ) + out = model.predict(testparam, state0, t) + assert isinstance(out, np.ndarray) + assert out.shape == (r, k) + for j in range(k): + assert np.allclose(out[:, j], state0) + + def input_func(t): + return np.random.random(m) + 
+
+        model = self.Model(
+            opinf.operators.AffineInputOperator(
+                p,
+                entries=np.zeros((p, r, m)),
+            )
+        )
+        out = model.predict(testparam, state0, t, input_func)
+        assert isinstance(out, np.ndarray)
+        assert out.shape == (r, k)
+        for j in range(k):
+            assert np.allclose(out[:, j], state0)
+
+
+# Interpolatory models ========================================================
+class _TestInterpolatedModel(_TestParametricModel):
+    """Test models.mono._parametric._InterpolatedModel."""
+
+    def _get_single_operator(self):
+        """Get a single uncalibrated operator."""
+        return opinf.operators.InterpLinearOperator()
+
+    def _get_parametric_operators(self, s, r, m=0):
+        """Get calibrated constant + linear + input affine operators."""
+        params = np.sort(np.random.random(s))
+        op1 = opinf.operators.InterpConstantOperator(
+            params,
+            entries=[np.random.random(r) for _ in range(s)],
+        )
+        op2 = opinf.operators.InterpLinearOperator(
+            params,
+            entries=[np.random.random((r, r)) for _ in range(s)],
+        )
+        operators = [op1, op2]
+        if m > 0:
+            op3 = opinf.operators.InterpInputOperator(
+                params,
+                entries=[np.random.random((r, m)) for _ in range(s)],
+            )
+            operators.append(op3)
+        return operators, (params[-1] + params[0]) / 2
+
+    def test_set_operators(self):
+        """Test operators.fset()."""
+        operators = [opinf.operators.LinearOperator()]
 
-        # Wrong type of model.
-        model2 = self.Dummy([opinf.operators.InterpCubicOperator()])
         with pytest.raises(TypeError) as ex:
-            self.Dummy._from_models(mu, [model2, model1])
+            self.Model(operators)
+        assert ex.value.args[0] == "invalid operator of type 'LinearOperator'"
+
+        # Several operators provided.
+ operators = [ + opinf.operators.InterpConstantOperator(), + opinf.operators.InterpLinearOperator(), + ] + model = self.Model(operators) + assert len(model.operators) == 2 + for modelop, op in zip(model.operators, operators): + assert modelop is op + + # Single operator provided + model = self.Model(operators[1]) + assert len(model.operators) == 1 + assert model.operators[0] is operators[1] + + def test_get_operator_of_type(self): + """Test _get_operator_of_type().""" + operators = [ + opinf.operators.InterpConstantOperator(), + opinf.operators.InterpLinearOperator(), + ] + model = self.Model(operators) + + op = model._get_operator_of_type(opinf.operators.ConstantOperator) + assert op is operators[0] + + op = model._get_operator_of_type(opinf.operators.LinearOperator) + assert op is operators[1] + + op = model._get_operator_of_type(float) + assert op is None + + def test_parameter_dimension(self, p=4): + """Test parameter_dimension and _synchronize_parameter_dimensions().""" + op1 = opinf.operators.InterpLinearOperator() + assert op1.parameter_dimension is None + model = self.Model([op1]) + assert model.parameter_dimension is None + + op1.parameter_dimension = p + model._synchronize_parameter_dimensions() + assert model.parameter_dimension == p + + op1 = opinf.operators.InterpLinearOperator() + op2 = opinf.operators.InterpInputOperator() + op2.parameter_dimension = p + assert op1.parameter_dimension is None + model = self.Model([op1, op2]) + assert op1.parameter_dimension == p + assert op2.parameter_dimension == p + assert model.parameter_dimension == p + + with pytest.raises(opinf.errors.DimensionalityError) as ex: + model._synchronize_parameter_dimensions(p + 2) assert ex.value.args[0] == ( - "expected models of type 'DummyNonparametricModel'" + f"{p} = each operator.parameter_dimension " + f"!= parameter dimension = {p + 2}" ) + assert model.parameter_dimension == p + assert op1.parameter_dimension == p + assert op2.parameter_dimension == p - # Inconsistent 
number of operators. - model2 = DummyNonparametricModel( - [DummyOpInfOperator(), DummyOpInfOperator2()] + op1 = opinf.operators.InterpLinearOperator() + op2 = opinf.operators.InterpInputOperator() + op1.parameter_dimension = p + op2.parameter_dimension = p + 1 + with pytest.raises(opinf.errors.DimensionalityError) as ex: + self.Model([op1, op2]) + assert ex.value.args[0] == ( + "operators not aligned " + "(parameter_dimension must be the same for all operators)" ) + + def test_from_models(self, s=10, r=4, m=2): + """Test _InterpolatedModel._from_models().""" + operators = [ + [ + opinf.operators.ConstantOperator(np.random.random(r)), + opinf.operators.LinearOperator(np.random.random((r, r))), + opinf.operators.InputOperator(np.random.random((r, m))), + ] + for _ in range(s) + ] + mu = np.sort(np.random.random(s)) + + # Inconsistent number of operators. + model1 = self.Model._ModelClass(operators[0]) + model2 = self.Model._ModelClass(operators[1][:-1]) with pytest.raises(ValueError) as ex: - self.Dummy._from_models(mu, [model1, model2]) + self.Model._from_models(mu, [model1, model2]) assert ex.value.args[0] == ( "models not aligned (inconsistent number of operators)" ) # Inconsistent operator types. 
- model2 = DummyNonparametricModel( - [DummyOpInfOperator(np.random.random(r))] - ) + model1 = self.Model._ModelClass(operators[0][1:]) + model2 = self.Model._ModelClass(operators[1][:-1]) with pytest.raises(ValueError) as ex: - self.Dummy._from_models(mu, [model1, model2]) + self.Model._from_models(mu, [model1, model2]) assert ex.value.args[0] == ( "models not aligned (inconsistent operator types)" ) # Correct usage - OpClass = opinf.operators.ConstantOperator - model1 = DummyNonparametricModel([OpClass(np.random.random(r))]) - model2 = DummyNonparametricModel([OpClass(np.random.random(r))]) - model = self.Dummy._from_models(mu, [model1, model2]) - assert isinstance(model, self.Dummy) - assert len(model.operators) == 1 + models = [self.Model._ModelClass(ops) for ops in operators] + model = self.Model._from_models(mu, models) + assert isinstance(model, self.Model) + assert len(model.operators) == 3 assert isinstance( model.operators[0], opinf.operators.InterpConstantOperator, ) + # Check the interpolation is as expected. 
+ testparam = np.random.random() + IClass = type(model.operators[0].interpolator) + c00 = IClass(mu, [ops[0][0] for ops in operators]) + assert c00(testparam) == model.evaluate(testparam).operators[0][0] + def test_set_interpolator(self, s=10, p=2, r=2): """Test _InterpolatedModel._set_interpolator().""" @@ -374,11 +626,11 @@ def test_set_interpolator(self, s=10, p=2, r=2): ), ] - model = self.Dummy(operators) + model = self.Model(operators) for op in operators: assert isinstance(op.interpolator, interp.NearestNDInterpolator) - model = self.Dummy( + model = self.Model( operators, InterpolatorClass=interp.LinearNDInterpolator, ) @@ -403,7 +655,7 @@ def test_fit_solver(self, s=10, r=3, k=20): states = np.random.random((s, r, k)) lhs = np.random.random((s, r, k)) - model = self.Dummy(operators) + model = self.Model(operators) model._fit_solver(params, states, lhs) assert hasattr(model, "solvers") @@ -414,7 +666,10 @@ def test_fit_solver(self, s=10, r=3, k=20): assert hasattr(model, "_submodels") assert len(model._submodels) == s for mdl in model._submodels: - assert isinstance(mdl, DummyNonparametricModel) + assert isinstance( + mdl, + opinf.models.mono._nonparametric._NonparametricModel, + ) assert len(mdl.operators) == len(operators) for op in mdl.operators: assert op.entries is None @@ -433,7 +688,7 @@ def test_refit(self, s=10, r=3, k=15): states = np.random.random((s, r, k)) lhs = np.random.random((s, r, k)) - model = self.Dummy(operators) + model = self.Model(operators) with pytest.raises(RuntimeError) as ex: model.refit() @@ -445,7 +700,10 @@ def test_refit(self, s=10, r=3, k=15): assert hasattr(model, "_submodels") assert len(model._submodels) == s for mdl in model._submodels: - assert isinstance(mdl, DummyNonparametricModel) + assert isinstance( + mdl, + opinf.models.mono._nonparametric._NonparametricModel, + ) assert len(mdl.operators) == len(operators) for op in mdl.operators: assert op.entries is not None @@ -455,7 +713,7 @@ def test_save(self, 
target="_interpmodelsavetest.h5"): if os.path.isfile(target): os.remove(target) - model = self.Dummy( + model = self.Model( [ opinf.operators.InterpConstantOperator(), opinf.operators.InterpLinearOperator(), @@ -490,26 +748,26 @@ def test_load(self, target="_interpmodelloadtest.h5"): opinf.operators.InterpConstantOperator(), opinf.operators.InterpLinearOperator(), ] - model = self.Dummy(operators, InterpolatorClass=float) + model = self.Model(operators, InterpolatorClass=float) with pytest.warns(opinf.errors.OpInfWarning): model.save(target) with pytest.raises(opinf.errors.LoadfileFormatError) as ex: - self.Dummy.load(target) + self.Model.load(target) assert ex.value.args[0] == ( f"unknown InterpolatorClass 'float', call load({target}, float)" ) - self.Dummy.load(target, float) + self.Model.load(target, float) - model1 = self.Dummy( + model1 = self.Model( operators, InterpolatorClass=interp.NearestNDInterpolator, ) model1.save(target, overwrite=True) with pytest.warns(opinf.errors.OpInfWarning) as wn: - model2 = self.Dummy.load(target, float) + model2 = self.Model.load(target, float) assert wn[0].message.args[0] == ( "InterpolatorClass=float does not match loadfile " "InterpolatorClass 'NearestNDInterpolator'" @@ -517,10 +775,10 @@ def test_load(self, target="_interpmodelloadtest.h5"): model2.set_interpolator(interp.NearestNDInterpolator) assert model2 == model1 - model2 = self.Dummy.load(target) + model2 = self.Model.load(target) assert model2 == model1 - model1 = self.Dummy( + model1 = self.Model( "AB", InterpolatorClass=interp.NearestNDInterpolator, ) @@ -528,7 +786,7 @@ def test_load(self, target="_interpmodelloadtest.h5"): model1.input_dimension = 4 model1.save(target, overwrite=True) - model2 = self.Dummy.load(target) + model2 = self.Model.load(target) assert model2 == model1 os.remove(target) @@ -536,7 +794,7 @@ def test_load(self, target="_interpmodelloadtest.h5"): def test_copy(self, s=10, p=2, r=3): """Test _InterpolatedModel._copy().""" - model1 = 
self.Dummy( + model1 = self.Model( [ opinf.operators.InterpConstantOperator(), opinf.operators.InterpLinearOperator(), @@ -544,7 +802,7 @@ def test_copy(self, s=10, p=2, r=3): ) mu = np.random.random((s, p)) - model2 = self.Dummy( + model2 = self.Model( [ opinf.operators.InterpConstantOperator( mu, entries=np.random.random((s, r)) @@ -558,15 +816,16 @@ def test_copy(self, s=10, p=2, r=3): for model in (model1, model2): model_copied = model.copy() - assert isinstance(model_copied, self.Dummy) + assert isinstance(model_copied, self.Model) assert model_copied is not model assert model_copied == model -class TestInterpolatedDiscreteModel: +class TestInterpolatedDiscreteModel(_TestInterpolatedModel): """Test models.mono._parametric.InterpolatedDiscreteModel.""" - ModelClass = _module.InterpolatedDiscreteModel + Model = _module.InterpolatedDiscreteModel + _iscontinuous = False def test_fit(self, s=10, p=2, r=3, m=2, k=20): """Lightly test InterpolatedDiscreteModel.fit().""" @@ -575,65 +834,19 @@ def test_fit(self, s=10, p=2, r=3, m=2, k=20): nextstates = np.random.random((s, r, k)) inputs = np.random.random((s, m, k)) - model = self.ModelClass("A") + model = self.Model("A") out = model.fit(params, states) assert out is model - model = self.ModelClass("AB") + model = self.Model("AB") out = model.fit(params, states, nextstates, inputs) assert out is model - def test_rhs(self, s=10, r=3, m=2): - """Lightly test InterpolatedDiscreteModel.rhs().""" - params = np.sort(np.random.random(s)) - state = np.random.random(r) - model = self.ModelClass( - opinf.operators.InterpLinearOperator( - params, np.random.random((s, r, r)) - ) - ) - out = model.rhs(params[2], state) - assert isinstance(out, np.ndarray) - assert out.shape == (r,) - - input_ = np.random.random(m) - model = self.ModelClass( - opinf.operators.InterpInputOperator( - params, np.random.random((s, r, m)) - ) - ) - out = model.rhs(params[-2], state, input_) - assert isinstance(out, np.ndarray) - assert out.shape == (r,) 
- - def test_jacobian(self, s=9, r=2, m=3): - """Lightly test InterpolatedDiscreteModel.jacobian().""" - params = np.sort(np.random.random(s)) - state = np.random.random(r) - model = self.ModelClass( - opinf.operators.InterpLinearOperator( - params, np.random.random((s, r, r)) - ) - ) - out = model.jacobian(params[2], state) - assert isinstance(out, np.ndarray) - assert out.shape == (r, r) - - input_ = np.random.random(m) - model = self.ModelClass( - opinf.operators.InterpInputOperator( - params, np.random.random((s, r, m)) - ) - ) - out = model.jacobian(params[-2], state, input_) - assert isinstance(out, np.ndarray) - assert out.shape == (r, r) - def test_predict(self, s=11, r=4, m=2, niters=10): """Lightly test InterpolatedDiscreteModel.predict().""" params = np.sort(np.random.random(s)) state0 = np.random.random(r) - model = self.ModelClass( + model = self.Model( opinf.operators.InterpLinearOperator(params, np.zeros((s, r, r))) ) out = model.predict(params[2], state0, niters) @@ -643,7 +856,7 @@ def test_predict(self, s=11, r=4, m=2, niters=10): assert np.all(out[:, 1:] == 0) inputs = np.random.random((m, niters)) - model = self.ModelClass( + model = self.Model( opinf.operators.InterpInputOperator(params, np.zeros((s, r, m))) ) out = model.predict(params[-2], state0, niters, inputs) @@ -653,10 +866,11 @@ def test_predict(self, s=11, r=4, m=2, niters=10): assert np.all(out[:, 1:] == 0) -class TestInterpolatedContinuousModel: +class TestInterpolatedContinuousModel(_TestInterpolatedModel): """Test models.mono._parametric.InterpolatedContinuousModel.""" - ModelClass = _module.InterpolatedContinuousModel + Model = _module.InterpolatedContinuousModel + _iscontinuous = True def test_fit(self, s=10, p=2, r=3, m=2, k=20): """Test InterpolatedContinuousModel.fit().""" @@ -665,70 +879,20 @@ def test_fit(self, s=10, p=2, r=3, m=2, k=20): ddts = np.random.random((s, r, k)) inputs = np.random.random((s, m, k)) - model = self.ModelClass("A") + model = self.Model("A") out = 
model.fit(params, states, ddts) assert out is model - model = self.ModelClass("AB") + model = self.Model("AB") out = model.fit(params, states, ddts, inputs) assert out is model - def test_rhs(self, s=10, r=3, m=2): - """Lightly test InterpolatedContinuousModel.rhs().""" - params = np.sort(np.random.random(s)) - state = np.random.random(r) - model = self.ModelClass( - opinf.operators.InterpLinearOperator( - params, np.random.random((s, r, r)) - ) - ) - out = model.rhs(None, params[2], state) - assert isinstance(out, np.ndarray) - assert out.shape == (r,) - - def input_func(t): - return np.random.random(m) - - model = self.ModelClass( - opinf.operators.InterpInputOperator( - params, np.random.random((s, r, m)) - ) - ) - out = model.rhs(np.pi, params[-2], state, input_func) - assert isinstance(out, np.ndarray) - assert out.shape == (r,) - - def test_jacobian(self, s=9, r=2, m=3): - """Lightly test InterpolatedContinuousModel.jacobian().""" - params = np.sort(np.random.random(s)) - state = np.random.random(r) - model = self.ModelClass( - opinf.operators.InterpLinearOperator( - params, np.random.random((s, r, r)) - ) - ) - out = model.jacobian(None, params[2], state) - assert isinstance(out, np.ndarray) - assert out.shape == (r, r) - - def input_func(t): - return np.random.random(m) - - model = self.ModelClass( - opinf.operators.InterpInputOperator( - params, np.random.random((s, r, m)) - ) - ) - out = model.jacobian(np.pi, params[-2], state, input_func) - assert isinstance(out, np.ndarray) - assert out.shape == (r, r) - def test_predict(self, s=11, r=4, m=2, k=40): """Lightly test InterpolatedContinuousModel.predict().""" params = np.sort(np.random.random(s)) state0 = np.random.random(r) t = np.linspace(0, 1, k) - model = self.ModelClass( + model = self.Model( opinf.operators.InterpLinearOperator(params, np.zeros((s, r, r))) ) out = model.predict(params[2], state0, t) @@ -740,7 +904,7 @@ def test_predict(self, s=11, r=4, m=2, k=40): def input_func(t): return 
np.random.random(m) - model = self.ModelClass( + model = self.Model( opinf.operators.InterpInputOperator(params, np.zeros((s, r, m))) ) out = model.predict(params[-2], state0, t, input_func) @@ -748,19 +912,3 @@ def input_func(t): assert out.shape == (r, k) for j in range(k): assert np.allclose(out[:, j], state0) - - -def test_publics(): - """Ensure all public ParametricModel classes can be instantiated.""" - operators = [opinf.operators.InterpConstantOperator()] - for ModelClassName in _module.__all__: - ModelClass = getattr(_module, ModelClassName) - if not isinstance(ModelClass, type) or not issubclass( - ModelClass, _module._ParametricModel - ): # pragma: no cover - continue - model = ModelClass(operators) - assert issubclass( - model.ModelClass, - opinf.models.mono._nonparametric._NonparametricModel, - ) diff --git a/tests/operators/test_affine.py b/tests/operators/test_affine.py index 0ce8f6dc..87284d25 100644 --- a/tests/operators/test_affine.py +++ b/tests/operators/test_affine.py @@ -192,6 +192,12 @@ def test_opinf(self, s=10, k=15, r=11, m=3): assert block.shape[0] == dim assert block.shape[1] == s * k + # One-dimensional inputs. 
+ block = op.datablock(parameters, states, np.random.random((s, k))) + dim = op.operator_dimension(s, r, 1) + assert block.shape[0] == dim + assert block.shape[1] == s * k + def test_copysaveload(self, r=10, m=2, target="_affinesavetest.h5"): """Test copy(), save(), and load().""" ncoeffs = len(self.thetas1) From 66eccaa647f6197fa9de154c4759e735803f9a4f Mon Sep 17 00:00:00 2001 From: Shane Date: Fri, 30 Aug 2024 15:07:55 -0600 Subject: [PATCH 26/48] fix input datasets counting bug --- src/opinf/models/mono/_parametric.py | 2 +- tests/models/mono/test_parametric.py | 27 ++++++++++++++++++++++----- 2 files changed, 23 insertions(+), 6 deletions(-) diff --git a/src/opinf/models/mono/_parametric.py b/src/opinf/models/mono/_parametric.py index 5fb62618..915bac39 100644 --- a/src/opinf/models/mono/_parametric.py +++ b/src/opinf/models/mono/_parametric.py @@ -248,7 +248,7 @@ def _check_valid_dimension2(dataset, label): inputs = [np.atleast_2d(U) for U in inputs] if not self.input_dimension: self.input_dimension = inputs[0].shape[0] - _check_valid_dimension0(lhs, self._LHS_ARGNAME) + _check_valid_dimension0(inputs, "inputs") for i, subset in enumerate(inputs): if (dim := subset.shape[0]) != (m := self.input_dimension): raise errors.DimensionalityError( diff --git a/tests/models/mono/test_parametric.py b/tests/models/mono/test_parametric.py index 220838dd..5ab11a7a 100644 --- a/tests/models/mono/test_parametric.py +++ b/tests/models/mono/test_parametric.py @@ -152,10 +152,9 @@ def test_process_fit_arguments(self, s=10, p=2, m=4, r=3, k=10): params = np.random.random((s, p)) states = [np.ones((r, k)) for _ in range(s)] lhs = [np.ones((r, k)) for _ in range(s)] + inputs = [np.empty((m, k)) for _ in range(s)] - op = opinf.operators.AffineLinearOperator(p) - if isinstance(self, _TestInterpolatedModel): - op = opinf.operators.InterpLinearOperator() + op = self._get_single_operator() model = self.Model([op]) # Invalid parameters. 
@@ -165,12 +164,31 @@ def test_process_fit_arguments(self, s=10, p=2, m=4, r=3, k=10): "'parameters' must be a sequence of scalars or 1D arrays" ) - # Inconsistent number of parameter values. + # Inconsistent number of datasets across arguments. with pytest.raises(opinf.errors.DimensionalityError) as ex: model._process_fit_arguments(params, states[1:], None, None) assert ex.value.args[0] == ( f"len(states) = {s-1} != {s} = len(parameters)" ) + with pytest.raises(opinf.errors.DimensionalityError) as ex: + model._process_fit_arguments(params, states, lhs[:-1], None) + assert ex.value.args[0] == ( + f"len({self.Model._ModelClass._LHS_ARGNAME}) = {s-1} " + f"!= {s} = len(parameters)" + ) + model._has_inputs = True + with pytest.raises(opinf.errors.DimensionalityError) as ex: + model._process_fit_arguments(params, states, lhs, inputs[1:]) + assert ex.value.args[0] == ( + f"len(inputs) = {s-1} != {s} = len(parameters)" + ) + inputs1D = np.empty((s - 1, k)) + with pytest.raises(opinf.errors.DimensionalityError) as ex: + model._process_fit_arguments(params, states, lhs, inputs1D) + assert ex.value.args[0] == ( + f"len(inputs) = {s-1} != {s} = len(parameters)" + ) + model._has_inputs = False # Inconsistent state dimension. states[1] = np.empty((r - 1, k)) @@ -189,7 +207,6 @@ def test_process_fit_arguments(self, s=10, p=2, m=4, r=3, k=10): # Inconsistent input dimension. 
states[1] = np.empty((r, k)) - inputs = [np.empty((m, k)) for _ in range(s)] inputs[1] = np.empty((m - 1, k)) model._has_inputs = True with pytest.raises(opinf.errors.DimensionalityError) as ex: From 54a5ca6a5a3d2e4547603cb00ba2d2ec01689b19 Mon Sep 17 00:00:00 2001 From: Shane Date: Mon, 12 Aug 2024 14:30:45 -0600 Subject: [PATCH 27/48] start tests for ROM class --- src/opinf/roms/_nonparametric.py | 69 +++++++++++++++-- tests/roms/__init__.py | 0 tests/roms/test_nonparametric.py | 123 +++++++++++++++++++++++++++++++ 3 files changed, 187 insertions(+), 5 deletions(-) create mode 100644 tests/roms/__init__.py create mode 100644 tests/roms/test_nonparametric.py diff --git a/src/opinf/roms/_nonparametric.py b/src/opinf/roms/_nonparametric.py index da9f271b..d320161a 100644 --- a/src/opinf/roms/_nonparametric.py +++ b/src/opinf/roms/_nonparametric.py @@ -7,17 +7,19 @@ import warnings -from .. import errors, models, utils +from .. import errors, lift, pre, basis as _basis, ddt, models, utils class ROM: - """Nonparametric reduced-order model class. + r"""Nonparametric reduced-order model. This class connects classes from the various submodules to form a complete reduced-order modeling workflow. - High-dimensional data -> transformed / preprocessed data -> compressed data - -> low-dimensional model. + High-dimensional data + :math:`\to` transformed / preprocessed data + :math:`\to` compressed data + :math:`\to` low-dimensional model. Parameters ---------- @@ -44,11 +46,68 @@ def __init__( ddt_estimator=None, ): """Store each argument as an attribute.""" - # TODO: verify each argument here. + # Verify and store the model. + if not isinstance( + model, + (models.ContinuousModel, models.DiscreteModel), + ): + raise TypeError("invalid model type") self.__model = model + + # Verify and store the lifter. 
+ if not (lifter is None or isinstance(lifter, lift.LifterTemplate)): + warnings.warn( + "lifter not derived from LifterTemplate, " + "unexpected behavior may occur", + errors.OpInfWarning, + ) self.__lifter = lifter + + # Verify and store the transformer. + if not ( + transformer is None + or isinstance( + transformer, + (pre.TransformerTemplate, pre.TransformerMulti), + ) + ): + warnings.warn( + "transformer not derived from TransformerTemplate " + "or TransformerMulti, unexpected behavior may occur", + errors.OpInfWarning, + ) self.__transformer = transformer + + # Verify and store the basis. + if not ( + basis is None + or isinstance( + basis, + (_basis.BasisTemplate, _basis.BasisMulti), + ) + ): + warnings.warn( + "basis not derived from BasisTemplate or BasisMulti, " + "unexpected behavior may occur", + errors.OpInfWarning, + ) self.__basis = basis + + # Verify and store the ddt estimator. + if not ( + ddt_estimator is None + or isinstance(ddt_estimator, ddt.DerivativeEstimatorTemplate) + ): + warnings.warn( + "ddt_estimator not derived from DerivativeEstimatorTemplate, " + "unexpected behavior may occur", + errors.OpInfWarning, + ) + if ddt_estimator is not None and not self.iscontinuous: + warnings.warn( + "ddt_estimator ignored for discrete models", + errors.OpInfWarning, + ) self.__ddter = ddt_estimator # Properties -------------------------------------------------------------- diff --git a/tests/roms/__init__.py b/tests/roms/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/roms/test_nonparametric.py b/tests/roms/test_nonparametric.py new file mode 100644 index 00000000..ffa3997e --- /dev/null +++ b/tests/roms/test_nonparametric.py @@ -0,0 +1,123 @@ +# roms/test_nonparametric.py +"""Tests for roms._nonparametric.py.""" + +import pytest +import numpy as np + +import opinf + + +module = opinf.roms + + +args = dict( + model=opinf.models.ContinuousModel("A"), + model2=opinf.models.DiscreteModel("AB"), + 
lifter=opinf.lift.QuadraticLifter(), + transformer=opinf.pre.ShiftScaleTransformer(centering=True), + transformer2=opinf.pre.ShiftScaleTransformer(scaling="standard"), + basis=opinf.basis.PODBasis(num_vectors=3), + basis2=opinf.basis.PODBasis(num_vectors=4), + ddt_estimator=opinf.ddt.UniformFiniteDifferencer(np.linspace(0, 1, 100)), +) +args["multi_transformer"] = opinf.pre.TransformerMulti( + [args["transformer"], args["transformer2"]] +) +args["multi_basis"] = opinf.basis.BasisMulti([args["basis"], args["basis2"]]) +basics = { + k: v + for k, v in args.items() + if k in ("model", "lifter", "transformer", "basis", "ddt_estimator") +} + + +class TestROM: + """Test roms.ROM.""" + + ROM = module.ROM + + def test_init(self): + """Test __init__() and properties.""" + + # Model error. + with pytest.raises(TypeError) as ex: + self.ROM(10) + assert ex.value.args[0] == "invalid model type" + + # Warnings for other arguments. + with pytest.warns(opinf.errors.OpInfWarning) as wn: + self.ROM( + args["model"], + lifter=10, + transformer=8, + basis=6, + ddt_estimator=4, + ) + assert len(wn) == 4 + assert wn[0].message.args[0] == ( + "lifter not derived from LifterTemplate, " + "unexpected behavior may occur" + ) + assert wn[1].message.args[0] == ( + "transformer not derived from TransformerTemplate " + "or TransformerMulti, unexpected behavior may occur" + ) + assert wn[2].message.args[0] == ( + "basis not derived from BasisTemplate or BasisMulti, " + "unexpected behavior may occur" + ) + assert wn[3].message.args[0] == ( + "ddt_estimator not derived from DerivativeEstimatorTemplate, " + "unexpected behavior may occur" + ) + + # Given ddt_estimator with non-continuous model. + with pytest.warns(opinf.errors.OpInfWarning) as wn: + self.ROM(args["model2"], ddt_estimator=args["ddt_estimator"]) + assert len(wn) == 1 + assert wn[0].message.args[0] == ( + "ddt_estimator ignored for discrete models" + ) + + # Correct usage. 
+ rom = self.ROM( + args["model"], + lifter=args["lifter"], + ddt_estimator=args["ddt_estimator"], + ) + assert rom.iscontinuous + assert rom.transformer is None + assert rom.basis is None + + rom = self.ROM( + args["model2"], + transformer=args["multi_transformer"], + basis=args["multi_basis"], + ) + assert rom.lifter is None + assert rom.ddt_estimator is None + assert not rom.iscontinuous + + def test_str(self): + """Test __str__() and __repr__().""" + print(repr(self.ROM(**basics))) + + def test_econde(self): + """Test encode().""" + raise NotImplementedError + + def test_decode(self): + """Test decode().""" + raise NotImplementedError + + def test_project(self): + """Test project().""" + raise NotImplementedError + + def test_fit(self): + """Test fit().""" + raise NotImplementedError + + def test_predict(self): + """Test predict().""" + raise NotImplementedError From b7858a302ba1db86c24f0c8b89205853b69a01f1 Mon Sep 17 00:00:00 2001 From: Shane Date: Fri, 30 Aug 2024 15:49:19 -0600 Subject: [PATCH 28/48] initial _BaseROM split --- src/opinf/roms/_base.py | 289 +++++++++++++++++++++++++++++++ src/opinf/roms/_nonparametric.py | 250 ++------------------------ tests/roms/test_nonparametric.py | 8 +- 3 files changed, 305 insertions(+), 242 deletions(-) create mode 100644 src/opinf/roms/_base.py diff --git a/src/opinf/roms/_base.py b/src/opinf/roms/_base.py new file mode 100644 index 00000000..d8b37938 --- /dev/null +++ b/src/opinf/roms/_base.py @@ -0,0 +1,289 @@ +# roms/_base.py +"""Base for ROM classes.""" + +__all__ = [] + +import abc +import warnings + +from .. import errors, utils +from .. import lift, pre, basis as _basis, ddt, models + + +class _BaseROM(abc.ABC): + """Reduced-order model. + + This class connects classes from the various submodules to form a complete + reduced-order modeling workflow. + + High-dimensional data + -> transformed / preprocessed data + -> compressed data + -> low-dimensional model. 
+ + Parameters + ---------- + model : :mod:`opinf.models` object + System model. + lifter : :mod:`opinf.lift` object or None + Lifting transformation. + transformer : :mod:`opinf.pre` object or None + Preprocesser. + basis : :mod:`opinf.basis` object or None + Dimensionality reducer. + ddt_estimator : :mod:`opinf.ddt` object or None + Time derivative estimator. + Ignored if ``model`` is not time continuous. + """ + + def __init__(self, model, lifter, transformer, basis, ddt_estimator): + """Store attributes. Child classes should verify model type.""" + self.__model = model + + # Verify lifter. + if not (lifter is None or isinstance(lifter, lift.LifterTemplate)): + warnings.warn( + "lifter not derived from LifterTemplate, " + "unexpected behavior may occur", + errors.OpInfWarning, + ) + self.__lifter = lifter + + # Verify transformer. + if not ( + transformer is None + or isinstance( + transformer, + (pre.TransformerTemplate, pre.TransformerMulti), + ) + ): + warnings.warn( + "transformer not derived from TransformerTemplate " + "or TransformerMulti, unexpected behavior may occur", + errors.OpInfWarning, + ) + self.__transformer = transformer + + # Verify basis. + if not ( + basis is None + or isinstance(basis, (_basis.BasisTemplate, _basis.BasisMulti)) + ): + warnings.warn( + "basis not derived from BasisTemplate or BasisMulti, " + "unexpected behavior may occur", + errors.OpInfWarning, + ) + self.__basis = basis + + # Verify ddt_estimator. 
+ if ddt_estimator is not None and not self.iscontinuous: + warnings.warn( + "ddt_estimator ignored for discrete models", + errors.OpInfWarning, + ) + ddt_estimator = None + if not ( + ddt_estimator is None + or isinstance(ddt_estimator, ddt.DerivativeEstimatorTemplate) + ): + warnings.warn( + "ddt_estimator not derived from DerivativeEstimatorTemplate, " + "unexpected behavior may occur", + errors.OpInfWarning, + ) + self.__ddter = ddt_estimator + + # Properties -------------------------------------------------------------- + @property + def lifter(self): + """Lifting transformation.""" + return self.__lifter + + @property + def transformer(self): + """Preprocesser.""" + return self.__transformer + + @property + def basis(self): + """Dimensionality reducer.""" + return self.__basis + + @property + def ddt_estimator(self): + """Time derivative estimator.""" + return self.__ddter + + @property + def model(self): + """System model.""" + return self.__model + + @property + def iscontinuous(self): + """``True`` if the model is time continuous (semi-discrete), + ``False`` if the model if fully discrete. 
+ """ + return isinstance( + self.model, + (models.ContinuousModel, models.ParametricContinuousModel), + ) + + # Printing ---------------------------------------------------------------- + def __str__(self): + """String representation.""" + lines = ["reduced-order model"] + + def indent(text): + return "\n".join(f" {line}" for line in text.rstrip().split("\n")) + + for label, obj in [ + ("Lifting", self.lifter), + ("Transformer", self.transformer), + ("Basis", self.basis), + ("Time derivative estimator", self.ddt_estimator), + ("Model", self.model), + ]: + if obj is not None: + lines.append(f"{label}:") + lines.append(indent(str(obj))) + + return "\n".join(lines) + + def __repr__(self): + """Repr: address + string representatation.""" + return utils.str2repr(self) + + # Mappings between original and latent state spaces ----------------------- + def encode( + self, + states, + lhs=None, + inplace: bool = False, + *, + fit_transformer: bool = False, + fit_basis: bool = False, + ): + """Map high-dimensional data to its low-dimensional representation. + + Parameters + ---------- + states : (n, ...) ndarray + State snapshots in the original state space. + lhs : (n, ...) ndarray or None + Left-hand side regression data. + + - If the model is time continuous, these are the time derivatives + of the state snapshots. + - If the model is fully discrete, these are the "next states" + corresponding to the state snapshots. + inplace : bool + If ``True``, modify the ``states`` and ``lhs`` in-place in the + preprocessing transformation (if applicable). + + Returns + ------- + states_encoded : (r, ...) ndarray + Low-dimensional representation of ``states`` + in the latent reduced state space. + lhs_encoded : (r, ...) ndarray + Low-dimensional representation of ``lhs`` + in the latent reduced state space. + **Only returned** if ``lhs`` is not ``None``. + """ + # Lifting. 
+ if self.lifter is not None: + if self.iscontinuous and lhs is not None: + lhs = self.lifter.lift_ddts(lhs) + states = self.lifter.lift(states) + + # Preprocessing. + if self.transformer is not None: + if fit_transformer: + states = self.transformer.fit_transform( + states, + inplace=inplace, + ) + else: + states = self.tranformer.tranform(states, inplace=inplace) + if lhs is not None: + if self.iscontinuous: + lhs = self.transformer.transform_ddts(lhs, inplace=inplace) + else: + lhs = self.transformer.tranform(lhs, inplace=inplace) + + # Dimensionality reduction. + if self.basis is not None: + if fit_basis: + self.basis.fit(states) + states = self.basis.compress(states) + if lhs is not None: + lhs = self.basis.compress(lhs) + + if lhs is not None: + return states, lhs + return states + + def decode(self, states_encoded): + """Map low-dimensional data to the original state space. + + Parameters + ---------- + states_encoded : (r, ...) ndarray + Low-dimensional state or states + in the latent reduced state space. + + Returns + ------- + states_decoded : (n, ...) ndarray + Version of ``states_compressed`` in the original state space. + """ + # Reverse dimensionality reduction. + states = states_encoded + if self.basis is not None: + states = self.basis.decompress(states) + + # Reverse preprocessing. + if self.transformer is not None: + states = self.transformer.inverse_transform(states, inplace=True) + + # Reverse lifting. + if self.lifter is not None: + states = self.lifter.unlift(states) + + return states + + def project(self, states): + """Project a high-dimensional state vector to the subset of the + high-dimensional space that can be represented by the basis. + + This is done by + + 1. expressing the state in low-dimensional latent coordinates, then + 2. reconstructing the high-dimensional state corresponding to those + coordinates. + + In other words, ``project(Q)`` is equivalent to ``decode(encode(Q))``. + + Parameters + ---------- + states : (n, ...) 
ndarray + Matrix of `n`-dimensional state vectors, or a single state vector. + + Returns + ------- + state_projected : (n, ...) ndarray + Matrix of `n`-dimensional projected state vectors, or a single + projected state vector. + """ + return self.decode(self.encode(states)) + + # Abstract methods -------------------------------------------------------- + @abc.abstractmethod + def fit(self, *args, **kwargs): + """Calibrate the model to the data.""" + + @abc.abstractmethod + def predict(self, *args, **kwargs): + """Evaluate the model.""" diff --git a/src/opinf/roms/_nonparametric.py b/src/opinf/roms/_nonparametric.py index d320161a..37cfa343 100644 --- a/src/opinf/roms/_nonparametric.py +++ b/src/opinf/roms/_nonparametric.py @@ -7,10 +7,11 @@ import warnings -from .. import errors, lift, pre, basis as _basis, ddt, models, utils +from ._base import _BaseROM +from .. import errors, models -class ROM: +class ROM(_BaseROM): r"""Nonparametric reduced-order model. This class connects classes from the various submodules to form a complete @@ -46,253 +47,23 @@ def __init__( ddt_estimator=None, ): """Store each argument as an attribute.""" - # Verify and store the model. + # Verify model. if not isinstance( model, (models.ContinuousModel, models.DiscreteModel), ): - raise TypeError("invalid model type") - self.__model = model - - # Verify and store the lifter. - if not (lifter is None or isinstance(lifter, lift.LifterTemplate)): - warnings.warn( - "lifter not derived from LifterTemplate, " - "unexpected behavior may occur", - errors.OpInfWarning, - ) - self.__lifter = lifter - - # Verify and store the transformer. 
- if not ( - transformer is None - or isinstance( - transformer, - (pre.TransformerTemplate, pre.TransformerMulti), - ) - ): - warnings.warn( - "transformer not derived from TransformerTemplate " - "or TransformerMulti, unexpected behavior may occur", - errors.OpInfWarning, + raise TypeError( + "'model' must be a " + "models.ContinuousModel or models.DiscreteModel instance" ) - self.__transformer = transformer - # Verify and store the basis. - if not ( - basis is None - or isinstance( - basis, - (_basis.BasisTemplate, _basis.BasisMulti), - ) - ): - warnings.warn( - "basis not derived from BasisTemplate or BasisMulti, " - "unexpected behavior may occur", - errors.OpInfWarning, - ) - self.__basis = basis - - # Verify and store the ddt estimator. - if not ( - ddt_estimator is None - or isinstance(ddt_estimator, ddt.DerivativeEstimatorTemplate) - ): - warnings.warn( - "ddt_estimator not derived from DerivativeEstimatorTemplate, " - "unexpected behavior may occur", - errors.OpInfWarning, - ) - if ddt_estimator is not None and not self.iscontinuous: - warnings.warn( - "ddt_estimator ignored for discrete models", - errors.OpInfWarning, - ) - self.__ddter = ddt_estimator - - # Properties -------------------------------------------------------------- - @property - def lifter(self): - """Lifting transformation.""" - return self.__lifter - - @property - def transformer(self): - """Preprocesser.""" - return self.__transformer - - @property - def basis(self): - """Dimensionality reducer.""" - return self.__basis - - @property - def ddt_estimator(self): - """Time derivative estimator.""" - return self.__ddter - - @property - def model(self): - """System model.""" - return self.__model - - @property - def iscontinuous(self): - """``True`` if the model is time continuous (semi-discrete), - ``False`` if the model if fully discrete. 
- """ - return isinstance(self.model, models.ContinuousModel) + super().__init__(model, lifter, transformer, basis, ddt_estimator) - # Printing ---------------------------------------------------------------- def __str__(self): """String representation.""" - lines = ["Nonparametric reduced-order model"] - - def indent(text): - return "\n".join(f" {line}" for line in text.rstrip().split("\n")) - - for label, obj in [ - ("Lifting", self.lifter), - ("Transformer", self.transformer), - ("Basis", self.basis), - ("Time derivative estimator", self.ddt_estimator), - ("Model", self.model), - ]: - if obj is not None: - lines.append(f"{label}:") - lines.append(indent(str(obj))) - - return "\n".join(lines) - - def __repr__(self): - """Repr: address + string representatation.""" - return utils.str2repr(self) - - # Mappings between original and latent state spaces ----------------------- - def encode( - self, - states, - lhs=None, - inplace: bool = False, - *, - fit_transformer: bool = False, - fit_basis: bool = False, - ): - """Map high-dimensional data to its low-dimensional representation. - - Parameters - ---------- - states : (n, ...) ndarray - State snapshots in the original state space. - lhs : (n, ...) ndarray or None - Left-hand side regression data. - - - If the model is time continuous, these are the time derivatives - of the state snapshots. - - If the model is fully discrete, these are the "next states" - corresponding to the state snapshots. - inplace : bool - If ``True``, modify the ``states`` and ``lhs`` in-place in the - preprocessing transformation (if applicable). - - Returns - ------- - states_encoded : (r, ...) ndarray - Low-dimensional representation of ``states`` - in the latent reduced state space. - lhs_encoded : (r, ...) ndarray - Low-dimensional representation of ``lhs`` - in the latent reduced state space. - **Only returned** if ``lhs`` is not ``None``. - """ - # Lifting. 
- if self.lifter is not None: - if self.iscontinuous and lhs is not None: - lhs = self.lifter.lift_ddts(lhs) - states = self.lifter.lift(states) - - # Preprocessing. - if self.transformer is not None: - if fit_transformer: - states = self.transformer.fit_transform( - states, - inplace=inplace, - ) - else: - states = self.tranformer.tranform(states, inplace=inplace) - if lhs is not None: - if self.iscontinuous: - lhs = self.transformer.transform_ddts(lhs, inplace=inplace) - else: - lhs = self.transformer.tranform(lhs, inplace=inplace) - - # Dimensionality reduction. - if self.basis is not None: - if fit_basis: - self.basis.fit(states) - states = self.basis.compress(states) - if lhs is not None: - lhs = self.basis.compress(lhs) - - if lhs is not None: - return states, lhs - return states - - def decode(self, states_encoded): - """Map low-dimensional data to the original state space. - - Parameters - ---------- - states_encoded : (r, ...) ndarray - Low-dimensional state or states - in the latent reduced state space. - - Returns - ------- - states_decoded : (n, ...) ndarray - Version of ``states_compressed`` in the original state space. - """ - # Reverse dimensionality reduction. - states = states_encoded - if self.basis is not None: - states = self.basis.decompress(states) - - # Reverse preprocessing. - if self.transformer is not None: - states = self.transformer.inverse_transform(states, inplace=True) - - # Reverse lifting. - if self.lifter is not None: - states = self.lifter.unlift(states) - - return states - - def project(self, states): - """Project a high-dimensional state vector to the subset of the - high-dimensional space that can be represented by the basis. - - This is done by - - 1. expressing the state in low-dimensional latent coordinates, then - 2. reconstructing the high-dimensional state corresponding to those - coordinates. - - In other words, ``project(Q)`` is equivalent to ``decode(encode(Q))``. - - Parameters - ---------- - states : (n, ...) 
ndarray - Matrix of `n`-dimensional state vectors, or a single state vector. - - Returns - ------- - state_projected : (n, ...) ndarray - Matrix of `n`-dimensional projected state vectors, or a single - projected state vector. - """ - return self.decode(self.encode(states)) + return f"Nonparametric {_BaseROM.__str__(self)}" - # Training ---------------------------------------------------------------- + # Training and evaluation ------------------------------------------------- def fit( self, states, @@ -375,7 +146,6 @@ def fit( return self - # Evaluation -------------------------------------------------------------- def predict(self, state0, *args, **kwargs): """Evaluate the reduced-order model. diff --git a/tests/roms/test_nonparametric.py b/tests/roms/test_nonparametric.py index ffa3997e..db274dd3 100644 --- a/tests/roms/test_nonparametric.py +++ b/tests/roms/test_nonparametric.py @@ -42,7 +42,10 @@ def test_init(self): # Model error. with pytest.raises(TypeError) as ex: self.ROM(10) - assert ex.value.args[0] == "invalid model type" + assert ex.value.args[0] == ( + "'model' must be a models.ContinuousModel " + "or models.DiscreteModel instance" + ) # Warnings for other arguments. with pytest.warns(opinf.errors.OpInfWarning) as wn: @@ -73,11 +76,12 @@ def test_init(self): # Given ddt_estimator with non-continuous model. with pytest.warns(opinf.errors.OpInfWarning) as wn: - self.ROM(args["model2"], ddt_estimator=args["ddt_estimator"]) + rom = self.ROM(args["model2"], ddt_estimator=args["ddt_estimator"]) assert len(wn) == 1 assert wn[0].message.args[0] == ( "ddt_estimator ignored for discrete models" ) + assert rom.ddt_estimator is None # Correct usage. 
rom = self.ROM( From 9c119fa2bda0483604473a13b8aabf4a7d116404 Mon Sep 17 00:00:00 2001 From: Shane Date: Fri, 30 Aug 2024 15:55:43 -0600 Subject: [PATCH 29/48] v0.5.[7->8], update utils doc page --- docs/source/api/utils.md | 29 +++++++++++++++++++++++++++++ src/opinf/__init__.py | 18 +++++++++++++++++- 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/docs/source/api/utils.md b/docs/source/api/utils.md index b8125519..7085b101 100644 --- a/docs/source/api/utils.md +++ b/docs/source/api/utils.md @@ -4,6 +4,21 @@ .. automodule:: opinf.utils ``` +## Timing Code + +Model reduction is all about speeding up computational tasks. +The following class defines a context manager for timing blocks of code and logging errors. + +```{eval-rst} +.. currentmodule:: opinf.utils + +.. autosummary:: + :toctree: _autosummaries + :nosignatures: + + timed_block +``` + ## Load/Save HDF5 Utilities Many `opinf` classes have `save()` methods that export the object to an HDF5 file and a `load()` class method for importing an object from an HDF5 file. @@ -19,3 +34,17 @@ The following functions facilitate that data transfer. hdf5_loadhandle hdf5_savehandle ``` + +## Documentation + +The following function initializes the Matplotlib defaults used in the documentation notebooks. + +```{eval-rst} +.. currentmodule:: opinf.utils + +.. autosummary:: + :toctree: _autosummaries + :nosignatures: + + mpl_config +``` diff --git a/src/opinf/__init__.py b/src/opinf/__init__.py index 7885f472..dc9e2310 100644 --- a/src/opinf/__init__.py +++ b/src/opinf/__init__.py @@ -7,7 +7,7 @@ https://github.com/Willcox-Research-Group/rom-operator-inference-Python3 """ -__version__ = "0.5.7" +__version__ = "0.5.8" from . 
import ( basis, @@ -24,3 +24,19 @@ ) from .roms import * + +__all__ = [ + "basis", + "errors", + "ddt", + "lift", + "lstsq", + "models", + "operators", + "pre", + "post", + "roms", + "utils", +] + +__all__ += roms.__all__ From 2358eaa6d224eb5671e23f8f202c3655b95fd483 Mon Sep 17 00:00:00 2001 From: Shane Date: Fri, 30 Aug 2024 16:23:11 -0600 Subject: [PATCH 30/48] models._utils, ROM test framework --- src/opinf/models/_utils.py | 29 +++++++ src/opinf/operators/_utils.py | 8 +- src/opinf/roms/_base.py | 16 ++-- tests/roms/test_base.py | 131 +++++++++++++++++++++++++++++++ tests/roms/test_nonparametric.py | 111 ++++---------------------- 5 files changed, 188 insertions(+), 107 deletions(-) create mode 100644 src/opinf/models/_utils.py create mode 100644 tests/roms/test_base.py diff --git a/src/opinf/models/_utils.py b/src/opinf/models/_utils.py new file mode 100644 index 00000000..af20b2f7 --- /dev/null +++ b/src/opinf/models/_utils.py @@ -0,0 +1,29 @@ +# models/_utils.py +"""Private utility functions for working with Model classes.""" + +__all__ = [ + "is_continuous", + "is_discrete", +] + +from .mono._nonparametric import ContinuousModel, DiscreteModel +from .mono._parametric import ( + _ParametricContinuousMixin, + _ParametricDiscreteMixin, +) + + +def is_continuous(model): + """``True`` if the model is time continuous (semi-discrete).""" + return isinstance( + model, + (ContinuousModel, _ParametricContinuousMixin), + ) + + +def is_discrete(model): + """``True`` if the model is time discrete (fully discrete).""" + return isinstance( + model, + (DiscreteModel, _ParametricDiscreteMixin), + ) diff --git a/src/opinf/operators/_utils.py b/src/opinf/operators/_utils.py index c6657418..ebb50b2c 100644 --- a/src/opinf/operators/_utils.py +++ b/src/opinf/operators/_utils.py @@ -1,10 +1,6 @@ # operators/_utils.py """Private utility functions for working with Operator classes.""" -from ._base import has_inputs, is_nonparametric, is_parametric, is_uncalibrated -from ._affine 
import is_affine, nonparametric_to_affine -from ._interpolate import is_interpolated, nonparametric_to_interpolated - __all__ = [ "has_inputs", "is_nonparametric", @@ -15,3 +11,7 @@ "nonparametric_to_affine", "nonparametric_to_interpolated", ] + +from ._base import has_inputs, is_nonparametric, is_parametric, is_uncalibrated +from ._affine import is_affine, nonparametric_to_affine +from ._interpolate import is_interpolated, nonparametric_to_interpolated diff --git a/src/opinf/roms/_base.py b/src/opinf/roms/_base.py index d8b37938..4a9f67a4 100644 --- a/src/opinf/roms/_base.py +++ b/src/opinf/roms/_base.py @@ -7,7 +7,8 @@ import warnings from .. import errors, utils -from .. import lift, pre, basis as _basis, ddt, models +from .. import lift, pre, basis as _basis, ddt +from ..models import _utils as modutils class _BaseROM(abc.ABC): @@ -77,7 +78,7 @@ def __init__(self, model, lifter, transformer, basis, ddt_estimator): self.__basis = basis # Verify ddt_estimator. - if ddt_estimator is not None and not self.iscontinuous: + if ddt_estimator is not None and not self._iscontinuous: warnings.warn( "ddt_estimator ignored for discrete models", errors.OpInfWarning, @@ -121,14 +122,11 @@ def model(self): return self.__model @property - def iscontinuous(self): + def _iscontinuous(self): """``True`` if the model is time continuous (semi-discrete), ``False`` if the model if fully discrete. """ - return isinstance( - self.model, - (models.ContinuousModel, models.ParametricContinuousModel), - ) + return modutils.is_continuous(self.model) # Printing ---------------------------------------------------------------- def __str__(self): @@ -194,7 +192,7 @@ def encode( """ # Lifting. 
if self.lifter is not None: - if self.iscontinuous and lhs is not None: + if self._iscontinuous and lhs is not None: lhs = self.lifter.lift_ddts(lhs) states = self.lifter.lift(states) @@ -208,7 +206,7 @@ def encode( else: states = self.tranformer.tranform(states, inplace=inplace) if lhs is not None: - if self.iscontinuous: + if self._iscontinuous: lhs = self.transformer.transform_ddts(lhs, inplace=inplace) else: lhs = self.transformer.tranform(lhs, inplace=inplace) diff --git a/tests/roms/test_base.py b/tests/roms/test_base.py new file mode 100644 index 00000000..8cadfa31 --- /dev/null +++ b/tests/roms/test_base.py @@ -0,0 +1,131 @@ +# roms/test_base.py +"""Tests for roms._base.""" + +import abc +import pytest +import numpy as np + +import opinf +from opinf.models import _utils as modutils + + +args = dict( + lifter=opinf.lift.QuadraticLifter(), + transformer=opinf.pre.ShiftScaleTransformer(centering=True), + transformer2=opinf.pre.ShiftScaleTransformer(scaling="standard"), + basis=opinf.basis.PODBasis(num_vectors=3), + basis2=opinf.basis.PODBasis(num_vectors=4), + ddt_estimator=opinf.ddt.UniformFiniteDifferencer(np.linspace(0, 1, 100)), +) +args["multi_transformer"] = opinf.pre.TransformerMulti( + [args["transformer"], args["transformer2"]] +) +args["multi_basis"] = opinf.basis.BasisMulti([args["basis"], args["basis2"]]) +basics = { + key: val + for key, val in args.items() + if key in ("lifter", "transformer", "basis") +} + + +class _TestBaseROM(abc.ABC): + """Test opinf.roms._base._BaseROM.""" + + ROM = NotImplemented + ModelClasses = NotImplemented + + @abc.abstractmethod + def _get_models(self): + """Return a list of valid model instantiations.""" + pass + + def test_init(self): + """Test __init__() and properties.""" + + for model in self._get_models(): + # Warnings for non-model arguments. 
+ with pytest.warns(opinf.errors.OpInfWarning) as wn: + self.ROM( + model, + lifter=10, + transformer=8, + basis=6, + ddt_estimator=4, + ) + assert len(wn) == 4 + assert wn[0].message.args[0] == ( + "lifter not derived from LifterTemplate, " + "unexpected behavior may occur" + ) + assert wn[1].message.args[0] == ( + "transformer not derived from TransformerTemplate " + "or TransformerMulti, unexpected behavior may occur" + ) + assert wn[2].message.args[0] == ( + "basis not derived from BasisTemplate or BasisMulti, " + "unexpected behavior may occur" + ) + assert wn[3].message.args[0] == ( + "ddt_estimator not derived from DerivativeEstimatorTemplate, " + "unexpected behavior may occur" + ) + + # Given ddt_estimator with non-continuous model. + if modutils.is_discrete(model): + with pytest.warns(opinf.errors.OpInfWarning) as wn: + rom = self.ROM( + model, + ddt_estimator=args["ddt_estimator"], + ) + assert len(wn) == 1 + assert wn[0].message.args[0] == ( + "ddt_estimator ignored for discrete models" + ) + assert rom.ddt_estimator is None + assert not rom._iscontinuous + + # Correct usage. 
+ rom = self.ROM( + model, + lifter=args["lifter"], + ddt_estimator=args["ddt_estimator"], + ) + assert rom.lifter is args["lifter"] + assert rom.transformer is None + assert rom.basis is None + assert rom.ddt_estimator is args["ddt_estimator"] + + rom = self.ROM( + args["model2"], + transformer=args["multi_transformer"], + basis=args["multi_basis"], + ) + assert rom.lifter is None + assert rom.transformer is args["multi_transformer"] + assert rom.basis is args["multi_basis"] + assert rom.ddt_estimator is None + + def test_str(self): + """Lightly test __str__() and __repr__().""" + for model in self._get_models(): + repr(self.ROM(model, **basics)) + + def test_encode(self): + """Test encode().""" + raise NotImplementedError + + def test_decode(self): + """Test decode().""" + raise NotImplementedError + + def test_project(self): + """Test project().""" + raise NotImplementedError + + def test_fit(self): + """Test fit().""" + raise NotImplementedError + + def test_predict(self): + """Test predict().""" + raise NotImplementedError diff --git a/tests/roms/test_nonparametric.py b/tests/roms/test_nonparametric.py index db274dd3..69888ac3 100644 --- a/tests/roms/test_nonparametric.py +++ b/tests/roms/test_nonparametric.py @@ -2,39 +2,30 @@ """Tests for roms._nonparametric.py.""" import pytest -import numpy as np import opinf +from .test_base import _TestBaseROM -module = opinf.roms +_module = opinf.roms -args = dict( - model=opinf.models.ContinuousModel("A"), - model2=opinf.models.DiscreteModel("AB"), - lifter=opinf.lift.QuadraticLifter(), - transformer=opinf.pre.ShiftScaleTransformer(centering=True), - transformer2=opinf.pre.ShiftScaleTransformer(scaling="standard"), - basis=opinf.basis.PODBasis(num_vectors=3), - basis2=opinf.basis.PODBasis(num_vectors=4), - ddt_estimator=opinf.ddt.UniformFiniteDifferencer(np.linspace(0, 1, 100)), -) -args["multi_transformer"] = opinf.pre.TransformerMulti( - [args["transformer"], args["transformer2"]] -) -args["multi_basis"] = 
opinf.basis.BasisMulti([args["basis"], args["basis2"]]) -basics = { - k: v - for k, v in args.items() - if k in ("model", "lifter", "transformer", "basis", "ddt_estimator") -} - -class TestROM: +class TestROM(_TestBaseROM): """Test roms.ROM.""" - ROM = module.ROM + ROM = _module.ROM + ModelClasses = ( + opinf.models.ContinuousModel, + opinf.models.DiscreteModel, + ) + + def _get_models(self): + """Return a list of valid model instantiations.""" + return [ + opinf.models.ContinuousModel("A"), + opinf.models.DiscreteModel("AB"), + ] def test_init(self): """Test __init__() and properties.""" @@ -47,76 +38,8 @@ def test_init(self): "or models.DiscreteModel instance" ) - # Warnings for other arguments. - with pytest.warns(opinf.errors.OpInfWarning) as wn: - self.ROM( - args["model"], - lifter=10, - transformer=8, - basis=6, - ddt_estimator=4, - ) - assert len(wn) == 4 - assert wn[0].message.args[0] == ( - "lifter not derived from LifterTemplate, " - "unexpected behavior may occur" - ) - assert wn[1].message.args[0] == ( - "transformer not derived from TransformerTemplate " - "or TransformerMulti, unexpected behavior may occur" - ) - assert wn[2].message.args[0] == ( - "basis not derived from BasisTemplate or BasisMulti, " - "unexpected behavior may occur" - ) - assert wn[3].message.args[0] == ( - "ddt_estimator not derived from DerivativeEstimatorTemplate, " - "unexpected behavior may occur" - ) - - # Given ddt_estimator with non-continuous model. - with pytest.warns(opinf.errors.OpInfWarning) as wn: - rom = self.ROM(args["model2"], ddt_estimator=args["ddt_estimator"]) - assert len(wn) == 1 - assert wn[0].message.args[0] == ( - "ddt_estimator ignored for discrete models" - ) - assert rom.ddt_estimator is None - - # Correct usage. 
- rom = self.ROM( - args["model"], - lifter=args["lifter"], - ddt_estimator=args["ddt_estimator"], - ) - assert rom.iscontinuous - assert rom.transformer is None - assert rom.basis is None - - rom = self.ROM( - args["model2"], - transformer=args["multi_transformer"], - basis=args["multi_basis"], - ) - assert rom.lifter is None - assert rom.ddt_estimator is None - assert not rom.iscontinuous - - def test_str(self): - """Test __str__() and __repr__().""" - print(repr(self.ROM(**basics))) - - def test_econde(self): - """Test encode().""" - raise NotImplementedError - - def test_decode(self): - """Test decode().""" - raise NotImplementedError - - def test_project(self): - """Test project().""" - raise NotImplementedError + # Other arguments. + super().test_init() def test_fit(self): """Test fit().""" From 2c7bb4067050d4f356a9af9b3657c7ec6ba2443f Mon Sep 17 00:00:00 2001 From: Shane Date: Fri, 30 Aug 2024 17:08:55 -0600 Subject: [PATCH 31/48] ROM.encode() tests and bug fixes --- src/opinf/roms/_base.py | 19 +++--- tests/roms/test_base.py | 137 ++++++++++++++++++++++++++++++---------- 2 files changed, 116 insertions(+), 40 deletions(-) diff --git a/src/opinf/roms/_base.py b/src/opinf/roms/_base.py index 4a9f67a4..2e60df0d 100644 --- a/src/opinf/roms/_base.py +++ b/src/opinf/roms/_base.py @@ -167,9 +167,9 @@ def encode( Parameters ---------- - states : (n, ...) ndarray + states : (n,) or (n, k) ndarray State snapshots in the original state space. - lhs : (n, ...) ndarray or None + lhs : (n,) or (n, k) ndarray or None Left-hand side regression data. - If the model is time continuous, these are the time derivatives @@ -182,18 +182,21 @@ def encode( Returns ------- - states_encoded : (r, ...) ndarray + states_encoded : (r,) or (r, k) ndarray Low-dimensional representation of ``states`` in the latent reduced state space. - lhs_encoded : (r, ...) ndarray + lhs_encoded : (r,) or (r, k) ndarray Low-dimensional representation of ``lhs`` in the latent reduced state space. 
**Only returned** if ``lhs`` is not ``None``. """ # Lifting. if self.lifter is not None: - if self._iscontinuous and lhs is not None: - lhs = self.lifter.lift_ddts(lhs) + if lhs is not None: + if self._iscontinuous: + lhs = self.lifter.lift_ddts(states, lhs) + else: + lhs = self.lifter.lift(lhs) states = self.lifter.lift(states) # Preprocessing. @@ -204,12 +207,12 @@ def encode( inplace=inplace, ) else: - states = self.tranformer.tranform(states, inplace=inplace) + states = self.transformer.transform(states, inplace=inplace) if lhs is not None: if self._iscontinuous: lhs = self.transformer.transform_ddts(lhs, inplace=inplace) else: - lhs = self.transformer.tranform(lhs, inplace=inplace) + lhs = self.transformer.transform(lhs, inplace=inplace) # Dimensionality reduction. if self.basis is not None: diff --git a/tests/roms/test_base.py b/tests/roms/test_base.py index 8cadfa31..5f64b7eb 100644 --- a/tests/roms/test_base.py +++ b/tests/roms/test_base.py @@ -9,25 +9,6 @@ from opinf.models import _utils as modutils -args = dict( - lifter=opinf.lift.QuadraticLifter(), - transformer=opinf.pre.ShiftScaleTransformer(centering=True), - transformer2=opinf.pre.ShiftScaleTransformer(scaling="standard"), - basis=opinf.basis.PODBasis(num_vectors=3), - basis2=opinf.basis.PODBasis(num_vectors=4), - ddt_estimator=opinf.ddt.UniformFiniteDifferencer(np.linspace(0, 1, 100)), -) -args["multi_transformer"] = opinf.pre.TransformerMulti( - [args["transformer"], args["transformer2"]] -) -args["multi_basis"] = opinf.basis.BasisMulti([args["basis"], args["basis2"]]) -basics = { - key: val - for key, val in args.items() - if key in ("lifter", "transformer", "basis") -} - - class _TestBaseROM(abc.ABC): """Test opinf.roms._base._BaseROM.""" @@ -39,6 +20,28 @@ def _get_models(self): """Return a list of valid model instantiations.""" pass + @staticmethod + def _get(*keys): + args = dict( + lifter=opinf.lift.QuadraticLifter(), + transformer=opinf.pre.ShiftScaleTransformer(centering=True), + 
transformer2=opinf.pre.ShiftScaleTransformer(scaling="standard"), + basis=opinf.basis.PODBasis(num_vectors=3), + basis2=opinf.basis.PODBasis(num_vectors=4), + ddt_estimator=opinf.ddt.UniformFiniteDifferencer( + np.linspace(0, 1, 100) + ), + ) + args["multi_transformer"] = opinf.pre.TransformerMulti( + [args["transformer"], args["transformer2"]] + ) + args["multi_basis"] = opinf.basis.BasisMulti( + [args["basis"], args["basis2"]] + ) + if len(keys) == 1: + return args[keys[0]] + return [args[k] for k in keys] + def test_init(self): """Test __init__() and properties.""" @@ -75,7 +78,7 @@ def test_init(self): with pytest.warns(opinf.errors.OpInfWarning) as wn: rom = self.ROM( model, - ddt_estimator=args["ddt_estimator"], + ddt_estimator=self._get_args("ddt_estimator"), ) assert len(wn) == 1 assert wn[0].message.args[0] == ( @@ -85,34 +88,104 @@ def test_init(self): assert not rom._iscontinuous # Correct usage. + lifter, ddt_estimator = self._get("lifter", "ddt_estimator") rom = self.ROM( model, - lifter=args["lifter"], - ddt_estimator=args["ddt_estimator"], + lifter=lifter, + ddt_estimator=ddt_estimator, ) - assert rom.lifter is args["lifter"] + assert rom.lifter is lifter assert rom.transformer is None assert rom.basis is None - assert rom.ddt_estimator is args["ddt_estimator"] + assert rom.ddt_estimator is ddt_estimator + transformer, basis = self._get("multi_transformer", "multi_basis") rom = self.ROM( - args["model2"], - transformer=args["multi_transformer"], - basis=args["multi_basis"], + model, + transformer=transformer, + basis=basis, ) assert rom.lifter is None - assert rom.transformer is args["multi_transformer"] - assert rom.basis is args["multi_basis"] + assert rom.transformer is transformer + assert rom.basis is basis assert rom.ddt_estimator is None def test_str(self): """Lightly test __str__() and __repr__().""" for model in self._get_models(): - repr(self.ROM(model, **basics)) + a1, a2, a3 = self._get("lifter", "transformer", "basis") + 
repr(self.ROM(model, lifter=a1, transformer=a2, basis=a3)) - def test_encode(self): + def test_encode(self, n=40, k=20): """Test encode().""" - raise NotImplementedError + states = np.random.random((n, k)) + lhs = np.random.random((n, k)) + + def _check(arr, shape): + assert isinstance(arr, np.ndarray) + assert arr.shape == shape + + for model in self._get_models(): + # Lifter only. + rom = self.ROM(model, lifter=self._get("lifter")) + _check(rom.encode(states), (2 * n, k)) + out1, out2 = rom.encode(states, lhs) + for out in out1, out2: + _check(out, (2 * n, k)) + _check(rom.encode(states[:, 0]), (2 * n,)) + out1, out2 = rom.encode(states[:, 0], lhs[:, 0]) + for out in out1, out2: + _check(out, (2 * n,)) + + # Transformer only. + rom = self.ROM(model, transformer=self._get("transformer")) + with pytest.raises(AttributeError) as ex: + rom.encode(states) + assert ex.value.args[0] == ( + "transformer not trained (call fit() or fit_transform())" + ) + + out = rom.encode(states, fit_transformer=True, inplace=False) + _check(out, (n, k)) + out1, out2 = rom.encode(states, lhs) + for out in out1, out2: + _check(out, (n, k)) + out = rom.encode(states[:, 0]) + _check(out, (n,)) + out1, out2 = rom.encode(states[:, 0], lhs[:, 0]) + for out in out1, out2: + _check(out, (n,)) + + # Basis only. + rom = self.ROM(model, basis=self._get("basis")) + with pytest.raises(AttributeError) as ex: + rom.encode(states) + assert ex.value.args[0] == "basis entries not initialized" + + out = rom.encode(states, fit_basis=True) + r = rom.basis.reduced_state_dimension + _check(out, (r, k)) + out1, out2 = rom.encode(states, lhs) + for out in out1, out2: + _check(out, (r, k)) + _check(rom.encode(states[:, 0]), (r,)) + out1, out2 = rom.encode(states[:, 0], lhs[:, 0]) + for out in out1, out2: + _check(out, (r,)) + + # Lifter, transformer, and basis. 
+ a1, a2, a3 = self._get("lifter", "transformer", "basis") + rom = self.ROM(model, lifter=a1, transformer=a2, basis=a3) + out = rom.encode(states, fit_transformer=True, fit_basis=True) + r = rom.basis.reduced_state_dimension + _check(out, (r, k)) + out1, out2 = rom.encode(states, lhs) + for out in out1, out2: + _check(out, (r, k)) + _check(rom.encode(states[:, 0]), (r,)) + out1, out2 = rom.encode(states[:, 0], lhs[:, 0]) + for out in out1, out2: + _check(out, (r,)) def test_decode(self): """Test decode().""" From 9f94e31b4d80e2a9209b49f800e4263860777df7 Mon Sep 17 00:00:00 2001 From: Shane Date: Wed, 4 Sep 2024 16:03:03 -0600 Subject: [PATCH 32/48] tests and fixes for decode, project --- tests/roms/test_base.py | 142 ++++++++++++++++++++++++++++++++++------ 1 file changed, 121 insertions(+), 21 deletions(-) diff --git a/tests/roms/test_base.py b/tests/roms/test_base.py index 5f64b7eb..a26be40c 100644 --- a/tests/roms/test_base.py +++ b/tests/roms/test_base.py @@ -68,17 +68,23 @@ def test_init(self): "basis not derived from BasisTemplate or BasisMulti, " "unexpected behavior may occur" ) - assert wn[3].message.args[0] == ( - "ddt_estimator not derived from DerivativeEstimatorTemplate, " - "unexpected behavior may occur" - ) + if modutils.is_continuous(model): + assert wn[3].message.args[0] == ( + "ddt_estimator not derived from " + "DerivativeEstimatorTemplate, " + "unexpected behavior may occur" + ) + else: + assert wn[3].message.args[0] == ( + "ddt_estimator ignored for discrete models" + ) # Given ddt_estimator with non-continuous model. if modutils.is_discrete(model): with pytest.warns(opinf.errors.OpInfWarning) as wn: rom = self.ROM( model, - ddt_estimator=self._get_args("ddt_estimator"), + ddt_estimator=self._get("ddt_estimator"), ) assert len(wn) == 1 assert wn[0].message.args[0] == ( @@ -88,16 +94,17 @@ def test_init(self): assert not rom._iscontinuous # Correct usage. 
- lifter, ddt_estimator = self._get("lifter", "ddt_estimator") - rom = self.ROM( - model, - lifter=lifter, - ddt_estimator=ddt_estimator, - ) - assert rom.lifter is lifter - assert rom.transformer is None - assert rom.basis is None - assert rom.ddt_estimator is ddt_estimator + if modutils.is_continuous(model): + lifter, ddt_estimator = self._get("lifter", "ddt_estimator") + rom = self.ROM( + model, + lifter=lifter, + ddt_estimator=ddt_estimator, + ) + assert rom.lifter is lifter + assert rom.transformer is None + assert rom.basis is None + assert rom.ddt_estimator is ddt_estimator transformer, basis = self._get("multi_transformer", "multi_basis") rom = self.ROM( @@ -187,18 +194,111 @@ def _check(arr, shape): for out in out1, out2: _check(out, (r,)) - def test_decode(self): + def test_decode(self, n=22, k=18): """Test decode().""" - raise NotImplementedError - def test_project(self): + def _check(arr, shape): + assert isinstance(arr, np.ndarray) + assert arr.shape == shape + + for model in self._get_models(): + # Lifter only. + rom = self.ROM(model, lifter=self._get("lifter")) + states = np.random.random((2 * n, k)) + _check(rom.decode(states), (n, k)) + _check(rom.decode(states[:, 0]), (n,)) + + # Transformer only. + rom = self.ROM(model, transformer=self._get("transformer")) + with pytest.raises(AttributeError) as ex: + rom.decode(states) + assert ex.value.args[0] == ( + "transformer not trained (call fit() or fit_transform())" + ) + states = np.random.random((n, k)) + states_ = rom.encode(states, fit_transformer=True) + out = rom.decode(states_) + _check(out, (n, k)) + assert np.allclose(out, states) + out = rom.decode(states_[:, 0]) + _check(out, (n,)) + assert np.allclose(out, states[:, 0]) + + # Basis only. 
+ rom = self.ROM(model, basis=self._get("basis")) + with pytest.raises(AttributeError) as ex: + rom.decode(states) + assert ex.value.args[0] == "basis entries not initialized" + states_ = rom.encode(states, fit_basis=True) + _check(rom.decode(states_), (n, k)) + _check(rom.decode(states_[:, 0]), (n,)) + + # Lifter, transformer, and basis. + a1, a2, a3 = self._get("lifter", "transformer", "basis") + rom = self.ROM(model, lifter=a1, transformer=a2, basis=a3) + states_ = rom.encode(states, fit_transformer=True, fit_basis=True) + out1 = rom.decode(states_) + _check(out1, (n, k)) + out2 = rom.decode(states_[:, 0]) + _check(out2, (n,)) + assert np.allclose(out2, out1[:, 0]) + + # With the locs argument. + a2, a3 = self._get("transformer", "basis") + rom = self.ROM(model, transformer=a2, basis=a3) + states_ = rom.encode(states, fit_transformer=True, fit_basis=True) + out1 = rom.decode(states_) + locs = np.sort(np.random.choice(n, n // 3)) + out2 = rom.decode(states_, locs=locs) + _check(out2, (n // 3, k)) + assert np.allclose(out2, out1[locs]) + + def test_project(self, n=30, k=19): """Test project().""" - raise NotImplementedError + states = np.random.random((n, k)) + def _check(rom, preserved=False): + rom.encode(states, fit_transformer=True, fit_basis=True) + out = rom.project(states) + assert isinstance(out, np.ndarray) + assert out.shape == (n, k) + if preserved: + assert np.allclose(out, states) + out0 = rom.project(states[:, 0]) + assert isinstance(out0, np.ndarray) + assert out0.shape == (n,) + assert np.allclose(out0, out[:, 0]) + + for model in self._get_models(): + # Lifter only. + _check(self.ROM(model, lifter=self._get("lifter")), preserved=True) + + # Transformer only. + rom = self.ROM(model, transformer=self._get("transformer")) + with pytest.raises(AttributeError) as ex: + rom.project(states) + assert ex.value.args[0] == ( + "transformer not trained (call fit() or fit_transform())" + ) + _check(rom, preserved=True) + + # Basis only. 
+ rom = self.ROM(model, basis=self._get("basis")) + with pytest.raises(AttributeError) as ex: + rom.project(states) + assert ex.value.args[0] == "basis entries not initialized" + _check(rom, preserved=False) + + # Lifter, transformer, and basis. + a1, a2, a3 = self._get("lifter", "transformer", "basis") + _check(self.ROM(model, lifter=a1, transformer=a2, basis=a3)) + + @abc.abstractmethod def test_fit(self): """Test fit().""" - raise NotImplementedError + raise NotImplementedError # pragma: no cover + @abc.abstractmethod def test_predict(self): """Test predict().""" - raise NotImplementedError + raise NotImplementedError # pragma: no cover From ceabbc52e0f0b41d40d047e80b226cc21cbd0d47 Mon Sep 17 00:00:00 2001 From: Shane Date: Wed, 4 Sep 2024 17:01:42 -0600 Subject: [PATCH 33/48] InterpolatedModels -> InterpModels --- docs/source/api/models.md | 16 +++---- src/opinf/models/mono/_nonparametric.py | 2 +- src/opinf/models/mono/_parametric.py | 48 ++++++++++++++++++--- src/opinf/operators/_interpolate.py | 7 ++++ tests/models/mono/test_parametric.py | 56 +++++++++++++++---------- tests/operators/test_interpolate.py | 20 +++++---- 6 files changed, 104 insertions(+), 45 deletions(-) diff --git a/docs/source/api/models.md b/docs/source/api/models.md index e7a21e5f..dc3d3c4f 100644 --- a/docs/source/api/models.md +++ b/docs/source/api/models.md @@ -22,8 +22,8 @@ ParametricContinuousModel ParametricDiscreteModel - InterpolatedContinuousModel - InterpolatedDiscreteModel + InterpContinuousModel + InterpDiscreteModel ``` :::{admonition} Overview @@ -154,9 +154,9 @@ In addition, parametric models have an `evaluate()` method that returns a nonpar ParametricDiscreteModel ``` -### Interpolated Models +### Interpolatory Models -Interpolated models consist exclusively of [interpolatory operators](sec-operators-interpolated). +Interpolatory models consist exclusively of [interpolatory operators](sec-operators-interpolated). ```{eval-rst} .. 
currentmodule:: opinf.models @@ -164,8 +164,8 @@ Interpolated models consist exclusively of [interpolatory operators](sec-operato .. autosummary:: :nosignatures: - InterpolatedContinuousModel - InterpolatedDiscreteModel + InterpContinuousModel + InterpDiscreteModel ``` :::{tip} @@ -184,7 +184,7 @@ The `operators` constructor argument for these classes can also be a string that import opinf # Initialize the model with a list of operator objects. -model = opinf.models.InterpolatedContinuousModel( +model = opinf.models.InterpContinuousModel( operators=[ opinf.operators.InterpCubicOperator(), opinf.operators.InterpStateInputOperator(), @@ -192,7 +192,7 @@ model = opinf.models.InterpolatedContinuousModel( ) # Equivalently, initialize the model with a string. -model = opinf.models.InterpolatedContinuousModel(operators="GN") +model = opinf.models.InterpContinuousModel(operators="GN") ``` ::: diff --git a/src/opinf/models/mono/_nonparametric.py b/src/opinf/models/mono/_nonparametric.py index 02ca5f2c..41dea863 100644 --- a/src/opinf/models/mono/_nonparametric.py +++ b/src/opinf/models/mono/_nonparametric.py @@ -2,7 +2,7 @@ """Nonparametric monolithic dynamical systems models.""" __all__ = [ - "SteadyModel", + # "SteadyModel", "DiscreteModel", "ContinuousModel", ] diff --git a/src/opinf/models/mono/_parametric.py b/src/opinf/models/mono/_parametric.py index 915bac39..55ea72e4 100644 --- a/src/opinf/models/mono/_parametric.py +++ b/src/opinf/models/mono/_parametric.py @@ -4,6 +4,9 @@ __all__ = [ "ParametricDiscreteModel", "ParametricContinuousModel", + "InterpDiscreteModel", + "InterpContinuousModel", + # Deprecations: "InterpolatedDiscreteModel", "InterpolatedContinuousModel", ] @@ -128,14 +131,14 @@ def operators(self, ops): ) # Check that not every operator is interpolated. 
- if not isinstance(self, _InterpolatedModel): + if not isinstance(self, _InterpModel): interpolated_operators = [ op for op in self.operators if oputils.is_interpolated(op) ] if len(interpolated_operators) == len(self.operators): warnings.warn( "all operators interpolatory, " - "consider using an InterpolatedModel class", + "consider using an InterpModel class", errors.OpInfWarning, ) self._synchronize_parameter_dimensions() @@ -1005,7 +1008,7 @@ class ParametricContinuousModel(_ParametricContinuousMixin, _ParametricModel): # Special case: completely interpolation-based models ========================= -class _InterpolatedModel(_ParametricModel): +class _InterpModel(_ParametricModel): """Base class for parametric monolithic models where all operators MUST be interpolation-based parametric operators. In this special case, the inference problems completely decouple by training parameter. @@ -1338,7 +1341,7 @@ def copy(self): ) -class InterpolatedDiscreteModel(_ParametricDiscreteMixin, _InterpolatedModel): +class InterpDiscreteModel(_ParametricDiscreteMixin, _InterpModel): r"""Parametric discrete dynamical system model :math:`\qhat(\bfmu)_{j+1} = \fhat(\qhat(\bfmu)_{j}, \u_{j}; \bfmu)` where the parametric dependence is handled by elementwise interpolation. 
@@ -1374,9 +1377,9 @@ class InterpolatedDiscreteModel(_ParametricDiscreteMixin, _InterpolatedModel): pass -class InterpolatedContinuousModel( +class InterpContinuousModel( _ParametricContinuousMixin, - _InterpolatedModel, + _InterpModel, ): r"""Parametric system of ordinary differential equations :math:`\ddt\qhat(t; \bfmu) = \fhat(\qhat(t; \bfmu), \u(t); \bfmu)` where @@ -1409,3 +1412,36 @@ class InterpolatedContinuousModel( """ pass + + +# Deprecations ================================================================ +class InterpolatedDiscreteModel(InterpDiscreteModel): + def __init__(self, operators, solver=None, InterpolatorClass=None): + warnings.warn( + "InterpolatedDiscreteModel has been renamed " + "and will be removed in an upcoming release, use " + "InterpDiscreteModel", + DeprecationWarning, + ) + InterpDiscreteModel.__init__( + self, + operators=operators, + solver=solver, + InterpolatorClass=InterpolatorClass, + ) + + +class InterpolatedContinuousModel(InterpContinuousModel): + def __init__(self, operators, solver=None, InterpolatorClass=None): + warnings.warn( + "InterpolatedContinuousModel has been renamed " + "and will be removed in an upcoming release, use " + "InterpContinuousModel", + DeprecationWarning, + ) + InterpContinuousModel.__init__( + self, + operators=operators, + solver=solver, + InterpolatorClass=InterpolatorClass, + ) diff --git a/src/opinf/operators/_interpolate.py b/src/opinf/operators/_interpolate.py index 9aadd969..a0056d72 100644 --- a/src/opinf/operators/_interpolate.py +++ b/src/opinf/operators/_interpolate.py @@ -10,6 +10,13 @@ "InterpCubicOperator", "InterpInputOperator", "InterpStateInputOperator", + # Deprecations: + "InterpolatedConstantOperator", + "InterpolatedLinearOperator", + "InterpolatedQuadraticOperator", + "InterpolatedCubicOperator", + "InterpolatedInputOperator", + "InterpolatedStateInputOperator", ] import warnings diff --git a/tests/models/mono/test_parametric.py b/tests/models/mono/test_parametric.py index 
5ab11a7a..a9290757 100644 --- a/tests/models/mono/test_parametric.py +++ b/tests/models/mono/test_parametric.py @@ -75,7 +75,7 @@ def test_set_operators(self, p=3): self.Model(operators) assert wn[0].message.args[0] == ( "all operators interpolatory, " - "consider using an InterpolatedModel class" + "consider using an InterpModel class" ) # Several operators provided. @@ -218,7 +218,7 @@ def test_process_fit_arguments(self, s=10, p=2, m=4, r=3, k=10): p, entries=[np.random.random(r) for _ in range(p)], ) - if isinstance(self, _TestInterpolatedModel): + if isinstance(self, _TestInterpModel): op2 = opinf.operators.InterpConstantOperator( training_parameters=params, entries=[np.zeros(r) for _ in range(s)], @@ -401,7 +401,7 @@ class TestParametricDiscreteModel(_TestParametricModel): _iscontinuous = False def test_predict(self, p=5, r=3, m=2, niters=10): - """Lightly test InterpolatedDiscreteModel.predict().""" + """Lightly test InterpDiscreteModel.predict().""" testparam = np.random.random(p) state0 = np.random.random(r) @@ -472,8 +472,8 @@ def input_func(t): # Interpolatotry models ======================================================= -class _TestInterpolatedModel(_TestParametricModel): - """Test models.mono._parametric._InterpolatedModel.""" +class _TestInterpModel(_TestParametricModel): + """Test models.mono._parametric._InterpModel.""" def _get_single_operator(self): """Get a single uncalibrated operator.""" @@ -581,7 +581,7 @@ def test_parameter_dimension(self, p=4): ) def test_from_models(self, s=10, r=4, m=2): - """Test _InterpolatedModel._from_models().""" + """Test _InterpModel._from_models().""" operators = [ [ opinf.operators.ConstantOperator(np.random.random(r)), @@ -627,7 +627,7 @@ def test_from_models(self, s=10, r=4, m=2): assert c00(testparam) == model.evaluate(testparam).operators[0][0] def test_set_interpolator(self, s=10, p=2, r=2): - """Test _InterpolatedModel._set_interpolator().""" + """Test _InterpModel._set_interpolator().""" mu = 
np.random.random((s, p)) operators = [ @@ -663,7 +663,7 @@ def test_set_interpolator(self, s=10, p=2, r=2): assert isinstance(op.interpolator, interp.NearestNDInterpolator) def test_fit_solver(self, s=10, r=3, k=20): - """Test _InterpolatedModel._fit_solver().""" + """Test _InterpModel._fit_solver().""" operators = [ opinf.operators.InterpConstantOperator(), opinf.operators.InterpLinearOperator(), @@ -696,7 +696,7 @@ def test_fit_solver(self, s=10, r=3, k=20): assert np.all(model._training_parameters == params) def test_refit(self, s=10, r=3, k=15): - """Test _InterpolatedModel.refit().""" + """Test _InterpModel.refit().""" operators = [ opinf.operators.InterpConstantOperator(), opinf.operators.InterpLinearOperator(), @@ -726,7 +726,7 @@ def test_refit(self, s=10, r=3, k=15): assert op.entries is not None def test_save(self, target="_interpmodelsavetest.h5"): - """Test _InterpolatedModel._save().""" + """Test _InterpModel._save().""" if os.path.isfile(target): os.remove(target) @@ -757,7 +757,7 @@ def test_save(self, target="_interpmodelsavetest.h5"): os.remove(target) def test_load(self, target="_interpmodelloadtest.h5"): - """Test _InterpolatedModel._load().""" + """Test _InterpModel._load().""" if os.path.isfile(target): os.remove(target) @@ -809,7 +809,7 @@ def test_load(self, target="_interpmodelloadtest.h5"): os.remove(target) def test_copy(self, s=10, p=2, r=3): - """Test _InterpolatedModel._copy().""" + """Test _InterpModel._copy().""" model1 = self.Model( [ @@ -838,14 +838,14 @@ def test_copy(self, s=10, p=2, r=3): assert model_copied == model -class TestInterpolatedDiscreteModel(_TestInterpolatedModel): - """Test models.mono._parametric.InterpolatedDiscreteModel.""" +class TestInterpDiscreteModel(_TestInterpModel): + """Test models.mono._parametric.InterpDiscreteModel.""" - Model = _module.InterpolatedDiscreteModel + Model = _module.InterpDiscreteModel _iscontinuous = False def test_fit(self, s=10, p=2, r=3, m=2, k=20): - """Lightly test 
InterpolatedDiscreteModel.fit().""" + """Lightly test InterpDiscreteModel.fit().""" params = np.random.random((s, p)) states = np.random.random((s, r, k)) nextstates = np.random.random((s, r, k)) @@ -860,7 +860,7 @@ def test_fit(self, s=10, p=2, r=3, m=2, k=20): assert out is model def test_predict(self, s=11, r=4, m=2, niters=10): - """Lightly test InterpolatedDiscreteModel.predict().""" + """Lightly test InterpDiscreteModel.predict().""" params = np.sort(np.random.random(s)) state0 = np.random.random(r) model = self.Model( @@ -883,14 +883,14 @@ def test_predict(self, s=11, r=4, m=2, niters=10): assert np.all(out[:, 1:] == 0) -class TestInterpolatedContinuousModel(_TestInterpolatedModel): - """Test models.mono._parametric.InterpolatedContinuousModel.""" +class TestInterpContinuousModel(_TestInterpModel): + """Test models.mono._parametric.InterpContinuousModel.""" - Model = _module.InterpolatedContinuousModel + Model = _module.InterpContinuousModel _iscontinuous = True def test_fit(self, s=10, p=2, r=3, m=2, k=20): - """Test InterpolatedContinuousModel.fit().""" + """Test InterpContinuousModel.fit().""" params = np.random.random((s, p)) states = np.random.random((s, r, k)) ddts = np.random.random((s, r, k)) @@ -905,7 +905,7 @@ def test_fit(self, s=10, p=2, r=3, m=2, k=20): assert out is model def test_predict(self, s=11, r=4, m=2, k=40): - """Lightly test InterpolatedContinuousModel.predict().""" + """Lightly test InterpContinuousModel.predict().""" params = np.sort(np.random.random(s)) state0 = np.random.random(r) t = np.linspace(0, 1, k) @@ -929,3 +929,15 @@ def input_func(t): assert out.shape == (r, k) for j in range(k): assert np.allclose(out[:, j], state0) + + +# Deprecations models ========================================================= +def test_deprecations(): + """Ensure deprecated classes still work.""" + for ModelClass in [ + _module.InterpolatedContinuousModel, + _module.InterpolatedDiscreteModel, + ]: + with pytest.warns(DeprecationWarning) as wn: + 
ModelClass("A") + assert len(wn) == 1 diff --git a/tests/operators/test_interpolate.py b/tests/operators/test_interpolate.py index a2286136..a19bb6c9 100644 --- a/tests/operators/test_interpolate.py +++ b/tests/operators/test_interpolate.py @@ -14,7 +14,8 @@ from . import _get_operator_entries -_module = opinf.operators._interpolate +_module = opinf.operators +_submodule = _module._interpolate _d = 8 _Dblock = np.random.random((4, _d)) @@ -60,7 +61,7 @@ class _DummyInterpolator2(_DummyInterpolator): class TestInterpOperator: """Test operators._interpolate._InterpOperator.""" - class Dummy(_module._InterpOperator): + class Dummy(_submodule._InterpOperator): """Instantiable version of _InterpOperator.""" _OperatorClass = _DummyOperator @@ -414,10 +415,13 @@ def test_publics(): """Ensure all public InterpOperator classes can be instantiated without arguments. """ - for OpClassName in _module.__all__: + for OpClassName in _submodule.__all__: + if "Interpolated" in OpClassName: + # Skip deprecations + continue OpClass = getattr(_module, OpClassName) if not isinstance(OpClass, type) or not issubclass( - OpClass, _module._InterpOperator + OpClass, _submodule._InterpOperator ): continue op = OpClass() @@ -483,18 +487,18 @@ def test_1Doperators(r=10, m=3, s=5): def test_is_interpolated(): """Test operators._interpolate.is_interpolated().""" op = TestInterpOperator.Dummy() - assert _module.is_interpolated(op) - assert not _module.is_interpolated(-1) + assert _submodule.is_interpolated(op) + assert not _submodule.is_interpolated(-1) def test_nonparametric_to_interpolated(): """Test operators._interpolate.nonparametric_to_interpolated().""" with pytest.raises(TypeError) as ex: - _module.nonparametric_to_interpolated(float) + _submodule.nonparametric_to_interpolated(float) assert ex.value.args[0] == ("_InterpOperator for class 'float' not found") - OpClass = _module.nonparametric_to_interpolated( + OpClass = _submodule.nonparametric_to_interpolated( 
opinf.operators.QuadraticOperator ) assert OpClass is opinf.operators.InterpQuadraticOperator From e44ebc81929a62b2a8e8e8dcc83406db482516e5 Mon Sep 17 00:00:00 2001 From: Shane Date: Wed, 4 Sep 2024 17:09:55 -0600 Subject: [PATCH 34/48] fix iscontinuous bug --- docs/source/tutorials/heat_equation.ipynb | 14 ++++++-------- src/opinf/roms/_base.py | 19 +++++++++++++++---- src/opinf/roms/_nonparametric.py | 19 +++++++++++-------- tests/roms/test_nonparametric.py | 1 + 4 files changed, 33 insertions(+), 20 deletions(-) diff --git a/docs/source/tutorials/heat_equation.ipynb b/docs/source/tutorials/heat_equation.ipynb index 2dd6b52b..e270f0d2 100644 --- a/docs/source/tutorials/heat_equation.ipynb +++ b/docs/source/tutorials/heat_equation.ipynb @@ -1462,7 +1462,7 @@ "\n", "Here, we perform interpolation on the entries of the reduced-order operators learned for each parameter sample. This means we learn a separate ROM for each $\\mu_i$, $i=1, \\ldots, s$, obtaining reduced-order operators $\\widehat{\\mathbf{A}}(\\mu_{i})$ and $\\widehat{\\mathbf{B}}(\\mu_{i})$.\n", "Then, for a new parameter value $\\bar{\\mu}\\in\\mathcal{D}$, we interpolate the entries of the learned reduced model operators to create a new reduced model corresponding to $\\bar{\\mu}\\in\\mathcal{D}$.\n", - "The {class}`opinf.models.InterpolatedContinuousModel` class encapsulates this process." + "The {class}`opinf.models.InterpContinuousModel` class encapsulates this process." 
] }, { @@ -1472,7 +1472,7 @@ "outputs": [], "source": [ "# Learn reduced models for each parameter value.\n", - "model = opinf.models.InterpolatedContinuousModel(\"AB\")\n", + "model = opinf.models.InterpContinuousModel(\"AB\")\n", "model.fit(\n", " parameters=params,\n", " states=basis.compress(Qs_train),\n", @@ -1517,14 +1517,12 @@ "metadata": {}, "outputs": [], "source": [ - "full_order_model = opinf.models.InterpolatedContinuousModel(\n", + "full_order_model = opinf.models.InterpContinuousModel(\n", " [\n", - " opinf.operators.InterpolatedLinearOperator(\n", + " opinf.operators.InterpLinearOperator(\n", " params, [p * A.toarray() for p in params]\n", " ),\n", - " opinf.operators.InterpolatedInputOperator(\n", - " params, [p * B for p in params]\n", - " ),\n", + " opinf.operators.InterpInputOperator(params, [p * B for p in params]),\n", " ]\n", ")\n", "\n", @@ -1538,7 +1536,7 @@ " model_intrusive = full_order_model.galerkin(basis.entries)\n", "\n", " # Learn an operator inference ROM from the training data.\n", - " model_opinf = opinf.models.InterpolatedContinuousModel(\n", + " model_opinf = opinf.models.InterpContinuousModel(\n", " operators=\"AB\",\n", " solver=opinf.lstsq.L2Solver(1e-12),\n", " ).fit(\n", diff --git a/src/opinf/roms/_base.py b/src/opinf/roms/_base.py index 2e60df0d..6b454bef 100644 --- a/src/opinf/roms/_base.py +++ b/src/opinf/roms/_base.py @@ -78,7 +78,7 @@ def __init__(self, model, lifter, transformer, basis, ddt_estimator): self.__basis = basis # Verify ddt_estimator. - if ddt_estimator is not None and not self._iscontinuous: + if (ddt_estimator is not None) and not self._iscontinuous: warnings.warn( "ddt_estimator ignored for discrete models", errors.OpInfWarning, @@ -226,7 +226,7 @@ def encode( return states, lhs return states - def decode(self, states_encoded): + def decode(self, states_encoded, locs=None): """Map low-dimensional data to the original state space. 
Parameters @@ -234,20 +234,29 @@ def decode(self, states_encoded): states_encoded : (r, ...) ndarray Low-dimensional state or states in the latent reduced state space. + locs : slice or (p,) ndarray of integers or None + If given, return the decoded state at only the p specified + locations (indices) described by ``locs``. Returns ------- states_decoded : (n, ...) ndarray Version of ``states_compressed`` in the original state space. """ + inplace = False # Reverse dimensionality reduction. states = states_encoded if self.basis is not None: - states = self.basis.decompress(states) + inplace = True + states = self.basis.decompress(states, locs=locs) # Reverse preprocessing. if self.transformer is not None: - states = self.transformer.inverse_transform(states, inplace=True) + states = self.transformer.inverse_transform( + states, + inplace=inplace, + locs=locs, + ) # Reverse lifting. if self.lifter is not None: @@ -284,7 +293,9 @@ def project(self, states): @abc.abstractmethod def fit(self, *args, **kwargs): """Calibrate the model to the data.""" + raise NotImplementedError # pragma: no cover @abc.abstractmethod def predict(self, *args, **kwargs): """Evaluate the model.""" + raise NotImplementedError # pragma: no cover diff --git a/src/opinf/roms/_nonparametric.py b/src/opinf/roms/_nonparametric.py index 37cfa343..2c6378a0 100644 --- a/src/opinf/roms/_nonparametric.py +++ b/src/opinf/roms/_nonparametric.py @@ -24,15 +24,18 @@ class ROM(_BaseROM): Parameters ---------- - model : opinf.models.ContinuousModel or opinf.models.DiscreteModel - System model. - lifter : opinf.lift.LifterTemplate or None + model : :mod:`opinf.models` object + Nonparametric system model, an instance of one of the following: + + * :class:`opinf.models.ContinuousModel` + * :class:`opinf.models.DiscreteModel` + lifter : :mod:`opinf.lift` object or None Lifting transformation. - transformer : opinf.pre.TransformerTemplate or None + transformer : :mod:`opinf.pre` object or None Preprocesser. 
- basis : opinf.basis.BasisTemplate + basis : :mod:`opinf.basis` object or None Dimensionality reducer. - ddt_estimator : opinf.ddt.DerivativeEstimatorTemplate + ddt_estimator : :mod:`opinf.ddt` object or None Time derivative estimator. Ignored if ``model`` is not time continuous. """ @@ -117,7 +120,7 @@ def fit( states, lhs = reduced # If needed, estimate time derivatives. - if self.iscontinuous: + if self._iscontinuous: if lhs is None: if self.ddt_estimator is None: raise ValueError( @@ -137,7 +140,7 @@ def fit( # Calibrate the model. kwargs = dict(inputs=inputs) - if self.iscontinuous: + if self._iscontinuous: self.model.fit(states, lhs, **kwargs) else: if lhs is not None: diff --git a/tests/roms/test_nonparametric.py b/tests/roms/test_nonparametric.py index 69888ac3..c30fe69b 100644 --- a/tests/roms/test_nonparametric.py +++ b/tests/roms/test_nonparametric.py @@ -41,6 +41,7 @@ def test_init(self): # Other arguments. super().test_init() + # DONE TO HERE. def test_fit(self): """Test fit().""" raise NotImplementedError From e65f0c8af5631945aedde9ed539257a46f486a87 Mon Sep 17 00:00:00 2001 From: Shane Date: Wed, 4 Sep 2024 18:18:21 -0600 Subject: [PATCH 35/48] draft ROM.fit(), ParametricROM.fit() --- .gitignore | 1 + src/opinf/models/_utils.py | 19 +++- src/opinf/models/mono/_parametric.py | 7 +- src/opinf/roms/_base.py | 123 ++++++++++++++++++++++- src/opinf/roms/_nonparametric.py | 109 ++++++++------------ src/opinf/roms/_parametric.py | 145 ++++++++++++++++++++++++++- tests/roms/test_nonparametric.py | 3 +- tests/roms/test_parametric.py | 52 ++++++++++ 8 files changed, 384 insertions(+), 75 deletions(-) create mode 100644 tests/roms/test_parametric.py diff --git a/.gitignore b/.gitignore index 85e17210..9116efff 100644 --- a/.gitignore +++ b/.gitignore @@ -52,3 +52,4 @@ html/ # Other *.swp .markdownlint.json +Notes/ diff --git a/src/opinf/models/_utils.py b/src/opinf/models/_utils.py index af20b2f7..77828deb 100644 --- a/src/opinf/models/_utils.py +++ 
b/src/opinf/models/_utils.py @@ -4,12 +4,19 @@ __all__ = [ "is_continuous", "is_discrete", + "is_parametric", + "is_nonparametric", ] -from .mono._nonparametric import ContinuousModel, DiscreteModel +from .mono._nonparametric import ( + ContinuousModel, + DiscreteModel, + _NonparametricModel, +) from .mono._parametric import ( _ParametricContinuousMixin, _ParametricDiscreteMixin, + _ParametricModel, ) @@ -27,3 +34,13 @@ def is_discrete(model): model, (DiscreteModel, _ParametricDiscreteMixin), ) + + +def is_nonparametric(model): + """``True`` if the model is nonparametric.""" + return isinstance(model, _NonparametricModel) + + +def is_parametric(model): + """``True`` if the model is parametric.""" + return isinstance(model, _ParametricModel) diff --git a/src/opinf/models/mono/_parametric.py b/src/opinf/models/mono/_parametric.py index 55ea72e4..36207a44 100644 --- a/src/opinf/models/mono/_parametric.py +++ b/src/opinf/models/mono/_parametric.py @@ -398,7 +398,7 @@ def fit(self, parameters, states, lhs, inputs=None): Input training data. Each array ``inputs[i]`` is the data corresponding to parameter value ``parameters[i]``; each column ``inputs[i][:, j]`` corresponds to the snapshot ``states[:, j]``. - May be a two-dimensional array if `m=1` (scalar input). + May be a two-dimensional array if :math:`m=1` (scalar input). Returns ------- @@ -607,7 +607,7 @@ def fit(self, parameters, states, nextstates=None, inputs=None): Input training data. Each array ``inputs[i]`` is the data corresponding to parameter value ``parameters[i]``; each column ``inputs[i][:, j]`` corresponds to the snapshot ``states[:, j]``. - May be a two-dimensional array if `m=1` (scalar input). + May be a two-dimensional array if :math:`m=1` (scalar input). Returns ------- @@ -815,7 +815,8 @@ def fit(self, parameters, states, ddts, inputs=None): corresponding to parameter value ``parameters[i]``; each column ``inputs[i][:, j]`` corresponds to the snapshot ``states[i][:, j]``. 
- May be a two-dimensional array if `m=1` (scalar input). + May be a two-dimensional array if :math:`m=1` (scalar input). + Only required if one or more model operators depend on inputs. Returns ------- diff --git a/src/opinf/roms/_base.py b/src/opinf/roms/_base.py index 6b454bef..c316c48f 100644 --- a/src/opinf/roms/_base.py +++ b/src/opinf/roms/_base.py @@ -5,6 +5,7 @@ import abc import warnings +import numpy as np from .. import errors, utils from .. import lift, pre, basis as _basis, ddt @@ -291,9 +292,125 @@ def project(self, states): # Abstract methods -------------------------------------------------------- @abc.abstractmethod - def fit(self, *args, **kwargs): - """Calibrate the model to the data.""" - raise NotImplementedError # pragma: no cover + def fit( + self, + states, + lhs, + inputs, + fit_transformer: bool, + fit_basis: bool, + ): + """Calibrate the model to training data. + + Child classes should overwrite this method to include a call to + the ``fit()`` method of :attr:`model`. + + Parameters + ---------- + states : list of s (n, k_i) ndarrays + State snapshots in the original state space. Each array + ``states[i]`` is data for a single trajectory; each column + ``states[i][:, j]`` is one snapshot. + lhs : list of s (n, k_i) ndarrays or None + Left-hand side regression data. Each array ``lhs[i]`` is the data + corresponding to parameter value ``parameters[i]``; each column + ``lhs[i][:, j]`` corresponds to the snapshot ``states[i][:, j]``. + + - If the model is time continuous, these are the time derivatives + of the state snapshots. + - If the model is fully discrete, these are the "next states" + corresponding to the state snapshots. + + If ``None``, these are estimated using :attr:`ddt_estimator` + (time continuous) or extracted from ``states`` (fully discrete). + inputs : list of s (m, k_i) ndarrays or None + Input training data. 
Each array ``inputs[i]`` is the data + corresponding to parameter value ``parameters[i]``; each column + ``inputs[i][:, j]`` corresponds to the snapshot ``states[:, j]``. + May be a two-dimensional array if :math:`m=1` (scalar input). + Only required if one or more model operators depend on inputs. + fit_transformer : bool + If ``True``, calibrate the preprocessing transformation + using the ``states``. If ``False``, assume the transformer is + already calibrated. + fit_basis : bool + If ``True``, calibrate the high-to-low dimensional mapping + using the ``states``. + If ``False``, assume the basis is already calibrated. + + Returns + ------- + self + """ + # Lifting. + if self.lifter is not None: + if lhs is not None: + if self._iscontinuous: + lhs = [ + self.lifter.lift_ddts(Q, Z) + for Q, Z in zip(states, lhs) + ] + else: + lhs = [self.lifter.lift(Z) for Z in lhs] + states = [self.lifter.lift(Q) for Q in states] + + # Preprocessing. + if self.transformer is not None: + if fit_transformer: + self.transformer.fit(np.hstack(states)) + states = [self.transformer.transform(Q) for Q in states] + if lhs is not None: + if self._iscontinuous: + lhs = [self.transformer.transform_ddts(Z) for Z in lhs] + else: + lhs = [self.transformer.transform(Z) for Z in lhs] + elif fit_transformer: + warnings.warn( + "fit_transformer=True ignored because transformer=None", + errors.OpInfWarning, + ) + + # Dimensionality reduction. 
+ if self.basis is not None: + if fit_basis: + self.basis.fit(np.hstack(states)) + states = [self.basis.compress(Q) for Q in states] + if lhs is not None: + lhs = [self.basis.compress(Z) for Z in lhs] + elif fit_basis: + warnings.warn( + "fit_basis=True ignored because basis=None", + errors.OpInfWarning, + ) + + # Time derivative estimation / discrete LHS + if lhs is None: + if self._iscontinuous: + if self.ddt_estimator is None: + raise ValueError( + "argument 'lhs' required when model is time-continuous" + " and ddt_estimator=None" + ) + if inputs is None: + states, lhs = zip( + *[self.ddt_estimator.estimate(Q) for Q in states] + ) + else: + states, lhs, inputs = zip( + *[ + self.ddt_estimator.estimate(Q, U) + for Q, U in zip(states, inputs) + ] + ) + else: + lhs = [Q[:, 1:] for Q in states] + states = [Q[:, :-1] for Q in states] + if inputs is not None: + inputs = [ + U[..., : Q.shape[1]] for Q, U in zip(states, inputs) + ] + + return states, lhs, inputs @abc.abstractmethod def predict(self, *args, **kwargs): diff --git a/src/opinf/roms/_nonparametric.py b/src/opinf/roms/_nonparametric.py index 2c6378a0..d61cc1f9 100644 --- a/src/opinf/roms/_nonparametric.py +++ b/src/opinf/roms/_nonparametric.py @@ -5,10 +5,8 @@ "ROM", ] -import warnings - +from ..models import _utils as modutils from ._base import _BaseROM -from .. import errors, models class ROM(_BaseROM): @@ -50,16 +48,8 @@ def __init__( ddt_estimator=None, ): """Store each argument as an attribute.""" - # Verify model. 
- if not isinstance( - model, - (models.ContinuousModel, models.DiscreteModel), - ): - raise TypeError( - "'model' must be a " - "models.ContinuousModel or models.DiscreteModel instance" - ) - + if not modutils.is_nonparametric(model): + raise TypeError("'model' must be a nonparametric model instance") super().__init__(model, lifter, transformer, basis, ddt_estimator) def __str__(self): @@ -72,30 +62,43 @@ def fit( states, lhs=None, inputs=None, - inplace: bool = False, fit_transformer: bool = True, fit_basis: bool = True, ): - """Calibrate the model to the data. + """Calibrate the model to training data. Parameters ---------- - states : (n, k) ndarray - State snapshots in the original state space. - lhs : (n, k) ndarray or None - Left-hand side regression data. + states : list of s (n, k_i) ndarrays + State snapshots in the original state space. Each array + ``states[i]`` is data corresponding to a different trajectory; + each column ``states[i][:, j]`` is one snapshot. + If there is only one trajectory of training data (s = 1), + ``states`` may be an (n, k) ndarray. In this case, it is assumed + that ``lhs`` and ``inputs`` (if given) are arrays, not a sequence + of arrays. + lhs : list of s (n, k_i) ndarrays or None + Left-hand side regression data. Each array ``lhs[i]`` is the data + corresponding to parameter value ``parameters[i]``; each column + ``lhs[i][:, j]`` corresponds to the snapshot ``states[i][:, j]``. - If the model is time continuous, these are the time derivatives of the state snapshots. - If the model is fully discrete, these are the "next states" corresponding to the state snapshots. - inplace : bool - If ``True``, modify the ``states`` and ``lhs`` in-place in the - preprocessing transformation (if applicable). + + If ``None``, these are estimated using :attr:`ddt_estimator` + (time continuous) or extracted from ``states`` (fully discrete). + inputs : list of s (m, k_i) ndarrays or None + Input training data. 
Each array ``inputs[i]`` is the data + corresponding to parameter value ``parameters[i]``; each column + ``inputs[i][:, j]`` corresponds to the snapshot ``states[:, j]``. + May be a two-dimensional array if :math:`m=1` (scalar input). + Only required if one or more model operators depend on inputs. fit_transformer : bool - If ``True`` (default), calibrate the high-to-low dimensional - mapping using the ``states``. - If ``False``, assume the transformer is already calibrated. + If ``True`` (default), calibrate the preprocessing transformation + using the ``states``. If ``False``, assume the transformer is + already calibrated. fit_basis : bool If ``True``, calibrate the high-to-low dimensional mapping using the ``states``. @@ -105,63 +108,39 @@ def fit( ------- self """ + # Single trajectory case. + if states[0].ndim == 1: + states = [states] + if lhs is not None: + lhs = [lhs] + if inputs is not None: + inputs = [inputs] - # Express the states and the LHS in the latent state space. - reduced = self.encode( - states, + states, lhs, inputs = _BaseROM.fit( + self, + states=states, lhs=lhs, - inplace=inplace, + inputs=inputs, fit_transformer=fit_transformer, fit_basis=fit_basis, ) - if lhs is None: - states = reduced - else: - states, lhs = reduced - - # If needed, estimate time derivatives. - if self._iscontinuous: - if lhs is None: - if self.ddt_estimator is None: - raise ValueError( - "ddt_estimator required for time-continuous model " - "and lhs=None" - ) - estimated = self.ddt_estimator.estimate(states, inputs) - if inputs is None: - states, lhs = estimated - else: - states, lhs, inputs = estimated - elif self.ddt_estimator is not None: - warnings.warn( - "using provided time derivatives, ignoring ddt_estimator", - errors.OpInfWarning, - ) - - # Calibrate the model. 
- kwargs = dict(inputs=inputs) - if self._iscontinuous: - self.model.fit(states, lhs, **kwargs) - else: - if lhs is not None: - kwargs["nextstates"] = lhs - self.model.fit(states, **kwargs) - + self.model.fit(states, lhs, inputs) return self def predict(self, state0, *args, **kwargs): """Evaluate the reduced-order model. - Parameters are the same as the model's ``predict()`` method. + Arguments are the same as the ``predict()`` method of :attr:`model`. Parameters ---------- state0 : (n,) ndarray Initial state, expressed in the original state space. *args : list - Other positional arguments to ``model.predict()``. + Other positional arguments to the ``predict()`` method of + :attr:`model`. **kwargs : dict - Keyword arguments to ``model.predict()``. + Keyword arguments to the ``predict()`` method of :attr:`model`. Returns ------- diff --git a/src/opinf/roms/_parametric.py b/src/opinf/roms/_parametric.py index e5aa6767..56940233 100644 --- a/src/opinf/roms/_parametric.py +++ b/src/opinf/roms/_parametric.py @@ -1,4 +1,147 @@ # roms/_parametric.py """Parametric ROM classes.""" -__all__ = [] +__all__ = [ + "ParametricROM", +] + +from ..models import _utils as modutils +from ._base import _BaseROM + + +class ParametricROM(_BaseROM): + r"""Parametric reduced-order model. + + This class connects classes from the various submodules to form a complete + reduced-order modeling workflow. + + High-dimensional data + :math:`\to` transformed / preprocessed data + :math:`\to` compressed data + :math:`\to` low-dimensional model. + + Parameters + ---------- + model : :mod:`opinf.models` object + Parametric system model, an instance of one of the following: + + * :class:`opinf.models.ParametricContinuousModel` + * :class:`opinf.models.ParametricDiscreteModel` + * :class:`opinf.models.InterpContinuousModel` + * :class:`opinf.models.InterpDiscreteModel` + lifter : :mod:`opinf.lift` object or None + Lifting transformation. + transformer : :mod:`opinf.pre` object or None + Preprocesser. 
+ basis : :mod:`opinf.basis` object or None + Dimensionality reducer. + ddt_estimator : :mod:`opinf.ddt` object or None + Time derivative estimator. + Ignored if ``model`` is not time continuous. + """ + + def __init__( + self, + model, + *, + lifter=None, + transformer=None, + basis=None, + ddt_estimator=None, + ): + """Store each argument as an attribute.""" + if not modutils.is_parametric(model): + raise TypeError("'model' must be a parametric model instance") + super().__init__(model, lifter, transformer, basis, ddt_estimator) + + def __str__(self): + """String representation.""" + return f"Parametric {_BaseROM.__str__(self)}" + + # Training and evaluation ------------------------------------------------- + def fit( + self, + parameters, + states, + lhs=None, + inputs=None, + fit_transformer: bool = True, + fit_basis: bool = True, + ): + """Calibrate the model to training data. + + Parameters + ---------- + parameters : list of s (floats or (p,) ndarrays) + Parameter values for which training data are available. + states : list of s (n, k_i) ndarrays + State snapshots in the original state space. Each array + ``states[i]`` is the data corresponding to parameter value + ``parameters[i]``; each column ``states[i][:, j]`` is one snapshot. + lhs : list of s (n, k_i) ndarrays or None + Left-hand side regression data. Each array ``lhs[i]`` is the data + corresponding to parameter value ``parameters[i]``; each column + ``lhs[i][:, j]`` corresponds to the snapshot ``states[i][:, j]``. + + - If the model is time continuous, these are the time derivatives + of the state snapshots. + - If the model is fully discrete, these are the "next states" + corresponding to the state snapshots. + + If ``None``, these are estimated using :attr:`ddt_estimator` + (time continuous) or extracted from ``states`` (fully discrete). + inputs : list of s (m, k_i) ndarrays or None + Input training data. 
Each array ``inputs[i]`` is the data + corresponding to parameter value ``parameters[i]``; each column + ``inputs[i][:, j]`` corresponds to the snapshot ``states[:, j]``. + May be a two-dimensional array if :math:`m=1` (scalar input). + Only required if one or more model operators depend on inputs. + fit_transformer : bool + If ``True`` (default), calibrate the preprocessing transformation + using the ``states``. If ``False``, assume the transformer is + already calibrated. + fit_basis : bool + If ``True`` (default), calibrate the high-to-low dimensional + mapping using the ``states``. + If ``False``, assume the basis is already calibrated. + + Returns + ------- + self + """ + states, lhs, inputs = _BaseROM.fit( + self, + states=states, + lhs=lhs, + inputs=inputs, + fit_transformer=fit_transformer, + fit_basis=fit_basis, + ) + self.model.fit(parameters, states, lhs, inputs) + return self + + def predict(self, parameter, state0, *args, **kwargs): + r"""Evaluate the reduced-order model. + + Arguments are the same as the ``predict()`` method of :attr:`model`. + + Parameters + ---------- + parameter : (p,) ndarray + Parameter value :math:`\bfmu`. + state0 : (n,) ndarray + Initial state, expressed in the original state space. + *args : list + Other positional arguments to the ``predict()`` method of + :attr:`model`. + **kwargs : dict + Keyword arguments to the ``predict()`` method of :attr:`model`. + + Returns + ------- + states: (n, k) ndarray + Solution to the model, expressed in the original state space. 
+ """ + q0_ = self.encode(state0, fit_transformer=False, fit_basis=False) + states = self.model.predict(parameter, q0_, *args, **kwargs) + return self.decode(states) diff --git a/tests/roms/test_nonparametric.py b/tests/roms/test_nonparametric.py index c30fe69b..6f49c0ee 100644 --- a/tests/roms/test_nonparametric.py +++ b/tests/roms/test_nonparametric.py @@ -34,8 +34,7 @@ def test_init(self): with pytest.raises(TypeError) as ex: self.ROM(10) assert ex.value.args[0] == ( - "'model' must be a models.ContinuousModel " - "or models.DiscreteModel instance" + "'model' must be a nonparametric model instance" ) # Other arguments. diff --git a/tests/roms/test_parametric.py b/tests/roms/test_parametric.py new file mode 100644 index 00000000..44692031 --- /dev/null +++ b/tests/roms/test_parametric.py @@ -0,0 +1,52 @@ +# roms/test_parametric.py +"""Tests for roms._parametric.py.""" + +import pytest + +import opinf + +from .test_base import _TestBaseROM + + +_module = opinf.roms + + +class TestROM(_TestBaseROM): + """Test roms.ROM.""" + + ROM = _module.ParametricROM + ModelClasses = ( + opinf.models.ParametricContinuousModel, + opinf.models.ParametricDiscreteModel, + opinf.models.InterpContinuousModel, + opinf.models.InterpDiscreteModel, + ) + + def _get_models(self): + """Return a list of valid model instantiations.""" + return [ + opinf.models.InterpContinuousModel("A"), + opinf.models.InterpDiscreteModel("AB"), + ] + + def test_init(self): + """Test __init__() and properties.""" + + # Model error. + with pytest.raises(TypeError) as ex: + self.ROM(10) + assert ex.value.args[0] == ( + "'model' must be a parametric model instance" + ) + + # Other arguments. + super().test_init() + + # DONE TO HERE. 
+ def test_fit(self): + """Test fit().""" + raise NotImplementedError + + def test_predict(self): + """Test predict().""" + raise NotImplementedError From 159bd2877dd1639897badb03c91d2c144894808b Mon Sep 17 00:00:00 2001 From: Shane Date: Thu, 5 Sep 2024 14:45:38 -0600 Subject: [PATCH 36/48] tests + fixes for [Parametric]ROM.fit(), predict() --- src/opinf/operators/_interpolate.py | 5 +- src/opinf/roms/_base.py | 10 -- src/opinf/roms/_nonparametric.py | 9 +- tests/roms/test_base.py | 10 -- tests/roms/test_nonparametric.py | 124 +++++++++++++++++++++++- tests/roms/test_parametric.py | 142 ++++++++++++++++++++++++++-- 6 files changed, 266 insertions(+), 34 deletions(-) diff --git a/src/opinf/operators/_interpolate.py b/src/opinf/operators/_interpolate.py index a0056d72..629d1994 100644 --- a/src/opinf/operators/_interpolate.py +++ b/src/opinf/operators/_interpolate.py @@ -348,7 +348,10 @@ def evaluate(self, parameter): self._check_parametervalue_dimension(parameter) if self.parameter_dimension == 1 and not np.isscalar(parameter): parameter = parameter[0] - return self._OperatorClass(self.interpolator(parameter)) + entries = self.interpolator(parameter) + if entries.ndim == 3: + entries = entries[0] + return self._OperatorClass(entries) # Dimensionality reduction ------------------------------------------------ @utils.requires("entries") diff --git a/src/opinf/roms/_base.py b/src/opinf/roms/_base.py index c316c48f..eb8cd0c1 100644 --- a/src/opinf/roms/_base.py +++ b/src/opinf/roms/_base.py @@ -364,11 +364,6 @@ def fit( lhs = [self.transformer.transform_ddts(Z) for Z in lhs] else: lhs = [self.transformer.transform(Z) for Z in lhs] - elif fit_transformer: - warnings.warn( - "fit_transformer=True ignored because transformer=None", - errors.OpInfWarning, - ) # Dimensionality reduction. 
         if self.basis is not None:
@@ -377,11 +372,6 @@ def fit(
             states = [self.basis.compress(Q) for Q in states]
             if lhs is not None:
                 lhs = [self.basis.compress(Z) for Z in lhs]
-        elif fit_basis:
-            warnings.warn(
-                "fit_basis=True ignored because basis=None",
-                errors.OpInfWarning,
-            )
 
         # Time derivative estimation / discrete LHS
         if lhs is None:
diff --git a/src/opinf/roms/_nonparametric.py b/src/opinf/roms/_nonparametric.py
index d61cc1f9..6647f849 100644
--- a/src/opinf/roms/_nonparametric.py
+++ b/src/opinf/roms/_nonparametric.py
@@ -5,6 +5,8 @@
     "ROM",
 ]
 
+import numpy as np
+
 from ..models import _utils as modutils
 
 from ._base import _BaseROM
@@ -124,7 +126,12 @@ def fit(
             fit_transformer=fit_transformer,
             fit_basis=fit_basis,
         )
-        self.model.fit(states, lhs, inputs)
+
+        # Concatenate trajectories.
+        if inputs is not None:
+            inputs = np.hstack(inputs)
+        self.model.fit(np.hstack(states), np.hstack(lhs), inputs)
+
         return self
 
     def predict(self, state0, *args, **kwargs):
diff --git a/tests/roms/test_base.py b/tests/roms/test_base.py
index a26be40c..3388bca4 100644
--- a/tests/roms/test_base.py
+++ b/tests/roms/test_base.py
@@ -292,13 +292,3 @@ def _check(rom, preserved=False):
         # Lifter, transformer, and basis.
         a1, a2, a3 = self._get("lifter", "transformer", "basis")
         _check(self.ROM(model, lifter=a1, transformer=a2, basis=a3))
-
-    @abc.abstractmethod
-    def test_fit(self):
-        """Test fit()."""
-        raise NotImplementedError  # pragma: no cover
-
-    @abc.abstractmethod
-    def test_predict(self):
-        """Test predict().""" 
-        raise NotImplementedError  # pragma: no cover
diff --git a/tests/roms/test_nonparametric.py b/tests/roms/test_nonparametric.py
index 6f49c0ee..962eeed1 100644
--- a/tests/roms/test_nonparametric.py
+++ b/tests/roms/test_nonparametric.py
@@ -2,6 +2,7 @@
 """Tests for roms._nonparametric.py."""
 
 import pytest
+import numpy as np
 
 import opinf
 
@@ -40,11 +41,124 @@ def test_init(self):
         # Other arguments.
         super().test_init()
 
-        # DONE TO HERE.
-    def test_fit(self):
+    def test_fit(self, n=10, m=3, s=3, k0=50):
         """Test fit()."""
-        raise NotImplementedError
+        states = [np.random.standard_normal((n, k0 + i)) for i in range(s)]
+        lhs = [np.zeros_like(Q) for Q in states]
+        inputs = [np.ones((m, Q.shape[-1])) for Q in states]
 
-    def test_predict(self):
+        def _fit(prom, withlhs=True, singletrajectory=False):
+            kwargs = dict(states=states)
+            if withlhs:
+                kwargs["lhs"] = lhs
+            if prom.model._has_inputs:
+                kwargs["inputs"] = inputs
+            if singletrajectory:
+                kwargs = {key: val[0] for key, val in kwargs.items()}
+            prom.fit(**kwargs)
+            assert rom.model.operators[0].entries is not None
+            if rom.basis is not None:
+                assert (r := rom.basis.reduced_state_dimension) == 3
+                assert model.state_dimension == r
+
+        for model in self._get_models():
+            # Model only.
+            rom = self.ROM(model)
+            _fit(rom)
+            assert model.state_dimension == n
+
+            # Model and basis.
+            rom = self.ROM(model, basis=self._get("basis"))
+            _fit(rom)
+            assert rom.basis.full_state_dimension == n
+            oldbasisentries = rom.basis.entries.copy()
+
+            # Make sure fit_basis=False doesn't change the basis.
+            rom.fit(
+                states=[Q + 1 for Q in states],
+                lhs=lhs,
+                inputs=inputs if model._has_inputs else None,
+                fit_basis=False,
+            )
+            assert np.array_equal(rom.basis.entries, oldbasisentries)
+
+            # Model and basis and transformer.
+            trans, base = self._get("transformer", "basis")
+            rom = self.ROM(model, transformer=trans, basis=base)
+            _fit(rom)
+            assert rom.transformer.state_dimension == n
+
+            # Make sure fit_transformer=False doesn't change the transformer.
+            z = np.random.random(n)
+            ztrans = rom.transformer.transform(z)
+            rom.fit(
+                states=[Q + 1 for Q in states],
+                lhs=lhs,
+                inputs=inputs if model._has_inputs else None,
+                fit_transformer=False,
+            )
+            ztrans2 = rom.transformer.transform(z)
+            assert np.allclose(ztrans2, ztrans)
+
+            # Model and lifter and basis and transformer.
+ lift, trans, base = self._get("lifter", "transformer", "basis") + rom = self.ROM(model, lifter=lift, transformer=trans, basis=base) + _fit(rom) + assert rom.transformer.state_dimension == 2 * n + assert rom.basis.full_state_dimension == 2 * n + + # Without lhs. + ddter = None + if rom._iscontinuous: + # Without ddt_estimator either. + rom = self.ROM(model) + with pytest.raises(ValueError) as ex: + _fit(rom, withlhs=False) + assert ex.value.args[0] == ( + "argument 'lhs' required when model is time-continuous " + "and ddt_estimator=None" + ) + + ddter = self._get("ddt_estimator") + + lift, trans, base = self._get("lifter", "transformer", "basis") + rom = self.ROM( + model, + lifter=lift, + transformer=trans, + basis=base, + ddt_estimator=ddter, + ) + _fit(rom, withlhs=False) + _fit(rom, singletrajectory=True) + + def test_predict(self, n=50, m=2, k=100): """Test predict().""" - raise NotImplementedError + states = np.random.standard_normal((n, k)) + inputs = np.ones((m, k)) + t = np.linspace(0, 0.1, k) + q0 = states[:, 0] + + cmodel, dmodel = self._get_models() + + # Continuous model. + lift, trans, base, ddter = self._get( + "lifter", "transformer", "basis", "ddt_estimator" + ) + rom = self.ROM( + cmodel, + lifter=lift, + transformer=trans, + basis=base, + ddt_estimator=ddter, + ) + rom.fit(states) + out = rom.predict(q0, t, input_func=None) + assert out.shape == (n, t.size) + + # Discrete model. 
+ lift, trans, base = self._get("lifter", "transformer", "basis") + rom = self.ROM(dmodel, lifter=lift, transformer=trans, basis=base) + rom.fit(states, inputs=inputs) + out = rom.predict(q0, k, inputs=inputs) + assert out.shape == (n, k) diff --git a/tests/roms/test_parametric.py b/tests/roms/test_parametric.py index 44692031..217d8c83 100644 --- a/tests/roms/test_parametric.py +++ b/tests/roms/test_parametric.py @@ -2,6 +2,7 @@ """Tests for roms._parametric.py.""" import pytest +import numpy as np import opinf @@ -25,8 +26,20 @@ class TestROM(_TestBaseROM): def _get_models(self): """Return a list of valid model instantiations.""" return [ - opinf.models.InterpContinuousModel("A"), - opinf.models.InterpDiscreteModel("AB"), + opinf.models.ParametricContinuousModel( + [ + opinf.operators.ConstantOperator(), + opinf.operators.AffineLinearOperator(3), + ] + ), + opinf.models.ParametricDiscreteModel( + [ + opinf.operators.AffineLinearOperator(3), + opinf.operators.InterpInputOperator(), + ] + ), + opinf.models.InterpContinuousModel("AB"), + opinf.models.InterpDiscreteModel("A"), ] def test_init(self): @@ -42,11 +55,126 @@ def test_init(self): # Other arguments. super().test_init() - # DONE TO HERE. 
-    def test_fit(self):
+    def test_fit(self, n=20, m=3, s=8, k0=50):
         """Test fit()."""
-        raise NotImplementedError
+        parameters = [np.sort(np.random.random(3)) for _ in range(s)]
+        states = [np.random.standard_normal((n, k0 + i)) for i in range(s)]
+        lhs = [np.zeros_like(Q) for Q in states]
+        inputs = [np.ones((m, Q.shape[-1])) for Q in states]
 
-    def test_predict(self):
+        def _fit(prom, withlhs=True):
+            kwargs = dict(parameters=parameters, states=states)
+            if withlhs:
+                kwargs["lhs"] = lhs
+            if prom.model._has_inputs:
+                kwargs["inputs"] = inputs
+            prom.fit(**kwargs)
+            assert rom.model.operators[0].entries is not None
+            if rom.basis is not None:
+                assert (r := rom.basis.reduced_state_dimension) == 3
+                assert model.state_dimension == r
+
+        for model in self._get_models():
+            # Model only.
+            rom = self.ROM(model)
+            _fit(rom)
+            assert model.state_dimension == n
+
+            # Model and basis.
+            rom = self.ROM(model, basis=self._get("basis"))
+            _fit(rom)
+            assert rom.basis.full_state_dimension == n
+            oldbasisentries = rom.basis.entries.copy()
+
+            # Make sure fit_basis=False doesn't change the basis.
+            rom.fit(
+                parameters=parameters,
+                states=[Q + 1 for Q in states],
+                lhs=lhs,
+                inputs=inputs if model._has_inputs else None,
+                fit_basis=False,
+            )
+            assert np.array_equal(rom.basis.entries, oldbasisentries)
+
+            # Model and basis and transformer.
+            trans, base = self._get("transformer", "basis")
+            rom = self.ROM(model, transformer=trans, basis=base)
+            _fit(rom)
+            assert rom.transformer.state_dimension == n
+
+            # Make sure fit_transformer=False doesn't change the transformer.
+            z = np.random.random(n)
+            ztrans = rom.transformer.transform(z)
+            rom.fit(
+                parameters=parameters,
+                states=[Q + 1 for Q in states],
+                lhs=lhs,
+                inputs=inputs if model._has_inputs else None,
+                fit_transformer=False,
+            )
+            ztrans2 = rom.transformer.transform(z)
+            assert np.allclose(ztrans2, ztrans)
+
+            # Model and lifter and basis and transformer.
+ lift, trans, base = self._get("lifter", "transformer", "basis") + rom = self.ROM(model, lifter=lift, transformer=trans, basis=base) + _fit(rom) + assert rom.transformer.state_dimension == 2 * n + assert rom.basis.full_state_dimension == 2 * n + + # Without lhs. + ddter = None + if rom._iscontinuous: + # Without ddt_estimator either. + rom = self.ROM(model) + with pytest.raises(ValueError) as ex: + _fit(rom, withlhs=False) + assert ex.value.args[0] == ( + "argument 'lhs' required when model is time-continuous " + "and ddt_estimator=None" + ) + + ddter = self._get("ddt_estimator") + + lift, trans, base = self._get("lifter", "transformer", "basis") + rom = self.ROM( + model, + lifter=lift, + transformer=trans, + basis=base, + ddt_estimator=ddter, + ) + _fit(rom, withlhs=False) + + def test_predict(self, n=50, m=2, s=10, k0=40): """Test predict().""" - raise NotImplementedError + parameters = [np.sort(np.random.random(3)) for _ in range(s)] + states = [np.random.standard_normal((n, k0 + i)) for i in range(s)] + inputs = [np.ones((m, Q.shape[-1])) for Q in states] + t = np.linspace(0, 0.1, k0) + testparam = np.mean(parameters, axis=0) + testinit = states[0][:, s // 2] + + cmodel, dmodel, _, _ = self._get_models() + + # Continuous model. + lift, trans, base, ddter = self._get( + "lifter", "transformer", "basis", "ddt_estimator" + ) + rom = self.ROM( + cmodel, + lifter=lift, + transformer=trans, + basis=base, + ddt_estimator=ddter, + ) + rom.fit(parameters, states) + out = rom.predict(testparam, testinit, t, input_func=None) + assert out.shape == (n, t.size) + + # Discrete model. 
+ lift, trans, base = self._get("lifter", "transformer", "basis") + rom = self.ROM(dmodel, lifter=lift, transformer=trans, basis=base) + rom.fit(parameters, states, inputs=inputs) + out = rom.predict(testparam, testinit, k0, inputs=inputs[0]) + assert out.shape == (n, k0) From 780f1f905c99a630d77df5347251217cd95bf81e Mon Sep 17 00:00:00 2001 From: Shane Date: Fri, 6 Sep 2024 12:11:49 -0600 Subject: [PATCH 37/48] intersphinx pandas --- .gitignore | 1 + docs/_config.yml | 3 +++ docs/_toc.yml | 4 ++++ 3 files changed, 8 insertions(+) diff --git a/.gitignore b/.gitignore index 9116efff..44995983 100644 --- a/.gitignore +++ b/.gitignore @@ -11,6 +11,7 @@ docs/source/opinf/literature.md __pycache__/ .ipynb_checkpoints/ .pytest_cache/ +.ruff_cache/ htmlcov/ .coverage* build/ diff --git a/docs/_config.yml b/docs/_config.yml index 83a192e9..2506b36f 100644 --- a/docs/_config.yml +++ b/docs/_config.yml @@ -68,6 +68,9 @@ sphinx: sklearn: - "https://scikit-learn.org/stable/" - null + pandas: + - "https://pandas.pydata.org/docs/" + - null mathjax3_config: tex: macros: diff --git a/docs/_toc.yml b/docs/_toc.yml index 7b09e30d..fdbb7a7b 100644 --- a/docs/_toc.yml +++ b/docs/_toc.yml @@ -21,7 +21,11 @@ parts: # numbered: 1 chapters: - file: source/tutorials/basics.ipynb + # - file: source/tutorials/inputs.ipynb - file: source/tutorials/heat_equation.ipynb + # - file: source/tutorials/lifting.ipynb + # - file: source/tutorials/regularization.ipynb + # - file: source/tutorials/parametric.ipynb # API reference via sphinx-autodoc + limited handwritten documentation. 
- caption: API Reference From e6f10684bd1f8eaa6428159e7e0a7c2795fc02c3 Mon Sep 17 00:00:00 2001 From: Shane Date: Fri, 6 Sep 2024 14:06:26 -0600 Subject: [PATCH 38/48] standardize __str__ --- src/opinf/basis/_base.py | 10 +++---- src/opinf/basis/_pod.py | 8 +++--- src/opinf/ddt/_finite_difference.py | 21 +++++++++------ src/opinf/lstsq/_base.py | 8 +++--- src/opinf/lstsq/_tikhonov.py | 30 +++++++++++++++++---- src/opinf/models/mono/_nonparametric.py | 27 ++++++++++--------- src/opinf/operators/_base.py | 4 +++ src/opinf/roms/_base.py | 22 +++++++--------- src/opinf/roms/_nonparametric.py | 4 --- src/opinf/roms/_parametric.py | 4 --- tests/basis/test_base.py | 11 +++----- tests/basis/test_linear.py | 13 ++------- tests/basis/test_multi.py | 28 +++++++------------- tests/basis/test_pod.py | 32 ++++------------------ tests/lstsq/test_base.py | 21 +++------------ tests/models/mono/test_nonparametric.py | 35 +++++-------------------- 16 files changed, 108 insertions(+), 170 deletions(-) diff --git a/src/opinf/basis/_base.py b/src/opinf/basis/_base.py index 2c2fac76..512a7870 100644 --- a/src/opinf/basis/_base.py +++ b/src/opinf/basis/_base.py @@ -65,13 +65,13 @@ def name(self, label: str): def __str__(self): """String representation: class and dimensions.""" - out = [self.__class__.__name__] + out = [ + self.__class__.__name__, + f"full_state_dimension: {self.full_state_dimension}", + f"reduced_state_dimension: {self.reduced_state_dimension}", + ] if (name := self.name) is not None: out[0] = f"{out[0]} for variable '{name}'" - if (n := self.full_state_dimension) is not None: - out.append(f"Full state dimension n = {n:d}") - if (r := self.reduced_state_dimension) is not None: - out.append(f"Reduced state dimension r = {r:d}") return "\n ".join(out) def __repr__(self): diff --git a/src/opinf/basis/_pod.py b/src/opinf/basis/_pod.py index 1d503f8f..458e662c 100644 --- a/src/opinf/basis/_pod.py +++ b/src/opinf/basis/_pod.py @@ -333,15 +333,15 @@ def __str__(self): if (ce := 
self.cumulative_energy) is not None: if self.__energy_is_being_estimated: - out.append(f"Approximate cumulative energy: {ce:%}") + out.append(f"approximate cumulative energy: {ce:%}") else: - out.append(f"Cumulative energy: {ce:%}") + out.append(f"cumulative energy: {ce:%}") if (re := self.residual_energy) is not None: if self.__energy_is_being_estimated: - out.append(f"Approximate residual energy: {re:.4e}") + out.append(f"approximate residual energy: {re:.4e}") else: - out.append(f"Residual energy: {re:.4e}") + out.append(f"residual energy: {re:.4e}") if (mv := self.max_vectors) is not None: out.append(f"{mv:d} basis vectors available") diff --git a/src/opinf/ddt/_finite_difference.py b/src/opinf/ddt/_finite_difference.py index d6e477b5..c7b9a9bf 100644 --- a/src/opinf/ddt/_finite_difference.py +++ b/src/opinf/ddt/_finite_difference.py @@ -863,11 +863,13 @@ def scheme(self): return self.__scheme def __str__(self): - """String representation: class name, time domain.""" - head = DerivativeEstimatorTemplate.__str__(self) - tail = [f"time step: {self.dt:.2e}"] - tail.append(f"finite difference scheme: {self.scheme.__name__}()") - return f"{head}\n " + "\n ".join(tail) + return "\n ".join( + [ + DerivativeEstimatorTemplate.__str__(self), + f"dt: {self.dt:.4e}", + f"scheme: {self.scheme.__name__}()", + ] + ) # Main routine ------------------------------------------------------------ def estimate(self, states, inputs=None): @@ -927,9 +929,12 @@ def __init__(self, time_domain): def __str__(self): """String representation: class name, time domain.""" - head = DerivativeEstimatorTemplate.__str__(self) - tail = "finite difference engine: np.gradient(edge_order=2)" - return f"{head}\n {tail}" + return "\n ".join( + [ + DerivativeEstimatorTemplate.__str__(self), + "scheme: np.gradient(edge_order=2)", + ] + ) # Main routine ------------------------------------------------------------ def estimate(self, states, inputs=None): diff --git a/src/opinf/lstsq/_base.py 
b/src/opinf/lstsq/_base.py index 94aab551..2b4ac908 100644 --- a/src/opinf/lstsq/_base.py +++ b/src/opinf/lstsq/_base.py @@ -133,10 +133,10 @@ def __str__(self) -> str: """String representation: class name + dimensions.""" out = [self.__class__.__name__] if (self.data_matrix is not None) and (self.lhs_matrix is not None): - out.append(f" Data matrix: {self.data_matrix.shape}") - out.append(f" Condition number: {self.cond():.4e}") - out.append(f" LHS matrix: {self.lhs_matrix.shape}") - out.append(f" Operator matrix: {self.r, self.d}") + out.append(f" data_matrix: {self.data_matrix.shape}") + out.append(f" condition number: {self.cond():.4e}") + out.append(f" lhs_matrix: {self.lhs_matrix.shape}") + out.append(f" solve().shape: {self.r, self.d}") else: out[0] += " (not trained)" return "\n".join(out) diff --git a/src/opinf/lstsq/_tikhonov.py b/src/opinf/lstsq/_tikhonov.py index d0fc7872..415e941a 100644 --- a/src/opinf/lstsq/_tikhonov.py +++ b/src/opinf/lstsq/_tikhonov.py @@ -236,9 +236,18 @@ def options(self): def __str__(self): """String representation: dimensions + solver options.""" - start = SolverTemplate.__str__(self) kwargs = self._print_kwargs(self.options) - return start + f"\n SVD solver: scipy.linalg.svd({kwargs})" + if np.isscalar(self.regularizer): + regstr = f"{self.regularizer:.4e}" + else: + regstr = f"{self.regularizer.shape}" + return "\n ".join( + [ + SolverTemplate.__str__(self), + f"regularizer: {regstr}", + f"SVD solver: scipy.linalg.svd({kwargs})", + ] + ) # Main methods ------------------------------------------------------------ def fit(self, data_matrix: np.ndarray, lhs_matrix: np.ndarray): @@ -566,11 +575,22 @@ def options(self): def __str__(self): """String representation: dimensions + solver options.""" - s = SolverTemplate.__str__(self) + kwargs = self._print_kwargs(self.options) + if self.regularizer[0].ndim == 1: + regstr = f" {self.regularizer.shape}" + else: + regstr = ( + f" {len(self.regularizer)} " + f"{self.regularizer[0].shape} 
ndarrays" + ) if self.method == "lstsq": kwargs = self._print_kwargs(self.options) - return s + f"\n solver ('lstsq'): scipy.linalg.lstsq({kwargs})" - return s + "\n solver ('normal'): scipy.linalg.solve(assume_a='pos')" + spstr = f"solver ('lstsq'): scipy.linalg.lstsq({kwargs})" + else: + spstr = "solver ('normal'): scipy.linalg.solve(assume_a='pos')" + return "\n ".join( + [SolverTemplate.__str__(self), f"regularizer: {regstr}", spstr] + ) def _check_regularizer_shape(self): if (shape1 := self.regularizer.shape) != (shape2 := (self.d, self.d)): diff --git a/src/opinf/models/mono/_nonparametric.py b/src/opinf/models/mono/_nonparametric.py index 41dea863..40bdacce 100644 --- a/src/opinf/models/mono/_nonparametric.py +++ b/src/opinf/models/mono/_nonparametric.py @@ -90,20 +90,23 @@ def _get_operator_of_type(self, OpClass): # String representation --------------------------------------------------- def __str__(self): """String representation: structure of the model, dimensions, etc.""" - # Build model structure. - out, terms = [], [] + terms = [ + op._str(self._STATE_LABEL, self._INPUT_LABEL) + for op in self.operators + ] + + out = [ + self.__class__.__name__, + f"structure: {self._LHS_LABEL} = " + " + ".join(terms), + f"state_dimension: {self.state_dimension}", + f"input_dimension: {self.input_dimension}", + "operators:", + ] for op in self.operators: - terms.append(op._str(self._STATE_LABEL, self._INPUT_LABEL)) - structure = " + ".join(terms) - out.append(f"Model structure: {self._LHS_LABEL} = {structure}") + out.append(" " + "\n ".join(str(op).split("\n"))) + out.append("solver: " + "\n ".join(str(self.solver).split("\n"))) - # Report dimensions. 
- if self.state_dimension: - out.append(f"State dimension r = {self.state_dimension:d}") - if self.input_dimension: - out.append(f"Input dimension m = {self.input_dimension:d}") - - return "\n".join(out) + return "\n ".join(out) def __repr__(self): """Unique ID + string representation.""" diff --git a/src/opinf/operators/_base.py b/src/opinf/operators/_base.py index 3295da01..37ea6592 100644 --- a/src/opinf/operators/_base.py +++ b/src/opinf/operators/_base.py @@ -605,6 +605,10 @@ def __add__(self, other): ) return scls(self.entries + other.entries) + def __str__(self): + out = OperatorTemplate.__str__(self) + return out + f"\n entries.shape: {self.shape}" + # Evaluation -------------------------------------------------------------- @utils.requires("entries") def jacobian(self, state, input_=None) -> np.ndarray: # pragma: no cover diff --git a/src/opinf/roms/_base.py b/src/opinf/roms/_base.py index eb8cd0c1..d8b3945b 100644 --- a/src/opinf/roms/_base.py +++ b/src/opinf/roms/_base.py @@ -132,23 +132,19 @@ def _iscontinuous(self): # Printing ---------------------------------------------------------------- def __str__(self): """String representation.""" - lines = ["reduced-order model"] - - def indent(text): - return "\n".join(f" {line}" for line in text.rstrip().split("\n")) - + lines = [] for label, obj in [ - ("Lifting", self.lifter), - ("Transformer", self.transformer), - ("Basis", self.basis), - ("Time derivative estimator", self.ddt_estimator), - ("Model", self.model), + ("lifter", self.lifter), + ("transformer", self.transformer), + ("basis", self.basis), + ("ddt_estimator", self.ddt_estimator), + ("model", self.model), ]: if obj is not None: - lines.append(f"{label}:") - lines.append(indent(str(obj))) + lines.append(f"{label}: {str(obj)}") - return "\n".join(lines) + body = "\n ".join("\n".join(lines).split("\n")) + return f"{self.__class__.__name__}\n {body}" def __repr__(self): """Repr: address + string representatation.""" diff --git 
a/src/opinf/roms/_nonparametric.py b/src/opinf/roms/_nonparametric.py index 6647f849..c05087b4 100644 --- a/src/opinf/roms/_nonparametric.py +++ b/src/opinf/roms/_nonparametric.py @@ -54,10 +54,6 @@ def __init__( raise TypeError("'model' must be a nonparametric model instance") super().__init__(model, lifter, transformer, basis, ddt_estimator) - def __str__(self): - """String representation.""" - return f"Nonparametric {_BaseROM.__str__(self)}" - # Training and evaluation ------------------------------------------------- def fit( self, diff --git a/src/opinf/roms/_parametric.py b/src/opinf/roms/_parametric.py index 56940233..58240e99 100644 --- a/src/opinf/roms/_parametric.py +++ b/src/opinf/roms/_parametric.py @@ -54,10 +54,6 @@ def __init__( raise TypeError("'model' must be a parametric model instance") super().__init__(model, lifter, transformer, basis, ddt_estimator) - def __str__(self): - """String representation.""" - return f"Parametric {_BaseROM.__str__(self)}" - # Training and evaluation ------------------------------------------------- def fit( self, diff --git a/tests/basis/test_base.py b/tests/basis/test_base.py index 1e70422a..b3e24d71 100644 --- a/tests/basis/test_base.py +++ b/tests/basis/test_base.py @@ -48,21 +48,16 @@ def test_state_dimensions(self): assert basis.reduced_state_dimension is None def test_str(self): - """Test __str__() and __repr__().""" + """Lightly test __str__() and __repr__().""" basis = self.Dummy() - assert str(basis) == "Dummy" + str(basis) basis.full_state_dimension = 10 - assert str(basis) == "Dummy\n Full state dimension n = 10" + str(basis) basis.name = "varname" basis.reduced_state_dimension = 5 - assert str(basis) == ( - "Dummy for variable 'varname'" - "\n Full state dimension n = 10" - "\n Reduced state dimension r = 5" - ) assert repr(basis).count(str(basis)) == 1 def test_project(self, q=5): diff --git a/tests/basis/test_linear.py b/tests/basis/test_linear.py index cdba410b..7ea6217c 100644 --- 
a/tests/basis/test_linear.py +++ b/tests/basis/test_linear.py @@ -89,20 +89,11 @@ def test_init(self, n=10, r=3): assert ex.value.args[0] == "expected one- or two-dimensional weights" def test_str(self): - """Test __str__() and __repr__().""" + """Lightly test __str__() and __repr__().""" basis = self.Basis(self._orth(10, 4)) - assert str(basis) == ( - "LinearBasis" - "\n Full state dimension n = 10" - "\n Reduced state dimension r = 4" - ) + str(basis) basis = self.Basis(self._orth(9, 5), name="varname") - assert str(basis) == ( - "LinearBasis for variable 'varname'" - "\n Full state dimension n = 9" - "\n Reduced state dimension r = 5" - ) assert repr(basis).count(str(basis)) == 1 # Dimension reduction ---------------------------------------------------- diff --git a/tests/basis/test_multi.py b/tests/basis/test_multi.py index 28e843b5..62b7f51a 100644 --- a/tests/basis/test_multi.py +++ b/tests/basis/test_multi.py @@ -64,7 +64,7 @@ class Dummy3(Dummy2): pass def test_init(self): - """Test BasisMulti.__init__(), bases, dimensions.""" + """Test __init__(), bases, dimensions.""" bases = [self.Dummy(), self.Dummy2(), self.Dummy3(name="third")] basis = self.Basis(bases) assert basis.num_variables == len(bases) @@ -123,7 +123,7 @@ class ExtraDummy: # Magic methods ----------------------------------------------------------- def test_getitem(self): - """Test BasisMulti.__getitem__().""" + """Test __getitem__().""" bases = [self.Dummy(), self.Dummy2(), self.Dummy()] basis = self.Basis(bases) for i, bs in enumerate(bases): @@ -135,7 +135,7 @@ def test_getitem(self): assert basis[name] is bases[i] def test_eq(self): - """Test BasisMulti.__eq__().""" + """Test __eq__().""" bases = [self.Dummy(), self.Dummy2(), self.Dummy3()] basis1 = self.Basis(bases) @@ -152,23 +152,13 @@ def test_eq(self): assert basis1 == basis2 def test_str(self): - """Test BasisMulti.__str__().""" - bases = [self.Dummy(), self.Dummy2()] - basis = self.Basis(bases) - - stringrep = str(basis) - assert 
stringrep.startswith("2-variable BasisMulti\n") - for bs in bases: - assert str(bs) in stringrep - - # Quick repr() test. - rep = repr(basis) - assert stringrep in rep - assert str(hex(id(basis))) in rep + """Lightly test __str__() and __repr__().""" + basis = self.Basis([self.Dummy(), self.Dummy2()]) + assert repr(basis).count(str(basis)) == 1 # Convenience methods ----------------------------------------------------- def test_get_var(self, ns=(4, 5, 6), rs=(2, 3, 4), k=5): - """Test BasisMulti.get_var().""" + """Test get_var().""" basis_A = self.Dummy(name="A") basis_B = self.Dummy(name="B") basis_C = self.Dummy(name="C") @@ -216,7 +206,7 @@ def test_get_var(self, ns=(4, 5, 6), rs=(2, 3, 4), k=5): assert ex.value.args[0].startswith("states.shape[0] must be") def test_split(self, ns=(11, 13), rs=(5, 7), k=5): - """Test BasisMulti.split().""" + """Test split().""" bases = [self.Dummy(), self.Dummy2()] basis = self.Basis(bases, ns) @@ -332,7 +322,7 @@ def test_save(self): os.remove(target) def test_load(self): - """Test BasisMulti.load().""" + """Test load().""" target = "_loadbasismultitest.h5" if os.path.isfile(target): # pragma: no cover os.remove(target) diff --git a/tests/basis/test_pod.py b/tests/basis/test_pod.py index c33e2c38..522b0ec3 100644 --- a/tests/basis/test_pod.py +++ b/tests/basis/test_pod.py @@ -203,45 +203,23 @@ def test_set_dimension(self, n=40, k=11, r=9): assert basis.projection_error(Q, relative=True) < 0.02 def test_str(self, n=30, k=20, r=10): - """Test __str__().""" + """Lightly test __str__() and __repr__().""" basis = self.Basis(num_vectors=r) - strbasis = str(basis) - assert strbasis.count("\n") == 1 - assert strbasis.endswith("SVD solver: scipy.linalg.svd()") + str(basis) Q = np.random.random((n, k)) basis.fit(Q) - strbasis = str(basis) - assert strbasis.count(f"Full state dimension n = {n}") == 1 - assert strbasis.count(f"Reduced state dimension r = {r}") == 1 - assert strbasis.count(f"{k} basis vectors available") == 1 - assert 
strbasis.count("Cumulative energy:") == 1 - assert strbasis.count("Residual energy:") == 1 - assert strbasis.endswith("SVD solver: scipy.linalg.svd()") + str(basis) basis = self.Basis( num_vectors=r, max_vectors=r, svdsolver="randomized", ).fit(Q) - strbasis = str(basis) - assert strbasis.count(f"Full state dimension n = {n}") == 1 - assert strbasis.count(f"Reduced state dimension r = {r}") == 1 - assert strbasis.count(f"{r} basis vectors available") == 1 - assert strbasis.count("Approximate cumulative energy:") == 1 - assert strbasis.count("Approximate residual energy:") == 1 - assert strbasis.endswith("sklearn.utils.extmath.randomized_svd()") + str(basis) basis = self.Basis(num_vectors=r, svdsolver=lambda s: s) - strbasis = str(basis) - assert strbasis.endswith("SVD solver: custom lambda function") - - def mysvdsolver(*args): - pass - - basis = self.Basis(num_vectors=r, svdsolver=mysvdsolver) - strbasis = str(basis) - assert strbasis.endswith("SVD solver: mysvdsolver()") + assert repr(basis).count(str(basis)) == 1 def test_fit(self, n=60, k=20, r=4): """Test fit().""" diff --git a/tests/lstsq/test_base.py b/tests/lstsq/test_base.py index 9cbda199..cd5cf566 100644 --- a/tests/lstsq/test_base.py +++ b/tests/lstsq/test_base.py @@ -106,29 +106,14 @@ def test_fit(self, k=30, d=20, r=5): # String representations -------------------------------------------------- def test_str(self, k=20, d=6, r=3): - """Test __str__() and __repr__().""" - # Before fitting. 
+ """Lightly test __str__() and __repr__().""" solver = self.Dummy() - assert str(solver) == "Dummy (not trained)" - - rep = repr(solver) - assert rep.startswith(" Date: Fri, 6 Sep 2024 14:25:20 -0600 Subject: [PATCH 39/48] update introductory tutorial --- docs/source/tutorials/basics.ipynb | 402 ++++++++++++++++------------- 1 file changed, 218 insertions(+), 184 deletions(-) diff --git a/docs/source/tutorials/basics.ipynb b/docs/source/tutorials/basics.ipynb index 0cc1b536..45e87d01 100644 --- a/docs/source/tutorials/basics.ipynb +++ b/docs/source/tutorials/basics.ipynb @@ -37,11 +37,12 @@ "metadata": {}, "source": [ ":::{admonition} Governing Equations\n", - ":class: attention\n", + ":class: note\n", "\n", "For the spatial domain $\\Omega = [0,L]\\subset \\RR$ and the time domain $[t_0,t_f]\\subset\\RR$, consider the one-dimensional heat equation with homogeneous Dirichlet boundary conditions:\n", "\n", - "\\begin{align*}\n", + "$$\n", + "\\begin{aligned}\n", " &\\frac{\\partial}{\\partial t} q(x,t) = \\frac{\\partial^2}{\\partial x^2}q(x,t)\n", " & x &\\in\\Omega,\\quad t\\in(t_0,t_f],\n", " \\\\\n", @@ -50,14 +51,15 @@ " \\\\\n", " &q(x,t_0) = q_{0}(x)\n", " & x &\\in \\Omega.\n", - "\\end{align*}\n", + "\\end{aligned}\n", + "$$\n", "\n", "This is a model for a one-dimensional rod that conducts heat.\n", "The unknown state variable $q(x,t)$ represents the temperature of the rod at location $x$ and time $t$; the temperature at the ends of the rod are fixed at $0$ and heat is allowed to flow out of the rod at the ends.\n", ":::\n", "\n", ":::{admonition} Objective\n", - ":class: attention\n", + ":class: note\n", "\n", "Construct a low-dimensional system of ordinary differential equations, called the _reduced-order model_ (ROM), which can be solved rapidly to produce approximate solutions $q(x, t)$ to the partial differential equation given above. 
We will use OpInf to learn the ROM from high-fidelity data for one choice of initial condition $q_0(x)$ and test its performance on new initial conditions.\n", ":::" @@ -67,40 +69,61 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Training Data" + "We will make use of {mod}`numpy`, {mod}`scipy`, and {mod}`matplotlib` from the standard Python scientific stack, which are all automatically installed when `opinf` is [installed](../opinf/installation.md).\n", + "The {mod}`pandas` library is also used later to consolidate and report results." ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": 1, "metadata": {}, + "outputs": [], "source": [ - "We begin by generating training data through a traditional finite difference discretization of the PDE." + "import numpy as np\n", + "import pandas as pd\n", + "import scipy.sparse\n", + "import scipy.integrate\n", + "import scipy.linalg as la\n", + "import matplotlib.pyplot as plt\n", + "\n", + "import opinf\n", + "\n", + "opinf.utils.mpl_config()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - ":::{important}\n", - "One key advantage of OpInf is that, because it learns a ROM from data alone, direct access to a high-fidelity solver is not required.\n", - "In this tutorial, we explicitly construct the high-fidelity solver, but in practice, we only need the following:\n", - "1. Solution outputs of a high-fidelity solver to learn from, and\n", - "2. 
Some knowledge of the structure of the governing equations.\n", - ":::" + "## Training Data" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Define the Full-order Model" + "We begin by generating training data through a traditional numerical method.\n", + "A spatial discretization of the governing equations with $n$ degrees of freedom via finite differences or the finite element method leads to a linear semi-discrete system of $n$ ordinary differential equations,\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\ddt\\q(t) = \\A\\q(t),\n", + " \\qquad\n", + " \\q(0) = \\q_0,\n", + "\\end{aligned}\n", + "$$ (eq_basics_fom)\n", + "\n", + "where $\\q:\\RR\\to\\RR^n$, $\\A\\in\\RR^{n\\times n}$, and $\\q_0\\in\\RR^n$.\n", + "For this tutorial, we use central finite differences to construct this system." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "To solve the problem numerically, let $\\{x\\}_{i=0}^{n+1}$ be an equidistant grid of $n+2$ points on $\\Omega$, i.e.,\n", + ":::{dropdown} Discretization details\n", + "\n", + "For a given $n\\in\\NN$, let $\\{x\\}_{i=0}^{n+1}$ be an equidistant grid of $n+2$ points on $\\Omega$, i.e.,\n", "\n", "$$\n", "\\begin{aligned}\n", @@ -108,29 +131,22 @@ " &\n", " &\\text{and}\n", " &\n", - " \\delta x &= \\frac{L}{n+1} = x_{i+1} - x_{i},\\quad i=1,\\ldots,n-1.\n", + " x_{i+1} - x_{i} &= \\delta x := \\frac{L}{n+1},\\quad i=1,\\ldots,n-1.\n", "\\end{aligned}\n", "$$\n", "\n", "The boundary conditions prescribe $q(x_0,t) = q(x_{n+1},t) = 0$.\n", - "Our goal is to compute $q(x, t)$ at the interior spatial points $x_{1}, x_{2}, \\ldots, x_{n}$ for various $t = [0,T]$. 
we wish to compute the state vector\n", + "Our goal is to compute $q(x, t)$ at the interior spatial points $x_{1}, x_{2}, \\ldots, x_{n}$ for various $t \\in [t_0,t_f].$ That is, we wish to compute the state vector\n", "\n", "$$\n", "\\begin{aligned}\n", " \\q(t)\n", " = \\left[\\begin{array}{c}\n", " q(x_1,t) \\\\ \\vdots \\\\ q(x_n,t)\n", - " \\end{array}\\right]\\in\\RR^n\n", + " \\end{array}\\right]\\in\\RR^n.\n", "\\end{aligned}\n", "$$\n", "\n", - "for $t\\in[t_0,t_f]$." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ "Introducing a central finite difference approximation for the spatial derivative,\n", "\n", "$$\n", @@ -148,7 +164,7 @@ " \\qquad\n", " \\q(0) = \\q_0,\n", "\\end{aligned}\n", - "$$ (eq_basics_fom)\n", + "$$\n", "\n", "where\n", "\n", @@ -168,16 +184,28 @@ "\\end{aligned}\n", "$$\n", "\n", - "Equation {eq}`eq_basics_fom` is called the _full-order model_ (FOM) or the _high-fidelity model_. The computational complexity of solving {eq}`eq_basics_fom` depends on the dimension $n$, which must often be large in order for $\\q(t)$ to approximate $q(x,t)$ well over the spatial grid. Our goal is to construct a ROM that approximates the FOM, but whose computational complexity only depends on some smaller dimension $r \\ll n$." + ":::" ] }, { "cell_type": "markdown", - "metadata": { - "toc-hr-collapsed": true - }, + "metadata": {}, + "source": [ + "The system {eq}`eq_basics_fom` is called the _full-order model_ (FOM) or the _high-fidelity model_. The computational complexity of solving {eq}`eq_basics_fom` depends on the dimension $n$, which must often be large in order for $\\q(t)$ to approximate $q(x,t)$ well over the spatial grid. Our goal is to construct a ROM that approximates the FOM, but whose computational complexity only depends on some smaller dimension $r \\ll n$." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, "source": [ - "### Solve the Full-order Model" + ":::{admonition} No FOM? 
No Problem.\n", + ":class: important\n", + "\n", + "One key advantage of OpInf is that, because it learns a ROM from data alone, direct access to a FOM is not required.\n", + "In this tutorial, we explicitly construct a FOM, but in practice, we only need the following:\n", + "1. Solution data to learn from, and\n", + "2. Some knowledge of the structure of the governing equations.\n", + ":::" ] }, { @@ -186,8 +214,8 @@ "toc-hr-collapsed": true }, "source": [ - "For this demo, we'll use $t_0 = 0$ and $L = t_f = 1$.\n", - "We begin by simulating the full-order system described above with the initial condition\n", + "For this demo, we set $L = 1$, $t_0 = 0$, $t_f = 1$, and use $n = 2^{10} - 1 = 1023$ spatial degrees of freedom.\n", + "We begin by solving the FOM with the initial condition\n", "\n", "$$\n", "\\begin{aligned}\n", @@ -195,8 +223,8 @@ "\\end{aligned}\n", "$$\n", "\n", - "using a maximal time step size $\\delta t = 10^{-3}$.\n", - "This results in $k = 10^3 + 1 = 1001$ state snapshots (1000 time steps after the initial condition), which are organized as the _snapshot matrix_ $\\Q\\in\\RR^{n\\times k}$, where the $j$th column is the solution trajectory at time $t_j$:\n", + "and record the solution every $\\delta t = 0.0025$ time units.\n", + "This results in $k = 401$ state snapshots ($400$ time steps after the initial condition), which are organized into the _snapshot matrix_ $\\Q\\in\\RR^{n\\times k}$, where the $j$-th column is the solution trajectory at time $t_j$:\n", "\n", "$$\n", "\\begin{aligned}\n", @@ -210,101 +238,64 @@ "\\end{aligned}\n", "$$\n", "\n", - "Note that the initial condition $\\q_{0}$ is included as a column in the snapshot matrix." + "Note that the initial condition $\\q_{0}$ is included as a column in the snapshot matrix.\n", + "\n", + "The following code constructs the spatial and time domains, the FOM matrix $\\A$, the initial condition vector $\\q_0$, and solves the FOM with {func}`scipy.integrate.solve_ivp()`." 
] }, { "cell_type": "code", "execution_count": null, "metadata": { - "tags": [] + "tags": [ + "hide-input" + ] }, "outputs": [], - "source": [ - "import numpy as np\n", - "import pandas as pd\n", - "import scipy.linalg as la\n", - "import scipy.sparse as sparse\n", - "import matplotlib.pyplot as plt\n", - "from scipy.integrate import solve_ivp\n", - "\n", - "import opinf\n", - "\n", - "opinf.utils.mpl_config()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], "source": [ "# Construct the spatial domain.\n", - "L = 1 # Spatial domain length.\n", - "n = 2**7 - 1 # Spatial grid size.\n", - "x_all = np.linspace(0, L, n + 2) # Full spatial grid.\n", - "x = x_all[1:-1] # Interior spatial grid (where q is unknown).\n", - "dx = x[1] - x[0] # Spatial resolution.\n", + "L = 1\n", + "n = 2**10 - 1\n", + "x_all = np.linspace(0, L, n + 2)\n", + "x = x_all[1:-1]\n", + "dx = x[1] - x[0]\n", "\n", "# Construct the temporal domain.\n", - "t0, tf = 0, 1 # Initial and final time.\n", - "k = tf * 1000 + 1 # Temporal grid size.\n", - "t = np.linspace(t0, tf, k) # Temporal grid.\n", - "dt = t[1] - t[0] # Temporal resolution.\n", + "t0, tf = 0, 1\n", + "k = 401\n", + "t = np.linspace(t0, tf, k)\n", + "dt = t[1] - t[0]\n", + "\n", "\n", - "print(f\"Spatial step size:\\tdx = {dx}\")\n", - "print(f\"Temporal step size:\\tdt = {dt}\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ "# Construct the full-order state matrix A.\n", "diags = np.array([1, -2, 1]) / (dx**2)\n", - "A = sparse.diags(diags, [-1, 0, 1], (n, n))\n", + "A = scipy.sparse.diags(diags, [-1, 0, 1], (n, n))\n", "\n", + "# Construct the initial condition for the training data.\n", + "q0 = x * (1 - x)\n", "\n", - "# Define the full-order model dx/dt = f(t,x), x(0) = x0.\n", - "def fom(t, x):\n", - " return A @ x\n", "\n", + "def full_order_solve(initial_condition, time_domain):\n", + " \"\"\"Solve the full-order 
model with SciPy.\"\"\"\n", + " return scipy.integrate.solve_ivp(\n", + " fun=lambda t, x: A @ x,\n", + " t_span=[time_domain[0], time_domain[-1]],\n", + " y0=initial_condition,\n", + " t_eval=time_domain,\n", + " method=\"BDF\",\n", + " ).y\n", "\n", - "# Construct the initial condition for the training data.\n", - "q0 = x * (1 - x)\n", "\n", - "print(f\"{A.shape=}\\t{q0.shape=}\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Compute snapshots by solving the full-order model with SciPy.\n", - "Q = solve_ivp(fom, [t0, tf], q0, t_eval=t, method=\"BDF\").y\n", + "# Solve the full-order model to obtain training snapshots.\n", + "Q = full_order_solve(q0, t)\n", "\n", - "print(f\"{Q.shape=}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ":::{caution}\n", - "It is often better to use your own ODE solver, tailored to the problem at hand, instead of integration packages such as [**scipy.integrate**](https://docs.scipy.org/doc/scipy/tutorial/integrate.html).\n", - "If the integration strategy of the FOM is known, try using that strategy with the ROM.\n", - ":::" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Visualize Training Data" + "print(f\"Spatial domain size:\\t{x.shape=}\")\n", + "print(f\"Spatial step size:\\t{dx=:.10f}\")\n", + "print(f\"\\nTime domain size:\\t{t.shape=}\")\n", + "print(f\"Temporal step size:\\t{dt=:f}\")\n", + "print(f\"\\nFull-order matrix A:\\t{A.shape=}\")\n", + "print(f\"\\nInitial condition:\\t{q0.shape=}\")\n", + "print(f\"\\nTraining snapshots:\\t{Q.shape=}\")" ] }, { @@ -316,7 +307,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": { "tags": [ "hide-input" @@ -358,7 +349,8 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "This matches our intuition: initially there is more heat toward the center of the rod, which then diffuses out of the ends of the rod. 
In the figure, earlier times are lighter colors and later times are darker colors." + "In the figure, earlier times are lighter colors and later times are darker colors.\n", + "This matches our intuition: initially there is more heat toward the center of the rod, which then diffuses out of the ends of the rod." ] }, { @@ -373,13 +365,12 @@ "metadata": {}, "source": [ "At this point, we have gathered some training data by simulating the FOM.\n", - "We also have an initial condition and space and time domains.\n", + "We also have an initial condition and a time domain.\n", "\n", "| Name | Symbol | Code Variable |\n", "| :--- | :----: | :------------ |\n", "| State snapshots | $\\Q$ | `Q` |\n", "| Initial state | $\\q_0$ | `q0` |\n", - "| Spatial variable | $\\Omega$ | `x` |\n", "| Time domain | $[t_0,t_f]$ | `t` |\n", "\n", "Our task now is to construct a low-dimensional system whose solutions can be used as approximate solutions to the PDE.\n", @@ -394,13 +385,13 @@ "source": [ "import opinf\n", "\n", - "# Define the reduced-order model structure.\n", + "# Define the reduced-order model.\n", "rom = opinf.ROM(\n", - " basis=opinf.basis.PODBasis(cumulative_energy=0.999999),\n", + " basis=opinf.basis.PODBasis(cumulative_energy=0.9999),\n", " ddt_estimator=opinf.ddt.UniformFiniteDifferencer(t, \"ord6\"),\n", " model=opinf.models.ContinuousModel(\n", " operators=\"A\",\n", - " solver=opinf.lstsq.L2Solver(regularizer=1e-4),\n", + " solver=opinf.lstsq.L2Solver(regularizer=1e-8),\n", " ),\n", ")\n", "\n", @@ -448,8 +439,7 @@ "We choose $\\Vr$ using proper orthogonal decomposition (POD), which is based on the singular value decomposition (SVD) of samples of $\\q(t)$.\n", "The singular values give some guidance on choosing an appropriate ROM dimension $r$.\n", "Fast singular value decay is a good sign that a ROM may be successful with this kind of data; if the singular values do not decay quickly, then a large $r$ may be required to capture the behavior of the system.\n", - 
"POD is implemented in this package as {class}`opinf.basis.PODBasis`.\n", - "Below, we initialize a `PODBasis` with a criteria for selecting $r$: choose the smallest $r$ such that we capture over $99.9999\\%$ of the [cumulative energy](#sec:api-basis-dimselect) of the system.\n" + "Below, we initialize a {class}`opinf.basis.PODBasis` object with the following criteria for selecting $r$: choose the smallest $r$ such that we capture over $99.9999\\%$ of the [cumulative energy](#sec:api-basis-dimselect) of the system.\n" ] }, { @@ -459,7 +449,7 @@ "outputs": [], "source": [ "# Initialize a basis.\n", - "basis = opinf.basis.PODBasis(cumulative_energy=0.999999)\n", + "basis = opinf.basis.PODBasis(cumulative_energy=0.9999)\n", "\n", "# Fit the basis (compute Vr) using the snapshot data.\n", "basis.fit(Q)\n", @@ -476,7 +466,7 @@ "source": [ "Solutions of our eventual ROM are restricted to linear combinations of these two basis vectors.\n", "\n", - "After the `PODbasis` is initialized and calibrated, we can use it to compress the state snapshots to an $r$-dimensional representation.\n", + "After the basis is initialized and calibrated, we can use it to compress the state snapshots to an $r$-dimensional representation.\n", "In this case, we have $\\qhat_j = \\Vr\\trp\\q_j \\in \\RR^{r}$.\n", "These $\\qhat_j$ are data for the ROM state $\\qhat(t)$ at time $t_j$." ] @@ -498,7 +488,7 @@ "metadata": {}, "source": [ "To see how well the state can be represented by a given basis matrix, it is helpful to examine the _projection_ of the state snapshots.\n", - "For linear state approximations, the projection of $\\q\\in\\RR^n$ is the vector $\\Vr\\Vr\\trp\\q\\in\\RR^n$." + "For linear state approximations like POD, the projection of $\\q\\in\\RR^n$ is the vector $\\Vr\\Vr\\trp\\q\\in\\RR^n$." ] }, { @@ -536,14 +526,14 @@ "1. If time derivatives of the original state snapshots are available, they can be compressed to the reduced state space.\n", "2. 
Otherwise, the time derivatives may be estimated from the compressed states.\n",
    "\n",
-    "The latter scenario (being given state data but not time derivative data) is common, so {mod}`opinf.ddt` defines tools for estimating time derivatives."
+    "The {mod}`opinf.ddt` module defines tools for estimating time derivatives from state data."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
-    "Recall that the FOM in this problem is given by $\\ddt\\q(t) = \\A\\q(t)$.\n",
+    "Recall that the FOM in this problem {eq}`eq_basics_fom` is given by $\\ddt\\q(t) = \\A\\q(t)$.\n",
    "In this case we have $\\A$, so we can compute $\\dot{\\q}_j = \\A\\q_j$, then set $\\dot{\\qhat}_j = \\Vr\\trp\\dot{\\q}_j$.\n",
    "Below, we show how this approach compares with using tools from {mod}`opinf.ddt`.\n",
    "Since the data $\\q_0,\\ldots,\\q_{k-1}$ are defined on a uniform time grid, we use {class}`opinf.ddt.UniformFiniteDifferencer`."
@@ -587,9 +577,9 @@
   "metadata": {},
   "source": [
    "We now have low-dimensional state and time derivative data.\n",
-    "To learn a ROM with OpInf, we must specify the structure of the ROM, which should be motivated by the FOM and the dimensionality reduction strategy.\n",
+    "To learn a ROM with OpInf, we must specify the structure of the ROM, which should be motivated by the structure of the FOM and the dimensionality reduction strategy.\n",
    "\n",
-    "The FOM is a linear system of ODEs,\n",
+    "The FOM {eq}`eq_basics_fom` is a linear system of ODEs,\n",
    "\n",
    "$$\n",
    "\\begin{aligned}\n",
@@ -617,11 +607,11 @@
    " \\qquad\n",
    " \\qhat(0) = \\Vr\\trp\\q_0,\n",
    "\\end{aligned}\n",
-    "$$\n",
+    "$$ (eq_basics_intrusiverom)\n",
    "\n",
    "where $\\tilde{\\A} = \\Vr\\trp\\A\\Vr \\in \\RR^{r\\times r}$.\n",
-    "This is called the _intrusive Galerkin ROM_ corresponding to the FOM and the choice of basis matrix $\\Vr$.\n",
-    "The intrusive ROM can only be constructed if $\\A$ is known; with OpInf, we construct a reduced system with the same linear structure as the 
intrusive ROM, but without using $\\A$ explicitly:\n", + "The system {eq}`eq_basics_intrusiverom` is called the _intrusive Galerkin ROM_ corresponding to the FOM and the choice of basis matrix $\\Vr$.\n", + "The intrusive ROM can only be constructed if $\\A$ is known; with OpInf, we aim to construct a reduced system with the same linear structure as the intrusive ROM, but without using $\\A$ explicitly:\n", "\n", "$$\n", "\\begin{aligned}\n", @@ -631,7 +621,7 @@ "\\end{aligned}\n", "$$\n", "\n", - "where $\\Ahat\\in\\RR^{r\\times r}$.\n", + "for some $\\Ahat\\in\\RR^{r\\times r}$ inferred from the training data.\n", "We specify this linear structure by initializing an {class}`opinf.models.ContinuousModel` with the string `\"A\"`.\n" ] }, @@ -649,8 +639,10 @@ "cell_type": "markdown", "metadata": {}, "source": [ - ":::{tip}\n", - "The `\"A\"` syntax is a shortcut for a slightly longer statement:\n", + ":::{admonition} Model Constructor Shortcut\n", + ":class: tip\n", + "\n", + "The `\"A\"` argument in the constructor is a shortcut for a slightly longer statement:\n", "\n", "```python\n", ">>> model = opinf.models.ContinuousModel([opinf.operators.LinearOperator()])\n", @@ -676,19 +668,32 @@ "OpInf does this through minimizing the residual of the model equation with respect to the data:\n", "\n", "$$\n", - " \\min_{\\Ahat\\in\\RR^{r\\times r}}\\sum_{j=0}^{k-1}\\left\\|\n", - " \\Ahat\\Vr\\trp\\q_{j} - \\Vr\\trp\\dot{\\q}_{j}\n", + "\\begin{aligned}\n", + " \\min_{\\Ahat\\in\\RR^{r\\times r}}\n", + " \\sum_{j=0}^{k-1}\\left\\|\n", + " \\Ahat\\qhat_{j} - \\dot{\\qhat}_{j}\n", " \\right\\|_{2}^2\n", " + \\mathcal{R}(\\Ahat),\n", + "\\end{aligned}\n", "$$ (eq_basics_opinf)\n", "\n", - "where $\\mathcal{R}(\\Ahat)$ is a regularization term (more on this soon).\n", - "The {mod}`opinf.lstsq` module defines tools for solving this problem (or variations on it).\n", + "where $\\mathcal{R}(\\Ahat)$ is a regularization term (more on this later).\n", "\n", + "The {mod}`opinf.lstsq` 
module defines tools for solving this problem (or variations on it).\n", "By default, the regression is solved without regularization, i.e., $\\mathcal{R}(\\Ahat) = 0$.\n", "The following code compares the OpInf ROM matrix $\\Ahat$ to the intrusive ROM matrix $\\tilde{\\A} = \\Vr\\trp\\A\\Vr$." ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "model.fit(states=Q_, ddts=Qdot_exact)\n", + "print(model)" + ] + }, { "cell_type": "code", "execution_count": null, @@ -699,11 +704,8 @@ "Vr = basis.entries\n", "A_intrusive = Vr.T @ A @ Vr\n", "\n", - "# Construct the OpInf ROM and extract the linear operator.\n", - "model.fit(states=Q_, ddts=Qdot_exact)\n", + "# Compare the OpInf ROM linear operator to the intrusive one.\n", "A_opinf = model.operators[0].entries\n", - "\n", - "# Compare the two linear operators.\n", "np.allclose(A_intrusive, A_opinf)" ] }, @@ -762,10 +764,13 @@ "outputs": [], "source": [ "# Define a solver for the Tikhonov-regularized least-squares problem.\n", - "model.solver = opinf.lstsq.L2Solver(regularizer=1e-2)\n", + "model = opinf.models.ContinuousModel(\n", + " \"A\",\n", + " solver=opinf.lstsq.L2Solver(regularizer=1e-2),\n", + ")\n", "\n", "# Construct the OpInf ROM through regularized least squares.\n", - "model.fit(states=Q_, ddts=Qdot_)\n", + "model.fit(states=Q_, ddts=Qdot_exact)\n", "A_opinf = model.operators[0].entries\n", "\n", "# Compare to the intrusive model.\n", @@ -786,19 +791,18 @@ "cell_type": "markdown", "metadata": {}, "source": [ - ":::{note}\n", "With inexact time derivatives or regularization, OpInf differs slightly from the intrusive operator $\\tilde{\\A}$.\n", - "However, we will see that the ROM produced by OpInf is highly accurate.\n", - "In fact, it is sometimes the case that OpInf outperforms intrusive projection.\n", - ":::" + "However, we will see that in this example the ROM produced by OpInf is highly accurate.\n", + "In fact, it is sometimes the case that 
OpInf outperforms intrusive Galerkin projection." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ + ":::{admonition} Regularization Matters\n", + ":class: important\n", "\n", - ":::{important}\n", "Regularization is important in all but the simplest OpInf problems.\n", "If OpInf produces an unstable ROM, try different values for the `regularizer`.\n", "See {cite}`mcquarrie2021combustion` for an example of a principled choice of regularization for a combustion problem.\n", @@ -816,8 +820,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Once the model is calibrated, we may solve the ROM with {meth}`opinf.models.ContinuousModel.predict`, which wraps [**scipy.integrate.solve_ivp()**](https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.solve_ivp.html).\n", - "This method takes an initial condition for the model $\\qhat_0 = \\Vr\\trp\\q_0$, the time domain over which to record the solution, and any additional arguments for the integrator." + "Once the model is calibrated, we may solve the ROM with {meth}`opinf.models.ContinuousModel.predict`, which wraps {func}`scipy.integrate.solve_ivp()`. This method takes an initial condition for the model $\\qhat_0 = \\Vr\\trp\\q_0$, the time domain over which to record the solution, and any additional arguments for the integrator." 
] }, { @@ -828,6 +831,11 @@ "source": [ "q0_ = basis.compress(q0) # Compress the initial conditions.\n", "\n", + "model = opinf.models.ContinuousModel(\n", + " \"A\",\n", + " solver=opinf.lstsq.L2Solver(regularizer=1e-8),\n", + ").fit(Q_, Qdot_)\n", + "\n", "Q_ROM_ = model.predict(q0_, t, method=\"BDF\")\n", "\n", "print(f\"{Q_ROM_.shape=}\")" @@ -855,15 +863,17 @@ "cell_type": "markdown", "metadata": {}, "source": [ - ":::{tip}\n", - "{meth}`opinf.models.ContinuousModel.predict` is convenient, but [**scipy.integrate.solve_ivp()**](https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.solve_ivp.html) implements relatively few time integration schemes.\n", - "However, the ROM can be simulated by **any** ODE solver scheme by extracting the inferred operator $\\Ahat$. \n", + ":::{admonition} Custom ODE Solvers\n", + ":class: tip\n", + "\n", + "{meth}`opinf.models.ContinuousModel.predict` is convenient, but {func}`scipy.integrate.solve_ivp()` implements a limited repertoire of time integration schemes.\n", + "However, the ROM can be simulated by any ODE solver scheme by extracting the inferred operator $\\Ahat$. 
\n", "If `timestepper(A, q0)` were a solver for systems of the form $\\ddt\\qhat = \\Ahat\\qhat(t),\\ \\qhat(0) = \\qhat_0$, we could simulate the ROM with the following code.\n", "\n", "```python\n", - "q0_ = Vr.T @ q0 # Compress the initial conditions.\n", + "q0_ = basis.compress(q0) # Compress the initial conditions.\n", "Q_ROM_ = timestepper(model.A_.entries, q0_) # Solve the ROM in the reduced space.\n", - "Q_ROM = Vr @ Q_ROM_ # Decompress the ROM solutions.\n", + "Q_ROM = basis.decompress(Q_ROM_) # Decompress the ROM solutions.\n", "```\n", "\n", "More generally, the method {meth}`opinf.models.ContinuousModel.rhs` represents the right-hand side of the model, the $\\hat{\\mathbf{f}}$ of $\\ddt\\qhat(t) = \\hat{\\mathbf{f}}(t, \\qhat(t))$.\n", @@ -901,16 +911,34 @@ "outputs": [], "source": [ "rom = opinf.ROM(\n", - " basis=opinf.basis.PODBasis(cumulative_energy=0.999999),\n", + " basis=opinf.basis.PODBasis(cumulative_energy=0.9999),\n", " ddt_estimator=opinf.ddt.UniformFiniteDifferencer(t, \"ord6\"),\n", " model=opinf.models.ContinuousModel(\n", " operators=\"A\",\n", - " solver=opinf.lstsq.L2Solver(regularizer=1e-2),\n", + " solver=opinf.lstsq.L2Solver(regularizer=1e-8),\n", " ),\n", ")\n", "\n", + "print(rom)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "rom.fit(Q)\n", "\n", + "print(rom)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "Q_ROM_2 = rom.predict(q0, t, method=\"BDF\")\n", "\n", "np.all(Q_ROM_2 == Q_ROM)" @@ -957,8 +985,8 @@ "metadata": {}, "outputs": [], "source": [ - "abs_l2err, rel_l2err = opinf.post.lp_error(Q, Q_ROM)\n", - "plt.semilogy(t, abs_l2err)\n", + "absolute_l2err, relative_l2err = opinf.post.lp_error(Q, Q_ROM)\n", + "plt.semilogy(t, absolute_l2err)\n", "plt.title(r\"Absolute $\\ell^{2}$ error\")\n", "plt.show()" ] @@ -986,7 +1014,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "In other 
words, the ROM simulation is within 0.1% of the snapshot data.\n", + "In other words, the ROM simulation is within about 0.1% of the snapshot data.\n", "Note that this value is very close to the projection error that we calculated earlier." ] }, @@ -1006,24 +1034,28 @@ "source": [ "The ROM was trained using only data corresponding to the initial condition $q_0(x) = x(1 - x).$ We'll now test the ROM on the following new initial conditions and compare the results to the corresponding FOM solution:\n", "\n", - "\\begin{align*}\n", + "$$\n", + "\\begin{aligned}\n", " q_0(x) &= 10x (1 - x),\n", " &\n", - " q_0(x) &= x^{2}(1 - x)^{2},\n", + " q_0(x) &= 5x^{2}(1 - x)^{2},\n", " \\\\\n", - " q_0(x) &= x^{4}(1 - x)^{4},\n", + " q_0(x) &= 50x^{4}(1 - x)^{4},\n", " &\n", - " q_0(x) &= \\sqrt{x(1 - x)},\n", + " q_0(x) &= \\frac{1}{2}\\sqrt{x(1 - x)},\n", " \\\\\n", - " q_0(x) &= \\sqrt[4]{x(1 - x)},\n", + " q_0(x) &= \\frac{1}{4}\\sqrt[4]{x(1 - x)},\n", " &\n", - " q_0(x) &= \\sin(\\pi x) + \\tfrac{1}{5}\\sin(5\\pi x).\n", - "\\end{align*}\n", + " q_0(x) &= \\frac{1}{3}\\sin(\\pi x) + \\tfrac{1}{5}\\sin(5\\pi x).\n", + "\\end{aligned}\n", + "$$\n", "\n", "Before we compute the ROM error, we also compute the _projection error_ of the new initial condition,\n", "\n", "$$\n", + "\\begin{aligned}\n", " \\frac{||\\q_{0} - \\Vr \\Vr\\trp\\q_{0}||_{2}}{||\\q_{0}||_{2}}.\n", + "\\end{aligned}\n", "$$\n", "\n", "If this projection error is large, then the new initial condition cannot be represented well within the range of $\\Vr$. This will be apparent in the ROM solutions." 
@@ -1038,7 +1070,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 26, "metadata": {}, "outputs": [], "source": [ @@ -1053,13 +1085,13 @@ " rom : opinf.ROM\n", " Trained reduced-order model object.\n", " label : str\n", - " LaTeX description of the initial condition being tested.\n", + " Description of the initial condition being tested.\n", " \"\"\"\n", " # Calculate the projection error of the new initial condition.\n", " rel_projerr = rom.basis.projection_error(q0, relative=True)\n", "\n", " # Solve the full-order model (FOM) and the reduced-order model (ROM).\n", - " Q_FOM = solve_ivp(fom, [t0, tf], q0, t_eval=t, method=\"BDF\").y\n", + " Q_FOM = full_order_solve(q0, t)\n", " Q_ROM = rom.predict(q0, t, method=\"BDF\")\n", "\n", " # Plot the FOM and ROM solutions side by side.\n", @@ -1096,20 +1128,20 @@ "source": [ "q0_new = [\n", " 10 * x * (1 - x),\n", - " x**2 * (1 - x) ** 2,\n", - " x**4 * (1 - x) ** 4,\n", - " np.sqrt(x * (1 - x)),\n", - " np.sqrt(np.sqrt(x * (1 - x))),\n", - " np.sin(np.pi * x) + np.sin(5 * np.pi * x) / 5,\n", + " 5 * x**2 * (1 - x) ** 2,\n", + " 50 * x**4 * (1 - x) ** 4,\n", + " 0.5 * np.sqrt(x * (1 - x)),\n", + " 0.25 * np.sqrt(np.sqrt(x * (1 - x))),\n", + " np.sin(np.pi * x) / 3 + np.sin(5 * np.pi * x) / 5,\n", "]\n", "\n", "q0_titles = [\n", " r\"$q_{0}(x) = 10 x (1 - x)$\",\n", - " r\"$q_{0}(x) = x^{2} (1 - x)^{2}$\",\n", - " r\"$q_{0}(x) = x^{4} (1 - x)^{4}$\",\n", - " r\"$q_{0}(x) = \\sqrt{x (1 - x)}$\",\n", - " r\"$q_{0}(x) = \\sqrt[4]{x (1 - x)}$\",\n", - " r\"$q_{0}(x) = \\sin(\\pi x) + \\frac{1}{5}\\sin(5\\pi x)$\",\n", + " r\"$q_{0}(x) = 5 x^{2} (1 - x)^{2}$\",\n", + " r\"$q_{0}(x) = 50 x^{4} (1 - x)^{4}$\",\n", + " r\"$q_{0}(x) = \\frac{1}{2}\\sqrt{x (1 - x)}$\",\n", + " r\"$q_{0}(x) = \\frac{1}{4}\\sqrt[4]{x (1 - x)}$\",\n", + " r\"$q_{0}(x) = \\frac{1}{3}\\sin(\\pi x) + \\frac{1}{5}\\sin(5\\pi x)$\",\n", "]\n", "\n", "results = {}\n", @@ -1143,7 +1175,7 @@ }, { "cell_type": "code", - 
"execution_count": null, + "execution_count": 28, "metadata": { "tags": [ "hide-input" @@ -1232,12 +1264,14 @@ " ddt_estimator=opinf.ddt.UniformFiniteDifferencer(t, \"ord6\"),\n", " model=opinf.models.ContinuousModel(\n", " operators=\"A\",\n", - " solver=opinf.lstsq.L2Solver(regularizer=1e-5),\n", + " solver=opinf.lstsq.L2Solver(regularizer=1e-8),\n", " ),\n", ")\n", "\n", "# Use the same training data as before, but do not reset the basis.\n", - "_ = rom.fit(Q, fit_basis=False)" + "_ = rom.fit(Q, fit_basis=False)\n", + "\n", + "print(rom)" ] }, { @@ -1267,15 +1301,15 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "With a more expressive basis, we are now capturing the true solutions with the ROM to within 1% error in the Frobenius norm." + "With a more expressive basis, the ROM performance improves significantly." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - ":::{admonition} Takeaway\n", - ":class: attention\n", + ":::{admonition} No Better than the Basis\n", + ":class: tip\n", "This example illustrates a fundamental principle of model reduction: the accuracy of the ROM is limited by the accuracy of the underlying low-dimensional approximation, which in this case is $\\q(t) \\approx \\Vr\\qhat(t)$. 
In other words, a good $\\Vr$ is critical in order for the ROM to be accurate and predictive.\n", ":::" ] From aada910155371db0e772a5ad3951574c94398950 Mon Sep 17 00:00:00 2001 From: Shane Date: Fri, 6 Sep 2024 15:29:42 -0600 Subject: [PATCH 40/48] timed_block -> TimedBlock, fix Examples doc --- docs/source/api/utils.md | 2 +- src/opinf/utils/_timer.py | 65 ++++++++++------ tests/utils/test_timer.py | 157 +++++++++++++++++++++----------------- 3 files changed, 128 insertions(+), 96 deletions(-) diff --git a/docs/source/api/utils.md b/docs/source/api/utils.md index 7085b101..a6235ab2 100644 --- a/docs/source/api/utils.md +++ b/docs/source/api/utils.md @@ -16,7 +16,7 @@ The following class defines a context manager for timing blocks of code and logg :toctree: _autosummaries :nosignatures: - timed_block + TimedBlock ``` ## Load/Save HDF5 Utilities diff --git a/src/opinf/utils/_timer.py b/src/opinf/utils/_timer.py index 614cc958..32f7752a 100644 --- a/src/opinf/utils/_timer.py +++ b/src/opinf/utils/_timer.py @@ -2,7 +2,7 @@ """Context manager for timing blocks of code.""" __all__ = [ - "timed_block", + "TimedBlock", ] import os @@ -11,31 +11,43 @@ import logging -class timed_block: +class TimedBlock: r"""Context manager for timing a block of code and reporting the timing. - **WARNING**: this context manager may only function on Linux/Unix machines - (Windows is not supported). - Parameters ---------- message : str Message to log / print. timelimit : int Number of seconds to wait before raising an error. - Floats are rounded down to the nearest integer. + Floats are rounded down to an integer. + + Warnings + -------- + This context manager may only function on Linux/Unix machines + (Windows is not currently supported). Examples -------- >>> import time >>> import opinf - >>> with opinf.utils.timed_block("This is a test"): + Without a time limit. + + >>> with opinf.utils.TimedBlock(): ... # Code to be timed ... time.sleep(2) - This is a test...done in 2.00 s. 
+ Running code block...done in 2.00 s. + + With a custom message. + + >>> with opinf.utils.TimedBlock("This is a test"): + ... time.sleep(3) + This is a test...done in 3.00 s. - >>> with opinf.utils.timed_block("Another test", timelimit=3): + With a time limit. + + >>> with opinf.utils.TimedBlock("Another test", timelimit=3): ... # Code to be timed and halted within the specified time limit. ... i = 0 ... while True: @@ -43,30 +55,33 @@ class timed_block: Another test... TimeoutError: TIMED OUT after 3.00s. - # Set up a logfile to record messages to. - >>> opinf.utils.timed_block.setup_logfile("log.log") + Set up a logfile to record messages to. + + >>> opinf.utils.TimedBlock.setup_logfile("log.log") Logging to '/path/to/current/folder/log.log' - # timed_block() will now write to the log file as well as print to screen. - >>> with opinf.utils.timed_block("logfile test"): + ``TimedBlock()`` will now write to the log file as well as print to screen. + + >>> with opinf.utils.TimedBlock("logfile test"): ... time.sleep(1) logfile test...done in 1.00 s. - >>> with open("log.log", "r") as infile: - ... print(infile.read().strip()) + ... print(infile.read().strip()) INFO: logfile test...done in 1.001150 s. - # Turn off print statements (but keep logging). - >>> opinf.utils.timed_block.verbose = False - >>> with opinf.utils.timed_block("not printed to the screen"): + Turn off print statements (but keep logging). + + >>> opinf.utils.TimedBlock.verbose = False + >>> with opinf.utils.TimedBlock("not printed to the screen"): ... time.sleep(1) >>> with open("log.log", "r") as infile: - ... print(infile.read().strip()) + ... print(infile.read().strip()) INFO: logfile test...done in 1.001150 s. INFO: not printed to the screen...done in 1.002232 s. - # Capture the time elapsed for later use. - >>> with opinf.utils.timed_block("how long?") as timer: + Capture the time elapsed for later use. + + >>> with opinf.utils.TimedBlock("how long?") as timer: ... 
time.sleep(2) >>> timer.elapsed 2.002866268157959 @@ -79,7 +94,11 @@ class timed_block: datefmt="%Y-%m-%d %H:%M:%S", ) - def __init__(self, message: str, timelimit: int = None): + def __init__( + self, + message: str = "Running code block", + timelimit: int = None, + ): """Store print/log message.""" self.__front = "\n" if message.endswith("\n") else "" self.message = message.rstrip() @@ -141,7 +160,7 @@ def __exit__(self, exc_type, exc_value, exc_traceback): @classmethod def add_logfile(cls, logfile: str = "log.log") -> None: - """Instruct :class:`timed_block` to log messages to the ``logfile``. + """Instruct :class:`TimedBlock` to log messages to the ``logfile``. Parameters ---------- diff --git a/tests/utils/test_timer.py b/tests/utils/test_timer.py index 7057e2cd..d34d1f91 100644 --- a/tests/utils/test_timer.py +++ b/tests/utils/test_timer.py @@ -8,76 +8,89 @@ import opinf -def test_timed_block(message="timed_block test", target="_timedblocktest.log"): - """Test timed_block context manager.""" - Timer = opinf.utils.timed_block - if os.path.isfile(target): - os.remove(target) - - with Timer(message) as obj: - pass - assert obj.message == message - assert obj.timelimit is None - assert isinstance(obj.elapsed, float) - - with Timer(message, timelimit=100) as obj: - pass - assert obj.message == message - - with pytest.raises(TimeoutError) as ex: - with Timer(message, timelimit=1): - time.sleep(10) - assert ex.value.args[0].startswith("TIMED OUT after ") - - class MyException(Exception): - pass - - with pytest.raises(MyException) as ex: - with Timer(message): - raise MyException("failure in the block") - assert ex.value.args[0] == "failure in the block" - - # Set up a log file. - Timer.add_logfile(target) - - # See if we write to the log file. 
- with Timer(message, timelimit=100) as obj: - pass - - assert os.path.isfile(target) - with open(target, "r") as infile: - text = infile.read().strip() - assert text.count(message) == 1 - - with pytest.raises(TimeoutError) as ex: - with Timer(message, timelimit=1): - time.sleep(10) - assert ex.value.args[0].startswith("TIMED OUT after ") - - with open(target, "r") as infile: - text = infile.read().strip() - assert text.count(message) == 2 - assert text.count("TIMED OUT after ") == 1 - - with pytest.raises(MyException) as ex: - with Timer(message): - raise MyException("failure in the block") - assert ex.value.args[0] == "failure in the block" - - # Log to the same file. - newmessage = f"{message} AGAIN!" - Timer.add_logfile(target) - - # Log to another file. - newtarget = f"_{target}" - if os.path.isfile(newtarget): - os.remove(newtarget) - - Timer.add_logfile(newtarget) - with Timer(newmessage) as obj: - pass - for tfile in target, newtarget: - with open(tfile, "r") as infile: +class MyException(Exception): + pass + + +class TestTimedBlock: + """Test utils.TimedBlock.""" + + Timer = opinf.utils.TimedBlock + + def test_standard(self, message="TimedBlock test, no timelimit"): + # No time limit. + with self.Timer() as obj: + pass + assert obj.timelimit is None + assert isinstance(obj.elapsed, float) + + # Time limit that does not expire. + with self.Timer(message, timelimit=100) as obj: + pass + assert obj.message == message + + def test_timeout(self, message="TimedBlock test with problems"): + # Time limit expires. + with pytest.raises(TimeoutError) as ex: + with self.Timer(message, timelimit=1): + time.sleep(10) + assert ex.value.args[0].startswith("TIMED OUT after ") + + # Exception occurs in the block. 
+ with pytest.raises(MyException) as ex: + with self.Timer(message): + raise MyException("failure in the block") + assert ex.value.args[0] == "failure in the block" + + def test_log( + self, + message: str = "TimedBlock test with log", + target: str = "_timedblocktest.log", + ): + if os.path.isfile(target): + os.remove(target) + + # Set up a log file. + self.Timer.add_logfile(target) + + # See if we write to the log file. + with self.Timer(message, timelimit=100): + pass + + assert os.path.isfile(target) + with open(target, "r") as infile: text = infile.read().strip() - assert text.count(newmessage) == 1 - os.remove(tfile) + assert text.count(message) == 1 + + with pytest.raises(TimeoutError) as ex: + with self.Timer(message, timelimit=1): + time.sleep(10) + assert ex.value.args[0].startswith("TIMED OUT after ") + + with open(target, "r") as infile: + text = infile.read().strip() + assert text.count(message) == 2 + assert text.count("TIMED OUT after ") == 1 + + with pytest.raises(MyException) as ex: + with self.Timer(message): + raise MyException("failure in the block") + assert ex.value.args[0] == "failure in the block" + + # Log to the same file. + newmessage = f"{message} AGAIN!" + self.Timer.add_logfile(target) + + # Log to another file. 
+ newtarget = f"_{target}" + if os.path.isfile(newtarget): + os.remove(newtarget) + + self.Timer.add_logfile(newtarget) + with self.Timer(newmessage): + pass + for tfile in target, newtarget: + with open(tfile, "r") as infile: + text = infile.read().strip() + assert text.count(newmessage) == 1 + os.remove(tfile) From 5fb9bca813d919386ff967928eda5e3d8eb54f58 Mon Sep 17 00:00:00 2001 From: Shane Date: Mon, 9 Sep 2024 16:56:18 -0600 Subject: [PATCH 41/48] heat equation tutorial -> inputs tutorial --- docs/_toc.yml | 5 +- docs/source/tutorials/basics.ipynb | 140 ++- docs/source/tutorials/inputs.ipynb | 969 ++++++++++++++++++ .../{heat_equation.ipynb => parametric.ipynb} | 7 +- 4 files changed, 1101 insertions(+), 20 deletions(-) create mode 100644 docs/source/tutorials/inputs.ipynb rename docs/source/tutorials/{heat_equation.ipynb => parametric.ipynb} (99%) diff --git a/docs/_toc.yml b/docs/_toc.yml index fdbb7a7b..9365a3f8 100644 --- a/docs/_toc.yml +++ b/docs/_toc.yml @@ -21,11 +21,10 @@ parts: # numbered: 1 chapters: - file: source/tutorials/basics.ipynb - # - file: source/tutorials/inputs.ipynb - - file: source/tutorials/heat_equation.ipynb + - file: source/tutorials/inputs.ipynb # - file: source/tutorials/lifting.ipynb # - file: source/tutorials/regularization.ipynb - # - file: source/tutorials/parametric.ipynb + - file: source/tutorials/parametric.ipynb # API reference via sphinx-autodoc + limited handwritten documentation. 
- caption: API Reference diff --git a/docs/source/tutorials/basics.ipynb b/docs/source/tutorials/basics.ipynb index 45e87d01..2cc8ef5d 100644 --- a/docs/source/tutorials/basics.ipynb +++ b/docs/source/tutorials/basics.ipynb @@ -79,6 +79,7 @@ "metadata": {}, "outputs": [], "source": [ + "import time\n", "import numpy as np\n", "import pandas as pd\n", "import scipy.sparse\n", @@ -266,7 +267,6 @@ "t = np.linspace(t0, tf, k)\n", "dt = t[1] - t[0]\n", "\n", - "\n", "# Construct the full-order state matrix A.\n", "diags = np.array([1, -2, 1]) / (dx**2)\n", "A = scipy.sparse.diags(diags, [-1, 0, 1], (n, n))\n", @@ -278,7 +278,7 @@ "def full_order_solve(initial_condition, time_domain):\n", " \"\"\"Solve the full-order model with SciPy.\"\"\"\n", " return scipy.integrate.solve_ivp(\n", - " fun=lambda t, x: A @ x,\n", + " fun=lambda t, q: A @ q,\n", " t_span=[time_domain[0], time_domain[-1]],\n", " y0=initial_condition,\n", " t_eval=time_domain,\n", @@ -287,9 +287,10 @@ "\n", "\n", "# Solve the full-order model to obtain training snapshots.\n", - "Q = full_order_solve(q0, t)\n", + "with opinf.utils.TimedBlock(\"Full-order solve\"):\n", + " Q = full_order_solve(q0, t)\n", "\n", - "print(f\"Spatial domain size:\\t{x.shape=}\")\n", + "print(f\"\\nSpatial domain size:\\t{x.shape=}\")\n", "print(f\"Spatial step size:\\t{dx=:.10f}\")\n", "print(f\"\\nTime domain size:\\t{t.shape=}\")\n", "print(f\"Temporal step size:\\t{dt=:f}\")\n", @@ -399,7 +400,8 @@ "rom.fit(Q)\n", "\n", "# Solve the reduced-order model.\n", - "Q_ROM = rom.predict(q0, t, method=\"BDF\", max_step=dt)\n", + "with opinf.utils.TimedBlock(\"Reduced-order solve\"):\n", + " Q_ROM = rom.predict(q0, t, method=\"BDF\", max_step=dt)\n", "\n", "# Compute the relative error of the ROM solution.\n", "opinf.post.frobenius_error(Q, Q_ROM)[1]" @@ -951,6 +953,20 @@ "### Evaluate ROM Performance" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The quality or usefulness of a ROM depends on its accuracy and 
its computational efficiency." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### ROM Accuracy" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -976,7 +992,31 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "For more detail, we evaluate the $\\ell^2$ error of the ROM output in time, comparing it to the snapshot set via {func}`opinf.post.lp_error`." + "For more detail, we evaluate the $\\ell^2$ error of the ROM output, comparing it to the snapshot set via {func}`opinf.post.lp_error`.\n", + "This calculates the absolute and relative error as a function of time,\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\text{err}_\\text{absolute}(t)\n", + " &= \\|\\q(t) - \\q_{\\text{ROM}}(t)\\|_{2},\n", + " \\\\ ~ \\\\\n", + " \\text{err}_\\text{relative}(t)\n", + " &= \\frac{\\|\\q(t) - \\q_{\\text{ROM}}(t)\\|_{2}}{\\|\\q(t)\\|_{2}}.\n", + "\\end{aligned}\n", + "$$\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{admonition} Normalized Absolute Error\n", + ":class: tip\n", + "\n", + "In this problem, $\\q(t)\\to\\0$ as $t$ increases, so a relative error may not be appropriate since $\\|\\q(t)\\|_{2}$ appears in the denominator.\n", + "In situations like this, consider using the _normalized absolute error_ by replacing the denominator with $\\max_{\\tau\\in[t_0,t_f]}\\|\\q(t)\\|.$\n", + "Set `normalize=True` in {func}`opinf.post.lp_error()` to use this error measure instead of the relative error.\n", + ":::" ] }, { @@ -985,9 +1025,13 @@ "metadata": {}, "outputs": [], "source": [ - "absolute_l2err, relative_l2err = opinf.post.lp_error(Q, Q_ROM)\n", - "plt.semilogy(t, absolute_l2err)\n", - "plt.title(r\"Absolute $\\ell^{2}$ error\")\n", + "abs_l2err, norm_l2err = opinf.post.lp_error(Q, Q_ROM, normalize=True)\n", + "fig, ax = plt.subplots(1, 1)\n", + "ax.semilogy(t, abs_l2err, \"-\", label=r\"Absolute $\\ell^2$ error\")\n", + "ax.semilogy(t, norm_l2err, \"--\", label=r\"Normalized absolute $\\ell^2$ 
error\")\n", +    "ax.set_xlabel(r\"$t$\")\n", +    "ax.set_ylabel(\"error\")\n", +    "ax.legend(loc=\"lower left\")\n", "plt.show()" ] }, @@ -995,7 +1039,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "In this simple example, the error decreases with time (as solutions get quickly pushed to zero), but this is not the kind of error behavior that should be expected for less trivial systems.\n", + "In this simple example, the error decreases with time (as solutions get quickly pushed to zero), but this is not the kind of error behavior that should be expected when modeling more complicated phenomena.\n", "\n", "We can also get a scalar error measurement by calculating the relative Frobenius norm error with {func}`opinf.post.frobenius_error`." ] }, @@ -1018,6 +1062,78 @@ "Note that this value is very close to the projection error that we calculated earlier." ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### ROM Computational Speedup" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "When a FOM is available, a ROM is only useful if it can be solved much faster than the FOM.\n", + "The solution speed can be quickly checked using {class}`opinf.utils.TimedBlock`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with opinf.utils.TimedBlock(\"Full-order solve\"):\n", + "    full_order_solve(q0, t)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with opinf.utils.TimedBlock(\"Reduced-order solve\"):\n", + "    rom.predict(q0, t, method=\"BDF\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "More precise measurements can be taken by aliasing the {class}`opinf.utils.TimedBlock` and accessing the `elapsed` attribute.\n", + "Below, we solve each model several times to get an average time."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "n_trials = 10\n", + "\n", + "with opinf.utils.TimedBlock(f\"{n_trials} FOM solves\") as fomtime:\n", + " for _ in range(n_trials):\n", + " full_order_solve(q0, t)\n", + "\n", + "with opinf.utils.TimedBlock(f\"{n_trials} ROM solves\") as romtime:\n", + " for _ in range(n_trials):\n", + " rom.predict(q0, t, method=\"BDF\")\n", + "\n", + "print(f\"Average FOM time: {fomtime.elapsed / n_trials :.6f} s\")\n", + "print(f\"Average ROM time: {romtime.elapsed / n_trials :.6f} s\")\n", + "print(f\"ROM speedup: {fomtime.elapsed / romtime.elapsed :.4f} times!\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this example, the FOM is efficient because it takes advantage of the sparsity of $\\A\\in\\RR^{n\\times n}$.\n", + "Even so, the ROM achieves a modest speedup due to the smaller size of $\\Ahat\\in\\RR^{r\\times r}$." + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -1070,7 +1186,7 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 29, "metadata": {}, "outputs": [], "source": [ @@ -1175,7 +1291,7 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 31, "metadata": { "tags": [ "hide-input" diff --git a/docs/source/tutorials/inputs.ipynb b/docs/source/tutorials/inputs.ipynb new file mode 100644 index 00000000..de463e4f --- /dev/null +++ b/docs/source/tutorials/inputs.ipynb @@ -0,0 +1,969 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "toc-hr-collapsed": false + }, + "source": [ + "# External Inputs" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "toc-hr-collapsed": false + }, + "source": [ + "The fundamental goal of model reduction is to efficiently make physics-based predictions. 
Given synthetic or experimental data that was generated or collected under a certain set of conditions, we aim to construct a cost-effective model that produces accurate solutions under new sets of conditions. The first tutorial showed an example of evaluating a reduced-order model (ROM) for various initial conditions. This tutorial focuses on problems with external time-dependent inputs." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "toc-nb-collapsed": true + }, + "source": [ + "## Problem Statement" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We begin with a problem with external inputs that are parameterized by a scalar-valued function $u:\\RR\\to\\RR.$" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{admonition} Governing Equations\n", + ":class: info\n", + "\n", + "Let $\\Omega = [0,L]\\subset \\mathbb{R}$ be the spatial domain indicated by the variable $x$, and let $[0,T]\\subset\\mathbb{R}$ be the time domain with variable $t$. We consider the one-dimensional heat equation with time-dependent Dirichlet boundary conditions,\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " &\\frac{\\partial}{\\partial t} q(x,t) = \\frac{\\partial^2}{\\partial x^2}q(x,t)\n", + " & x &\\in\\Omega,\\quad t\\in[0,T],\n", + " \\\\\n", + " &q(0,t) = q(L,t) = u(t)\n", + " & t &\\in[0,T],\n", + " \\\\\n", + " &q(x,0) = \\big(e^{\\alpha(x - 1)} + e^{-\\alpha x} - e^{-\\alpha}\\big)u(0)\n", + " & x &\\in \\Omega,\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "where $\\alpha>0$ is constant and $q(x,t)$ is the unknown state variable. This is a model for a one-dimensional rod conducting heat with a fixed initial heat profile. 
The temperature at the ends of the rod is governed by the input function $u(t)$, but heat is allowed to diffuse through the rod and flow out at the ends of the domain.\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{admonition} Objective\n", + ":class: info\n", + "\n", + "Construct a reduced-order model (ROM) which can be solved rapidly to produce approximate solutions $q(x, t)$ to the partial differential equation given above for various choices of the input function $u(t)$.\n", + "In addition, we will only observe data over a limited time interval $t \in [0, T']$ with $T' < T$, then use the ROM to predict the solution for the entire time domain $[0, T]$.\n", + "Hence, the ROM will be **predictive in time** and **predictive in the inputs**.\n", + ":::" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import scipy.sparse\n", + "import matplotlib.pyplot as plt\n", + "\n", + "import opinf\n", + "\n", + "opinf.utils.mpl_config()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Single Training Trajectory" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this section a ROM is trained using data collected for a single choice of the input function $u(t)$."
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Full-order Model Definition" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As in the last tutorial, we use a centered finite difference approximation for the spatial derivative to arrive at a finite-dimensional system of ordinary differential equations.\n", + "This time, due to the nonzero boundary conditions, the system takes the form\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\ddt\\q(t) = \\A\\q(t) + \\B u(t),\n", + " \\qquad\n", + " \\q(0) = \\q_0,\n", + "\\end{aligned}\n", + "$$ (eq_inputs_fom)\n", + "\n", + "where $\\A\\in\\RR^{n\\times n}$ and $\\B\\in\\RR^{n}$.\n", + "The system {eq}`eq_inputs_fom` is the _full-order model_ (FOM), which we will use to generate training data for the time domain $[0, T'] \\subset [0, T]$." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{dropdown} Discretization details\n", + "\n", + "We take an equidistant grid $\\{x_i\\}_{i=0}^{n+1} \\subset \\Omega$,\n", + "\n", + "\\begin{align*}\n", + " 0 &= x_0 < x_1 < \\cdots < x_n < x_{n+1} = L\n", + " &\n", + " &\\text{and}\n", + " &\n", + " \\delta x &= \\frac{L}{n+1} = x_{i+1} - x_{i},\\quad i=1,\\ldots,n-1.\n", + "\\end{align*}\n", + "\n", + "The boundary conditions prescribe $q(x_0,t) = q(x_{n+1},t) = u(t)$.\n", + "Our goal is to compute $q(x,t)$ at the interior spatial points $x_{1},x_{2},\\ldots,x_{n}$ for various $t\\in[0,T]$, so we consider the state vector $\\q(t) = [~q(x_{1}, t)~\\cdots~q(x_{n}, t)~]\\trp\\in\\RR^n$ and derive a system governing the evolution of $\\q(t)$ in time.\n", + "\n", + "Approximating the spatial derivative with a central finite difference approximation,\n", + "\n", + "$$\n", + " \\frac{\\partial^2}{\\partial x^2}q(x,t)\n", + " \\approx \\frac{q(x-\\delta x,t) - 2q(x,t) + q(x+\\delta x,t)}{(\\delta x)^2},\n", + "$$\n", + "\n", + "and using the boundary conditions $q(0,t) = q(L,t) = u(t)$, we arrive at the 
following matrices for the FOM.\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\A(\\mu) &= \\frac{1}{(\\delta x)^2}\\left[\\begin{array}{ccccc}\n", + " -2 & 1 & & & \\\\\n", + " 1 & -2 & 1 & & \\\\\n", + " & \\ddots & \\ddots & \\ddots & \\\\\n", + " & & 1 & -2 & 1 \\\\\n", + " & & & 1 & -2 \\\\\n", + " \\end{array}\\right] \\in\\RR^{n\\times n},\n", + " &\n", + " \\B(\\mu) &= \\frac{1}{(\\delta x)^2}\\left[\\begin{array}{c}\n", + " 1 \\\\ 0 \\\\ \\vdots \\\\ 0 \\\\ 1\n", + " \\end{array}\\right]\\in\\RR^{n}.\n", + "\\end{aligned}\n", + "$$\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Training Data Generation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let $L = 1$, $T = 1$, and set $\\alpha = 100$.\n", + "We begin by solving the FOM described above, recording the solution every $\\delta t = 10^{-3}$ time units for a single choice of the input function $u(t)$, yielding $10^3 + 1 = 1001$ total time steps (1000 steps past the initial condition).\n", + "We will assume that we can only observe the first $k = 200$ time steps and use the ROM to predict the remaining $801$ steps.\n", + "Our training input function is\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " u_\\text{train}(t) = 1 + \\frac{1}{4}\\sin(4\\pi t).\n", + "\\end{aligned}\n", + "$$" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "def training_input(tt):\n", + " return np.ones_like(tt) + np.sin(4 * np.pi * tt) / 4" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-input" + ] + }, + "outputs": [], + "source": [ + "# Construct the spatial domain.\n", + "L = 1\n", + "n = 2**10 - 1\n", + "x_all = np.linspace(0, L, n + 2)\n", + "x = x_all[1:-1]\n", + "dx = x[1] - x[0]\n", + "\n", + "# Construct the temporal domain.\n", + "T = 1\n", + "K = T * 10**3 + 1\n", + "t_all = np.linspace(0, T, K)\n", + "dt = t_all[1] - 
t_all[0]\n", + "\n", + "# Construct the full-order state matrix A.\n", + "dx2inv = 1 / dx**2\n", + "diags = np.array([1, -2, 1]) * dx2inv\n", + "A = scipy.sparse.diags(diags, [-1, 0, 1], (n, n))\n", + "\n", + "# Construct the full-order input matrix B.\n", + "B = np.zeros_like(x)\n", + "B[0], B[-1] = dx2inv, dx2inv\n", + "\n", + "# Define the full-order model with an opinf.models class.\n", + "fom = opinf.models.ContinuousModel(\n", + " operators=[\n", + " opinf.operators.LinearOperator(A),\n", + " opinf.operators.InputOperator(B),\n", + " ]\n", + ")\n", + "\n", + "# Construct the part of the initial condition not dependent on u(t).\n", + "alpha = 100\n", + "q0 = np.exp(alpha * (x - 1)) + np.exp(-alpha * x) - np.exp(-alpha)\n", + "\n", + "\n", + "def full_order_solve(time_domain, u):\n", + " \"\"\"Solve the full-order model with SciPy.\n", + " Here, u is a callable function.\n", + " \"\"\"\n", + " return fom.predict(q0 * u(0), time_domain, u, method=\"BDF\")\n", + "\n", + "\n", + "# Solve the full-order model with the training input.\n", + "with opinf.utils.TimedBlock(\"Full-order solve\"):\n", + " Q_all = full_order_solve(t_all, training_input)\n", + "\n", + "# Retain only the first k snapshots/inputs for training the ROM.\n", + "k = 200\n", + "t = t_all[:k]\n", + "Q = Q_all[:, :k]\n", + "\n", + "print(f\"\\nSpatial domain:\\t\\t{x.shape=}\")\n", + "print(f\"Spatial step size:\\t{dx=:.10f}\")\n", + "print(f\"\\nFull time domain:\\t{t_all.shape=}\")\n", + "print(f\"Training time domain:\\t{t.shape=}\")\n", + "print(f\"Temporal step size:\\t{dt=:f}\")\n", + "print(f\"\\nFull-order matrix A:\\t{A.shape=}\")\n", + "print(f\"Full-order vector B:\\t{B.shape=}\")\n", + "print(f\"\\nInitial condition:\\t{q0.shape=}\")\n", + "print(f\"\\nAll FOM solutions:\\t{Q_all.shape=}\")\n", + "print(f\"Training snapshots:\\t{Q.shape=}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The following code visualizes the training data and the full FOM 
solution set by plotting a few snapshots over the spatial domain and the time evolution of the snapshots at a few spatial locations." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "tags": [ + "hide-input" + ] + }, + "outputs": [], + "source": [ + "def plot_data_space(Z, u, title, ax=None):\n", + " \"\"\"Plot state data over space at multiple instances in time.\"\"\"\n", + " if ax is None:\n", + " _, ax = plt.subplots(1, 1)\n", + "\n", + " # Plot a few snapshots over the spatial domain.\n", + " sample_columns = [0] + [2**d for d in range(10)]\n", + " color = iter(plt.cm.viridis_r(np.linspace(0.05, 1, len(sample_columns))))\n", + " while sample_columns[-1] > Z.shape[1] - 1:\n", + " sample_columns = sample_columns[:-1]\n", + " for j in sample_columns:\n", + " leftBC, rightBC = [u(t_all[j])], [u(t_all[j])]\n", + " q_all = np.concatenate([leftBC, Z[:, j], rightBC])\n", + " c = next(color)\n", + " ax.plot(x_all, q_all, lw=1, color=c, label=rf\"$q(x,t_{{{j}}})$\")\n", + "\n", + " ax.set_xlim(x_all[0], x_all[-1])\n", + " ax.set_xlabel(r\"$x$\")\n", + " ax.set_ylabel(r\"$q(x,t)$\")\n", + " ax.legend(loc=(1.05, 0.05))\n", + " ax.set_title(title)\n", + "\n", + "\n", + "def plot_data_time(Z, title, ax=None):\n", + " \"\"\"Plot state in time at multiple spatial locations.\"\"\"\n", + " if ax is None:\n", + " _, ax = plt.subplots(1, 1)\n", + "\n", + " # Plot a few snapshots over the spatial domain.\n", + " sample_rows = np.linspace(0, Z.shape[0] - 1, 11)\n", + " sample_rows = sample_rows[:-1] + (sample_rows[1] - sample_rows[0]) / 4\n", + " sample_rows = sample_rows.astype(int)\n", + " color = iter(plt.cm.inferno(np.linspace(0, 0.8, len(sample_rows))))\n", + " tt = t_all[: Z.shape[1]]\n", + " for i in sample_rows:\n", + " ax.plot(tt, Z[i], lw=1, color=next(color), label=rf\"$q(x_{{{i}}},t)$\")\n", + "\n", + " ax.set_xlim(t_all[0], t_all[-1])\n", + " ax.set_xlabel(r\"$t$\")\n", + " ax.set_ylabel(r\"$q(x,t)$\")\n", + " ax.legend(loc=(1.05, 0.05))\n", 
+ " ax.set_title(title)\n", + "\n", + "\n", + "def plot_two_datasets(Z1, Z2, u, title1=\"\", title2=\"\", cutoff=None):\n", + " \"\"\"Plot two datasets side by side with space and time plots.\"\"\"\n", + " _, [ax1, ax2] = plt.subplots(1, 2, sharex=True, sharey=True)\n", + " plot_data_space(Z1, u, title1, ax1)\n", + " plot_data_space(Z2, u, title2, ax2)\n", + " ax1.legend([])\n", + "\n", + " fig, [ax1, ax2] = plt.subplots(2, 1, sharex=True, sharey=True)\n", + " plot_data_time(Z1, title1, ax1)\n", + " plot_data_time(Z2, title2, ax2)\n", + " ax1.legend([])\n", + " ax1.set_xlabel(\"\")\n", + " fig.subplots_adjust(hspace=0.3)\n", + " if cutoff is not None:\n", + " ax1.axvline(cutoff, color=\"gray\", linewidth=1, linestyle=\"--\")\n", + " ax1.text(cutoff - 10 * dt, 0, \"training\", ha=\"right\", color=\"gray\")\n", + " ax1.text(cutoff + 10 * dt, 0, \"prediction\", ha=\"left\", color=\"gray\")\n", + "\n", + " plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_two_datasets(\n", + " Q,\n", + " Q_all,\n", + " training_input,\n", + " \"Snapshot data for training\",\n", + " \"Full-order model solution\",\n", + " cutoff=t[-1],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### ROM Construction" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We now have snapshot data $\\Q \\in \\RR^{n \\times k}$, but to learn a model with external inputs, we need training data for the inputs as well as for the snapshots.\n", + "Define the vector\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\U = \\left[\\begin{array}{cccc}\n", + " u_\\text{train}(t_0) & u_\\text{train}(t_1) & \\cdots & u_\\text{train}(t_{k-1})\n", + " \\end{array}\\right]\n", + " \\in\\RR^{k},\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "which collects the values of the training input function at the same times as the training snapshots." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "U = training_input(t)\n", + "\n", + "print(f\"Training snapshots:\\t{Q.shape=}\")\n", + "print(f\"Training inputs:\\t{U.shape=}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will use a {class}`opinf.basis.PODBasis` to reduce the dimension of the snapshot training data, which approximates the discretized state vector as $\\q(t) \\approx \\Vr\\qhat(t)$ for some $\\Vr\\in\\RR^{n\\times r}$ with orthonormal columns and $\\qhat(t)\\in\\RR^{r}$, with and $r\\ll n$.\n", + "Input training data are *not* typically compressed with dimensionality reduction or subjected to other pre-processing routines.\n", + "Because the FOM {eq}`eq_inputs_fom` has the linear-time invariant form $\\ddt\\q(t) = \\A\\q(t) + \\B u(t)$, we seek a ROM with the structure, i.e.,\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\ddt\\qhat(t) = \\Ahat\\qhat(t) + \\Bhat u(t),\n", + " \\qquad\n", + " \\qhat(0) = \\Vr\\trp\\q_0.\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "Data for the time derivative $\\ddt\\qhat(t)$ are estimated in this example with sixth-order finite differences using {class}`opinf.ddt.UniformFiniteDifferencer`." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{dropdown} Why Use the Same Structure?\n", + "\n", + "An OpInf ROM should have the same structure as an intrusive Galerkin ROM.\n", + "The Galerkin ROM for {eq}`eq_inputs_fom` is derived by substituting in the approximation $\\q(t)\\approx\\Vr\\qhat(t)$, yielding\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\ddt\\Vr\\qhat(t) = \\A\\Vr\\qhat(t) + \\B u(t),\n", + " \\qquad\n", + " \\Vr\\qhat(0) = \\q_0.\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "Next, left multiply by $\\Vr\\trp$ and use the fact that $\\Vr\\trp\\Vr = \\I$ to get the following:\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\ddt\\qhat(t) = \\tilde{\\A}\\qhat(t) + \\tilde{\\B}u(t),\n", + " \\qquad\n", + " \\qhat(0) = \\Vr\\trp\\q_0,\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "where $\\tilde{\\A} = \\Vr\\trp\\A\\Vr \\in \\RR^{r\\times r}$ and $\\tilde{\\B} = \\Vr\\trp\\B\\in\\RR^{r}$.\n", + "Note that this ROM has the same input function $u(t)$ as the FOM.\n", + ":::" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Training input data are passed to {meth}`opinf.rom.ROM.fit()` as the `inputs` argument." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rom = opinf.ROM(\n", + " basis=opinf.basis.PODBasis(residual_energy=1e-6),\n", + " ddt_estimator=opinf.ddt.UniformFiniteDifferencer(t, \"ord6\"),\n", + " model=opinf.models.ContinuousModel(\"AB\"),\n", + ")\n", + "\n", + "with opinf.utils.TimedBlock(\"Fitting OpInf ROM\"):\n", + " rom.fit(Q, inputs=U)\n", + "\n", + "with opinf.utils.TimedBlock(\"Reduced-order solve\"):\n", + " Q_ROM = rom.predict(q0, t_all, input_func=training_input, method=\"BDF\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_two_datasets(\n", + " Q_ROM,\n", + " Q_all,\n", + " training_input,\n", + " \"Reduced-order model solution\",\n", + " \"Full-order model solution\",\n", + " cutoff=t[-1],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For a closer look at the difference between the FOM and ROM solutions, we compute the relative $\ell_2$-norm error of the ROM solution as a function of time using {func}`opinf.post.lp_error()` and the relative Frobenius-norm error using {func}`opinf.post.frobenius_error()`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "tags": [ + "hide-input" + ] + }, + "outputs": [], + "source": [ + "def plot_errors_over_time(\n", + " Ztrue, basis, Z1, label1, Z2=None, label2=None, cutoff=None\n", + "):\n", + " \"\"\"Plot normalized absolute projection error and ROM error(s)\n", + " as a function of time.\n", + " \"\"\"\n", + " _, ax = plt.subplots(1, 1)\n", + "\n", + " projection_err = opinf.post.lp_error(Ztrue, basis.project(Ztrue))[1]\n", + " ax.semilogy(t_all, projection_err, \"C3-\", lw=1, label=\"Projection Error\")\n", + "\n", + " relative_error = opinf.post.lp_error(Ztrue, Z1)[1]\n", + " ax.semilogy(t_all, relative_error, \"C0--\", lw=1, label=label1)\n", + "\n", + " if Z2 is not None:\n", + " relative_error = opinf.post.lp_error(Ztrue, Z2)[1]\n", + " ax.semilogy(t_all, relative_error, \"C5-.\", lw=1, label=label2)\n", + "\n", + " if cutoff is not None:\n", + " ax.axvline(cutoff, color=\"gray\", linewidth=1, linestyle=\"--\")\n", + " ymin = projection_err.min() / 4\n", + " ax.text(cutoff - 10 * dt, ymin, \"training\", ha=\"right\", color=\"gray\")\n", + " ax.text(cutoff + 10 * dt, ymin, \"prediction\", ha=\"left\", color=\"gray\")\n", + " ax.set_ylim(bottom=ymin / 2)\n", + "\n", + " ax.set_xlim(t_all[0], t_all[-1])\n", + " ax.set_xlabel(r\"$t$\")\n", + " ax.set_ylabel(\"Relative error\")\n", + " ax.legend(loc=\"lower right\")\n", + " plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_errors_over_time(Q_all, rom.basis, Q_ROM, \"OpInf ROM error\", cutoff=t[-1])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "error_opinf = opinf.post.frobenius_error(Q_all, Q_ROM)[1]\n", + "print(f\"OpInf ROM error:\\t{error_opinf:.4e}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Comparison to the Intrusive Galerkin ROM" + ] + }, + { + 
"cell_type": "markdown", + "metadata": {}, + "source": [ + "The classical intrusive Galerkin ROM for this problem is given by\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\ddt\\qhat(t) = \\tilde{\\A}\\qhat(t) + \\tilde{\\B}u(t),\n", + " \\qquad\n", + " \\qhat(0) = \\Vr\\trp\\q_0,\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "where $\\tilde{\\A} = \\Vr\\trp\\A\\Vr \\in \\RR^{r\\times r}$ and $\\tilde{\\B} = \\Vr\\trp\\B\\in\\RR^{r}$.\n", + "Here, we form this ROM explicitly (using the same basis matrix $\\Vr$ as before) and compare it to our existing OpInf ROM." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "rom_intrusive = opinf.ROM(\n", + " basis=rom.basis,\n", + " model=fom.galerkin(rom.basis.entries), # Explicitly project FOM operators.\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with opinf.utils.TimedBlock(\"Reduced-order model solve (intrusive)\"):\n", + " Q_ROM_intrusive = rom_intrusive.predict(\n", + " q0, t_all, input_func=training_input, method=\"BDF\"\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_errors_over_time(\n", + " Q_all,\n", + " rom.basis,\n", + " Q_ROM,\n", + " \"OpInf ROM error\",\n", + " Q_ROM_intrusive,\n", + " \"Intrusive ROM error\",\n", + " cutoff=t[-1],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "error_intrusive = opinf.post.frobenius_error(Q_all, Q_ROM_intrusive)[1]\n", + "error_projection = rom.basis.projection_error(Q_all, relative=True)\n", + "\n", + "print(\n", + " \"Relative Frobenius-norm errors\",\n", + " \"-\" * 33,\n", + " f\"Projection error:\\t{error_projection:%}\",\n", + " f\"OpInf ROM error:\\t{error_opinf:%}\",\n", + " f\"Intrusive ROM error:\\t{error_intrusive:%}\",\n", + " sep=\"\\n\",\n", + ")" + ] + }, + { 
+ "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this experiment, the OpInf ROM and the corresponding intrusive ROM have comparable error, even though the OpInf ROM is calibrated without intrusive access to the FOM." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Generalization to New Inputs" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The previous experiment uses a single choice of $u(t)$ for the training and for the prediction in time.\n", + "Now, we define a new choice of input function $u(t)$,\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " u_\\text{test}(t)\n", + " = 1 + t(1 - t),\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "and evaluate the FOM and ROM for this new input." + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "def test_input(t):\n", + " return 1 + t * (1 - t)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with opinf.utils.TimedBlock(\"Full-order solve\"):\n", + " Qtest_FOM = full_order_solve(t_all, test_input)\n", + "\n", + "with opinf.utils.TimedBlock(\"Reduced-order solve (OpInf)\"):\n", + " Qtest_ROM = rom.predict(q0, t_all, test_input, method=\"BDF\")\n", + "\n", + "with opinf.utils.TimedBlock(\"Reduced-order solve (intrusive)\"):\n", + " Qtest_ROM_intrusive = rom_intrusive.predict(\n", + " q0, t_all, test_input, method=\"BDF\"\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_two_datasets(\n", + " Qtest_ROM,\n", + " Qtest_FOM,\n", + " test_input,\n", + " \"OpInf Reduced-order model solution\",\n", + " \"Full-order model solution\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plot_errors_over_time(\n", + " Qtest_FOM,\n", + " rom.basis,\n", + " Qtest_ROM,\n", + " \"OpInf ROM 
error\",\n", + " Qtest_ROM_intrusive,\n", + " \"Intrusive ROM error\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Both ROMs perform well with a new input function, but the intrusive ROM performs slightly better than the OpInf ROM.\n", + "This is typical; intrusive ROMs are often more robust and generalizable than standard OpInf ROMs, but OpInf ROMs tend to reproduce training data better than intrusive ROMs." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Multiple Training Trajectories" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If data corresponding to several choices of the input function $u(t)$ are available for training, we collect a list of snapshot matrices and a list of corresponding inputs to pass to `fit()`." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Training Data Generation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Below, we solve the PDE using the three input functions for training data:\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " &u_\\text{train}^{(1)}(t) = e^{-t},\n", + " &&&\n", + " &u_\\text{train}^{(2)}(t) = 1 + \\frac{1}{2}t^2,\n", + " &&&\n", + " &u_\\text{train}^{(3)}(t) = 1 - \\frac{1}{2}\\sin(\\pi t).\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "The following input functions are used for testing.\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " &u_\\text{test}^{(1)}(t) = 1 - \\frac{1}{2}\\sin(3\\pi t),\n", + " &&&\n", + " &u_\\text{test}^{(2)}(t) = 1 + 25 (t (t - 1))^3,\n", + " &&&\n", + " &u_\\text{test}^{(3)}(t) = 1 + e^{-2t}\\sin(\\pi t).\n", + "\\end{aligned}\n", + "$$" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [], + "source": [ + "training_inputs = [\n", + " lambda t: np.exp(-t),\n", + " lambda t: 1 + t**2 / 2,\n", + " lambda t: 1 - np.sin(np.pi * t) / 2,\n", + "]\n", + "\n", + "testing_inputs = [\n", + " 
lambda t: 1 - np.sin(3 * np.pi * t) / 3,\n", + " lambda t: 1 + 25 * (t * (t - 1)) ** 3,\n", + " lambda t: 1 + np.exp(-2 * t) * np.sin(np.pi * t),\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-input" + ] + }, + "outputs": [], + "source": [ + "# Visualize the input functions.\n", + "fig, [ax1, ax2] = plt.subplots(1, 2, sharex=True)\n", + "c = 0\n", + "for input_func in training_inputs:\n", + " ax1.plot(t_all, input_func(t_all), color=f\"C{c}\", lw=1)\n", + " c += 1\n", + "for input_func in testing_inputs:\n", + " ax2.plot(t_all, input_func(t_all), color=f\"C{c}\", lw=1)\n", + " c += 1\n", + "\n", + "ax1.set_title(\"Training inputs\")\n", + "ax2.set_title(\"Testing inputs\")\n", + "# ax1.axvline(t[-1], color=\"k\", lw=1)\n", + "ax1.axvline(t[-1], color=\"gray\", linewidth=1, linestyle=\"--\")\n", + "ax1.text(t[-1] - 10 * dt, 1.4, \"training\", ha=\"right\", color=\"gray\")\n", + "ax1.text(t[-1] + 10 * dt, 1.4, \"prediction\", ha=\"left\", color=\"gray\")\n", + "for ax in (ax1, ax2):\n", + " ax.set_xlim(t_all[0], t_all[-1])\n", + " ax.set_xlabel(r\"$t$\")\n", + " ax.set_ylabel(r\"$u(t)$\")\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [], + "source": [ + "# Solve the full-order model for each training input and collect results.\n", + "Qs = [] # State snapshots.\n", + "Us = [] # Corresponding inputs.\n", + "\n", + "for u in training_inputs:\n", + " Qs.append(full_order_solve(t, u))\n", + " Us.append(u(t))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rom = opinf.ROM(\n", + " basis=opinf.basis.PODBasis(residual_energy=1e-6),\n", + " ddt_estimator=opinf.ddt.UniformFiniteDifferencer(t, \"ord6\"),\n", + " model=opinf.models.ContinuousModel(\"AB\"),\n", + ")\n", + "\n", + "with opinf.utils.TimedBlock(\"Fitting OpInf ROM\"):\n", + " rom.fit(Qs, inputs=Us)\n", + "\n", + 
"rom_intrusive = opinf.ROM(\n", + " basis=rom.basis,\n", + " model=fom.galerkin(rom.basis.entries),\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for i, u in enumerate(testing_inputs):\n", + " print(f\"Test input function {i+1:d}\")\n", + "\n", + " with opinf.utils.TimedBlock(\"Full-order solve\"):\n", + " Q_FOM = full_order_solve(t_all, u)\n", + "\n", + " with opinf.utils.TimedBlock(\"Reduced-order solve (OpInf)\"):\n", + " Q_ROM = rom.predict(q0, t_all, u, method=\"BDF\")\n", + "\n", + " with opinf.utils.TimedBlock(\"Reduced-order solve (intrusive)\"):\n", + " Q_ROM_intrusive = rom_intrusive.predict(q0, t_all, u, method=\"BDF\")\n", + "\n", + " plot_two_datasets(\n", + " Q_ROM,\n", + " Q_FOM,\n", + " u,\n", + " \"Reduced-order model solution (OpInf)\",\n", + " \"Full-order model solution\",\n", + " )\n", + "\n", + " plot_errors_over_time(\n", + " Q_FOM,\n", + " rom.basis,\n", + " Q_ROM,\n", + " \"OpInf ROM error\",\n", + " Q_ROM_intrusive,\n", + " \"Intrusive ROM error\",\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "TODO: final comment." 
+ ] + } + ], + "metadata": { + "celltoolbar": "Tags", + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.3" + }, + "toc-showmarkdowntxt": false, + "toc-showtags": true + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/source/tutorials/heat_equation.ipynb b/docs/source/tutorials/parametric.ipynb similarity index 99% rename from docs/source/tutorials/heat_equation.ipynb rename to docs/source/tutorials/parametric.ipynb index e270f0d2..254919c5 100644 --- a/docs/source/tutorials/heat_equation.ipynb +++ b/docs/source/tutorials/parametric.ipynb @@ -6,7 +6,7 @@ "toc-hr-collapsed": false }, "source": [ - "# Heat Equation" + "# External Inputs" ] }, { @@ -15,10 +15,7 @@ "toc-hr-collapsed": false }, "source": [ - "The fundamental goal of model reduction is to efficiently make physics-based predictions. Given synthetic or experimental data that was generated or collected under a certain set of conditions, we aim to construct a cost-effective model that produces accurate solutions under new sets of conditions. This tutorial explores the following prediction problems for the heat equation example of {cite}`peherstorfer2016opinf`:\n", - "1. Predicting **forward in time**.\n", - "2. Using new time-dependent **boundary conditions**.\n", - "3. Changing the **system parameters** (e.g., coefficients in the governing equation)." + "The fundamental goal of model reduction is to efficiently make physics-based predictions. Given synthetic or experimental data that was generated or collected under a certain set of conditions, we aim to construct a cost-effective model that produces accurate solutions under new sets of conditions. 
The first tutorial showed an example of evaluating a reduced-order model (ROM) for various initial conditions. This tutorial focuses on problems with external time-dependent inputs." ] }, { From c7df17767671ebf52333eac0398381a1206f1405 Mon Sep 17 00:00:00 2001 From: Shane Date: Mon, 9 Sep 2024 21:26:46 -0600 Subject: [PATCH 42/48] ROM check fit args --- src/opinf/roms/_base.py | 22 +++++++++++++++++----- src/opinf/roms/_nonparametric.py | 2 ++ src/opinf/roms/_parametric.py | 1 + tests/roms/test_nonparametric.py | 14 ++++++++++++++ tests/roms/test_parametric.py | 14 ++++++++++++++ 5 files changed, 48 insertions(+), 5 deletions(-) diff --git a/src/opinf/roms/_base.py b/src/opinf/roms/_base.py index d8b3945b..9d04fe2c 100644 --- a/src/opinf/roms/_base.py +++ b/src/opinf/roms/_base.py @@ -287,6 +287,23 @@ def project(self, states): return self.decode(self.encode(states)) # Abstract methods -------------------------------------------------------- + def _check_fit_args(self, lhs, inputs): + """Verify required arguments for :meth:`fit()`.""" + + # Make sure lhs is given if required. + if lhs is None and self._iscontinuous and self.ddt_estimator is None: + raise ValueError( + "argument 'lhs' required when model is time-continuous" + " and ddt_estimator=None" + ) + + # Make sure inputs are passed in correctly when required. 
+ if inputs is None and self.model._has_inputs: + raise ValueError( + "argument 'inputs' required (model depends on external inputs)" + ) + + # Training ---------------------------------------------------------------- @abc.abstractmethod def fit( self, @@ -372,11 +389,6 @@ def fit( # Time derivative estimation / discrete LHS if lhs is None: if self._iscontinuous: - if self.ddt_estimator is None: - raise ValueError( - "argument 'lhs' required when model is time-continuous" - " and ddt_estimator=None" - ) if inputs is None: states, lhs = zip( *[self.ddt_estimator.estimate(Q) for Q in states] diff --git a/src/opinf/roms/_nonparametric.py b/src/opinf/roms/_nonparametric.py index c05087b4..02bb4b5b 100644 --- a/src/opinf/roms/_nonparametric.py +++ b/src/opinf/roms/_nonparametric.py @@ -106,6 +106,8 @@ def fit( ------- self """ + _BaseROM._check_fit_args(self, lhs=lhs, inputs=inputs) + # Single trajectory case. if states[0].ndim == 1: states = [states] diff --git a/src/opinf/roms/_parametric.py b/src/opinf/roms/_parametric.py index 58240e99..2b950a89 100644 --- a/src/opinf/roms/_parametric.py +++ b/src/opinf/roms/_parametric.py @@ -105,6 +105,7 @@ def fit( ------- self """ + _BaseROM._check_fit_args(self, lhs=lhs, inputs=inputs) states, lhs, inputs = _BaseROM.fit( self, states=states, diff --git a/tests/roms/test_nonparametric.py b/tests/roms/test_nonparametric.py index 962eeed1..bf53c831 100644 --- a/tests/roms/test_nonparametric.py +++ b/tests/roms/test_nonparametric.py @@ -47,6 +47,20 @@ def test_fit(self, n=10, m=3, s=3, k0=50): lhs = [np.zeros_like(Q) for Q in states] inputs = [np.ones((m, Q.shape[-1])) for Q in states] + rom = self.ROM(model=opinf.models.ContinuousModel("cBH")) + with pytest.raises(ValueError) as ex: + rom.fit(states, inputs) + assert ex.value.args[0] == ( + "argument 'inputs' required (model depends on external inputs)" + ) + + with pytest.raises(ValueError) as ex: + rom.fit(states, inputs=inputs) + assert ex.value.args[0] == ( + "argument 'lhs' 
required when model is time-continuous" + " and ddt_estimator=None" + ) + def _fit(prom, withlhs=True, singletrajectory=False): kwargs = dict(states=states) if withlhs: diff --git a/tests/roms/test_parametric.py b/tests/roms/test_parametric.py index 217d8c83..1b06f724 100644 --- a/tests/roms/test_parametric.py +++ b/tests/roms/test_parametric.py @@ -62,6 +62,20 @@ def test_fit(self, n=20, m=3, s=8, k0=50): lhs = [np.zeros_like(Q) for Q in states] inputs = [np.ones((m, Q.shape[-1])) for Q in states] + rom = self.ROM(model=opinf.models.InterpContinuousModel("AB")) + with pytest.raises(ValueError) as ex: + rom.fit(parameters, states, inputs) + assert ex.value.args[0] == ( + "argument 'inputs' required (model depends on external inputs)" + ) + + with pytest.raises(ValueError) as ex: + rom.fit(parameters, states, inputs=inputs) + assert ex.value.args[0] == ( + "argument 'lhs' required when model is time-continuous" + " and ddt_estimator=None" + ) + def _fit(prom, withlhs=True): kwargs = dict(parameters=parameters, states=states) if withlhs: From 2cd67e28fb8d1b72a1ad68cb4289d071fe1719a2 Mon Sep 17 00:00:00 2001 From: Shane Date: Mon, 9 Sep 2024 21:27:41 -0600 Subject: [PATCH 43/48] finish inputs tutorial --- docs/source/tutorials/inputs.ipynb | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/docs/source/tutorials/inputs.ipynb b/docs/source/tutorials/inputs.ipynb index de463e4f..8399bfba 100644 --- a/docs/source/tutorials/inputs.ipynb +++ b/docs/source/tutorials/inputs.ipynb @@ -31,7 +31,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We begin with a problem with external inputs that are parameterized by a scalar-valued function $u:\\RR\\to\\RR.$" + "We consider a problem with external inputs that are parameterized by a scalar-valued function $u:\\RR\\to\\RR.$" ] }, { @@ -938,7 +938,23 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "TODO: final comment." 
+ ":::{admonition} Multi-dimensional Inputs\n", + ":class: tip\n", + "\n", + "The examples in this tutorial use a scalar-valued input function $u:\\RR\\to\\RR$.\n", + "For models with vector inputs $\\u:\\RR\\to\\RR^m$ with $m > 1$, training inputs are collected into a matrix with $m$ rows:\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\U = \\left[\\begin{array}{cccc}\n", + " \\u(t_0) & \\u(t_1) & \\cdots & \\u_(t_{k-1})\n", + " \\end{array}\\right]\n", + " \\in \\RR^{m \\times k}.\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "This is the matrix used for the `inputs` argument of `fit()`.\n", + ":::" ] } ], From 87d085b2e5f8aa4dfb51a9d923a9789012fc5cad Mon Sep 17 00:00:00 2001 From: Shane Date: Mon, 9 Sep 2024 21:39:10 -0600 Subject: [PATCH 44/48] update changelog --- docs/source/opinf/changelog.md | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/docs/source/opinf/changelog.md b/docs/source/opinf/changelog.md index 9021167e..c35ebabc 100644 --- a/docs/source/opinf/changelog.md +++ b/docs/source/opinf/changelog.md @@ -5,6 +5,28 @@ New versions may introduce substantial new features or API adjustments. ::: +## Version 0.5.8 + +Support for affine-parametric problems: + +- Affine-parametric operator classes `AffineConstantOperator`, `AffineLinearOperator`, etc. +- Parametric model classes `ParametricContinuousModel`, `ParametricDiscreteModel`. +- `ParametricROM` class. +- Updates to operator / model documentation. + +Renamed interpolatory operators / model classes from `Interpolated` to `Interp`. +Old names are deprecated but not yet removed. + +Miscellaneous: + +- Reorganized and expanded tutorials. +- Added and documented `opinf.utils.TimedBlock` context manager for quick timing of code blocks. +- Updated structure for some unit tests. +- Refactored interpolatory operators. +- Standardized string representations, added `[Parametric]ROM.__str__()`. +- Removed some public functions from `operators`, regrouped in `operators._utils`. 
+- Removed some public functions from `models`, regrouped in `models._utils`. + ## Version 0.5.7 Updates to `opinf.lstsq`: From fe9199908a48940ce3628037df324cb276a284cd Mon Sep 17 00:00:00 2001 From: Shane Date: Tue, 10 Sep 2024 13:21:11 -0600 Subject: [PATCH 45/48] p = 1 case for affine operators --- src/opinf/operators/_affine.py | 12 ++++++++---- tests/operators/test_affine.py | 22 ++++++++++++++++++++++ 2 files changed, 30 insertions(+), 4 deletions(-) diff --git a/src/opinf/operators/_affine.py b/src/opinf/operators/_affine.py index 3f28dc27..eb8ee04b 100644 --- a/src/opinf/operators/_affine.py +++ b/src/opinf/operators/_affine.py @@ -185,7 +185,7 @@ def entries(self) -> list: return ParametricOpInfOperator.entries.fget(self) @property - def nterms(self): + def nterms(self) -> int: r"""Number of terms :math:`A_{\ell}` in the affine expansion.""" return self.__nterms @@ -227,7 +227,7 @@ def set_entries(self, entries, fromblock: bool = False) -> None: [self._OperatorClass(A).entries for A in entries], ) - def __str__(self): + def __str__(self) -> str: lines = ParametricOpInfOperator.__str__(self).split("\n") lines.insert(-1, f" expansion terms: {self.nterms}") return "\n".join(lines) @@ -250,8 +250,10 @@ def evaluate(self, parameter): if self.parameter_dimension is None: self._set_parameter_dimension_from_values([parameter]) self._check_parametervalue_dimension(parameter) - thetamus = self.coeffs(parameter) - entries = sum([tm * A for tm, A in zip(thetamus, self.entries)]) + theta_mus = self.coeffs(parameter) + if self.nterms == 1 and np.isscalar(theta_mus): + theta_mus = [theta_mus] + entries = sum([tm * A for tm, A in zip(theta_mus, self.entries)]) return self._OperatorClass(entries) # Dimensionality reduction ------------------------------------------------ @@ -407,6 +409,8 @@ def datablock(self, parameters, states, inputs=None) -> np.ndarray: for mu, Q, U in zip(parameters, states, inputs): Di = self._OperatorClass.datablock(Q, U) theta_mus = self.coeffs(mu) 
+ if self.nterms == 1 and np.isscalar(theta_mus): + theta_mus = [theta_mus] blockcolumns.append(np.vstack([theta * Di for theta in theta_mus])) return np.hstack(blockcolumns) diff --git a/tests/operators/test_affine.py b/tests/operators/test_affine.py index 87284d25..b8ba449c 100644 --- a/tests/operators/test_affine.py +++ b/tests/operators/test_affine.py @@ -162,6 +162,17 @@ def test_evaluate(self, r=9, m=4): ) assert np.allclose(op_mu.entries, Amu) + # Special case: scalar parameter A(mu) = mu A0. + + def _check(newop): + op_mu = newop.evaluate(0.5) + assert isinstance(op_mu, newop._OperatorClass) + assert op_mu.entries.shape == arrays[0].shape + assert np.allclose(op_mu.entries, arrays[0] / 2) + + _check(self.OpClass(lambda mu: mu, nterms=1, entries=[arrays[0]])) + _check(self.OpClass(1, entries=[arrays[0]])) + def test_galerkin(self, r=9, m=4): """Test galerkin().""" ncoeffs = len(self.thetas1) @@ -198,6 +209,17 @@ def test_opinf(self, s=10, k=15, r=11, m=3): assert block.shape[0] == dim assert block.shape[1] == s * k + # Special case: scalar parameter A(mu) = mu A0. 
+ + def _check(newop): + block = newop.datablock(np.linspace(0, 1, s), states, inputs) + dim = newop.operator_dimension(1, r, m) + assert block.shape[0] == dim + assert block.shape[1] == s * k + + _check(self.OpClass(lambda mu: mu, nterms=1, entries=[arrays[0]])) + _check(self.OpClass(1, entries=[arrays[0]])) + def test_copysaveload(self, r=10, m=2, target="_affinesavetest.h5"): """Test copy(), save(), and load().""" ncoeffs = len(self.thetas1) From 431c1b5c856d5fada75d605f066783abe7403817 Mon Sep 17 00:00:00 2001 From: Shane Date: Tue, 10 Sep 2024 13:22:16 -0600 Subject: [PATCH 46/48] fix doc typos --- docs/source/api/operators.ipynb | 2 +- docs/source/tutorials/basics.ipynb | 1 - docs/source/tutorials/inputs.ipynb | 8 ++++---- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/docs/source/api/operators.ipynb b/docs/source/api/operators.ipynb index 0512cd8a..5d649ce1 100644 --- a/docs/source/api/operators.ipynb +++ b/docs/source/api/operators.ipynb @@ -1122,7 +1122,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Affine parametric operators are instantiated with a function $\\boldsymbol{theta}_{\\ell}(\\mu) = [~\\theta_{ell}^{(0)}(\\bfmu)~~\\cdots~~\\theta_{ell}^{(A_{\\ell}-1)}(\\bfmu)~]\\trp$ for the affine expansion coefficients, the number of terms $A_{\\ell}$ in the expansion, and with or without the operator matrices $\\Ohat_{\\ell}^{(1)},\\ldots,\\Ohat_{\\ell}^{(A_{\\ell})}$." + "Affine parametric operators are instantiated with a function $\\boldsymbol{\\theta}_{\\ell}(\\mu) = [~\\theta_{\\ell}^{(0)}(\\bfmu)~~\\cdots~~\\theta_{\\ell}^{(A_{\\ell}-1)}(\\bfmu)~]\\trp$ for the affine expansion coefficients, the number of terms $A_{\\ell}$ in the expansion, and with or without the operator matrices $\\Ohat_{\\ell}^{(1)},\\ldots,\\Ohat_{\\ell}^{(A_{\\ell})}$." 
] }, { diff --git a/docs/source/tutorials/basics.ipynb b/docs/source/tutorials/basics.ipynb index 2cc8ef5d..81c84847 100644 --- a/docs/source/tutorials/basics.ipynb +++ b/docs/source/tutorials/basics.ipynb @@ -79,7 +79,6 @@ "metadata": {}, "outputs": [], "source": [ - "import time\n", "import numpy as np\n", "import pandas as pd\n", "import scipy.sparse\n", diff --git a/docs/source/tutorials/inputs.ipynb b/docs/source/tutorials/inputs.ipynb index 8399bfba..d76f13eb 100644 --- a/docs/source/tutorials/inputs.ipynb +++ b/docs/source/tutorials/inputs.ipynb @@ -99,7 +99,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "In this section a ROM is trained using data collected for a single choice of the input function $u(t)$." + "In this section a ROM is trained using data collected for a single choice of the input function $u(t).$" ] }, { @@ -113,7 +113,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "As in the last tutorial, we use a centered finite difference approximation for the spatial derivative to arrive at a finite-dimensional system of ordinary differential equations.\n", + "As in the last tutorial, we use a centered finite difference approximation for the spatial derivative to arrive at a system of $n$ ordinary differential equations.\n", "This time, due to the nonzero boundary conditions, the system takes the form\n", "\n", "$$\n", @@ -124,7 +124,7 @@ "\\end{aligned}\n", "$$ (eq_inputs_fom)\n", "\n", - "where $\\A\\in\\RR^{n\\times n}$ and $\\B\\in\\RR^{n}$.\n", + "where $\\q:\\RR\\to\\RR^n$, $\\A\\in\\RR^{n\\times n}$, and $\\B\\in\\RR^{n}$.\n", "The system {eq}`eq_inputs_fom` is the _full-order model_ (FOM), which we will use to generate training data for the time domain $[0, T'] \\subset [0, T]$." 
] }, @@ -226,7 +226,7 @@ "\n", "# Construct the temporal domain.\n", "T = 1\n", - "K = T * 10**3 + 1\n", + "K = 10**3 + 1\n", "t_all = np.linspace(0, T, K)\n", "dt = t_all[1] - t_all[0]\n", "\n", From 09cc274f3ea546f14dcb3aedbf7e531334bbea4d Mon Sep 17 00:00:00 2001 From: Shane Date: Tue, 10 Sep 2024 16:27:16 -0600 Subject: [PATCH 47/48] draft parametric problems tutorial --- docs/source/tutorials/inputs.ipynb | 16 +- docs/source/tutorials/parametric.ipynb | 1582 ++++-------------------- 2 files changed, 276 insertions(+), 1322 deletions(-) diff --git a/docs/source/tutorials/inputs.ipynb b/docs/source/tutorials/inputs.ipynb index d76f13eb..8802b708 100644 --- a/docs/source/tutorials/inputs.ipynb +++ b/docs/source/tutorials/inputs.ipynb @@ -422,7 +422,7 @@ "source": [ "We will use a {class}`opinf.basis.PODBasis` to reduce the dimension of the snapshot training data, which approximates the discretized state vector as $\\q(t) \\approx \\Vr\\qhat(t)$ for some $\\Vr\\in\\RR^{n\\times r}$ with orthonormal columns and $\\qhat(t)\\in\\RR^{r}$, with and $r\\ll n$.\n", "Input training data are *not* typically compressed with dimensionality reduction or subjected to other pre-processing routines.\n", - "Because the FOM {eq}`eq_inputs_fom` has the linear-time invariant form $\\ddt\\q(t) = \\A\\q(t) + \\B u(t)$, we seek a ROM with the structure, i.e.,\n", + "Because the FOM {eq}`eq_inputs_fom` has the linear-time invariant form $\\ddt\\q(t) = \\A\\q(t) + \\B u(t)$, we seek a ROM with the same structure, i.e.,\n", "\n", "$$\n", "\\begin{aligned}\n", @@ -432,7 +432,19 @@ "\\end{aligned}\n", "$$\n", "\n", - "Data for the time derivative $\\ddt\\qhat(t)$ are estimated in this example with sixth-order finite differences using {class}`opinf.ddt.UniformFiniteDifferencer`." 
+ "Data for the time derivative $\ddt\qhat(t)$ are estimated in this example with sixth-order finite differences using {class}`opinf.ddt.UniformFiniteDifferencer`.\n", + "The underlying least-squares problem to determine $\Ahat$ and $\Bhat$ is given by\n", + "\n", + "$$\n", + "\begin{aligned}\n", + " \min_{\Ahat,\Bhat}\n", + " \sum_{j=0}^{k-1}\left\|\n", + " \Ahat\qhat_{j} + \Bhat\u_j - \dot{\qhat}_j\n", + " \right\|_{2}^{2},\n", + "\end{aligned}\n", + "$$\n", + "\n", + "where $\qhat_j = \qhat(t_j)\in\RR^{r}$ and $u_j = u(t_j)\in\RR$ are the state snapshots and input data, respectively, and $\dot{\qhat}_j \approx \ddt\qhat(t)|_{t=t_j}\in\RR^{r}$ are the estimated time derivatives." ] }, { diff --git a/docs/source/tutorials/parametric.ipynb b/docs/source/tutorials/parametric.ipynb index 254919c5..00bca2b9 100644 --- a/docs/source/tutorials/parametric.ipynb +++ b/docs/source/tutorials/parametric.ipynb @@ -6,48 +6,54 @@ "toc-hr-collapsed": false }, "source": [ - "# External Inputs" + "# Parametric Problems" ] }, { "cell_type": "markdown", - "metadata": { - "toc-hr-collapsed": false - }, + "metadata": {}, "source": [ - "The fundamental goal of model reduction is to efficiently make physics-based predictions. Given synthetic or experimental data that was generated or collected under a certain set of conditions, we aim to construct a cost-effective model that produces accurate solutions under new sets of conditions. The first tutorial showed an example of evaluating a reduced-order model (ROM) for various initial conditions. This tutorial focuses on problems with external time-dependent inputs." + "Many systems depend on independent parameters that describe material properties or other physical characteristics of the phenomenon being modeled.\n", + "In such cases, the operators of a reduced-order model (ROM) should be designed to vary with the system parameters. 
This tutorial demonstrates how to construct and evaluate a parametric ROM through an elementary example." ] }, { "cell_type": "markdown", - "metadata": { - "toc-nb-collapsed": true - }, + "metadata": {}, "source": [ "## Problem Statement" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We consider a problem with a single scalar system parameter $\\mu > 0$." + ] + }, { "cell_type": "markdown", "metadata": {}, "source": [ ":::{admonition} Governing Equations\n", - ":class: attention\n", + ":class: info\n", "\n", - "Let $\\Omega = [0,L]\\subset \\mathbb{R}$ be the spatial domain indicated by the variable $x$, and let $[0,T]\\subset\\mathbb{R}$ be the time domain with variable $t$. We consider the one-dimensional heat equation with non-homogeneous Dirichlet boundary conditions,\n", + "Let $\\Omega = [0,L]\\subset \\RR$ be the spatial domain indicated by the variable $x$, and let $[0,T]\\subset\\RR$ be the time domain with variable $t$. We consider the one-dimensional heat equation with constant non-homogeneous Dirichlet boundary conditions,\n", "\n", - "\\begin{align*}\n", + "$$\n", + "\\begin{aligned}\n", " &\\frac{\\partial}{\\partial t} q(x,t;\\mu) = \\mu\\frac{\\partial^2}{\\partial x^2}q(x,t;\\mu)\n", " & x &\\in\\Omega,\\quad t\\in[0,T],\n", " \\\\\n", - " &q(0,t;\\mu) = q(L,t;\\mu) = u(t)\n", + " &q(0,t;\\mu) = \\frac{1}{2}, \\quad q(L,t;\\mu) = 1\n", " & t &\\in[0,T],\n", " \\\\\n", - " &q(x,0;\\mu) = \\big(e^{\\alpha(x - 1)} + e^{-\\alpha x} - e^{-\\alpha}\\big)u(0)\n", + " &q(x,0;\\mu) = \\big(e^{\\alpha(x - 1)} + e^{-\\alpha x} - e^{-\\alpha}\\big)\n", " & x &\\in \\Omega,\n", - "\\end{align*}\n", + "\\end{aligned}\n", + "$$\n", "\n", - "where the constant $\\mu > 0$ is the thermal diffusivity, $\\alpha>0$ is constant, and $q(x,t;\\mu)$ is the unknown state variable. This is a model for a one-dimensional rod conducting heat with a fixed initial heat profile. 
The temperature at the ends of the rod are governed by the input function $u(t)$, but heat is allowed to diffuse through the rod and flow out at the ends of the domain. We aim to numerically solve for $q(x,t;\\mu)$ efficiently for all $t \\in [0,T]$ and/or for various choices of $u(t)$ and $\\mu$.\n", + "where the constant $\\mu > 0$ is a thermal diffusivity parameter, $\\alpha>0$ is constant, and $q(x,t;\\mu)$ is the unknown state variable. This is a model for a one-dimensional rod conducting heat with a fixed initial heat profile. The temperature at the ends of the rod are fixed, but heat is allowed to diffuse through the rod and flow out at the ends of the domain.\n", ":::" ] }, @@ -55,62 +61,66 @@ "cell_type": "markdown", "metadata": {}, "source": [ - ":::{note}\n", - "This problem can be solved with a straightforward discretization of the spatial domain $\\Omega$ with little computational effort, so using model reduction to speed up the computation is not highly beneficial. However, the way that the user interacts with the package for this problem is highly similar for more complex problems.\n", + ":::{admonition} Objective\n", + ":class: info\n", + "\n", + "Construct a reduced-order model (ROM) which can be solved rapidly to produce approximate solutions $q(x, t; \\mu)$ to the partial differential equation given above for various choices of the diffusivity parameter $\\mu > 0$.\n", + "We will observe data for a few values of $\\mu$, then use the ROM to predict the solution for the entire time domain $[0, T]$ and for new values of $\\mu$. 
\n", + "Hence, the ROM will be **predictive in the parameter** $\\mu$.\n", + "\n", + "\n", + "\n", ":::" ] }, { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Prediction in Time" - ] - }, - { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": 1, "metadata": {}, + "outputs": [], "source": [ - "Our first objective is to get solutions in time beyond a set of available training data.\n", + "import numpy as np\n", + "import scipy.sparse\n", + "import matplotlib.pyplot as plt\n", "\n", - ":::{image} ../../images/summary.svg\n", - ":align: center\n", - ":width: 80 %\n", - ":::" + "import opinf\n", + "\n", + "opinf.utils.mpl_config()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - ":::{admonition} Objective\n", - ":class: attention\n", - "\n", - "Construct a reduced-order model (ROM) of the heat equation that is **predictive in time**. In other words, we will observe data for $t \\in [0, T']$ with $T' < T$, use that data to construct the ROM, and use the ROM to predict the solution for the entire time domain $[0,T]$.\n", - ":::" + "## Full-order Model Definition" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Full-order Model Definition" + "We consider the parameter domain $\\mathcal{P} = [.1,10]\\subset\\RR$.\n", + "A finite element or finite difference discretization leads to a system of differential equations,\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\ddt\\q(t;\\mu)\n", + " = \\c(\\mu) + \\A(\\mu)\\q(t;\\mu),\n", + " \\qquad\n", + " \\q(0) = \\q_0,\n", + "\\end{aligned}\n", + "$$ (eq_parametric_fom)\n", + "\n", + "where $\\q:\\RR\\times\\mathcal{P}\\to\\RR^n,$ $\\c:\\mathcal{P}\\to\\RR^n,$ and $\\A:\\mathcal{P}\\to\\RR^{n\\times n}.$\n", + "This is the full-order model (FOM).\n", + "The constant term $\\c(\\mu)$ arises due to the nonzero boundary conditions.\n", + "In this case, the parametric dependence on $\\mu$ is linear: there are $\\c^{(0)}\\in\\RR^{n}$ and 
$\\A^{(0)}\\in\\RR^{n\\times n}$ such that $\\c(\\mu) = \\mu\\c^{(0)}$ and $\\A(\\mu) = \\mu\\A^{(0)}.$" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "As in the last tutorial, we use a centered finite difference approximation for the spatial derivative to arrive at a first-order system, this time of the form\n", - "\n", - "$$\n", - " \\frac{\\text{d}}{\\text{d}t}\\mathbf{q}(t;\\mu)\n", - " = \\mathbf{A}(\\mu)\\mathbf{q}(t;\\mu) + \\mathbf{B}(\\mu)u(t),\n", - " \\qquad\n", - " \\mathbf{q}(0;\\mu)\n", - " = \\mathbf{q}_0.\n", - "$$ (eq_heat_fom_parametric)\n", - "\n", ":::{dropdown} Discretization details\n", "\n", "We take an equidistant grid $\\{x_i\\}_{i=0}^{n+1} \\subset \\Omega$,\n", @@ -123,7 +133,8 @@ " \\delta x &= \\frac{L}{n+1} = x_{i+1} - x_{i},\\quad i=1,\\ldots,n-1.\n", "\\end{align*}\n", "\n", - "The boundary conditions prescribe $q(x_0,t) = q(x_{n+1},t) = u(t)$. Our goal is to compute $q(x,t)$ at the interior spatial points $x_{1},x_{2},\\ldots,x_{n}$ for various $t\\in[0,T]$, so we consider the state vector $\\mathbf{q}(t) = [~q(x_{1}, t)~\\cdots~q(x_{n}, t)~]^{\\top}\\in\\mathbb{R}^n$ and derive a system governing the evolution of $\\mathbf{q}(t)$ in time.\n", + "The boundary conditions prescribe $q(x_0,t;\\mu) = q(x_{n+1},t;\\mu) = 1$.\n", + "Our goal is to compute $q(x,t)$ at the interior spatial points $x_{1},x_{2},\\ldots,x_{n}$ for various $t\\in[0,T]$, so we consider the state vector $\\q(t;\\mu) = [~q(x_{1}, t;\\mu)~\\cdots~q(x_{n}, t;\\mu)~]\\trp\\in\\RR^n$ and derive a system governing the evolution of $\\q(t;\\mu)$ in time.\n", "\n", "Approximating the spatial derivative with a central finite difference approximation,\n", "\n", @@ -132,51 +143,51 @@ " \\approx \\frac{q(x-\\delta x,t) - 2q(x,t) + q(x+\\delta x,t)}{(\\delta x)^2},\n", "$$\n", "\n", - "we arrive at the following matrices for the full-order model.\n", + "and using the boundary conditions $q(0,t;\\mu) = q(L,t;\\mu) = 1$, we arrive at the following matrices for 
the FOM.\n", "\n", "$$\n", "\begin{aligned}\n", " \c^{(0)} &= \frac{1}{(\delta x)^2}\left[\begin{array}{c}\n", " 1 \\ 0 \\ \vdots \\ 0 \\ 1\n", " \end{array}\right]\in\RR^{n},\n", " &\n", " \A^{(0)} &= \frac{1}{(\delta x)^2}\left[\begin{array}{ccccc}\n", " -2 & 1 & & & \\\n", " 1 & -2 & 1 & & \\\n", " & \ddots & \ddots & \ddots & \\\n", " & & 1 & -2 & 1 \\\n", " & & & 1 & -2 \\\n", " \end{array}\right] \in\RR^{n\times n}.\n", "\end{aligned}\n", "$$\n", 
+ ":::" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Training Data Generation" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Let $L = T = \mu = 1$, $\alpha = 100$, and suppose for now that the boundary conditions are given by the constant input function $u(t) \equiv 1$.\n", - "We begin by simulating the full-order system described above with a uniform time step $\delta t = 10^{-3}$, yielding $10^3 + 1 = 1001$ total time steps (1000 steps past the initial condition).\n", - "We will assume that we can only observe the first $k = 100$ time steps and use the ROM to predict the remaining $901$ steps." + "Let $L = 1$, $T = 1$, and set $\alpha = 100$.\n", + "For this demo, we use $n = 2^{10} - 1 = 1023$ spatial degrees of freedom and record the FOM solution every $\delta t = 0.0025$ time units.\n", + "For each training parameter $\mu_i$, this results in $k = 401$ state snapshots, organized in snapshot matrices\n", + "\n", + "$$\n", + "\begin{aligned}\n", + " \Q_i = \left[\begin{array}{cccc}\n", + " \q(t_0;\mu_i) & \q(t_1;\mu_i) & \cdots & \q(t_{k-1};\mu_i)\n", + " \end{array}\right]\n", + " \in\RR^{n\times k},\n", + " \quad\n", + " i = 0,\ldots, s-1.\n", + "\end{aligned}\n", + "$$\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "import numpy as np\n", - "import scipy.linalg as la\n", - "import scipy.sparse as sparse\n", - "import matplotlib.pyplot as plt\n", - "\n", - "import opinf\n", - "\n", - "opinf.utils.mpl_config()" + "# Get s logarithmically spaced parameter values in D = [.1, 10].\n", + "s = 10\n", + "training_parameters = np.logspace(-1, 1, s)\n", + "print(training_parameters)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "tags": [ "hide-input" ] }, "outputs": [], "source": [ "# Construct the spatial domain.\n", - "L = 1 # Spatial domain length.\n", - "n = 2**7 - 1 # Spatial grid size.\n", 
- "x_all = np.linspace(0, L, n + 2) # Full spatial grid.\n", - "x = x_all[1:-1] # Interior spatial grid (where q is unknown).\n", - "dx = x[1] - x[0] # Spatial resolution.\n", + "L = 1\n", + "n = 2**10 - 1\n", + "x_all = np.linspace(0, L, n + 2)\n", + "x = x_all[1:-1]\n", + "dx = x[1] - x[0]\n", "\n", "# Construct the temporal domain.\n", - "T = 1 # Temporal domain length (final simulation time).\n", - "K = T * 10**3 + 1 # Temporal grid size.\n", - "t = np.linspace(0, T, K) # Temporal grid.\n", - "dt = t[1] - t[0] # Temporal resolution.\n", + "T = 1\n", + "K = 401\n", + "t_all = np.linspace(0, T, K)\n", + "dt = t_all[1] - t_all[0]\n", "\n", - "print(f\"Spatial step size\\tdx = {dx}\")\n", - "print(f\"Temporal step size\\tdt = {dt}\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ "# Construct the full-order state matrix A.\n", "dx2inv = 1 / dx**2\n", "diags = np.array([1, -2, 1]) * dx2inv\n", - "A = sparse.diags(diags, [-1, 0, 1], (n, n))\n", + "A0 = scipy.sparse.diags(diags, [-1, 0, 1], (n, n))\n", "\n", "# Construct the full-order input matrix B.\n", - "B = np.zeros_like(x)\n", - "B[0], B[-1] = dx2inv, dx2inv" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Define the inputs.\n", - "input_func = np.ones_like # Constant input function u(t) = 1.\n", - "U_all = input_func(t) # Inputs over the time domain.\n", + "c0 = np.zeros_like(x)\n", + "c0[0], c0[-1] = dx2inv, dx2inv\n", "\n", - "# Construct the initial condition.\n", + "# Construct the part of the initial condition not dependent on u(t).\n", "alpha = 100\n", "q0 = np.exp(alpha * (x - 1)) + np.exp(-alpha * x) - np.exp(-alpha)\n", "\n", - "print(f\"shape of A:\\t{A.shape}\")\n", - "print(f\"shape of B:\\t{B.shape}\")\n", - "print(f\"shape of q0:\\t{q0.shape}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Since this is a diffusive problem, we will 
use the implicit (backward) Euler method for solving the ODEs.\n", - "For the problem $\\frac{\\text{d}}{\\text{d}t}\\mathbf{q}(t) = \\mathbf{f}(t, \\mathbf{q}(t), \\mathbf{u}(t))$, implicit Euler is defined by the rule\n", - "\n", - "$$\n", - " \\mathbf{q}_{j+1} = \\mathbf{q}_{j} + \\delta t\\,\\mathbf{f}(t_{j+1},\\mathbf{q}_{j+1},u_{j+1}),\n", - "$$\n", - "\n", - "where $\\mathbf{q}_{j} := \\mathbf{q}(t_{j})$ and $u_{j} := u(t_{j})$.\n", - "With the form $\\mathbf{f}(t,\\mathbf{q}(t),u(t)) = \\mathbf{A}\\mathbf{q}(t) + \\mathbf{B}u(t)$, this becomes\n", - "\n", - "$$\n", - " \\mathbf{q}_{j+1} = (\\mathbf{I} - \\delta t \\mathbf{A})^{-1}\\left(\\mathbf{q}_{j} + \\delta t \\mathbf{B} u_{j+1}\\right),\n", - "$$\n", - "\n", - "where $\\mathbf{I}$ is the identity matrix." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "hide-input" - ] - }, - "outputs": [], - "source": [ - "def implicit_euler(t, q0, A, B, U):\n", - " \"\"\"Solve the system\n", - "\n", - " dq / dt = Aq(t) + Bu(t), q(0) = q0,\n", "\n", - " over a uniform time domain via the implicit Euler method.\n", - "\n", - " Parameters\n", - " ----------\n", - " t : (k,) ndarray\n", - " Uniform time array over which to solve the ODE.\n", - " q0 : (n,) ndarray\n", - " Initial condition.\n", - " A : (n, n) ndarray\n", - " State matrix.\n", - " B : (n,) or (n, 1) ndarray\n", - " Input matrix.\n", - " U : (k,) ndarray\n", - " Inputs over the time array.\n", - "\n", - " Returns\n", - " -------\n", - " q : (n, k) ndarray\n", - " Solution to the ODE at time t; that is, q[:,j] is the\n", - " computed solution corresponding to time t[j].\n", + "def full_order_solve(mu, time_domain):\n", + " \"\"\"Solve the full-order model with SciPy.\n", + " Here, u is a callable function.\n", " \"\"\"\n", - " # Check and store dimensions.\n", - " k = len(t)\n", - " n = len(q0)\n", - " B = np.ravel(B)\n", - " assert A.shape == (n, n)\n", - " assert B.shape == (n,)\n", - " assert U.shape == 
(k,)\n", - " I = np.eye(n)\n", + " return scipy.integrate.solve_ivp(\n", + " fun=lambda t, q: mu * (c0 + A0 @ q),\n", + " y0=q0,\n", + " t_span=[time_domain[0], time_domain[-1]],\n", + " t_eval=time_domain,\n", + " method=\"BDF\",\n", + " ).y\n", "\n", - " # Check that the time step is uniform.\n", - " dt = t[1] - t[0]\n", - " assert np.allclose(np.diff(t), dt)\n", "\n", - " # Factor I - dt*A for quick solving at each time step.\n", - " factored = la.lu_factor(I - dt * A)\n", + "Qs = []\n", + "# Solve the full-order model at the training parameter values.\n", + "with opinf.utils.TimedBlock(\"Full-order solves\"):\n", + " for mu in training_parameters:\n", + " Qs.append(full_order_solve(mu, t_all))\n", "\n", - " # Solve the problem by stepping in time.\n", - " q = np.empty((n, k))\n", - " q[:, 0] = q0.copy()\n", - " for j in range(1, k):\n", - " q[:, j] = la.lu_solve(factored, q[:, j - 1] + dt * B * U[j])\n", - "\n", - " return q" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Compute snapshots by solving the equation with implicit_euler().\n", - "Q_all = implicit_euler(t, q0, A, B, U_all)\n", "\n", - "# Retain only the first k snapshots/inputs for training the ROM.\n", - "k = 100 # Number of training snapshots.\n", - "t_train = t[:k] # Temporal domain for training snapshots.\n", - "Q = Q_all[:, :k] # Observed snapshots." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Finally, we visualize the snapshots to get a sense of how the solution looks qualitatively." 
+ "print(f\"\\nSpatial domain:\\t\\t{x.shape=}\")\n", + "print(f\"Spatial step size:\\t{dx=:.10f}\")\n", + "print(f\"\\nFull time domain:\\t{t_all.shape=}\")\n", + "# print(f\"Training time domain:\\t{t.shape=}\")\n", + "print(f\"Temporal step size:\\t{dt=:f}\")\n", + "print(f\"\\nFull-order matrix A0:\\t{A0.shape=}\")\n", + "print(f\"Full-order vector c0:\\t{c0.shape=}\")\n", + "print(f\"\\nInitial condition:\\t{q0.shape=}\")\n", + "print(f\"Training snapshots:\\t{Qs[0].shape=}\")" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "metadata": { "tags": [ "hide-input" @@ -367,27 +280,34 @@ }, "outputs": [], "source": [ - "def plot_heat_data(Z, title, ax=None):\n", - " \"\"\"Visualize temperature data in space and time.\"\"\"\n", + "def plot_data_space(Z, title, ax=None):\n", + " \"\"\"Plot state data over space at multiple instances in time.\"\"\"\n", " if ax is None:\n", " _, ax = plt.subplots(1, 1)\n", "\n", " # Plot a few snapshots over the spatial domain.\n", - " sample_columns = [0, 10, 20, 40, 80, 160, 320, 640]\n", " sample_columns = [0] + [2**d for d in range(10)]\n", " color = iter(plt.cm.viridis_r(np.linspace(0.05, 1, len(sample_columns))))\n", - " while sample_columns[-1] > Z.shape[1]:\n", - " sample_columns.pop()\n", - " leftBC, rightBC = [input_func(x_all[0])], [input_func(x_all[-1])]\n", + " while sample_columns[-1] > Z.shape[1] - 1:\n", + " sample_columns = sample_columns[:-1]\n", " for j in sample_columns:\n", - " q_all = np.concatenate([leftBC, Z[:, j], rightBC])\n", - " ax.plot(x_all, q_all, color=next(color), label=rf\"$q(x,t_{{{j}}})$\")\n", + " q_all = np.concatenate([[0.5], Z[:, j], [1]])\n", + " c = next(color)\n", + " ax.plot(x_all, q_all, lw=1, color=c, label=rf\"$q(x,t_{{{j}}})$\")\n", "\n", " ax.set_xlim(x_all[0], x_all[-1])\n", " ax.set_xlabel(r\"$x$\")\n", " ax.set_ylabel(r\"$q(x,t)$\")\n", " ax.legend(loc=(1.05, 0.05))\n", - " ax.set_title(title)" + " ax.set_title(title)\n", + "\n", + "\n", + "def 
plot_two_datasets(Z1, title1, Z2, title2):\n", + " \"\"\"Plot two datasets side by side.\"\"\"\n", + " _, [ax1, ax2] = plt.subplots(1, 2)\n", + " plot_data_space(Z1, title1, ax1)\n", + " plot_data_space(Z2, title2, ax2)\n", + " ax1.legend([])" ] }, { @@ -396,108 +316,107 @@ "metadata": {}, "outputs": [], "source": [ - "fig, [ax1, ax2] = plt.subplots(1, 2)\n", - "plot_heat_data(Q, \"Snapshot data for training\", ax1)\n", - "plot_heat_data(Q_all, \"Full-order model solution\", ax2)\n", - "ax1.legend([])\n", - "plt.show()" + "for i in [0, s // 2, s - 1]:\n", + " plot_data_space(Qs[i], rf\"Full-order model solution at $\\mu = \\mu_{i}$\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### ROM Construction" + "## Reduced-order Model Construction" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Now that we have snapshot data $\\mathbf{Q} \\in \\mathbb{R}^{n \\times k}$, we can construct [a basis matrix](opinf.basis) $\\mathbf{V}_r \\in \\mathbb{R}^{n \\times r}$. The basis matrix relates the high-dimensional and low-dimensional by $\\mathbf{q}(t) = \\mathbf{V}_{r}\\widehat{\\mathbf{q}}(t)$.\n", + "Now that we have parameter and snapshot data, we instantiate a `opinf.ParametricROM` and pass the training parameter values and the corresponding state snapshots to the `fit()` method.\n", "\n", - "For operator inference (OpInf), we often use the [proper orthogonal decomposition](opinf.basis.PODBasis) (POD) basis. The integer $r$, which defines the dimension of the reduced-order model to be constructed, is usually determined by how quickly the singular values of $\\mathbf{Q}$ decay. 
In this example, we choose the minimal $r$ such that the [residual energy](opinf.basis.residual_energy) is less than a given tolerance $\\varepsilon$, i.e.,\n", + "We will use a {class}`opinf.basis.PODBasis` to reduce the dimension of the snapshot training data, which approximates the discretized state vector as $\\q(t;\\mu) \\approx \\Vr\\qhat(t;\\mu)$ for some $\\Vr\\in\\RR^{n\\times r}$ with orthonormal columns and $\\qhat(t)\\in\\RR^{r}$, with and $r\\ll n$.\n", + "Based on the FOM {eq}`eq_parametric_fom`, we specify a ROM with the following structure:\n", "\n", "$$\n", - "\\frac{\\sum_{j=r + 1}^{k}\\sigma_{j}^{2}}{\\sum_{j=1}^{k}\\sigma_{j}^{2}} = \\frac{||\\mathbf{Q} - \\mathbf{V}_r \\mathbf{V}_r^{\\top}\\mathbf{Q}||_{F}^{2}}{||\\mathbf{Q}||_{F}^{2}} < \\varepsilon.\n", - "$$" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Compute the POD basis, using the residual energy decay to select r.\n", - "basis = opinf.basis.PODBasis(residual_energy=1e-8).fit(Q)\n", - "print(basis)\n", + "\\begin{aligned}\n", + " \\ddt\\qhat(t;\\mu)\n", + " &= \\chat(\\mu) + \\Ahat(\\mu)\\qhat(t;\\mu)\n", + " = \\mu\\chat^{(0)} + \\mu\\Ahat^{(0)}\\qhat(t;\\mu),\n", + "\\end{aligned}\n", + "$$\n", "\n", - "# Check the decay of the singular values and the associated residual energy.\n", - "basis.plot_energy(right=25)\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "::::{margin}\n", - ":::{note}\n", - "In this case, since $u(t) \\equiv 1$ is constant, we could equivalently set `modelform=\"cA\"` to learn a ROM of the form $\\frac{\\text{d}}{\\text{d}t}\\widehat{\\mathbf{q}}(t) = \\widehat{\\mathbf{c}} + \\widehat{\\mathbf{A}}\\widehat{\\mathbf{q}}(t)$, where $\\widehat{\\mathbf{c}}$ is a constant term.\n", - "There is no difference between the two models, i.e., $\\widehat{\\mathbf{c}} = \\widehat{\\mathbf{B}}u(t) = \\widehat{\\mathbf{B}}$, except that `modelform=\"AB\"` 
allows us to use different inputs for $u(t)$ later on.\n", - ":::\n", - "::::" + "where $\\chat^{(0)}\\in\\RR^{r}$ and $\\Ahat^{(0)}\\in\\RR^{r\\times r}.$\n", + "Data for the time derivative $\\ddt\\qhat(t)$ are estimated in this example with sixth-order finite differences using {class}`opinf.ddt.UniformFiniteDifferencer`.\n", + "The underlying least-squares problem to determine $\\chat^{(0)}$ and $\\Ahat^{(0)}$ is given by\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\min_{\\Ahat,\\Bhat}\n", + " \\sum_{i=0}^{s-1}\\sum_{j=0}^{k-1}\\left\\|\n", + " \\mu_{i}\\chat^{(0)} + \\mu_{i}\\Ahat^{(0)}\\qhat_{i,j} - \\dot{\\qhat}_{i,j}\n", + " \\right\\|_{2}^{2},\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "where $\\qhat_{i,j} = \\qhat(t_j;\\mu_i)\\in\\RR^{r}$ are the state snapshots and $\\dot{\\qhat}_{i,j} \\approx \\ddt\\qhat(t;\\mu_{i})|_{t=t_j}\\in\\RR^{r}$ are the estimated time derivatives." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Now we can learn the reduced model with OpInf.\n", - "Because the full-order model is of the form $\\frac{\\text{d}}{\\text{d}t}\\mathbf{q}(t) = \\mathbf{A}\\mathbf{q}(t) + \\mathbf{B}u(t)$, we construct a reduced-order model of the form $\\frac{\\text{d}}{\\text{d}t}\\widehat{\\mathbf{q}}(t) = \\widehat{\\mathbf{A}}\\widehat{\\mathbf{q}}(t) + \\widehat{\\mathbf{B}}u(t)$." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Instantiate the model.\n", - "model = opinf.models.ContinuousModel(\n", - " operators=[\n", - " opinf.operators.LinearOperator(),\n", - " opinf.operators.InputOperator(),\n", - " ]\n", - ")" + ":::{dropdown} Preserving Parametric Structure\n", + "\n", + "An OpInf ROM should have the same structure as an intrusive Galerkin ROM.\n", + "The Galerkin ROM for {eq}`eq_parametric_fom` is derived by substituting in the approximation $\\q(t;\\mu)\\approx\\Vr\\qhat(t;\\mu)$, yielding\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\ddt\\Vr\\qhat(t;\\mu)\n", + " = \\c(\\mu) + \\A(\\mu)\\Vr\\qhat(t;\\mu)\n", + " \\qquad\n", + " \\Vr\\qhat(0) = \\q_0.\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "Next, left multiply by $\\Vr\\trp$ and use the fact that $\\Vr\\trp\\Vr = \\I$ to get the following:\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\ddt\\qhat(t;\\mu)\n", + " = \\tilde{\\c} + \\tilde{\\A}(\\mu)\\qhat(t;\\mu)\n", + " \\qquad\n", + " \\qhat(0) = \\Vr\\trp\\q_0,\n", + "\\end{aligned}\n", + "$$\n", + "\n", + "where $\\tilde{\\c}(\\mu) = \\Vr\\trp\\c(\\mu)\\in\\RR^{r}$ and $\\tilde{\\A}(\\mu) = \\Vr\\trp\\A(\\mu)\\Vr \\in \\RR^{r\\times r}.$\n", + "Finally, using the formulae $\\c(\\mu) = \\mu\\c^{(0)}$ and $\\A(\\mu) = \\mu\\A^{(0)}$, we can further simplify to\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + " \\tilde{\\c}(\\mu)\n", + " &= \\Vr\\trp\\c(\\mu)\n", + " = \\mu\\Vr\\trp\\c^{(0)}\n", + " \\\\\n", + " \\tilde{\\A}(\\mu)\n", + " &= \\Vr\\trp\\A(\\mu)\\Vr\n", + " = \\mu\\Vr\\trp\\A^{(0)}\\Vr.\n", + "\\end{aligned}\n", + "$$\n", + "\n", + ":::" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "To train the model, we need to compress the snapshot data to the low-dimensional subspace defined by the basis.\n", - "We also need the time derivatives $\\frac{d}{dt}\\mathbf{q}(t)$ of the training snapshots.\n", - "If $\\mathbf{A}$ and 
$\\mathbf{B}$ are known, we can set $\\dot{\\mathbf{q}}_{j} = \\mathbf{A}\\mathbf{q}_{j} + \\mathbf{B}u_{j}$. If we do not have access to $\\mathbf{A}$ and $\\mathbf{B}$, we can estimate the time derivatives using finite differences.\n", - "In this case, we use first-order backward differences." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Compress the snapshot data.\n", - "Q_compressed = basis.compress(Q)\n", + ":::{admonition} Interpolatory and Affine Parameterizations\n", + ":class: tip\n", "\n", - "# Estimate time derivatives (dq/dt) for each training snapshot.\n", - "Q_train, Qdot_train, U_train = opinf.ddt.bwd1(Q_compressed, dt, U_all[:k])\n", - "\n", - "print(f\"shape of Q_train:\\t{Q_train.shape}\")\n", - "print(f\"shape of Qdot_train:\\t{Qdot_train.shape}\")\n", - "print(f\"shape of U_train:\\t{U_train.shape}\")" + "In this problem, the dependence on $\\mu$ in the ROM operators $\\chat(\\mu)$ and $\\Ahat(\\mu)$ is known from because the structure from the FOM is preserved by linear projection (see [affine operators](sec-operators-affine)).\n", + "If the dependence on $\\mu$ is not known a-priori or cannot be written in an affine form, [interpolatory operators](sec-operators-interpolated) sometimes provide a feasible alternative.\n", + ":::" ] }, { @@ -506,24 +425,31 @@ "metadata": {}, "outputs": [], "source": [ - "# Train the reduced-order model.\n", - "model.fit(states=Q_train, ddts=Qdot_train, inputs=U_train)\n", - "print(model)" + "rom = opinf.ParametricROM(\n", + " basis=opinf.basis.PODBasis(projection_error=1e-6),\n", + " ddt_estimator=opinf.ddt.UniformFiniteDifferencer(t_all, \"ord6\"),\n", + " model=opinf.models.ParametricContinuousModel(\n", + " operators=[\n", + " opinf.operators.AffineConstantOperator(1),\n", + " opinf.operators.AffineLinearOperator(1),\n", + " ],\n", + " solver=opinf.lstsq.L2Solver(1e-6),\n", + " ),\n", + ").fit(training_parameters, Qs)" ] }, { "cell_type": 
"markdown", "metadata": {}, "source": [ - "### Model Evaluation" + "## Reduced-order Model Evaluation" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Like the FOM, we integrate the learned ROM using the implicit Euler method, using the reduced-order operators $\\widehat{\\mathbf{A}}$ and $\\widehat{\\mathbf{B}}$ and the initial condition $\\widehat{\\mathbf{q}}_{0} = \\mathbf{V}^{\\mathsf{T}}\\mathbf{q}_{0}$.\n", - "The resulting low-dimensional state vectors are reconstructed in the full-dimensional space via $\\mathbf{q}(t) = \\mathbf{V}_{r}\\widehat{\\mathbf{q}}(t)$." + "We start by checking comparing the solutions of the ROM at the training parameter values to the training snapshots." ] }, { @@ -532,43 +458,21 @@ "metadata": {}, "outputs": [], "source": [ - "# Express the initial condition in the coordinates of the basis.\n", - "q0_ = basis.compress(q0)\n", - "\n", - "# Solve the reduced-order model using Implicit Euler.\n", - "Q_ROM = basis.decompress(\n", - " implicit_euler(t, q0_, model.A_.entries, model.B_.entries, U_all)\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "fig, [ax1, ax2] = plt.subplots(1, 2)\n", - "plot_heat_data(Q_ROM, \"Reduced-order model solution\", ax1)\n", - "plot_heat_data(Q_all, \"Full-order model solution\", ax2)\n", - "ax1.legend([])\n", - "plt.show()" + "for i, mu in enumerate(training_parameters):\n", + " with opinf.utils.TimedBlock(\"Reduced-order solve\"):\n", + " Q_ROM = rom.predict(mu, q0, t_all, method=\"BDF\")\n", + " fig, [ax1, ax2] = plt.subplots(1, 2)\n", + " plot_data_space(Qs[i], \"Snapshot data\", ax1)\n", + " plot_data_space(Q_ROM, \"ROM state output\", ax2)\n", + " ax1.legend([])\n", + " plt.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "To quantify the accuracy of the ROM, we evaluate the ROM solution error in the Frobenius norm and compare it to the projection error,\n", - "\n", - "$$\n", - " 
\\text{err}_{\\text{ROM}}\n", - " = \\frac{||\\mathbf{Q}_{\\text{all}} - \\mathbf{Q}_{\\text{ROM}}||_F}{||\\mathbf{Q}_{\\text{all}}||_F},\n", - " \\qquad\n", - " \\text{err}_{\\text{proj}}\n", - " = \\frac{||\\mathbf{Q}_{\\text{all}} - \\mathbf{V}_{r}\\mathbf{V}_{r}^{\\top}\\mathbf{Q}_{\\text{all}}||_F}{||\\mathbf{Q}_{\\text{all}}||_F},\n", - "$$\n", - "\n", - "where $\\mathbf{Q}_{\\text{all}}$ is the full-order model solution over the entire time domain and $\\mathbf{Q}_{\\text{ROM}}$ is the reduced-order model solution." + "Next, we solve the FOM and ROM at new parameter values not included in the training set." ] }, { @@ -577,52 +481,8 @@ "metadata": {}, "outputs": [], "source": [ - "rel_froerr_projection = basis.projection_error(Q_all, relative=True)\n", - "rel_froerr_opinf = opinf.post.frobenius_error(Q_all, Q_ROM)[1]\n", - "\n", - "print(\n", - " \"Relative Frobenius-norm errors\",\n", - " \"-\" * 33,\n", - " f\"projection error:\\t{rel_froerr_projection:%}\",\n", - " f\"OpInf ROM error:\\t{rel_froerr_opinf:%}\",\n", - " sep=\"\\n\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The ROM error cannot be better than the projection error, but the two are pretty close. We also compare the ROM error with the projection error as a function of time, i.e.,\n", - "\n", - "$$\n", - " \\text{err}_{\\text{ROM}}(t)\n", - " = \\frac{\\|\\mathbf{q}(t) - \\mathbf{q}_{\\text{ROM}}(t)\\|_{2}}{\\|\\mathbf{q}(t)\\|_{2}},\n", - " \\qquad\n", - " \\text{err}_{\\text{proj}}(t)\n", - " = \\frac{\\|\\mathbf{q}(t) - \\mathbf{V}_{r}\\mathbf{V}_{r}^{\\mathsf{T}}\\mathbf{q}(t)\\|_{2}}{\\|\\mathbf{q}(t)\\|_{2}},\n", - "$$\n", - "\n", - "where $\\mathbf{q}(t)$ is the full-order solution and $\\mathbf{q}_{\\text{ROM}}(t)$ is the ROM solution at time $t$." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ":::{tip}\n", - "In this problem, $\\mathbf{q}(t) \\to \\mathbf{0}$ as $t$ increases, so a relative error may not be appropriate since $\\|\\mathbf{q}(t)\\|_{2}$ appears in the denominator.\n", - "In situations like this, consider using the _normalized absolute error_ by replacing the denominator with $\\max_{\\tau\\in[0,T]}\\|\\mathbf{q}(t)\\|,$ for example:\n", - "\n", - "$$\n", - "\\begin{aligned}\n", - " \\text{err}_{\\text{ROM}}(t)\n", - " = \\frac{\\|\\mathbf{q}(t) - \\mathbf{q}_{\\text{ROM}}(t)\\|_{2}}{\\max_{\\tau\\in[0,T]}\\|\\mathbf{q}(\\tau)\\|_{2}}.\n", - "\\end{aligned}\n", - "$$\n", - "\n", - "Use `normalize=True` in `opinf.post.lp_error()` to use this error measure instead of the relative error.\n", - ":::" + "test_parameters = np.sqrt(training_parameters[:-1] * training_parameters[1:])\n", + "print(test_parameters)" ] }, { @@ -631,37 +491,23 @@ "metadata": {}, "outputs": [], "source": [ - "projerr_in_time = opinf.post.lp_error(\n", - " Q_all,\n", - " basis.project(Q_all),\n", - " normalize=True,\n", - ")[1]\n", + "errors = []\n", "\n", + "for mu in test_parameters:\n", + " with opinf.utils.TimedBlock(\"Full-order solve\"):\n", + " Q_FOM = full_order_solve(mu, t_all)\n", "\n", - "def plot_errors_over_time(Zlist, labels):\n", - " \"\"\"Plot normalized absolute projection error and ROM errors\n", - " as a function of time.\n", + " with opinf.utils.TimedBlock(\"Reduced-order solve\"):\n", + " Q_ROM = rom.predict(mu, q0, t_all, method=\"BDF\")\n", "\n", - " Parameters\n", - " ----------\n", - " Zlist : list((n, k) ndarrays)\n", - " List of reduced-order model solutions.\n", - " labels : list(str)\n", - " Labels for each of the reduced-order models.\n", - " \"\"\"\n", - " fig, ax = plt.subplots(1, 1)\n", - "\n", - " ax.semilogy(t, projerr_in_time, \"C3\", label=\"Projection Error\")\n", - " colors = [\"C0\", \"C5\"]\n", - " for Z, label, c in zip(Zlist, labels, colors[: 
len(Zlist)]):\n", - " rel_err = opinf.post.lp_error(Q_all, Z, normalize=True)[1]\n", - " plt.semilogy(t, rel_err, c, label=label)\n", - "\n", - " ax.set_xlim(t[0], t[-1])\n", - " ax.set_xlabel(r\"$t$\")\n", - " ax.set_ylabel(\"Normalized absolute error\")\n", - " ax.legend(loc=\"lower right\")\n", - " plt.show()" + " plot_two_datasets(\n", + " Q_FOM,\n", + " \"Full-order model solution\",\n", + " Q_ROM,\n", + " \"Reduced-order model solution\",\n", + " )\n", + " plt.show()\n", + " errors.append(opinf.post.frobenius_error(Q_FOM, Q_ROM)[1])" ] }, { @@ -670,924 +516,20 @@ "metadata": {}, "outputs": [], "source": [ - "plot_errors_over_time([Q_ROM], [\"ROM Error\"])" + "for mu, err in zip(test_parameters, errors):\n", + " print(f\"Test parameter mu = {mu:.6f}: error = {err:.4%}\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Comparison with Intrusive Projection" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In the limit as the amount of training data $k$ and the dimension $r$ increases, the reduced operators $\\widehat{\\mathbf{A}}$ and $\\widehat{\\mathbf{B}}$ learned through OpInf converge to the corresponding operators obtained through _intrusive projection_,\n", - "\n", - "\\begin{align*}\n", - " \\widetilde{\\mathbf{A}} &= \\mathbf{V}_{r}^{\\mathsf{T}} \\mathbf{A} \\mathbf{V}_{r},\n", - " &\n", - " \\widetilde{\\mathbf{B}} &= \\mathbf{V}_{r}^{\\mathsf{T}}\\mathbf{B}.\n", - "\\end{align*}\n", - "\n", - "Computing $\\widetilde{\\mathbf{A}}$ and $\\widetilde{\\mathbf{B}}$ is considered \"intrusive\" because it requires explicit access to the full-order operators $\\mathbf{A}$ and $\\mathbf{B}$." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "Vr = basis.entries\n", - "Atilde = Vr.T @ A @ Vr\n", - "Btilde = Vr.T @ B\n", - "q0_ = basis.compress(q0)\n", - "Q_ROM_intrusive = basis.decompress(\n", - " implicit_euler(t, q0_, Atilde, Btilde, U_all)\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "fig, [ax1, ax2] = plt.subplots(1, 2)\n", - "plot_heat_data(Q_ROM, \"OpInf ROM solution\", ax1)\n", - "plot_heat_data(Q_ROM_intrusive, \"Intrusive ROM solution\", ax2)\n", - "ax1.legend([])\n", - "plt.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "rel_froerr_intrusive = opinf.post.frobenius_error(Q_all, Q_ROM_intrusive)[1]\n", - "\n", - "print(\n", - " \"Relative Frobenius-norm errors\",\n", - " \"-\" * 33,\n", - " f\"projection error:\\t{rel_froerr_projection:%}\",\n", - " f\"OpInf ROM error:\\t{rel_froerr_opinf:%}\",\n", - " f\"intrusive ROM error:\\t{rel_froerr_intrusive:%}\",\n", - " sep=\"\\n\",\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plot_errors_over_time(\n", - " [Q_ROM, Q_ROM_intrusive],\n", - " [\"OpInf ROM Error\", \"Intrusive ROM Error\"],\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's repeat the experiment with different choices of $r$ to see how the size of the ROM affects its accuracy." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "full_order_operators = [\n", - " opinf.operators.LinearOperator(A.toarray()),\n", - " opinf.operators.InputOperator(B),\n", - "]\n", - "\n", - "\n", - "def run_trial(r):\n", - " \"\"\"Do OpInf / intrusive ROM prediction with r basis vectors.\"\"\"\n", - " basis.set_dimension(num_vectors=r)\n", - " q0_ = basis.compress(q0)\n", - " Q_compressed = basis.compress(Q)\n", - " Q_train, Qdot_train, U_train = opinf.ddt.bwd1(Q_compressed, dt, U_all[:k])\n", - "\n", - " intrusive_reduced_operators = [\n", - " op.galerkin(basis.entries) for op in full_order_operators\n", - " ]\n", - "\n", - " # Construct and simulate the intrusive ROM.\n", - " model_intrusive = opinf.models.ContinuousModel(intrusive_reduced_operators)\n", - " Q_ROM_intrusive = basis.decompress(\n", - " implicit_euler(\n", - " t,\n", - " q0_,\n", - " model_intrusive.A_.entries,\n", - " model_intrusive.B_.entries,\n", - " U_all,\n", - " )\n", - " )\n", - "\n", - " # Construct and simulate the operator inference ROM.\n", - " model_opinf = opinf.models.ContinuousModel(\"AB\").fit(\n", - " states=Q_train,\n", - " ddts=Qdot_train,\n", - " inputs=U_train,\n", - " )\n", - " Q_ROM_opinf = basis.decompress(\n", - " implicit_euler(\n", - " t,\n", - " q0_,\n", - " model_opinf.A_.entries,\n", - " model_opinf.B_.entries,\n", - " U_all,\n", - " )\n", - " )\n", - "\n", - " # Calculate errors.\n", - " projection_error = basis.projection_error(Q_all, relative=True)\n", - " intrusive_error = opinf.post.frobenius_error(Q_all, Q_ROM_intrusive)[1]\n", - " opinf_error = opinf.post.frobenius_error(Q_all, Q_ROM_opinf)[1]\n", - "\n", - " return projection_error, intrusive_error, opinf_error" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "hide-input" - ] - }, - "outputs": [], - "source": [ - "def plot_state_error(rmax, runner, ylabel):\n", - " \"\"\"Run the experiment for r = 
1, ..., rmax and plot results.\"\"\"\n", - " rs = np.arange(1, rmax + 1)\n", - " err_projection, err_intrusive, err_opinf = zip(*[runner(r) for r in rs])\n", - "\n", - " _, ax = plt.subplots(1, 1)\n", - " ax.semilogy(\n", - " rs,\n", - " err_projection,\n", - " \"C3-\",\n", - " label=\"projection error\",\n", - " lw=1,\n", - " )\n", - " ax.semilogy(\n", - " rs,\n", - " err_intrusive,\n", - " \"C5+-\",\n", - " label=\"intrusive ROM error\",\n", - " lw=1,\n", - " mew=2,\n", - " )\n", - " ax.semilogy(\n", - " rs,\n", - " err_opinf,\n", - " \"C0o-\",\n", - " label=\"OpInf ROM error\",\n", - " lw=1,\n", - " mfc=\"none\",\n", - " mec=\"C0\",\n", - " mew=1.5,\n", - " )\n", - "\n", - " ax.set_xlim(rs.min(), rs.max())\n", - " ax.set_xticks(rs, [str(int(r)) for r in rs])\n", - " ax.set_xlabel(r\"Reduced dimension $r$\")\n", - " ax.set_ylabel(ylabel)\n", - " ax.grid(ls=\":\")\n", - " ax.legend(loc=\"upper right\", fontsize=14, frameon=True, framealpha=1)\n", - " plt.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plot_state_error(15, run_trial, \"Relative Frobenius-norm error\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ":::{admonition} Takeaway\n", - ":class: attention\n", - "In this case, the operator inference and intrusive ROMs give essentially the same result.\n", - "However, the operator inference ROM successfully emulates the FOM **without explicit access to** $\\mathbf{A}$ **and** $\\mathbf{B}$.\n", - ":::" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ":::{admonition} On Convergence\n", - ":class: warning\n", - "The figure above conveys a sense of convergence: as the reduced dimension $r$ increases, the ROM error decreases. In more complex problems, **the error does not always decrease monotonically as $r$ increases**. 
In fact, at some point as $r$ increases performance often deteriorates significantly due to poor conditioning in the operator inference regression. In practice, choose a reduced dimension $r$ that balances solution accuracy with computational speed, not too small but also not too large.\n", - ":::" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## New Boundary Conditions" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Our heat equation has Dirichlet boundary conditions given by\n", - "\n", - "$$\n", - "q(0,t;\\mu) = q(L,t;\\mu) = u(t).\n", - "$$\n", - "\n", - "In this section we consider the role of $u(t)$, which governs the boundary equations." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ":::{admonition} Objective\n", - ":class: attention\n", + ":::{admonition} Stay Tuned\n", + ":class: note\n", "\n", - "Construct a reduced-order model (ROM) of the heat equation that can be used for various sets of boundary conditions. We will observe data for some $u(t)$ and use the ROM to predict the solution for new choices of $u(t)$.\n", + "More examples are forthcoming.\n", ":::" ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Note that the full-order model defined in the previous section is valid for arbitrary $u(t)$." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Training Data Generation" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Define several sets of boundary condition inputs.\n", - "Us_all = [\n", - " np.ones_like(t), # u(t) = 1.0\n", - " np.exp(-t), # u(t) = e^(-t)\n", - " 1 + t**2 / 2, # u(t) = 1 + .5 t^2\n", - " 1 - np.sin(np.pi * t) / 2, # u(t) = 1 - sin(πt)/2\n", - " 1 - np.sin(3 * np.pi * t) / 3, # u(t) = 1 - sin(3πt)/2\n", - " 1 + 25 * (t * (t - 1)) ** 3, # u(t) = 1 + 25(t(t - 1))^3\n", - " 1 + np.sin(np.pi * t) * np.exp(-2 * t), # u(t) = 1 + sin(πt)e^(-t)\n", - "]\n", - "\n", - "k = 300 # Number of training snapshots." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Note that $u(0) = 1$ for each of our boundary inputs, which is consistent with the initial condition `q0` used earlier.\n", - "We will gather data for the first few inputs, learn a ROM from the data, and test the ROM on the remaining inputs." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Split into training / testing sets.\n", - "Us_all_train = Us_all[:4]\n", - "Us_all_test = Us_all[4:]\n", - "\n", - "# Visualize the input functions.\n", - "fig, [ax1, ax2] = plt.subplots(1, 2)\n", - "c = 0\n", - "for U in Us_all_train:\n", - " ax1.plot(t, U, color=f\"C{c}\")\n", - " c += 1\n", - "for U in Us_all_test:\n", - " ax2.plot(t, U, color=f\"C{c}\")\n", - " c += 1\n", - "\n", - "ax1.set_title(\"Training inputs\")\n", - "ax2.set_title(\"Testing inputs\")\n", - "ax1.axvline(t[k], color=\"k\")\n", - "for ax in (ax1, ax2):\n", - " ax.set_xlim(0, 1)\n", - " ax.set_xlabel(r\"$t$\")\n", - " ax.set_ylabel(r\"$u(t)$\")\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We only record the first $k$ snapshots corresponding to each of the training inputs, so we are still predicting in time as in the previous section." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Compute snapshots by solving the equation with implicit_euler().\n", - "Qs_all = [implicit_euler(t, q0, A, B, U) for U in Us_all]\n", - "Qs_all_train = Qs_all[: len(Us_all_train)]\n", - "Qs_all_test = Qs_all[len(Us_all_train) :]\n", - "\n", - "# Retain only the first k snapshots/inputs for training the ROM.\n", - "t_train = t[:k] # Temporal domain for training snapshots.\n", - "Qs = [Q[:, :k] for Q in Qs_all_train] # Observed snapshots.\n", - "\n", - "# Compute time derivatives (dq/dt) for each snapshot and stack training data.\n", - "Qs_train, Qdots_train, Us_train = [\n", - " np.hstack(X)\n", - " for X in zip(\n", - " *[opinf.ddt.bwd1(Q, dt, U[:k]) for Q, U in zip(Qs, Us_all_train)]\n", - " )\n", - "]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### ROM Construction" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - 
"outputs": [], - "source": [ - "# Compute a basis from all of the training snapshots.\n", - "basis = opinf.basis.PODBasis(residual_energy=1e-8).fit(Qs)\n", - "print(basis)\n", - "\n", - "# Express the initial condition in the coordinates of the new basis.\n", - "q0_ = basis.compress(q0)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Train a reduced-order model using the training data.\n", - "model = opinf.models.ContinuousModel(\"AB\")\n", - "model.fit(\n", - " states=basis.compress(Qs_train),\n", - " ddts=basis.compress(Qdots_train),\n", - " inputs=Us_train,\n", - ")\n", - "print(model)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### ROM Evaluation" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We now test the learned ROM on both the training and testing inputs." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def plot_errors_over_time_inputs(Q_ROMs, Q_trues, cidx=0):\n", - " \"\"\"Plot normalized absolute projection error and ROM errors\n", - " as a function of time.\n", - "\n", - " Parameters\n", - " ----------\n", - " Q_ROMs : list((n, k) ndarrays)\n", - " List of reduced-order model solutions.\n", - " Q_trues : list(str)\n", - " List of full-order model solutions.\n", - " \"\"\"\n", - " _, ax = plt.subplots(1, 1)\n", - "\n", - " for Q_ROM, Q_true in zip(Q_ROMs, Q_trues):\n", - " rel_err = opinf.post.lp_error(Q_true, Q_ROM, normalize=True)[1]\n", - " plt.semilogy(t, rel_err, color=f\"C{cidx}\")\n", - " cidx += 1\n", - "\n", - " ax.set_xlim(t[0], t[-1])\n", - " ax.set_xlabel(r\"$t$\")\n", - " ax.set_ylabel(\"Normalized absolute error\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Test ROM accuracy on the training inputs.\n", - "Qs_ROM_train = [\n", - " basis.decompress(\n", - " 
implicit_euler(t, q0_, model.A_.entries, model.B_.entries, U)\n", - " )\n", - " for U in Us_all_train\n", - "]\n", - "plot_errors_over_time_inputs(Qs_ROM_train, Qs_all_train)\n", - "plt.title(\"ROM error with training inputs\")\n", - "\n", - "# Test ROM accuracy on the testing inputs.\n", - "Qs_ROM_test = [\n", - " basis.decompress(\n", - " implicit_euler(t, q0_, model.A_.entries, model.B_.entries, U)\n", - " )\n", - " for U in Us_all_test\n", - "]\n", - "plot_errors_over_time_inputs(Qs_ROM_test, Qs_all_test, cidx=len(Qs_ROM_train))\n", - "plt.title(\"ROM error with testing inputs\")\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In this experiment, the training and testing error are similar and small (less than 0.1%) throughout the time domain. We conclude this section by checking the average ROM error on the test inputs as a function of basis size." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def run_trial_inputs(r):\n", - " \"\"\"Do OpInf / intrusive ROM prediction with r basis vectors.\"\"\"\n", - " basis.set_dimension(num_vectors=r)\n", - " q0_ = basis.compress(q0)\n", - "\n", - " # Construct the intrusive ROM.\n", - " model_intrusive = opinf.models.ContinuousModel(\n", - " [op.galerkin(basis.entries) for op in full_order_operators]\n", - " )\n", - "\n", - " # Construct the operator inference ROM from the training data.\n", - " model_opinf = opinf.models.ContinuousModel(operators=\"AB\")\n", - " model_opinf.fit(\n", - " states=basis.compress(Qs_train),\n", - " ddts=basis.compress(Qdots_train),\n", - " inputs=Us_train,\n", - " )\n", - "\n", - " # Test the ROMs at each testing input.\n", - " projection_error, intrusive_error, opinf_error = 0, 0, 0\n", - " for Q, U in zip(Qs_all_test, Us_all_test):\n", - " # Simulate the intrusive ROM for this testing input.\n", - " Q_ROM_intrusive = basis.decompress(\n", - " implicit_euler(\n", - " t,\n", - " 
q0_,\n", - " model_intrusive.A_.entries,\n", - " model_intrusive.B_.entries,\n", - " U,\n", - " )\n", - " )\n", - "\n", - " # Simulate the operator inference ROM for this testing input.\n", - " Q_ROM_opinf = basis.decompress(\n", - " implicit_euler(\n", - " t, q0_, model_opinf.A_.entries, model_opinf.B_.entries, U\n", - " )\n", - " )\n", - "\n", - " # Calculate errors.\n", - " projection_error += basis.projection_error(Q, relative=True)\n", - " intrusive_error += opinf.post.frobenius_error(Q, Q_ROM_intrusive)[1]\n", - " opinf_error += opinf.post.frobenius_error(Q, Q_ROM_opinf)[1]\n", - "\n", - " # Average the relative errors.\n", - " projection_error /= len(Us_all_test)\n", - " intrusive_error /= len(Us_all_test)\n", - " opinf_error /= len(Us_all_test)\n", - "\n", - " return projection_error, intrusive_error, opinf_error" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plot_state_error(\n", - " 15,\n", - " run_trial_inputs,\n", - " \"Average relative\\nFrobenius-norm error\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This experiment shows that the operator inference ROMs is robust to new boundary conditions; in other words, the ROM learns an input operator $\\widehat{\\mathbf{B}}$ that performs well for multiple choices of the input $u(t)$." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Prediction in Parameter Space" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Recall the governing equation,\n", - "\n", - "$$\n", - " \\frac{\\partial}{\\partial t} q(x,t;{\\color{teal}\\mu})\n", - " = {\\color{teal}\\mu}\\frac{\\partial^2}{\\partial x^2}q(x,t;{\\color{teal}\\mu}).\n", - "$$\n", - "\n", - "In this section we examine the role of the constant $\\mu > 0$, the heat diffusivity parameter." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - ":::{admonition} Objective\n", - ":class: attention\n", - "\n", - "Construct a ROM of the heat equation that can be solved for different choices of the diffusivity parameter $\\mu > 0$.\n", - "We will observe data for a few values of $\\mu$ and use the ROM to predict the solution for new values of $\\mu$. As before, we also aim to be predictive in time.\n", - ":::" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Full-order Model Definition" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We solved this problem earlier for fixed $\\mu = 1$.\n", - "For variable $\\mu$, {eq}`eq_heat_fom_parametric` defines the full-order model:\n", - "\n", - "$$\n", - " \\frac{\\text{d}}{\\text{d}t}\\mathbf{q}(t;\\mu)\n", - " = \\mathbf{A}(\\mu)\\mathbf{q}(t;\\mu) + \\mathbf{B}(\\mu)u(t),\n", - " \\qquad\n", - " \\mathbf{q}(0;\\mu)\n", - " = \\mathbf{q}_0.\n", - "$$\n", - "\n", - "Note that $\\mathbf{A}(\\mu) = \\mu \\mathbf{A}(1)$ and $\\mathbf{B}(\\mu) = \\mu \\mathbf{B}(1)$, and that $\\mathbf{A}(1)$ and $\\mathbf{B}(1)$ are the full-order operators we constructed previously." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Training Data Generation" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We consider the parameter domain $\\mathcal{D} = [.1, 10] \\subset \\mathbb{R}$.\n", - "Taking $s$ logarithmically spaced samples $\\{\\mu_i\\}_{i=1}^{s}\\subset\\mathcal{D}$, we solve the full-order model over $[0, T']$ for each parameter sample.\n", - "For each parameter $\\mu_{i}$, the resulting snapshots matrix is denoted as $\\mathbf{Q}(\\mu_{i})\\in \\mathbb{R}^{n \\times k}$.\n", - "We choose $s = 10$ training parameters in the following experiment." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Get s logarithmically spaced paraneter values from D = [.1, 10].\n", - "s = 10 # Number of parameter samples.\n", - "params = np.logspace(-1, 1, s)\n", - "params" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Retain only the first k snapshots/inputs for training the ROM.\n", - "k = 600 # Number of training snapshots.\n", - "t_train = t[:k] # Temporal domain for training snapshots.\n", - "U_train = U_all[:k]\n", - "\n", - "# Solve the full-order model at each of the parameter samples.\n", - "Qs = [implicit_euler(t_train, q0, p * A, p * B, U_train) for p in params]\n", - "Qs_train, Qdots_train, Us_train = zip(\n", - " *[opinf.ddt.bwd1(Q, dt, U_train) for Q in Qs]\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### ROM Construction" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "A (global) POD basis can be constructed from the concatenation of the individual snapshot matrices,\n", - "\n", - "$$\n", - " \\mathbf{Q}\n", - " = \\left[~\\mathbf{Q}(\\mu_1)~\\cdots~\\mathbf{Q}(\\mu_s)~\\right]\n", - " \\in\\mathbb{R}^{n \\times sk}.\n", - "$$\n", - "\n", - "We can select the reduced dimension $r$ as before by examining the residual energy of the singular values." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Compute the POD basis, using the residual energy decay to select r.\n", - "basis = opinf.basis.PODBasis(residual_energy=1e-8).fit(Qs)\n", - "print(basis)\n", - "\n", - "basis.plot_energy(right=30)\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Alternatively, we could choose $r$ so that the average relative projection error,\n", - "\n", - "$$\n", - " \\text{avgerr}_\\text{proj} = \\frac{1}{s}\\sum_{i=1}^{s}\\frac{||\\mathbf{Q}(\\mu_i) - \\mathbf{V}_r \\mathbf{V}_r^{\\top}\\mathbf{Q}(\\mu_i)||_F}{||\\mathbf{Q}(\\mu_i)||_F},\n", - "$$\n", - "\n", - "is below a certain threshold, say $10^{-5}$." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def average_relative_projection_error(r):\n", - " \"\"\"Compute the average relative projection error with r basis vectors.\"\"\"\n", - " oldr = basis.reduced_state_dimension\n", - " basis.set_dimension(num_vectors=r)\n", - " avgerr = np.mean([basis.projection_error(Q, relative=True) for Q in Qs])\n", - " basis.set_dimension(num_vectors=oldr)\n", - " return avgerr" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [ - "hide-input" - ] - }, - "outputs": [], - "source": [ - "rs = np.arange(1, 21)\n", - "errors = [average_relative_projection_error(r) for r in rs]\n", - "\n", - "fig, ax = plt.subplots(1, 1)\n", - "ax.axhline(1e-5, color=\"gray\", lw=1)\n", - "ax.axvline(10, color=\"gray\", lw=1)\n", - "ax.semilogy(rs, errors, \"C3.-\", ms=10)\n", - "\n", - "ax.set_xticks(rs[::2])\n", - "ax.set_xlabel(r\"$r$\")\n", - "ax.set_ylabel(\"Average relative\\nprojection error\")\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Based on these criteria, we choose $r = 10$." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "basis.set_dimension(num_vectors=10)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Interpolatory Operator Inference" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "There are several strategies to account for the parameter $\\mu$. The reduced-order operators obtained through Galerkin projection are given by\n", - "\n", - "$$\n", - " \\widetilde{\\mathbf{A}}(\\mu)\n", - " = \\mathbf{V}_{r}^{\\mathsf{T}} \\mathbf{A}(\\mu) \\mathbf{V}_{r},\n", - " \\qquad\n", - " \\widetilde{\\mathbf{B}}(\\mu)\n", - " = \\mathbf{V}_{r}^{\\mathsf{T}} \\mathbf{B}(\\mu).\n", - "$$\n", - "\n", - "Here, we perform interpolation on the entries of the reduced-order operators learned for each parameter sample. This means we learn a separate ROM for each $\\mu_i$, $i=1, \\ldots, s$, obtaining reduced-order operators $\\widehat{\\mathbf{A}}(\\mu_{i})$ and $\\widehat{\\mathbf{B}}(\\mu_{i})$.\n", - "Then, for a new parameter value $\\bar{\\mu}\\in\\mathcal{D}$, we interpolate the entries of the learned reduced model operators to create a new reduced model corresponding to $\\bar{\\mu}\\in\\mathcal{D}$.\n", - "The {class}`opinf.models.InterpContinuousModel` class encapsulates this process." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Learn reduced models for each parameter value.\n", - "model = opinf.models.InterpContinuousModel(\"AB\")\n", - "model.fit(\n", - " parameters=params,\n", - " states=basis.compress(Qs_train),\n", - " ddts=basis.compress(Qdots_train),\n", - " inputs=Us_train,\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### ROM Evaluation" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To test the ROM, we take $s - 1$ parameter values that lie between the training parameter values and compute the average relative state error,\n", - "\n", - "$$\n", - " \\text{avgerr}_\\text{ROM} = \\frac{1}{s - 1}\\sum_{i=1}^{s - 1}\\frac{||\\mathbf{Q}(\\mu_i) - \\mathbf{Q}_{\\text{ROM}}(\\mu_i)||_F}{||\\mathbf{Q}(\\mu_i)||_F},\n", - "$$\n", - "\n", - "where $\\mathbf{Q}_{\\text{ROM}}(\\mu_{i})$ is the ROM solution at the $i$-th test parameter value $\\mu_{i}$." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "params_test = np.sqrt(params[:-1] * params[1:])\n", - "params_test" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "full_order_model = opinf.models.InterpContinuousModel(\n", - " [\n", - " opinf.operators.InterpLinearOperator(\n", - " params, [p * A.toarray() for p in params]\n", - " ),\n", - " opinf.operators.InterpInputOperator(params, [p * B for p in params]),\n", - " ]\n", - ")\n", - "\n", - "\n", - "def run_trial_parametric(r):\n", - " \"\"\"Do OpInf / intrusive ROM prediction with r basis vectors.\"\"\"\n", - " basis.set_dimension(num_vectors=r)\n", - " q0_ = basis.compress(q0)\n", - "\n", - " # Compute the intrusive ROM.\n", - " model_intrusive = full_order_model.galerkin(basis.entries)\n", - "\n", - " # Learn an operator inference ROM from the training data.\n", - " model_opinf = opinf.models.InterpContinuousModel(\n", - " operators=\"AB\",\n", - " solver=opinf.lstsq.L2Solver(1e-12),\n", - " ).fit(\n", - " parameters=params,\n", - " states=basis.compress(Qs_train),\n", - " ddts=basis.compress(Qdots_train),\n", - " inputs=Us_train,\n", - " )\n", - "\n", - " # Test the ROM at each parameter in the test set.\n", - " projc_error, intru_error, opinf_error = 0, 0, 0\n", - " for p in params_test:\n", - " # Solve the FOM at this parameter value.\n", - " Ap = p * A\n", - " Bp = p * B\n", - " Q_FOM = implicit_euler(t, q0, Ap, Bp, U_all)\n", - "\n", - " # Simulate the intrusive ROM at this parameter value.\n", - " model = model_intrusive.evaluate(p)\n", - " Q_ROM_intrusive = basis.decompress(\n", - " implicit_euler(t, q0_, model.A_.entries, model.B_.entries, U_all)\n", - " )\n", - "\n", - " # Simulate the interpolating OpInf ROM at this parameter value.\n", - " model = model_opinf.evaluate(p)\n", - " Q_ROM_opinf = basis.decompress(\n", - " implicit_euler(t, q0_, model.A_.entries, 
model.B_.entries, U_all)\n", - " )\n", - "\n", - " # Calculate errors.\n", - " projc_error += basis.projection_error(Q_FOM, relative=True)\n", - " intru_error += opinf.post.frobenius_error(Q_FOM, Q_ROM_intrusive)[1]\n", - " opinf_error += opinf.post.frobenius_error(Q_FOM, Q_ROM_opinf)[1]\n", - "\n", - " # Average the relative errors.\n", - " projc_error /= len(params_test)\n", - " intru_error /= len(params_test)\n", - " opinf_error /= len(params_test)\n", - "\n", - " return projc_error, intru_error, opinf_error" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plot_state_error(\n", - " 14,\n", - " run_trial_parametric,\n", - " \"Average relative\\nFrobenius-norm error\",\n", - ")" - ] } ], "metadata": { From b6db39366fc1546a151d7123e75aca5471e4db05 Mon Sep 17 00:00:00 2001 From: Shane Date: Tue, 10 Sep 2024 16:39:33 -0600 Subject: [PATCH 48/48] skip TimedBlock tests on Windows --- tests/utils/test_timer.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tests/utils/test_timer.py b/tests/utils/test_timer.py index d34d1f91..22b42398 100644 --- a/tests/utils/test_timer.py +++ b/tests/utils/test_timer.py @@ -4,10 +4,22 @@ import os import time import pytest +import platform import opinf +SYSTEM = platform.system() + + +def skipwindows(func): + + def skip(self, *args, **kwargs): + pass + + return skip if SYSTEM == "Windows" else func + + class MyException(Exception): pass @@ -17,6 +29,7 @@ class TestTimedBlock: Timer = opinf.utils.TimedBlock + @skipwindows def test_standard(self, message="TimedBlock test, no timelimit"): # No time limit. with self.Timer() as obj: @@ -29,6 +42,7 @@ def test_standard(self, message="TimedBlock test, no timelimit"): pass assert obj.message == message + @skipwindows def test_timeout(self, message="TimedBlock test with problems"): # Time limit expires. 
with pytest.raises(TimeoutError) as ex: @@ -42,6 +56,7 @@ def test_timeout(self, message="TimedBlock test with problems"): raise MyException("failure in the block") assert ex.value.args[0] == "failure in the block" + @skipwindows def test_log( self, message: str = "TimedBlock test with log",