diff --git a/.github/workflows/test_src.yml b/.github/workflows/test_src.yml
index 92763b62..ac037bab 100644
--- a/.github/workflows/test_src.yml
+++ b/.github/workflows/test_src.yml
@@ -26,6 +26,7 @@ jobs:
           - "3.10"
           - "3.11"
           - "3.12"
+          - "3.13"
     runs-on: ${{ matrix.os }}
     steps:
       - uses: actions/checkout@v4
diff --git a/docs/literature.bib b/docs/literature.bib
index 6652d2c7..769fac4f 100644
--- a/docs/literature.bib
+++ b/docs/literature.bib
@@ -656,3 +656,13 @@ @article{vijaywargiya2025tensoropinf
   doi = {10.48550/arXiv.2502.10888},
   category = {structure}
 }
+
+@article{kang2025semiconductor,
+  title = {Parametric {O}perator {I}nference to simulate the purging process in semiconductor manufacturing},
+  author = {Seunghyon Kang and Hyeonghun Kim and Boris Kramer},
+  journal = {arXiv},
+  volume = {2504.03990},
+  year = {2025},
+  doi = {10.48550/arXiv.2504.03990},
+  category = {application}
+}
diff --git a/docs/source/opinf/changelog.md b/docs/source/opinf/changelog.md
index b2320e7c..1047e476 100644
--- a/docs/source/opinf/changelog.md
+++ b/docs/source/opinf/changelog.md
@@ -5,6 +5,13 @@
 New versions may introduce substantial new features or API adjustments.
 :::
 
+## Version 0.5.14
+
+- `fit_regselect_*()` now catches any errors that occur when the model uses `refit()`.
+- Tikhonov-type least-squares solvers no longer require the regularizer in the constructor, but they raise an `AttributeError` in `solve()` (and other methods) if the regularizer has not been set. This makes `fit_regselect_*()` much less cumbersome to use.
+- `PODBasis.fit(Q)` raises a warning when using the `"method-of-snapshots"`/`"eigh"` strategy if $n < k$ for $\mathbf{Q}\in\mathbb{R}^{n \times k}.$ In this case, computing the $n \times k$ SVD directly is likely more efficient than solving the $k \times k$ eigenvalue problem.
+- Added Python 3.13 to the list of tested Python versions.
+
 ## Version 0.5.13
 
 Bayesian operator inference:
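The deferred-regularizer workflow described in the changelog above looks roughly like the sketch below. This is an illustration of the intended usage, not code from the patch; it assumes the public `opinf.lstsq.L2Solver` interface and the `"solver regularizer not set"` message introduced in the `_tikhonov.py` hunks further down.

```python
import numpy as np
import opinf

# Small synthetic least-squares problem: D is (k, d), Z is (r, k).
D = np.random.random((20, 6))
Z = np.random.random((3, 20))

solver = opinf.lstsq.L2Solver()   # No regularizer required up front (new in 0.5.14).
solver.fit(D, Z)

try:
    solver.solve()                # Raises because the regularizer is still unset.
except AttributeError as ex:
    print(ex)                     # "solver regularizer not set"

solver.regularizer = 1e-2         # Set (or sweep) the regularizer later...
Ohat = solver.solve()             # ...then solve as usual.
print(Ohat.shape)                 # (3, 6)
```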
""" - n_states = states.shape[1] + state_dimension, n_states = states.shape + if n_states > state_dimension: + warnings.warn( + "state dimension < number of states, " + "method-of-snapshots / eigh may be inefficient", + errors.OpInfWarning, + ) if inner_product_matrix is None: gramian = states.T @ (states / n_states) else: @@ -113,7 +120,7 @@ def method_of_snapshots( eigvals = eigvals[::-1] eigvecs = eigvecs[:, ::-1] - # NOTE: By definition the Gramian is symmetric positive semi-definite. + # By definition the Gramian is symmetric positive semi-definite. # If any eigenvalues are smaller than zero, they are only measuring # numerical error and can be truncated. positives = eigvals > max(minthresh, abs(np.min(eigvals))) diff --git a/src/opinf/lstsq/_tikhonov.py b/src/opinf/lstsq/_tikhonov.py index 5c9b2a75..6636b44c 100644 --- a/src/opinf/lstsq/_tikhonov.py +++ b/src/opinf/lstsq/_tikhonov.py @@ -129,10 +129,10 @@ def _save(self, savefile, overwrite=False, extras=tuple()): Names of additional attributes to save. """ with utils.hdf5_savehandle(savefile, overwrite) as hf: - reg = self.regularizer - if self.__class__ is L2Solver: - reg = [reg] - hf.create_dataset("regularizer", data=reg) + if (reg := self.regularizer) is not None: + if self.__class__ is L2Solver: + reg = [reg] + hf.create_dataset("regularizer", data=reg) if isinstance(self, TikhonovSolver): meta = hf.create_dataset("meta", shape=(0,)) @@ -165,9 +165,11 @@ def _load(cls, loadfile: str, extras=tuple()): """ with utils.hdf5_loadhandle(loadfile) as hf: - reg = hf["regularizer"][:] - if cls is L2Solver: - reg = reg[0] + reg = None + if "regularizer" in hf: + reg = hf["regularizer"][:] + if cls is L2Solver: + reg = reg[0] options = cls._load_dict(hf, "options") kwargs = dict( @@ -230,7 +232,7 @@ class L2Solver(_BaseRegularizedSolver): See :func:`scipy.linalg.svd()`. """ - def __init__(self, regularizer, lapack_driver: str = "gesdd"): + def __init__(self, regularizer=None, lapack_driver: str = "gesdd"): """Store the regularizer and initialize attributes.""" _BaseRegularizedSolver.__init__(self) self.regularizer = regularizer @@ -249,10 +251,11 @@ def regularizer(self): @regularizer.setter def regularizer(self, reg): """Set the regularization constant.""" - if not np.isscalar(reg): - raise TypeError("regularization constant must be a scalar") - if reg < 0: - raise ValueError("regularization constant must be nonnegative") + if reg is not None: + if not np.isscalar(reg): + raise TypeError("regularization constant must be a scalar") + if reg < 0: + raise ValueError("regularization constant must be nonnegative") self.__reg = reg @property @@ -265,10 +268,13 @@ def options(self): def __str__(self): """String representation: dimensions + solver options.""" kwargs = self._print_kwargs(self.options) - if np.isscalar(self.regularizer): - regstr = f"{self.regularizer:.4e}" + if self.regularizer is not None: + if np.isscalar(self.regularizer): + regstr = f"{self.regularizer:.4e}" + else: + regstr = f"{self.regularizer.shape}" else: - regstr = f"{self.regularizer.shape}" + regstr = "None" return "\n ".join( [ SolverTemplate.__str__(self), @@ -308,6 +314,8 @@ def solve(self) -> np.ndarray: Ohat : (r, d) ndarray Operator matrix :math:`\Ohat` (not its transpose!). 
""" + if self.regularizer is None: + raise AttributeError("solver regularizer not set") svals = self._svals.reshape((-1, 1)) svals_inv = svals / (svals**2 + self.regularizer**2) return (self._ZPhi * svals_inv.T) @ self._PsiT @@ -377,6 +385,8 @@ def regcond(self) -> float: cond : float Condition number of the regularized data matrix. """ + if self.regularizer is None: + raise AttributeError("solver regularizer not set") svals2 = self._svals**2 + self.regularizer**2 return np.sqrt(svals2.max() / svals2.min()) @@ -404,6 +414,8 @@ def regresidual(self, Ohat: np.ndarray) -> np.ndarray: residuals : (r,) ndarray :math:`2`-norm residuals for each row of the operator matrix. """ + if self.regularizer is None: + raise AttributeError("solver regularizer not set") residual = self.residual(Ohat) return residual + (self.regularizer**2 * np.sum(Ohat**2, axis=-1)) @@ -505,13 +517,16 @@ def regularizer(self): @regularizer.setter def regularizer(self, regs): """Set the regularization constants.""" - regs = np.array(regs) - if regs.ndim != 1: - raise ValueError("regularizer must be one-dimensional") - if np.any(regs < 0): - raise ValueError("regularization constants must be nonnegative") + if regs is not None: + regs = np.array(regs) + if regs.ndim != 1: + raise ValueError("regularizer must be one-dimensional") + if np.any(regs < 0): + raise ValueError( + "regularization constants must be nonnegative" + ) self.__regs = regs - if self.r is not None: + if self.r is not None and regs is not None: self._check_regularizer_shape() # Main methods ------------------------------------------------------------ @@ -528,7 +543,8 @@ def fit(self, data_matrix: np.ndarray, lhs_matrix: np.ndarray): If one-dimensional, assume :math:`r = 1`. """ L2Solver.fit(self, data_matrix, lhs_matrix) - self._check_regularizer_shape() + if self.regularizer is not None: + self._check_regularizer_shape() return self def posterior(self): @@ -594,6 +610,8 @@ def regcond(self) -> float: conds : (r,) ndarray Condition numbers of the regularized data matrices. 
""" + if self.regularizer is None: + raise AttributeError("solver regularizer not set") svals2 = self._svals**2 + self.regularizer.reshape((-1, 1)) ** 2 return np.sqrt(svals2.max(axis=1) / svals2.min(axis=1)) @@ -682,7 +700,7 @@ class TikhonovSolver(_BaseRegularizedSolver): def __init__( self, - regularizer, + regularizer=None, method: str = "lstsq", cond: float = None, lapack_driver: str = None, @@ -702,13 +720,16 @@ def options(self): def __str__(self): """String representation: dimensions + solver options.""" kwargs = self._print_kwargs(self.options) - if self.regularizer[0].ndim == 1: - regstr = f" {self.regularizer.shape}" + if self.regularizer is not None: + if self.regularizer[0].ndim == 1: + regstr = f" {self.regularizer.shape}" + else: + regstr = ( + f" {len(self.regularizer)} " + f"{self.regularizer[0].shape} ndarrays" + ) else: - regstr = ( - f" {len(self.regularizer)} " - f"{self.regularizer[0].shape} ndarrays" - ) + regstr = "None" if self.method == "lstsq": kwargs = self._print_kwargs(self.options) spstr = f"solver ('lstsq'): scipy.linalg.lstsq({kwargs})" @@ -734,21 +755,22 @@ def regularizer(self): @regularizer.setter def regularizer(self, G): """Set the regularization matrix.""" - if sparse.issparse(G): - G = G.toarray() - elif not isinstance(G, np.ndarray): - G = np.array(G) + if G is not None: + if sparse.issparse(G): + G = G.toarray() + elif not isinstance(G, np.ndarray): + G = np.array(G) - if G.ndim == 1: - if np.any(G < 0): - raise ValueError( - "diagonal regularizer must be positive semi-definite" - ) - G = np.diag(G) + if G.ndim == 1: + if np.any(G < 0): + raise ValueError( + "diagonal regularizer must be positive semi-definite" + ) + G = np.diag(G) self.__reg = G - if self.d is not None: + if self.d is not None and G is not None: self._check_regularizer_shape() @classmethod @@ -894,7 +916,8 @@ def fit(self, data_matrix: np.ndarray, lhs_matrix: np.ndarray): "Left-hand side" data matrix :math:`\Z` (not its transpose!). """ _BaseRegularizedSolver.fit(self, data_matrix, lhs_matrix) - self._check_regularizer_shape() + if self.regularizer is not None: + self._check_regularizer_shape() D, Z = self.data_matrix, self.lhs_matrix # Pad lhs matrix for "svd" solve. @@ -915,6 +938,8 @@ def solve(self) -> np.ndarray: Ohat : (r, d) ndarray Operator matrix :math:`\Ohat` (not its transpose!). """ + if self.regularizer is None: + raise AttributeError("solver regularizer not set") if self.method == "lstsq": DPad = np.vstack((self.data_matrix, self.regularizer)) Ohat = la.lstsq(DPad, self._ZtPad, **self.options)[0].T @@ -982,6 +1007,8 @@ def regcond(self) -> float: cond : float Condition number of the regularized data matrix. """ + if self.regularizer is None: + raise AttributeError("solver regularizer not set") return np.linalg.cond(np.vstack((self.data_matrix, self.regularizer))) @_require_trained @@ -1008,6 +1035,8 @@ def regresidual(self, Ohat: np.ndarray) -> np.ndarray: residuals : (r,) ndarray :math:`2`-norm residuals for each row of the operator matrix. 
""" + if self.regularizer is None: + raise AttributeError("solver regularizer not set") residual = self.residual(Ohat) return residual + np.sum((self.regularizer @ Ohat.T) ** 2, axis=0) @@ -1138,22 +1167,25 @@ def regularizer(self): @regularizer.setter def regularizer(self, Gs): """Set the regularization matrices.""" - regs = [] - for G in Gs: - if sparse.issparse(G): - G = G.toarray() - elif not isinstance(G, np.ndarray): - G = np.array(G) - if G.ndim == 1: - if np.any(G < 0): - raise ValueError( - "diagonal regularizer must be positive semi-definite" - ) - G = np.diag(G) - regs.append(G) + regs = None + if Gs is not None: + regs = [] + for G in Gs: + if sparse.issparse(G): + G = G.toarray() + elif not isinstance(G, np.ndarray): + G = np.array(G) + if G.ndim == 1: + if np.any(G < 0): + raise ValueError( + "diagonal regularizer must be " + "positive semi-definite" + ) + G = np.diag(G) + regs.append(G) self.__regs = regs - if self.d is not None: + if self.d is not None and Gs is not None: self._check_regularizer_shape() # Main methods ------------------------------------------------------------ @@ -1166,6 +1198,8 @@ def solve(self) -> np.ndarray: Ohat : (r, d) ndarray Operator matrix :math:`\Ohat` (not its transpose!). """ + if self.regularizer is None: + raise AttributeError("solver regularizer not set") Ohat = np.empty((self.r, self.d)) # Solve each independent regression problem (sequentially for now). @@ -1239,6 +1273,8 @@ def regcond(self) -> float: conds : (r,) ndarray Condition numbers for the regularized data matrices. """ + if self.regularizer is None: + raise AttributeError("solver regularizer not set") return np.array( [ np.linalg.cond(np.vstack((self.data_matrix, G))) @@ -1271,6 +1307,8 @@ def regresidual(self, Ohat: np.ndarray) -> np.ndarray: residuals : (r,) ndarray :math:`2`-norm residuals for each row of the operator matrix. """ + if self.regularizer is None: + raise AttributeError("solver regularizer not set") residual = self.residual(Ohat) rg = [np.sum((G @ oi) ** 2) for G, oi in zip(self.regularizer, Ohat)] return residual + np.array(rg) diff --git a/src/opinf/roms/_base.py b/src/opinf/roms/_base.py index b9e3a19a..7fdc9484 100644 --- a/src/opinf/roms/_base.py +++ b/src/opinf/roms/_base.py @@ -651,14 +651,26 @@ def fit_regselect_continuous( ) # Fit the model for the first time. - states = self._fit_and_return_training_data( - parameters=parameters, - states=states, - lhs=ddts, - inputs=inputs, - fit_transformer=fit_transformer, - fit_basis=fit_basis, - ) + initialized = False + for reg in candidates: + self.model.solver.regularizer = regularizer_factory(reg) + try: + states = self._fit_and_return_training_data( + parameters=parameters, + states=states, + lhs=ddts, + inputs=inputs, + fit_transformer=fit_transformer, + fit_basis=fit_basis, + ) + initialized = True + break + except Exception: # pragma: no cover + pass + if not initialized: # pragma: no cover + raise RuntimeError( + "fit() failed with all regularization candidates" + ) # Set up the regularization selection. shifts, limits = self._get_stability_limits(states, stability_margin) @@ -699,7 +711,12 @@ def training_error(reg_params): candidate by solving the model, checking for stability, and comparing to available training data. """ - update_model(reg_params) + try: + update_model(reg_params) + except Exception as ex: + if verbose: + print(f"{type(ex).__name__} in refit(): {ex}") + return np.inf # Pass stability checks. 
diff --git a/src/opinf/roms/_base.py b/src/opinf/roms/_base.py
index b9e3a19a..7fdc9484 100644
--- a/src/opinf/roms/_base.py
+++ b/src/opinf/roms/_base.py
@@ -651,14 +651,26 @@ def fit_regselect_continuous(
         )
 
         # Fit the model for the first time.
-        states = self._fit_and_return_training_data(
-            parameters=parameters,
-            states=states,
-            lhs=ddts,
-            inputs=inputs,
-            fit_transformer=fit_transformer,
-            fit_basis=fit_basis,
-        )
+        initialized = False
+        for reg in candidates:
+            self.model.solver.regularizer = regularizer_factory(reg)
+            try:
+                states = self._fit_and_return_training_data(
+                    parameters=parameters,
+                    states=states,
+                    lhs=ddts,
+                    inputs=inputs,
+                    fit_transformer=fit_transformer,
+                    fit_basis=fit_basis,
+                )
+                initialized = True
+                break
+            except Exception:  # pragma: no cover
+                pass
+        if not initialized:  # pragma: no cover
+            raise RuntimeError(
+                "fit() failed with all regularization candidates"
+            )
 
         # Set up the regularization selection.
         shifts, limits = self._get_stability_limits(states, stability_margin)
@@ -699,7 +711,12 @@ def training_error(reg_params):
             candidate by solving the model, checking for stability, and
             comparing to available training data.
             """
-            update_model(reg_params)
+            try:
+                update_model(reg_params)
+            except Exception as ex:
+                if verbose:
+                    print(f"{type(ex).__name__} in refit(): {ex}")
+                return np.inf
 
             # Pass stability checks.
             for tcase in processed_test_cases:
@@ -863,14 +880,26 @@ def fit_regselect_discrete(
         )
 
         # Fit the model for the first time.
-        states = self._fit_and_return_training_data(
-            parameters=parameters,
-            states=states,
-            lhs=None,
-            inputs=inputs,
-            fit_transformer=fit_transformer,
-            fit_basis=fit_basis,
-        )
+        initialized = False
+        for reg in candidates:
+            self.model.solver.regularizer = regularizer_factory(reg)
+            try:
+                states = self._fit_and_return_training_data(
+                    parameters=parameters,
+                    states=states,
+                    lhs=None,
+                    inputs=inputs,
+                    fit_transformer=fit_transformer,
+                    fit_basis=fit_basis,
+                )
+                initialized = True
+                break
+            except Exception:  # pragma: no cover
+                pass
+        if not initialized:  # pragma: no cover
+            raise RuntimeError(
+                "fit() failed with all regularization candidates"
+            )
 
         # Set up the regularization selection.
         shifts, limits = self._get_stability_limits(states, stability_margin)
@@ -902,7 +931,12 @@ def training_error(reg_params):
             candidate by solving the model, checking for stability, and
             comparing to available training data.
             """
-            update_model(reg_params)
+            try:
+                update_model(reg_params)
+            except Exception as ex:
+                if verbose:
+                    print(f"{type(ex).__name__} in refit(): {ex}")
+                return np.inf
 
             # Pass stability checks.
             for tcase in processed_test_cases:
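The `fit_regselect_*()` changes above implement the changelog item about catching `refit()` errors: the initial fit is retried over the regularization candidates, and `training_error()` returns `np.inf` for any candidate whose refit raises, so the selection search simply skips that candidate. A minimal standalone sketch of the fallback pattern (hypothetical helper names, not the ROM class API):

```python
import numpy as np

def safe_training_error(reg, refit, error_fn):
    """Score one candidate, or return np.inf if refitting fails."""
    try:
        model = refit(reg)
    except Exception as ex:                 # Any solver failure disqualifies reg.
        print(f"{type(ex).__name__} in refit(): {ex}")
        return np.inf
    return error_fn(model)

# Toy search: the smallest candidate "fails", the others are scored normally.
def refit(reg):
    if reg < 1e-10:
        raise np.linalg.LinAlgError("ill-conditioned regression")
    return reg

candidates = [1e-12, 1e-4, 1e-2, 1.0]
errors = [
    safe_training_error(reg, refit, lambda m: abs(m - 1e-2))
    for reg in candidates
]
best = candidates[int(np.argmin(errors))]
print(best)                                 # 0.01
```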
diff --git a/tests/basis/test_pod.py b/tests/basis/test_pod.py
index 33497ef0..644dca5b 100644
--- a/tests/basis/test_pod.py
+++ b/tests/basis/test_pod.py
@@ -358,6 +358,15 @@ def test_fit(self, n=60, k=20, r=4):
             np.eye(r),
         )
 
+        Q = np.random.random((n, 2 * n))
+        with pytest.warns(opinf.errors.OpInfWarning) as wn:
+            basis.fit(Q)
+        assert len(wn) == 1
+        assert wn[0].message.args[0] == (
+            "state dimension < number of states, "
+            "method-of-snapshots / eigh may be inefficient"
+        )
+
     # Visualization -----------------------------------------------------------
     def test_plots(self, n=40, r=4):
         """Lightly test plot_*()."""
diff --git a/tests/lstsq/test_tikhonov.py b/tests/lstsq/test_tikhonov.py
index db60f3f2..f71665e8 100644
--- a/tests/lstsq/test_tikhonov.py
+++ b/tests/lstsq/test_tikhonov.py
@@ -71,6 +71,10 @@ def get_solvers(self):
     # Properties --------------------------------------------------------------
     def test_regularizer(self):
         """Test regularizer property and setter."""
+        solver = self.Solver()
+        assert solver.regularizer is None
+        str(solver)
+
         # Try with nonscalar regularizer.
         with pytest.raises(TypeError) as ex:
             self.Solver([1, 2, 3])
@@ -100,6 +104,11 @@ def _check(o1, o2):
             assert o2.shape == o1.shape
             assert np.allclose(o2, o1)
 
+        solver = self.Solver().fit(D, Z)
+        with pytest.raises(AttributeError) as ex:
+            solver.solve()
+        assert ex.value.args[0] == "solver regularizer not set"
+
         for solver in self.get_solvers():
 
             # Try solving before fitting.
@@ -162,6 +171,12 @@ def _singletest(reg, regcondtrue):
             regcond_true = np.linalg.cond(np.vstack((A, reg * np.eye(d))))
             _singletest(reg, regcond_true)
 
+        # No regularizer.
+        solver = self.Solver().fit(A, B)
+        with pytest.raises(AttributeError) as ex:
+            solver.regcond()
+        assert ex.value.args[0] == "solver regularizer not set"
+
     def test_regresidual(self, k=20, d=11, r=3, ntests=5):
         """Test regresidual()."""
         solver = self.Solver(0)
@@ -207,6 +222,12 @@ def test_regresidual(self, k=20, d=11, r=3, ntests=5):
             ans = np.linalg.norm(A @ x - b) ** 2 + np.linalg.norm(reg * x) ** 2
             assert np.isclose(residual[0], ans)
 
+        # No regularizer.
+        solver = self.Solver().fit(A, B)
+        with pytest.raises(AttributeError) as ex:
+            solver.regresidual(None)
+        assert ex.value.args[0] == "solver regularizer not set"
+
 
 class TestL2DecoupledSolver(_TestBaseRegularizedSolver):
     """Test lstsq._tikhonov.L2DecoupledSolver."""
@@ -222,6 +243,10 @@ def get_solvers(self):
     # Properties --------------------------------------------------------------
     def test_regularizer(self, k=10, d=6, r=3):
         """Test _check_regularizer_shape(), fit(), and regularizer property."""
+        solver = self.Solver()
+        assert solver.regularizer is None
+        str(solver)
+
         solver = self.Solver(np.random.random(r + 1))
         A = np.empty((k, d))
         B = np.empty((r, k))
@@ -260,6 +285,12 @@ def test_solve(self, k=20, d=10):
         Id = np.eye(d)
         ZpadT = np.vstack((Z.T, np.zeros((d, r))))
 
+        solver = self.Solver()
+        solver.fit(D, Z)
+        with pytest.raises(AttributeError) as ex:
+            solver.solve()
+        assert ex.value.args[0] == "solver regularizer not set"
+
         for solver in self.get_solvers():
             Ohat1 = []
             for i, reg in enumerate(solver.regularizer):
@@ -316,6 +347,12 @@ def test_regcond(self, k=20, d=11, r=3):
         conds = [np.linalg.cond(np.vstack((A, reg * Id))) for reg in regs]
         assert np.allclose(solver.regcond(), conds)
 
+        # No regularizer.
+        solver = self.Solver().fit(A, B)
+        with pytest.raises(AttributeError) as ex:
+            solver.regcond()
+        assert ex.value.args[0] == "solver regularizer not set"
+
     def test_regresidual(self, k=20, d=11, r=3):
         """Test lstsq._tikhonov.L2DecoupledSolver.residual()."""
         solver = self.Solver([0] * r)
@@ -350,6 +387,12 @@ def test_regresidual(self, k=20, d=11, r=3):
         )
         assert np.allclose(residual, ans)
 
+        # No regularizer.
+        solver = self.Solver().fit(A, B)
+        with pytest.raises(AttributeError) as ex:
+            solver.regresidual(None)
+        assert ex.value.args[0] == "solver regularizer not set"
+
     def test_save_load_and_copy_via_verify(self, k=20, d=11):
         return super().test_save_load_and_copy_via_verify(k, d, 4)
 
@@ -371,6 +414,10 @@ def get_solvers(self):
     # Properties --------------------------------------------------------------
     def test_regularizer(self, k=20, d=11, r=3):
         """Test _check_regularizer_shape(), regularizer, and method."""
+        solver = self.Solver()
+        assert solver.regularizer is None
+        str(solver)
+
         Z = np.random.random((d, d))
 
         with pytest.raises(ValueError) as ex:
@@ -492,6 +539,12 @@ def test_solve(self, k=40, r=5):
         d = 10
         A = np.random.random((k, d))
         B = np.random.random((r, k))
+
+        solver = self.Solver().fit(A, B)
+        with pytest.raises(AttributeError) as ex:
+            solver.solve()
+        assert ex.value.args[0] == "solver regularizer not set"
+
         Bpad = np.concatenate((B.T, np.zeros((d, r))))
         b = B[0, :]
         bpad = Bpad[:, 0]
@@ -595,6 +648,12 @@ def test_regcond(self, k=20, d=11, r=3, ntests=5):
             solver.regularizer = P
             assert np.isclose(solver.regcond(), cond)
 
+        # No regularizer.
+        solver = self.Solver().fit(A, B)
+        with pytest.raises(AttributeError) as ex:
+            solver.regcond()
+        assert ex.value.args[0] == "solver regularizer not set"
+
     def test_regresidual(self, k=20, d=11, r=3, ntests=5):
         """Test lstsq._tikhonov.TikhonovSolver.residual()."""
         Z = np.zeros(d)
@@ -635,6 +694,12 @@ def test_regresidual(self, k=20, d=11, r=3, ntests=5):
             ans = np.linalg.norm(A @ x - b) ** 2 + np.linalg.norm(P * x) ** 2
             assert np.isclose(residual[0], ans)
 
+        # No regularizer.
+        solver = self.Solver().fit(A, B)
+        with pytest.raises(AttributeError) as ex:
+            solver.regresidual(None)
+        assert ex.value.args[0] == "solver regularizer not set"
+
     def test_save_load_and_copy_via_verify(self, k=20, r=6):
         return super().test_save_load_and_copy_via_verify(k=k, d=10, r=r)
@@ -655,6 +720,10 @@ def get_solvers(self):
     # Properties --------------------------------------------------------------
     def test_regularizer(self, k=10, d=6, r=3):
         """Test _check_regularizer_shape() and regularizer."""
+        solver = self.Solver()
+        assert solver.regularizer is None
+        str(solver)
+
         Z = np.random.random((d, d))
         solver = opinf.lstsq.TikhonovDecoupledSolver([Z] * r)
         A = np.empty((k, d))
@@ -699,9 +768,14 @@ def test_solve(self, k=20, d=10):
         r = len(Ps)
         A = np.random.random((k, d))
         B = np.random.random((r, k))
-        solver = self.Solver(Ps)
+
+        solver = self.Solver().fit(A, B)
+        with pytest.raises(AttributeError) as ex:
+            solver.solve()
+        assert ex.value.args[0] == "solver regularizer not set"
 
         # Try solving before fitting.
+        solver = self.Solver(Ps)
         with pytest.raises(AttributeError) as ex:
             solver.solve()
         assert ex.value.args[0] == "solver not trained, call fit()"
@@ -772,6 +846,12 @@ def test_regcond(self, k=20, d=11, r=3):
         conds = [np.linalg.cond(np.vstack((A, P))) for P in Ps]
         assert np.allclose(solver.regcond(), conds)
 
+        # No regularizer.
+        solver = self.Solver().fit(A, B)
+        with pytest.raises(AttributeError) as ex:
+            solver.regcond()
+        assert ex.value.args[0] == "solver regularizer not set"
+
     def test_residual(self, k=20):
         return super().test_residual(k, d=10, r=5)
 
@@ -793,6 +873,12 @@ def test_regresidual(self, k=20, d=11, r=3):
         )
         assert np.allclose(residual, ans)
 
+        # No regularizer.
+        solver = self.Solver().fit(A, B)
+        with pytest.raises(AttributeError) as ex:
+            solver.regresidual(None)
+        assert ex.value.args[0] == "solver regularizer not set"
+
     def test_save_load_and_copy_via_verify(self, k=20):
         return super().test_save_load_and_copy_via_verify(k=k, d=10, r=5)
diff --git a/tests/utils/test_timer.py b/tests/utils/test_timer.py
index 1a6dede4..8a59cc69 100644
--- a/tests/utils/test_timer.py
+++ b/tests/utils/test_timer.py
@@ -42,6 +42,11 @@ def test_standard(self, message="TimedBlock test, no timelimit"):
             pass
         assert obj.message == message
 
+        # No message.
+        with self.Timer(None) as obj:
+            pass
+        assert obj.message == ""
+
     @skipwindows
     def test_timeout(self, message="TimedBlock test with problems"):
         # Time limit expires.
diff --git a/tox.ini b/tox.ini
index 00b33698..c7817a33 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,7 +1,7 @@
 [tox]
 requires =
     tox>=4
-env_list = py{39,310,311,312}
+env_list = py{39,310,311,312,313}
 
 [testenv]
 description = Run unit tests with pytest