diff --git a/.circleci/config.yml b/.circleci/config.yml
index 2d4eb9b..ee7431c 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -4,15 +4,15 @@ workflows:
   version: 2
   test:
     jobs:
-      - test-3.6
-      - test-3.7
-      - test-3.8
+      - test-3.10
+      - test-3.11
+      - test-3.12
 
 jobs:
-  test-3.8: &unit-tests
+  test-3.11: &unit-tests
     working_directory: ~/python-glmnet
     docker:
-      - image: circleci/python:3.8
+      - image: circleci/python:3.11
     steps:
      - checkout
      - restore_cache:
@@ -48,11 +48,11 @@ jobs:
            . venv/bin/activate
            cd ~
            pytest -v python-glmnet/glmnet
-  test-3.6:
+  test-3.10:
    <<: *unit-tests
    docker:
-      - image: circleci/python:3.6
-  test-3.7:
+      - image: circleci/python:3.10
+  test-3.12:
    <<: *unit-tests
    docker:
-      - image: circleci/python:3.7
+      - image: circleci/python:3.12
diff --git a/.gitignore b/.gitignore
index 342d4ff..83219e8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,8 +1,170 @@
-build/
-*.pyc
-*.so
-*.egg-info
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
 *.so.dSYM
-_glmnetmodule.c
-dist
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
 MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+.pdm-python
+.Rproj.user/
+.pdm-build/
+.ruff_cache/
+**/*.Rproj
+.DS_Store
+**/*.ipynb
+**/.vscode
+**/builddir
\ No newline at end of file
diff --git a/.pdm-python b/.pdm-python
new file mode 100644
index 0000000..dfff7ec
--- /dev/null
+++ b/.pdm-python
@@ -0,0 +1 @@
+/home/milo/workspace/python-glmnet/.venv/bin/python
\ No newline at end of file
diff --git a/.sourcery.yaml b/.sourcery.yaml
new file mode 100644
index 0000000..f79ea95
--- /dev/null
+++ b/.sourcery.yaml
@@ -0,0 +1,81 @@
+# 🪄 This is your project's Sourcery configuration file.
+
+# You can use it to get Sourcery working in the way you want, such as
+# ignoring specific refactorings, skipping directories in your project,
+# or writing custom rules.
+
+# 📚 For a complete reference to this file, see the documentation at
+# https://docs.sourcery.ai/Configuration/Project-Settings/
+
+# This file was auto-generated by Sourcery on 2024-06-09 at 11:54.
+
+version: '1' # The schema version of this config file
+
+ignore: # A list of paths or files which Sourcery will ignore.
+- .git
+- env
+- .env
+- .tox
+- node_modules
+- vendor
+- venv
+- .venv
+- ~/.pyenv
+- ~/.rye
+- ~/.vscode
+- .vscode
+- ~/.cache
+- ~/.config
+- ~/.local
+
+rule_settings:
+  enable:
+  - default
+  disable: [] # A list of rule IDs Sourcery will never suggest.
+  rule_types:
+  - refactoring
+  - suggestion
+  - comment
+  python_version: '3.9' # A string specifying the lowest Python version your project supports. Sourcery will not suggest refactorings requiring a higher Python version.
+
+# rules: # A list of custom rules Sourcery will include in its analysis.
+# - id: no-print-statements
+#   description: Do not use print statements in the test directory.
+#   pattern: print(...)
+#   language: python
+#   replacement:
+#   condition:
+#   explanation:
+#   paths:
+#     include:
+#     - test
+#     exclude:
+#     - conftest.py
+#   tests: []
+#   tags: []
+
+# rule_tags: {} # Additional rule tags.
+
+# metrics:
+#   quality_threshold: 25.0
+
+# github:
+#   labels: []
+#   ignore_labels:
+#   - sourcery-ignore
+#   request_review: author
+#   sourcery_branch: sourcery/{base_branch}
+
+# clone_detection:
+#   min_lines: 3
+#   min_duplicates: 2
+#   identical_clones_only: false
+
+# proxy:
+#   url:
+#   ssl_certs_file:
+#   no_ssl_verify: false
+
+# coding_assistant:
+#   project_description: ''
+#   enabled:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 246ad0b..233ec77 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,31 @@
 All notable changes to this project will be documented in this file.
 This project adheres to [Semantic Versioning](http://semver.org/).
+## 2.6.1 - 2024-11-18
+### Changed
+* Parameterized unit tests in nox to test a range of Python versions >= 3.10 and
+  NumPy versions >= 1.24
+  * Trying to get 3.9 to work is a bust
+
+## 2.6.0 - 2024-11-16
+### Changed
+* In root `meson.build`, replaced `import numpy` and `numpy.f2py.get_include()` with
+  `from numpy import f2py` and `f2py.get_include()` to allow numpy < 2.0 to work
+* Rewrote unittest-style tests as pytest-style tests
+* Added Nox
+* Formatting and linting using Ruff
+* Reverted usage of structural pattern matching so earlier versions of Python work
+
+## 2.5.0 - 2024-11-13
+
+### Changed
+* Python version support: allow 3.12 and newer versions of numpy
+  that lack `numpy.distutils`
+* Replaced the numpy-based build system with Meson
+* Removed setup.py and moved all project information into pyproject.toml
+* Formatting and linting using Ruff
+* Changed package layout to src-type, moved tests outside the package
+
 
 ## Unreleased
 
 ## 2.2.2 - 2024-05-09
diff --git a/MANIFEST.in b/MANIFEST.in
deleted file mode 100644
index 10c3d88..0000000
--- a/MANIFEST.in
+++ /dev/null
@@ -1,6 +0,0 @@
-include README.rst
-include LICENSE
-recursive-include LICENSES *
-include pyproject.toml
-include glmnet/*.pyf
-exclude glmnet/*.c
diff --git a/codecov.yml b/codecov.yml
new file mode 100644
index 0000000..9ac2650
--- /dev/null
+++ b/codecov.yml
@@ -0,0 +1,9 @@
+comment: false
+coverage:
+  status:
+    project:
+      default:
+        target: "100"
+    patch:
+      default:
+        target: "100"
diff --git a/debug.py b/debug.py
new file mode 100644
index 0000000..f29f5be
--- /dev/null
+++ b/debug.py
@@ -0,0 +1,11 @@
+import numpy as np
+from sklearn.datasets import make_regression
+
+from glmnet import ElasticNet
+
+if __name__ == "__main__":
+    np.random.seed(488881)
+    X, y = make_regression(n_samples=1000, random_state=561)
+    m = ElasticNet()
+    m.fit(X, y)
+    p = m.predict(X[0].reshape((1, -1)), lamb=[20, 10])
\ No newline at end of file
diff --git a/dev-requirements.txt b/dev-requirements.txt
deleted file mode 100644
index 8ff9fe1..0000000
--- a/dev-requirements.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-# Pin exact dependency version numbers for local dev and CircleCI.
-numpy==1.19.2
-pytest==5.4.3
-scikit-learn==0.23.1
-scipy==1.5.0
diff --git a/glmnet/errors.py b/glmnet/errors.py
deleted file mode 100644
index 2e62f13..0000000
--- a/glmnet/errors.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import warnings
-
-
-def _check_error_flag(jerr):
-    """Check the glmnet solver error flag and issue warnings or raise
-    exceptions as appropriate.
-
-    The codes break down roughly as follows:
-
-        jerr == 0: everything is fine
-        jerr > 0: fatal errors such as memory allocation problems
-        jerr < 0: non-fatal errors such as convergence warnings
-    """
-    if jerr == 0:
-        return
-
-    if jerr > 0:
-        _fatal_errors(jerr)
-
-    if jerr < 0:
-        _convergence_errors(jerr)
-
-
-def _fatal_errors(jerr):
-    if jerr == 7777:
-        raise ValueError("All predictors have zero variance "
-                         "(glmnet error no. 7777).")
-
-    if jerr == 10000:
-        raise ValueError("At least one value of relative_penalties must be "
-                         "positive (glmnet error no. 10000).")
-
-    if jerr == 90000:
-        raise ValueError("Solver did not converge (glmnet error no. 90000).")
-
-    if jerr < 7777:
-        raise RuntimeError("Memory allocation error (glmnet error no. {})."
-                           .format(jerr))
-
-    if jerr > 8000 and jerr < 9000:
-        k = jerr - 8000
-        raise ValueError("Probability for class {} close to 0.".format(k))
-
-    if jerr > 9000:
-        k = jerr - 9000
-        raise ValueError("Probability for class {} close to 1.".format(k))
-
-    else:
-        raise RuntimeError("Fatal glmnet error no. {}.".format(jerr))
-
-
-def _convergence_errors(jerr):
-    if jerr < -20000:
-        k = abs(20000 + jerr)
-        warnings.warn("Predicted probability close to 0 or 1 for "
-                      "lambda no. {}.".format(k), RuntimeWarning)
-
-    if jerr > -20000 and jerr < -10000:
-        # This indicates the number of non-zero coefficients in a model
-        # exceeded a user-specified bound. We don't expose this parameter to
-        # the user, so there is not much sense in exposing the error either.
-        warnings.warn("Non-fatal glmnet error no. {}.".format(jerr),
-                      RuntimeWarning)
-
-    if jerr > -10000:
-        warnings.warn("Model did not converge for smaller values of lambda, "
-                      "returning solution for the largest {} values."
-                      .format(-1 * (jerr - 1)), RuntimeWarning)
diff --git a/glmnet/tests/test_errors.py b/glmnet/tests/test_errors.py
deleted file mode 100644
index fdf2dd7..0000000
--- a/glmnet/tests/test_errors.py
+++ /dev/null
@@ -1,58 +0,0 @@
-import unittest
-
-from glmnet.errors import _check_error_flag
-
-
-class TestErrors(unittest.TestCase):
-
-    def test_zero_jerr(self):
-        # This should not raise any warnings or exceptions.
-        _check_error_flag(0)
-
-    def test_convergence_err(self):
-        msg = ("Model did not converge for smaller values of lambda, "
-               "returning solution for the largest 75 values.")
-        with self.assertWarns(RuntimeWarning, msg=msg):
-            _check_error_flag(-76)
-
-    def test_zero_var_err(self):
-        msg = "All predictors have zero variance (glmnet error no. 7777)."
-        with self.assertRaises(ValueError, msg=msg):
-            _check_error_flag(7777)
-
-    def test_all_negative_rel_penalty(self):
-        msg = ("At least one value of relative_penalties must be positive, "
-               "(glmnet error no. 10000).")
-        with self.assertRaises(ValueError, msg=msg):
-            _check_error_flag(10000)
-
-    def test_memory_allocation_err(self):
-        msg = "Memory allocation error (glmnet error no. 1234)."
-        with self.assertRaises(RuntimeError, msg=msg):
-            _check_error_flag(1234)
-
-    def test_other_fatal_err(self):
-        msg = "Fatal glmnet error no. 7778."
-        with self.assertRaises(RuntimeError, msg=msg):
-            _check_error_flag(7778)
-
-    def test_class_prob_close_to_1(self):
-        msg = "Probability for class 2 close to 0."
-        with self.assertRaises(ValueError, msg=msg):
-            _check_error_flag(8002)
-
-    def test_class_prob_close_to_0(self):
-        msg = "Probability for class 4 close to 0."
-        with self.assertRaises(ValueError, msg=msg):
-            _check_error_flag(8004)
-
-    def test_predicted_class_close_to_0_or_1(self):
-        msg = "Predicted probability close to 0 or 1 for lambda no. 7."
-        with self.assertWarns(RuntimeWarning, msg=msg):
-            _check_error_flag(-20007)
-
-    def test_did_not_converge(self):
-        msg = "Solver did not converge (glmnet error no. 90000)."
-        with self.assertRaises(ValueError, msg=msg):
-            _check_error_flag(90000)
-
diff --git a/glmnet/tests/test_linear.py b/glmnet/tests/test_linear.py
deleted file mode 100644
index 988f4fc..0000000
--- a/glmnet/tests/test_linear.py
+++ /dev/null
@@ -1,211 +0,0 @@
-import unittest
-
-import numpy as np
-
-from scipy.sparse import csr_matrix
-
-from sklearn.datasets import make_regression
-from sklearn.metrics import r2_score
-from sklearn.utils import estimator_checks
-from sklearn.utils.testing import ignore_warnings
-
-from glmnet.tests.util import sanity_check_regression
-
-from glmnet import ElasticNet
-
-
-class TestElasticNet(unittest.TestCase):
-
-    def setUp(self):
-        np.random.seed(488881)
-        x, y = make_regression(n_samples=1000, random_state=561)
-        x_sparse = csr_matrix(x)
-
-        x_wide, y_wide = make_regression(n_samples=100, n_features=150,
-                                         random_state=1105)
-        x_wide_sparse = csr_matrix(x_wide)
-
-        self.inputs = [(x,y), (x_sparse, y), (x_wide, y_wide),
-                       (x_wide_sparse, y_wide)]
-        self.alphas = [0., 0.25, 0.50, 0.75, 1.]
-        self.n_splits = [-1, 0, 5]
-        self.scoring = [
-            "r2",
-            "mean_squared_error",
-            "mean_absolute_error",
-            "median_absolute_error",
-        ]
-
-    @ignore_warnings(category=RuntimeWarning)
-    def test_estimator_interface(self):
-        estimator_checks.check_estimator(ElasticNet)
-
-    def test_with_defaults(self):
-        m = ElasticNet(random_state=2821)
-        for x, y in self.inputs:
-            m = m.fit(x, y)
-            sanity_check_regression(m, x)
-
-            # check selection of lambda_best
-            self.assertTrue(m.lambda_best_inx_ <= m.lambda_max_inx_)
-
-            # check full path predict
-            p = m.predict(x, lamb=m.lambda_path_)
-            self.assertEqual(p.shape[-1], m.lambda_path_.size)
-
-    def test_one_row_predict(self):
-        # Verify that predicting on one row gives only one row of output
-        m = ElasticNet(random_state=42)
-        for X, y in self.inputs:
-            m.fit(X, y)
-            p = m.predict(X[0].reshape((1, -1)))
-            assert p.shape == (1,)
-
-    def test_one_row_predict_with_lambda(self):
-        # One row to predict along with lambdas should give 2D output
-        m = ElasticNet(random_state=42)
-        for X, y in self.inputs:
-            m.fit(X, y)
-            p = m.predict(X[0].reshape((1, -1)), lamb=[20, 10])
-            assert p.shape == (1, 2)
-
-    def test_with_single_var(self):
-        x = np.random.rand(500,1)
-        y = (1.3 * x).ravel()
-
-        m = ElasticNet(random_state=449065)
-        m = m.fit(x, y)
-        self.check_r2_score(y, m.predict(x), 0.90)
-
-    def test_with_no_predictor_variance(self):
-        x = np.ones((500, 1))
-        y = np.random.rand(500)
-
-        m = ElasticNet(random_state=561)
-        msg = "All predictors have zero variance (glmnet error no. 7777)."
-        with self.assertRaises(ValueError, msg=msg):
-            m.fit(x, y)
-
-    def test_relative_penalties(self):
-        m1 = ElasticNet(random_state=4328)
-        m2 = ElasticNet(random_state=4328)
-        for x, y in self.inputs:
-            p = x.shape[1]
-
-            # m1 no relative penalties applied
-            m1.fit(x, y)
-
-            # find the nonzero indices from LASSO
-            nonzero = np.nonzero(m1.coef_)
-
-            # unpenalize those nonzero coefs
-            penalty = np.repeat(1, p)
-            penalty[nonzero] = 0
-
-            # refit the model with the unpenalized coefs
-            m2.fit(x, y, relative_penalties=penalty)
-
-            # verify that the unpenalized coef ests exceed the penalized ones
-            # in absolute value
-            assert(np.all(np.abs(m1.coef_) <= np.abs(m2.coef_)))
-
-    def test_alphas(self):
-        x, y = self.inputs[0]
-        for alpha in self.alphas:
-            m = ElasticNet(alpha=alpha, random_state=2465)
-            m = m.fit(x, y)
-            self.check_r2_score(y, m.predict(x), 0.90, alpha=alpha)
-
-    def test_coef_limits(self):
-        x, y = self.inputs[0]
-        lower_limits = np.repeat(-1, x.shape[1])
-        upper_limits = 0
-        m = ElasticNet(lower_limits=lower_limits, upper_limits=upper_limits, random_state=5934, alpha=0)
-        m = m.fit(x, y)
-        assert(np.all(m.coef_ >= -1))
-        assert(np.all(m.coef_ <= 0))
-
-    def test_n_splits(self):
-        x, y = self.inputs[0]
-        for n in self.n_splits:
-            m = ElasticNet(n_splits=n, random_state=6601)
-            if n > 0 and n < 3:
-                with self.assertRaisesRegexp(ValueError,
-                                             "n_splits must be at least 3"):
-                    m = m.fit(x, y)
-            else:
-                m = m.fit(x, y)
-                sanity_check_regression(m, x)
-
-    def test_cv_scoring(self):
-        x, y = self.inputs[0]
-        for method in self.scoring:
-            m = ElasticNet(scoring=method, random_state=1729)
-            m = m.fit(x, y)
-            self.check_r2_score(y, m.predict(x), 0.90, scoring=method)
-
-    def test_predict_without_cv(self):
-        x, y = self.inputs[0]
-        m = ElasticNet(n_splits=0, random_state=340561)
-        m = m.fit(x, y)
-
-        # should not make prediction unless value is passed for lambda
-        with self.assertRaises(ValueError):
-            m.predict(x)
-
-    def test_coef_interpolation(self):
-        x, y = self.inputs[0]
-        m = ElasticNet(n_splits=0, random_state=1729)
-        m = m.fit(x, y)
-
-        # predict for a value of lambda between two values on the computed path
-        lamb_lo = m.lambda_path_[1]
-        lamb_hi = m.lambda_path_[2]
-
-        # a value not equal to one on the computed path
-        lamb_mid = (lamb_lo + lamb_hi) / 2.0
-
-        pred_lo = m.predict(x, lamb=lamb_lo)
-        pred_hi = m.predict(x, lamb=lamb_hi)
-        pred_mid = m.predict(x, lamb=lamb_mid)
-
-        self.assertFalse(np.allclose(pred_lo, pred_mid))
-        self.assertFalse(np.allclose(pred_hi, pred_mid))
-
-    def test_lambda_clip_warning(self):
-        x, y = self.inputs[0]
-        m = ElasticNet(n_splits=0, random_state=1729)
-        m = m.fit(x, y)
-
-        # we should get a warning when we ask for predictions at values of
-        # lambda outside the range of lambda_path_
-        with self.assertWarns(RuntimeWarning):
-            # note, lambda_path_ is in decreasing order
-            m.predict(x, lamb=m.lambda_path_[0] + 1)
-
-        with self.assertWarns(RuntimeWarning):
-            m.predict(x, lamb=m.lambda_path_[-1] - 1)
-
-    def check_r2_score(self, y_true, y, at_least, **other_params):
-        score = r2_score(y_true, y)
-        msg = "expected r2 of {}, got: {}, with: {}".format(at_least, score, other_params)
-        self.assertTrue(score > at_least, msg)
-
-    def test_random_state_cv(self):
-        random_state = 133
-        m = ElasticNet(random_state=random_state)
-        x, y = self.inputs[0]
-        m.fit(x, y)
-        print(dir(m._cv))
-        assert m._cv.random_state == random_state
-
-    def test_max_features(self):
-        x, y = self.inputs[3]
-        max_features = 5
-        m = ElasticNet(n_splits=3, random_state=42,
-                       max_features=max_features)
-        m = m.fit(x, y)
-        num_features = np.count_nonzero(m.coef_)
-        self.assertTrue(num_features <= max_features)
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/glmnet/tests/test_logistic.py b/glmnet/tests/test_logistic.py
deleted file mode 100644
index 9067c4e..0000000
--- a/glmnet/tests/test_logistic.py
+++ /dev/null
@@ -1,287 +0,0 @@
-import itertools
-import unittest
-
-import numpy as np
-
-from scipy.sparse import csr_matrix
-
-from sklearn.datasets import make_classification
-from sklearn.metrics import accuracy_score, f1_score
-from sklearn.utils import estimator_checks, class_weight
-from sklearn.utils.testing import ignore_warnings
-
-from glmnet.tests.util import sanity_check_logistic
-
-from glmnet import LogitNet
-
-
-class TestLogitNet(unittest.TestCase):
-
-    def setUp(self):
-        np.random.seed(488881)
-        # binomial
-        x, y = make_classification(n_samples=300, random_state=6601)
-        x_sparse = csr_matrix(x)
-
-        x_wide, y_wide = make_classification(n_samples=100, n_features=150,
-                                             random_state=8911)
-        x_wide_sparse = csr_matrix(x_wide)
-        self.binomial = [(x, y), (x_sparse, y), (x_wide, y_wide),
-                         (x_wide_sparse, y_wide)]
-
-        # multinomial
-        x, y = make_classification(n_samples=400, n_classes=3, n_informative=15,
-                                   n_features=25, random_state=10585)
-        x_sparse = csr_matrix(x)
-
-        x_wide, y_wide = make_classification(n_samples=400, n_classes=3,
-                                             n_informative=15, n_features=500,
-                                             random_state=15841)
-        x_wide_sparse = csr_matrix(x_wide)
-        self.multinomial = [(x, y), (x_sparse, y), (x_wide, y_wide),
-                            (x_wide_sparse, y_wide)]
-
-        self.alphas = [0., 0.25, 0.50, 0.75, 1.]
-        self.n_splits = [-1, 0, 5]
-        self.scoring = [
-            "accuracy",
-            "roc_auc",
-            "average_precision",
-            "log_loss",
-            "precision_macro",
-            "precision_micro",
-            "precision_weighted",
-            "f1_micro",
-            "f1_macro",
-            "f1_weighted",
-        ]
-        self.multinomial_scoring = [
-            "accuracy",
-            "log_loss",
-            "precision_macro",
-            "precision_micro",
-            "precision_weighted",
-            "f1_micro",
-            "f1_macro",
-            "f1_weighted"
-        ]
-
-    @ignore_warnings(category=RuntimeWarning)  # convergence warnings from glmnet
-    def test_estimator_interface(self):
-        estimator_checks.check_estimator(LogitNet)
-
-    def test_with_defaults(self):
-        m = LogitNet(random_state=29341)
-        for x, y in itertools.chain(self.binomial, self.multinomial):
-            m = m.fit(x, y)
-            sanity_check_logistic(m, x)
-
-            # check selection of lambda_best
-            assert m.lambda_best_inx_ <= m.lambda_max_inx_
-
-            # check full path predict
-            p = m.predict(x, lamb=m.lambda_path_)
-            assert p.shape[-1] == m.lambda_path_.size
-
-    def test_one_row_predict(self):
-        # Verify that predicting on one row gives only one row of output
-        m = LogitNet(random_state=42)
-        for X, y in itertools.chain(self.binomial, self.multinomial):
-            m.fit(X, y)
-            p = m.predict(X[0].reshape((1, -1)))
-            assert p.shape == (1,)
-
-    def test_one_row_predict_proba(self):
-        # Verify that predict_proba on one row gives 2D output
-        m = LogitNet(random_state=42)
-        for X, y in itertools.chain(self.binomial, self.multinomial):
-            m.fit(X, y)
-            p = m.predict_proba(X[0].reshape((1, -1)))
-            assert p.shape == (1, len(np.unique(y)))
-
-    def test_one_row_predict_with_lambda(self):
-        # One row to predict along with lambdas should give 2D output
-        m = LogitNet(random_state=42)
-        lamb = [0.01, 0.02, 0.04, 0.1]
-        for X, y in itertools.chain(self.binomial, self.multinomial):
-            m.fit(X, y)
-            p = m.predict(X[0].reshape((1, -1)), lamb=lamb)
-            assert p.shape == (1, len(lamb))
-
-    def test_one_row_predict_proba_with_lambda(self):
-        # One row to predict_proba along with lambdas should give 3D output
-        m = LogitNet(random_state=42)
-        lamb = [0.01, 0.02, 0.04, 0.1]
-        for X, y in itertools.chain(self.binomial, self.multinomial):
-            m.fit(X, y)
-            p = m.predict_proba(X[0].reshape((1, -1)), lamb=lamb)
-            assert p.shape == (1, len(np.unique(y)), len(lamb))
-
-    def test_alphas(self):
-        x, y = self.binomial[0]
-        for alpha in self.alphas:
-            m = LogitNet(alpha=alpha, random_state=41041)
-            m = m.fit(x, y)
-            check_accuracy(y, m.predict(x), 0.85, alpha=alpha)
-
-    def test_coef_limits(self):
-        x, y = self.binomial[0]
-        lower_limits = np.repeat(-1, x.shape[1])
-        upper_limits = 0
-        m = LogitNet(lower_limits=lower_limits, upper_limits=upper_limits, random_state=69265, alpha=0)
-        m = m.fit(x, y)
-        assert(np.all(m.coef_ >= -1))
-        assert(np.all(m.coef_ <= 0))
-
-    def test_relative_penalties(self):
-        x, y = self.binomial[0]
-        p = x.shape[1]
-
-        # m1 no relative penalties applied
-        m1 = LogitNet(alpha=1)
-        m1.fit(x, y)
-
-        # find the nonzero indices from LASSO
-        nonzero = np.nonzero(m1.coef_[0])
-
-        # unpenalize those nonzero coefs
-        penalty = np.repeat(1, p)
-        penalty[nonzero] = 0
-
-        # refit the model with the unpenalized coefs
-        m2 = LogitNet(alpha=1)
-        m2.fit(x, y, relative_penalties=penalty)
-
-        # verify that the unpenalized coef ests exceed the penalized ones
-        # in absolute value
-        assert(np.all(np.abs(m1.coef_[0]) <= np.abs(m2.coef_[0])))
-
-    def test_n_splits(self):
-        x, y = self.binomial[0]
-        for n in self.n_splits:
-            m = LogitNet(n_splits=n, random_state=46657)
-            if n > 0 and n < 3:
-                with self.assertRaisesRegexp(ValueError,
-                                             "n_splits must be at least 3"):
-                    m = m.fit(x, y)
-            else:
-                m = m.fit(x, y)
-                sanity_check_logistic(m, x)
-
-    def test_cv_scoring(self):
-        x, y = self.binomial[0]
-        for method in self.scoring:
-            m = LogitNet(scoring=method, random_state=52633)
-            m = m.fit(x, y)
-            check_accuracy(y, m.predict(x), 0.85, scoring=method)
-
-    def test_cv_scoring_multinomial(self):
-        x, y = self.multinomial[0]
-        for method in self.scoring:
-            m = LogitNet(scoring=method, random_state=488881)
-
-            if method in self.multinomial_scoring:
-                m = m.fit(x, y)
-                check_accuracy(y, m.predict(x), 0.65, scoring=method)
-            else:
-                with self.assertRaises(ValueError):
-                    m.fit(x, y)
-
-    def test_predict_without_cv(self):
-        x, y = self.binomial[0]
-        m = LogitNet(n_splits=0, random_state=399001)
-        m = m.fit(x, y)
-
-        # should not make prediction unless value is passed for lambda
-        with self.assertRaises(ValueError):
-            m.predict(x)
-
-    def test_coef_interpolation(self):
-        x, y = self.binomial[0]
-        m = LogitNet(n_splits=0, random_state=561)
-        m = m.fit(x, y)
-
-        # predict for a value of lambda between two values on the computed path
-        lamb_lo = m.lambda_path_[1]
-        lamb_hi = m.lambda_path_[2]
-
-        # a value not equal to one on the computed path
-        lamb_mid = (lamb_lo + lamb_hi) / 2.0
-
-        pred_lo = m.predict_proba(x, lamb=lamb_lo)
-        pred_hi = m.predict_proba(x, lamb=lamb_hi)
-        pred_mid = m.predict_proba(x, lamb=lamb_mid)
-
-        self.assertFalse(np.allclose(pred_lo, pred_mid))
-        self.assertFalse(np.allclose(pred_hi, pred_mid))
-
-    def test_lambda_clip_warning(self):
-        x, y = self.binomial[0]
-        m = LogitNet(n_splits=0, random_state=1729)
-        m = m.fit(x, y)
-
-        with self.assertWarns(RuntimeWarning):
-            m.predict(x, lamb=m.lambda_path_[0] + 1)
-
-        with self.assertWarns(RuntimeWarning):
-            m.predict(x, lamb=m.lambda_path_[-1] - 1)
-
-    def test_single_class_exception(self):
-        x, y = self.binomial[0]
-        y = np.ones_like(y)
-        m = LogitNet()
-
-        with self.assertRaises(ValueError) as e:
-            m.fit(x, y)
-
-        self.assertEqual("Training data need to contain at least 2 classes.",
-                         str(e.exception))
-
-    def test_random_state_cv(self):
-        random_state = 133
-        m = LogitNet(random_state=random_state)
-        x, y = self.binomial[0]
-        m.fit(x, y)
-        print(dir(m._cv))
-        assert m._cv.random_state == random_state
-
-    def test_max_features(self):
-        max_features = 5
-        m = LogitNet(random_state=1, max_features=max_features)
-        x, y = self.multinomial[3]
-        m = m.fit(x, y)
-        num_features = np.count_nonzero(m.coef_, axis=1)
-        self.assertTrue(np.all(num_features <= max_features))
-
-    def test_use_sample_weights(self):
-        x, y = self.multinomial[1]
-        class_0_idx = np.where(y==0)
-        to_drop = class_0_idx[0][:-3]
-        to_keep = np.ones(len(y), dtype=bool)
-        to_keep[to_drop] = False
-        y = y[to_keep]
-        x = x[to_keep, :]
-        sample_weight = class_weight.compute_sample_weight('balanced', y)
-        sample_weight[0] = 0.
-
-        unweighted = LogitNet(random_state=2, scoring='f1_micro')
-        unweighted = unweighted.fit(x, y)
-        unweighted_acc = f1_score(y, unweighted.predict(x), sample_weight=sample_weight,
-                                  average='micro')
-
-        weighted = LogitNet(random_state=2, scoring='f1_micro')
-        weighted = weighted.fit(x, y, sample_weight=sample_weight)
-        weighted_acc = f1_score(y, weighted.predict(x), sample_weight=sample_weight,
-                                average='micro')
-
-        self.assertTrue(weighted_acc >= unweighted_acc)
-
-
-def check_accuracy(y, y_hat, at_least, **other_params):
-    score = accuracy_score(y, y_hat)
-    msg = "expected accuracy of {}, got: {} with {}".format(at_least, score, other_params)
-    assert score > at_least, msg
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/glmnet/tests/test_pandas.py b/glmnet/tests/test_pandas.py
deleted file mode 100644
index 7b6fe07..0000000
--- a/glmnet/tests/test_pandas.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import unittest
-
-from sklearn.datasets import make_regression, make_classification
-from glmnet import LogitNet, ElasticNet
-
-from glmnet.tests.util import sanity_check_logistic, sanity_check_regression
-
-pd = None
-try:
-    import pandas as pd
-except:
-    pass
-
-
-class TestElasticNetPandas(unittest.TestCase):
-
-    @unittest.skipUnless(pd, "pandas not available")
-    def test_with_pandas_df(self):
-        x, y = make_regression(random_state=561)
-        df = pd.DataFrame(x)
-        df['y'] = y
-
-        m = ElasticNet(n_splits=3, random_state=123)
-        m = m.fit(df.drop(['y'], axis=1), df.y)
-        sanity_check_regression(m, x)
-
-
-class TestLogitNetPandas(unittest.TestCase):
-
-    @unittest.skipUnless(pd, "pandas not available")
-    def test_with_pandas_df(self):
-        x, y = make_classification(random_state=1105)
-        df = pd.DataFrame(x)
-        df['y'] = y
-
-        m = LogitNet(n_splits=3, random_state=123)
-        m = m.fit(df.drop(['y'], axis=1), df.y)
-        sanity_check_logistic(m, x)
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/glmnet/tests/test_util.py b/glmnet/tests/test_util.py
deleted file mode 100644
index 3d90d67..0000000
--- a/glmnet/tests/test_util.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import unittest
-
-import numpy as np
-from numpy.testing import assert_warns
-
-from glmnet.util import _interpolate_model
-
-
-class TestUtils(unittest.TestCase):
-
-    def test_interpolate_model_intercept_only(self):
-        lambda_path = np.array((0.99,))
-        coef_path = np.random.random(size=(5, 1))
-        intercept_path = np.random.random(size=(1,))
-
-        # would be nice to use assertWarnsRegex to check the message, but this
-        # fails due to http://bugs.python.org/issue20484
-        assert_warns(RuntimeWarning, _interpolate_model, lambda_path,
-                     coef_path, intercept_path, 0.99)
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/meson.build b/meson.build
new file mode 100644
index 0000000..ba9f609
--- /dev/null
+++ b/meson.build
@@ -0,0 +1,176 @@
+# This is going to be a Frankenstein's monster of
+# a build file based on that from https://github.com/amolenaar/meson-python-pdm-example
+# and whatever I need to steal from Scipy to get this to work
+project('glmnet', 'c',
+  version : '2.6.0',
+  meson_version: '>= 1.5.0',
+  default_options : [
+    'warning_level=1',
+    'fortran_std=legacy',
+    'buildtype=debugoptimized',
+  ])
+
+# https://mesonbuild.com/Python-module.html
+# requires at least 0.46.0
+py_mod = import('python')
+py3 = py_mod.find_installation()
+py3_dep = py3.dependency()
+
+cc = meson.get_compiler('c')
+
+# Check compiler is recent enough (see "Toolchain Roadmap" for details)
+_global_c_args = cc.get_supported_arguments(
+  '-Wno-unused-but-set-variable',
+  '-Wno-unused-function',
+  '-Wno-conversion',
+  '-Wno-misleading-indentation',
+)
+add_project_arguments(_global_c_args, language : 'c')
+
+m_dep = cc.find_library('m', required : false)
+if m_dep.found()
+  add_project_link_arguments('-lm', language : 'c')
+endif
+
+# setup fortran
+add_languages('fortran', native: false)
+ff = meson.get_compiler('fortran')
+if ff.has_argument('-Wno-conversion')
+  add_project_arguments('-Wno-conversion', language: 'fortran')
+endif
+
+_global_ff_args = ff.get_supported_arguments(
+  # '-fallow-argument-mismatch',
+  '-fdefault-real-8',
+  '-ffixed-form',
+  '-fallow-argument-mismatch',
+  '-fno-optimize-sibling-calls',
+  '-w',
+)
+add_project_arguments(_global_ff_args, language: 'fortran')
+add_project_arguments('-ffixed-form', language: 'fortran')
+
+if host_machine.system() == 'darwin'
+  if cc.has_link_argument('-Wl,-ld_classic')
+    # New linker introduced in macOS 14 not working yet, see gh-19357 and gh-19387
+    add_project_link_arguments('-Wl,-ld_classic', language : ['c', 'fortran'])
+  endif
+  if cc.has_link_argument('-Wl,-dead_strip')
+    # Allow linker to strip unused symbols
+    add_project_link_arguments('-Wl,-dead_strip', language : ['c', 'fortran'])
+  endif
+endif
+
+# Hide symbols when building on Linux with GCC. For Python extension modules,
+# we only need `PyInit_*` to be public, anything else may cause problems. So we
+# use a linker script to avoid exporting those symbols (this is in addition to
+# Meson using `-fvisibility=hidden` for C and `-fvisibility-inlines-hidden` for
+# C++ code. See gh-15996 for details.
+_linker_script = meson.project_source_root() / 'scipy/_build_utils/link-version-pyinit.map'
+version_link_args = ['-Wl,--version-script=' + _linker_script]
+# Note that FreeBSD only accepts version scripts when -shared is passed,
+# hence we need to pass that to `cc.links` explicitly (flag is already
+# present for `extension_module` invocations).
+if not cc.links('', name: '-Wl,--version-script', args: ['-shared', version_link_args])
+  version_link_args = []
+endif
+
+# Fortran warning flags
+_fflag_Wno_argument_mismatch = ff.get_supported_arguments('-Wno-argument-mismatch')
+_fflag_Wno_conversion = ff.get_supported_arguments('-Wno-conversion')
+_fflag_Wno_intrinsic_shadow = ff.get_supported_arguments('-Wno-intrinsic-shadow')
+_fflag_Wno_maybe_uninitialized = ff.get_supported_arguments('-Wno-maybe-uninitialized')
+_fflag_Wno_surprising = ff.get_supported_arguments('-Wno-surprising')
+_fflag_Wno_uninitialized = ff.get_supported_arguments('-Wno-uninitialized')
+_fflag_Wno_unused_dummy_argument = ff.get_supported_arguments('-Wno-unused-dummy-argument')
+_fflag_Wno_unused_label = ff.get_supported_arguments('-Wno-unused-label')
+_fflag_Wno_unused_variable = ff.get_supported_arguments('-Wno-unused-variable')
+_fflag_Wno_tabs = ff.get_supported_arguments('-Wno-tabs')
+# The default list of warnings to ignore from Fortran code. There is a lot of
+# old, vendored code that is very bad and we want to compile it silently (at
+# least with GCC and Clang)
+fortran_ignore_warnings = ff.get_supported_arguments(
+  _fflag_Wno_argument_mismatch,
+  _fflag_Wno_conversion,
+  _fflag_Wno_maybe_uninitialized,
+  _fflag_Wno_unused_dummy_argument,
+  _fflag_Wno_unused_label,
+  _fflag_Wno_unused_variable,
+  _fflag_Wno_tabs,
+)
+
+
+incdir_numpy = meson.get_external_property('numpy-include-dir', 'not-given')
+if incdir_numpy == 'not-given'
+  incdir_numpy = run_command(py3,
+    [
+      '-c',
+      '''import os
+import numpy as np
+try:
+    incdir = os.path.relpath(np.get_include())
+except Exception:
+    incdir = np.get_include()
+print(incdir)
+    '''
+    ],
+    check: true
+  ).stdout().strip()
+
+  # We do need an absolute path to feed to `cc.find_library` below
+  _incdir_numpy_abs = run_command(py3,
+    [
+      '-c',
+      '''import os
+os.chdir("..")
+import numpy
+print(numpy.get_include())
+    '''
+    ],
+    check: true
+  ).stdout().strip()
+else
+  _incdir_numpy_abs = incdir_numpy
+endif
+
+inc_np = include_directories(incdir_numpy)
+incdir_f2py = run_command(py3,
+  [
+    '-c',
+    'import os; from numpy import f2py; incdir = os.path.relpath(f2py.get_include()); print(incdir)',
+  ],
+  check: true
+).stdout().strip()
+
+inc_f2py = include_directories(incdir_f2py)
+fortranobject_c = incdir_f2py / 'fortranobject.c'
+
+
+numpy_nodepr_api = ['-DNPY_NO_DEPRECATED_API=NPY_1_9_API_VERSION']
+np_dep = declare_dependency(include_directories: inc_np, compile_args: numpy_nodepr_api)
+
+fortranobject_lib = static_library('_fortranobject',
+  fortranobject_c,
+  c_args: numpy_nodepr_api,
+  dependencies: py3_dep,
+  include_directories: [inc_np, inc_f2py],
+  gnu_symbol_visibility: 'hidden',
+)
+fortranobject_dep = declare_dependency(
+  link_with: fortranobject_lib,
+  include_directories: [inc_np, inc_f2py],
+)
+
+# setup f2py in the style of Scipy
+generate_f2pymod = find_program('tools/generate_f2pymod.py')
+f2py = find_program('f2py')
+
+f2py_gen = generator(generate_f2pymod,
+  arguments : ['@INPUT@', '-o', '@BUILD_DIR@'],
+  output : ['_@BASENAME@module.c', '_@BASENAME@-f2pywrappers.f'],
+)
+
+subdir('src/glmnet')
diff --git a/meson.options b/meson.options
new file mode 100644
index 0000000..0d68285
--- /dev/null
+++ b/meson.options
@@ -0,0 +1 @@
+option('python', type : 'string', value : 'python3')
\ No newline at end of file
diff --git a/noxfile.py b/noxfile.py
new file mode 100755
index 0000000..68a275f
--- /dev/null
+++ b/noxfile.py
@@ -0,0 +1,101 @@
+import os
+import sys
+from pathlib import Path
+
+import nox
+
+PACKAGE = "glmnet"
+PYTHON_VERSIONS = ["3.10", "3.11", "3.12", "3.13"]  # no 3.9 because sweet christmas does it want to fail to install
+LATEST_VERSION = PYTHON_VERSIONS[-2]  # since 3.13 isn't *officially* released yet
+NUMPY_VERSIONS = ["2.1.3", "2.0.2", "1.26.4", "1.25.2", "1.24.4"]
+os.environ.update(
+    {
+        "PDM_IGNORE_SAVED_PYTHON": "1",
+        "PDM_IGNORE_ACTIVE_VENV": "0",
+    }
+)
+nox.needs_version = ">=2024.4.15"
+nox.options.sessions = (
+    "mypy",
+    "tests",
+)
+
+locations = (
+    "src",
+    "tests",
+)
+
+
+@nox.session(python=LATEST_VERSION, reuse_venv=True)
+def lockfile(session) -> None:
+    """Refresh the PDM lockfile."""
+    session.run_always("pdm", "lock", external=True)
+
+
+@nox.session(python=LATEST_VERSION)
+def lint(session) -> None:
+    """Lint using ruff."""
+    args = session.posargs or locations
+    session.install("ruff")
+    session.run("ruff", "check", "--fix", *args)
+    session.run("ruff", "format", *args)
+
+
+@nox.session(python=LATEST_VERSION, reuse_venv=False)
+def mypy(session) -> None:
+    """Type-check using mypy."""
+    session.install("mypy")
+    session.install(".", external=True)
+    session.run(
+        "mypy",
+        "--install-types",
+        "--non-interactive",
+        f"--python-executable={sys.executable}",
+        "noxfile.py",
+    )
+
+
+@nox.session()
+# numpy >=2.1 requires python 3.10-3.13
+@nox.parametrize(
+    "python,numpy",
+    [
+        (python, numpy)
+        for python in PYTHON_VERSIONS
+        for numpy in NUMPY_VERSIONS
+        if (python, numpy) not in [
+            # ("3.9", "2.1.3"),
+            # ("3.9", "2.0.2"),
+            ("3.12", "1.25.2"),
+            ("3.12", "1.24.4"),
+            ("3.13", "1.25.2"),
+            ("3.13", "1.24.4")
+        ]
+    ]
+)
+def tests(session, numpy) -> None:
+    """Run the test suite."""
+    # session.install("pdm")
+    # session.run("pdm", "lock")
+    session.install("cython", f"numpy=={numpy}")
+    session.install("meson-python", "ninja", "setuptools")
+    session.install("pytest>=8.3.3", "pytest-lazy-fixtures>=1.1.1", "pytest-randomly>=3.16.0", "pytest-xdist[psutil]>=3.6.1", "coverage[toml]")
+    session.install(".", external=True)
+    session.run("python", "-c", "import numpy; print(numpy.__version__)")
+    session.run(
+        # running in parallel doesn't work, I think because of not setting a seed
+        # "coverage", "run", "--parallel", "-m", "pytest", "--numprocesses", "auto", "--random-order", external=True
+        "coverage", "run", "-m", "pytest", "tests"
+    )
+
+
+@nox.session(python=LATEST_VERSION, reuse_venv=True)
+def coverage(session) -> None:
+    """Produce the coverage report."""
+    args = session.posargs or ["report"]
+    session.install("coverage[toml]", "codecov", external=True)
+
+    if not session.posargs and any(Path().glob(".coverage.*")):
+        session.run("coverage", "combine")
+
+    session.run("coverage", "json", "--fail-under=0")
+    session.run("codecov", *args)
diff --git a/pdm.lock b/pdm.lock
new file mode 100644
index 0000000..3390dac
--- /dev/null
+++ b/pdm.lock
@@ -0,0 +1,721 @@
+# This file is @generated by PDM.
+# It is not intended for manual editing.
+ +[metadata] +groups = ["default", "dev"] +strategy = ["inherit_metadata"] +lock_version = "4.5.0" +content_hash = "sha256:9ca0385fc15ad0be9a63392dd901b39422f8470f6e8c9a863181fc54ccc572bb" + +[[metadata.targets]] +requires_python = ">=3.10" + +[[package]] +name = "argcomplete" +version = "3.5.1" +requires_python = ">=3.8" +summary = "Bash tab completion for argparse" +groups = ["dev"] +files = [ + {file = "argcomplete-3.5.1-py3-none-any.whl", hash = "sha256:1a1d148bdaa3e3b93454900163403df41448a248af01b6e849edc5ac08e6c363"}, + {file = "argcomplete-3.5.1.tar.gz", hash = "sha256:eb1ee355aa2557bd3d0145de7b06b2a45b0ce461e1e7813f5d066039ab4177b4"}, +] + +[[package]] +name = "colorama" +version = "0.4.6" +requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +summary = "Cross-platform colored terminal text." +groups = ["dev"] +marker = "sys_platform == \"win32\"" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "colorlog" +version = "6.9.0" +requires_python = ">=3.6" +summary = "Add colours to the output of Python's logging module." +groups = ["dev"] +dependencies = [ + "colorama; sys_platform == \"win32\"", +] +files = [ + {file = "colorlog-6.9.0-py3-none-any.whl", hash = "sha256:5906e71acd67cb07a71e779c47c4bcb45fb8c2993eebe9e5adcd6a6f1b283eff"}, + {file = "colorlog-6.9.0.tar.gz", hash = "sha256:bfba54a1b93b94f54e1f4fe48395725a3d92fd2a4af702f6bd70946bdc0c6ac2"}, +] + +[[package]] +name = "coverage" +version = "7.6.7" +requires_python = ">=3.9" +summary = "Code coverage measurement for Python" +groups = ["dev"] +files = [ + {file = "coverage-7.6.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:108bb458827765d538abcbf8288599fee07d2743357bdd9b9dad456c287e121e"}, + {file = "coverage-7.6.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c973b2fe4dc445cb865ab369df7521df9c27bf40715c837a113edaa2aa9faf45"}, + {file = "coverage-7.6.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c6b24007c4bcd0b19fac25763a7cac5035c735ae017e9a349b927cfc88f31c1"}, + {file = "coverage-7.6.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:acbb8af78f8f91b3b51f58f288c0994ba63c646bc1a8a22ad072e4e7e0a49f1c"}, + {file = "coverage-7.6.7-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad32a981bcdedb8d2ace03b05e4fd8dace8901eec64a532b00b15217d3677dd2"}, + {file = "coverage-7.6.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:34d23e28ccb26236718a3a78ba72744212aa383141961dd6825f6595005c8b06"}, + {file = "coverage-7.6.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e25bacb53a8c7325e34d45dddd2f2fbae0dbc230d0e2642e264a64e17322a777"}, + {file = "coverage-7.6.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:af05bbba896c4472a29408455fe31b3797b4d8648ed0a2ccac03e074a77e2314"}, + {file = "coverage-7.6.7-cp310-cp310-win32.whl", hash = "sha256:796c9b107d11d2d69e1849b2dfe41730134b526a49d3acb98ca02f4985eeff7a"}, + {file = "coverage-7.6.7-cp310-cp310-win_amd64.whl", hash = "sha256:987a8e3da7da4eed10a20491cf790589a8e5e07656b6dc22d3814c4d88faf163"}, + {file = "coverage-7.6.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7e61b0e77ff4dddebb35a0e8bb5a68bf0f8b872407d8d9f0c726b65dfabe2469"}, + {file = 
"coverage-7.6.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1a5407a75ca4abc20d6252efeb238377a71ce7bda849c26c7a9bece8680a5d99"}, + {file = "coverage-7.6.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df002e59f2d29e889c37abd0b9ee0d0e6e38c24f5f55d71ff0e09e3412a340ec"}, + {file = "coverage-7.6.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:673184b3156cba06154825f25af33baa2671ddae6343f23175764e65a8c4c30b"}, + {file = "coverage-7.6.7-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e69ad502f1a2243f739f5bd60565d14a278be58be4c137d90799f2c263e7049a"}, + {file = "coverage-7.6.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:60dcf7605c50ea72a14490d0756daffef77a5be15ed1b9fea468b1c7bda1bc3b"}, + {file = "coverage-7.6.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9c2eb378bebb2c8f65befcb5147877fc1c9fbc640fc0aad3add759b5df79d55d"}, + {file = "coverage-7.6.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3c0317288f032221d35fa4cbc35d9f4923ff0dfd176c79c9b356e8ef8ef2dff4"}, + {file = "coverage-7.6.7-cp311-cp311-win32.whl", hash = "sha256:951aade8297358f3618a6e0660dc74f6b52233c42089d28525749fc8267dccd2"}, + {file = "coverage-7.6.7-cp311-cp311-win_amd64.whl", hash = "sha256:5e444b8e88339a2a67ce07d41faabb1d60d1004820cee5a2c2b54e2d8e429a0f"}, + {file = "coverage-7.6.7-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f07ff574986bc3edb80e2c36391678a271d555f91fd1d332a1e0f4b5ea4b6ea9"}, + {file = "coverage-7.6.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:49ed5ee4109258973630c1f9d099c7e72c5c36605029f3a91fe9982c6076c82b"}, + {file = "coverage-7.6.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3e8796434a8106b3ac025fd15417315d7a58ee3e600ad4dbcfddc3f4b14342c"}, + {file = "coverage-7.6.7-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3b925300484a3294d1c70f6b2b810d6526f2929de954e5b6be2bf8caa1f12c1"}, + {file = "coverage-7.6.7-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c42ec2c522e3ddd683dec5cdce8e62817afb648caedad9da725001fa530d354"}, + {file = "coverage-7.6.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0266b62cbea568bd5e93a4da364d05de422110cbed5056d69339bd5af5685433"}, + {file = "coverage-7.6.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e5f2a0f161d126ccc7038f1f3029184dbdf8f018230af17ef6fd6a707a5b881f"}, + {file = "coverage-7.6.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c132b5a22821f9b143f87446805e13580b67c670a548b96da945a8f6b4f2efbb"}, + {file = "coverage-7.6.7-cp312-cp312-win32.whl", hash = "sha256:7c07de0d2a110f02af30883cd7dddbe704887617d5c27cf373362667445a4c76"}, + {file = "coverage-7.6.7-cp312-cp312-win_amd64.whl", hash = "sha256:fd49c01e5057a451c30c9b892948976f5d38f2cbd04dc556a82743ba8e27ed8c"}, + {file = "coverage-7.6.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:46f21663e358beae6b368429ffadf14ed0a329996248a847a4322fb2e35d64d3"}, + {file = "coverage-7.6.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:40cca284c7c310d622a1677f105e8507441d1bb7c226f41978ba7c86979609ab"}, + {file = "coverage-7.6.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77256ad2345c29fe59ae861aa11cfc74579c88d4e8dbf121cbe46b8e32aec808"}, + {file = 
"coverage-7.6.7-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:87ea64b9fa52bf395272e54020537990a28078478167ade6c61da7ac04dc14bc"}, + {file = "coverage-7.6.7-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d608a7808793e3615e54e9267519351c3ae204a6d85764d8337bd95993581a8"}, + {file = "coverage-7.6.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdd94501d65adc5c24f8a1a0eda110452ba62b3f4aeaba01e021c1ed9cb8f34a"}, + {file = "coverage-7.6.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:82c809a62e953867cf57e0548c2b8464207f5f3a6ff0e1e961683e79b89f2c55"}, + {file = "coverage-7.6.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:bb684694e99d0b791a43e9fc0fa58efc15ec357ac48d25b619f207c41f2fd384"}, + {file = "coverage-7.6.7-cp313-cp313-win32.whl", hash = "sha256:963e4a08cbb0af6623e61492c0ec4c0ec5c5cf74db5f6564f98248d27ee57d30"}, + {file = "coverage-7.6.7-cp313-cp313-win_amd64.whl", hash = "sha256:14045b8bfd5909196a90da145a37f9d335a5d988a83db34e80f41e965fb7cb42"}, + {file = "coverage-7.6.7-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:f2c7a045eef561e9544359a0bf5784b44e55cefc7261a20e730baa9220c83413"}, + {file = "coverage-7.6.7-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5dd4e4a49d9c72a38d18d641135d2fb0bdf7b726ca60a103836b3d00a1182acd"}, + {file = "coverage-7.6.7-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c95e0fa3d1547cb6f021ab72f5c23402da2358beec0a8e6d19a368bd7b0fb37"}, + {file = "coverage-7.6.7-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f63e21ed474edd23f7501f89b53280014436e383a14b9bd77a648366c81dce7b"}, + {file = "coverage-7.6.7-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ead9b9605c54d15be228687552916c89c9683c215370c4a44f1f217d2adcc34d"}, + {file = "coverage-7.6.7-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:0573f5cbf39114270842d01872952d301027d2d6e2d84013f30966313cadb529"}, + {file = "coverage-7.6.7-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:e2c8e3384c12dfa19fa9a52f23eb091a8fad93b5b81a41b14c17c78e23dd1d8b"}, + {file = "coverage-7.6.7-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:70a56a2ec1869e6e9fa69ef6b76b1a8a7ef709972b9cc473f9ce9d26b5997ce3"}, + {file = "coverage-7.6.7-cp313-cp313t-win32.whl", hash = "sha256:dbba8210f5067398b2c4d96b4e64d8fb943644d5eb70be0d989067c8ca40c0f8"}, + {file = "coverage-7.6.7-cp313-cp313t-win_amd64.whl", hash = "sha256:dfd14bcae0c94004baba5184d1c935ae0d1231b8409eb6c103a5fd75e8ecdc56"}, + {file = "coverage-7.6.7-pp39.pp310-none-any.whl", hash = "sha256:0ddcb70b3a3a57581b450571b31cb774f23eb9519c2aaa6176d3a84c9fc57671"}, + {file = "coverage-7.6.7.tar.gz", hash = "sha256:d79d4826e41441c9a118ff045e4bccb9fdbdcb1d02413e7ea6eb5c87b5439d24"}, +] + +[[package]] +name = "coverage" +version = "7.6.7" +extras = ["toml"] +requires_python = ">=3.9" +summary = "Code coverage measurement for Python" +groups = ["dev"] +dependencies = [ + "coverage==7.6.7", + "tomli; python_full_version <= \"3.11.0a6\"", +] +files = [ + {file = "coverage-7.6.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:108bb458827765d538abcbf8288599fee07d2743357bdd9b9dad456c287e121e"}, + {file = "coverage-7.6.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c973b2fe4dc445cb865ab369df7521df9c27bf40715c837a113edaa2aa9faf45"}, + {file = 
"coverage-7.6.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c6b24007c4bcd0b19fac25763a7cac5035c735ae017e9a349b927cfc88f31c1"}, + {file = "coverage-7.6.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:acbb8af78f8f91b3b51f58f288c0994ba63c646bc1a8a22ad072e4e7e0a49f1c"}, + {file = "coverage-7.6.7-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad32a981bcdedb8d2ace03b05e4fd8dace8901eec64a532b00b15217d3677dd2"}, + {file = "coverage-7.6.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:34d23e28ccb26236718a3a78ba72744212aa383141961dd6825f6595005c8b06"}, + {file = "coverage-7.6.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e25bacb53a8c7325e34d45dddd2f2fbae0dbc230d0e2642e264a64e17322a777"}, + {file = "coverage-7.6.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:af05bbba896c4472a29408455fe31b3797b4d8648ed0a2ccac03e074a77e2314"}, + {file = "coverage-7.6.7-cp310-cp310-win32.whl", hash = "sha256:796c9b107d11d2d69e1849b2dfe41730134b526a49d3acb98ca02f4985eeff7a"}, + {file = "coverage-7.6.7-cp310-cp310-win_amd64.whl", hash = "sha256:987a8e3da7da4eed10a20491cf790589a8e5e07656b6dc22d3814c4d88faf163"}, + {file = "coverage-7.6.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7e61b0e77ff4dddebb35a0e8bb5a68bf0f8b872407d8d9f0c726b65dfabe2469"}, + {file = "coverage-7.6.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1a5407a75ca4abc20d6252efeb238377a71ce7bda849c26c7a9bece8680a5d99"}, + {file = "coverage-7.6.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df002e59f2d29e889c37abd0b9ee0d0e6e38c24f5f55d71ff0e09e3412a340ec"}, + {file = "coverage-7.6.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:673184b3156cba06154825f25af33baa2671ddae6343f23175764e65a8c4c30b"}, + {file = "coverage-7.6.7-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e69ad502f1a2243f739f5bd60565d14a278be58be4c137d90799f2c263e7049a"}, + {file = "coverage-7.6.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:60dcf7605c50ea72a14490d0756daffef77a5be15ed1b9fea468b1c7bda1bc3b"}, + {file = "coverage-7.6.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9c2eb378bebb2c8f65befcb5147877fc1c9fbc640fc0aad3add759b5df79d55d"}, + {file = "coverage-7.6.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3c0317288f032221d35fa4cbc35d9f4923ff0dfd176c79c9b356e8ef8ef2dff4"}, + {file = "coverage-7.6.7-cp311-cp311-win32.whl", hash = "sha256:951aade8297358f3618a6e0660dc74f6b52233c42089d28525749fc8267dccd2"}, + {file = "coverage-7.6.7-cp311-cp311-win_amd64.whl", hash = "sha256:5e444b8e88339a2a67ce07d41faabb1d60d1004820cee5a2c2b54e2d8e429a0f"}, + {file = "coverage-7.6.7-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f07ff574986bc3edb80e2c36391678a271d555f91fd1d332a1e0f4b5ea4b6ea9"}, + {file = "coverage-7.6.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:49ed5ee4109258973630c1f9d099c7e72c5c36605029f3a91fe9982c6076c82b"}, + {file = "coverage-7.6.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3e8796434a8106b3ac025fd15417315d7a58ee3e600ad4dbcfddc3f4b14342c"}, + {file = "coverage-7.6.7-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3b925300484a3294d1c70f6b2b810d6526f2929de954e5b6be2bf8caa1f12c1"}, + {file = 
"coverage-7.6.7-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c42ec2c522e3ddd683dec5cdce8e62817afb648caedad9da725001fa530d354"}, + {file = "coverage-7.6.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0266b62cbea568bd5e93a4da364d05de422110cbed5056d69339bd5af5685433"}, + {file = "coverage-7.6.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e5f2a0f161d126ccc7038f1f3029184dbdf8f018230af17ef6fd6a707a5b881f"}, + {file = "coverage-7.6.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c132b5a22821f9b143f87446805e13580b67c670a548b96da945a8f6b4f2efbb"}, + {file = "coverage-7.6.7-cp312-cp312-win32.whl", hash = "sha256:7c07de0d2a110f02af30883cd7dddbe704887617d5c27cf373362667445a4c76"}, + {file = "coverage-7.6.7-cp312-cp312-win_amd64.whl", hash = "sha256:fd49c01e5057a451c30c9b892948976f5d38f2cbd04dc556a82743ba8e27ed8c"}, + {file = "coverage-7.6.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:46f21663e358beae6b368429ffadf14ed0a329996248a847a4322fb2e35d64d3"}, + {file = "coverage-7.6.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:40cca284c7c310d622a1677f105e8507441d1bb7c226f41978ba7c86979609ab"}, + {file = "coverage-7.6.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77256ad2345c29fe59ae861aa11cfc74579c88d4e8dbf121cbe46b8e32aec808"}, + {file = "coverage-7.6.7-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:87ea64b9fa52bf395272e54020537990a28078478167ade6c61da7ac04dc14bc"}, + {file = "coverage-7.6.7-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d608a7808793e3615e54e9267519351c3ae204a6d85764d8337bd95993581a8"}, + {file = "coverage-7.6.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdd94501d65adc5c24f8a1a0eda110452ba62b3f4aeaba01e021c1ed9cb8f34a"}, + {file = "coverage-7.6.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:82c809a62e953867cf57e0548c2b8464207f5f3a6ff0e1e961683e79b89f2c55"}, + {file = "coverage-7.6.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:bb684694e99d0b791a43e9fc0fa58efc15ec357ac48d25b619f207c41f2fd384"}, + {file = "coverage-7.6.7-cp313-cp313-win32.whl", hash = "sha256:963e4a08cbb0af6623e61492c0ec4c0ec5c5cf74db5f6564f98248d27ee57d30"}, + {file = "coverage-7.6.7-cp313-cp313-win_amd64.whl", hash = "sha256:14045b8bfd5909196a90da145a37f9d335a5d988a83db34e80f41e965fb7cb42"}, + {file = "coverage-7.6.7-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:f2c7a045eef561e9544359a0bf5784b44e55cefc7261a20e730baa9220c83413"}, + {file = "coverage-7.6.7-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5dd4e4a49d9c72a38d18d641135d2fb0bdf7b726ca60a103836b3d00a1182acd"}, + {file = "coverage-7.6.7-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c95e0fa3d1547cb6f021ab72f5c23402da2358beec0a8e6d19a368bd7b0fb37"}, + {file = "coverage-7.6.7-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f63e21ed474edd23f7501f89b53280014436e383a14b9bd77a648366c81dce7b"}, + {file = "coverage-7.6.7-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ead9b9605c54d15be228687552916c89c9683c215370c4a44f1f217d2adcc34d"}, + {file = "coverage-7.6.7-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:0573f5cbf39114270842d01872952d301027d2d6e2d84013f30966313cadb529"}, + {file = 
"coverage-7.6.7-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:e2c8e3384c12dfa19fa9a52f23eb091a8fad93b5b81a41b14c17c78e23dd1d8b"}, + {file = "coverage-7.6.7-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:70a56a2ec1869e6e9fa69ef6b76b1a8a7ef709972b9cc473f9ce9d26b5997ce3"}, + {file = "coverage-7.6.7-cp313-cp313t-win32.whl", hash = "sha256:dbba8210f5067398b2c4d96b4e64d8fb943644d5eb70be0d989067c8ca40c0f8"}, + {file = "coverage-7.6.7-cp313-cp313t-win_amd64.whl", hash = "sha256:dfd14bcae0c94004baba5184d1c935ae0d1231b8409eb6c103a5fd75e8ecdc56"}, + {file = "coverage-7.6.7-pp39.pp310-none-any.whl", hash = "sha256:0ddcb70b3a3a57581b450571b31cb774f23eb9519c2aaa6176d3a84c9fc57671"}, + {file = "coverage-7.6.7.tar.gz", hash = "sha256:d79d4826e41441c9a118ff045e4bccb9fdbdcb1d02413e7ea6eb5c87b5439d24"}, +] + +[[package]] +name = "distlib" +version = "0.3.9" +summary = "Distribution utilities" +groups = ["dev"] +files = [ + {file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"}, + {file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"}, +] + +[[package]] +name = "exceptiongroup" +version = "1.2.2" +requires_python = ">=3.7" +summary = "Backport of PEP 654 (exception groups)" +groups = ["dev"] +marker = "python_version < \"3.11\"" +files = [ + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, +] + +[[package]] +name = "execnet" +version = "2.1.1" +requires_python = ">=3.8" +summary = "execnet: rapid multi-Python deployment" +groups = ["dev"] +files = [ + {file = "execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc"}, + {file = "execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3"}, +] + +[[package]] +name = "filelock" +version = "3.16.1" +requires_python = ">=3.8" +summary = "A platform independent file lock." 
+groups = ["dev"] +files = [ + {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, + {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +requires_python = ">=3.7" +summary = "brain-dead simple config-ini parsing" +groups = ["dev"] +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "joblib" +version = "1.4.2" +requires_python = ">=3.8" +summary = "Lightweight pipelining with Python functions" +groups = ["default"] +files = [ + {file = "joblib-1.4.2-py3-none-any.whl", hash = "sha256:06d478d5674cbc267e7496a410ee875abd68e4340feff4490bcb7afb88060ae6"}, + {file = "joblib-1.4.2.tar.gz", hash = "sha256:2382c5816b2636fbd20a09e0f4e9dad4736765fdfb7dca582943b9c1366b3f0e"}, +] + +[[package]] +name = "meson" +version = "1.6.0" +requires_python = ">=3.7" +summary = "A high performance build system" +groups = ["dev"] +files = [ + {file = "meson-1.6.0-py3-none-any.whl", hash = "sha256:234a45f9206c6ee33b473ec1baaef359d20c0b89a71871d58c65a6db6d98fe74"}, + {file = "meson-1.6.0.tar.gz", hash = "sha256:999b65f21c03541cf11365489c1fad22e2418bb0c3d50ca61139f2eec09d5496"}, +] + +[[package]] +name = "meson-python" +version = "0.17.1" +requires_python = ">=3.7" +summary = "Meson Python build backend (PEP 517)" +groups = ["dev"] +dependencies = [ + "meson>=0.63.3; python_version < \"3.12\"", + "meson>=1.2.3; python_version >= \"3.12\"", + "packaging>=19.0", + "pyproject-metadata>=0.7.1", + "tomli>=1.0.0; python_version < \"3.11\"", +] +files = [ + {file = "meson_python-0.17.1-py3-none-any.whl", hash = "sha256:30a75c52578ef14aff8392677b09c39346e0a24d2b2c6204b8ed30583c11269c"}, + {file = "meson_python-0.17.1.tar.gz", hash = "sha256:efb91f69f2e19eef7bc9a471ed2a4e730088cc6b39eacaf3e49fc4f930eb5f83"}, +] + +[[package]] +name = "ninja" +version = "1.11.1.1" +summary = "Ninja is a small build system with a focus on speed" +groups = ["dev"] +files = [ + {file = "ninja-1.11.1.1-py2.py3-none-macosx_10_9_universal2.macosx_10_9_x86_64.macosx_11_0_arm64.macosx_11_0_universal2.whl", hash = "sha256:376889c76d87b95b5719fdd61dd7db193aa7fd4432e5d52d2e44e4c497bdbbee"}, + {file = "ninja-1.11.1.1-py2.py3-none-manylinux1_i686.manylinux_2_5_i686.whl", hash = "sha256:ecf80cf5afd09f14dcceff28cb3f11dc90fb97c999c89307aea435889cb66877"}, + {file = "ninja-1.11.1.1-py2.py3-none-manylinux1_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:84502ec98f02a037a169c4b0d5d86075eaf6afc55e1879003d6cab51ced2ea4b"}, + {file = "ninja-1.11.1.1-py2.py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:73b93c14046447c7c5cc892433d4fae65d6364bec6685411cb97a8bcf815f93a"}, + {file = "ninja-1.11.1.1-py2.py3-none-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:18302d96a5467ea98b68e1cae1ae4b4fb2b2a56a82b955193c637557c7273dbd"}, + {file = "ninja-1.11.1.1-py2.py3-none-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:aad34a70ef15b12519946c5633344bc775a7656d789d9ed5fdb0d456383716ef"}, + {file = "ninja-1.11.1.1-py2.py3-none-musllinux_1_1_aarch64.whl", hash = "sha256:d491fc8d89cdcb416107c349ad1e3a735d4c4af5e1cb8f5f727baca6350fdaea"}, + {file = "ninja-1.11.1.1-py2.py3-none-musllinux_1_1_i686.whl", hash 
= "sha256:7563ce1d9fe6ed5af0b8dd9ab4a214bf4ff1f2f6fd6dc29f480981f0f8b8b249"}, + {file = "ninja-1.11.1.1-py2.py3-none-musllinux_1_1_ppc64le.whl", hash = "sha256:9df724344202b83018abb45cb1efc22efd337a1496514e7e6b3b59655be85205"}, + {file = "ninja-1.11.1.1-py2.py3-none-musllinux_1_1_s390x.whl", hash = "sha256:3e0f9be5bb20d74d58c66cc1c414c3e6aeb45c35b0d0e41e8d739c2c0d57784f"}, + {file = "ninja-1.11.1.1-py2.py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:76482ba746a2618eecf89d5253c0d1e4f1da1270d41e9f54dfbd91831b0f6885"}, + {file = "ninja-1.11.1.1-py2.py3-none-win32.whl", hash = "sha256:fa2ba9d74acfdfbfbcf06fad1b8282de8a7a8c481d9dee45c859a8c93fcc1082"}, + {file = "ninja-1.11.1.1-py2.py3-none-win_amd64.whl", hash = "sha256:95da904130bfa02ea74ff9c0116b4ad266174fafb1c707aa50212bc7859aebf1"}, + {file = "ninja-1.11.1.1-py2.py3-none-win_arm64.whl", hash = "sha256:185e0641bde601e53841525c4196278e9aaf4463758da6dd1e752c0a0f54136a"}, + {file = "ninja-1.11.1.1.tar.gz", hash = "sha256:9d793b08dd857e38d0b6ffe9e6b7145d7c485a42dcfea04905ca0cdb6017cc3c"}, +] + +[[package]] +name = "nox" +version = "2024.10.9" +requires_python = ">=3.8" +summary = "Flexible test automation." +groups = ["dev"] +dependencies = [ + "argcomplete<4,>=1.9.4", + "colorlog<7,>=2.6.1", + "packaging>=20.9", + "tomli>=1; python_version < \"3.11\"", + "virtualenv>=20.14.1", +] +files = [ + {file = "nox-2024.10.9-py3-none-any.whl", hash = "sha256:1d36f309a0a2a853e9bccb76bbef6bb118ba92fa92674d15604ca99adeb29eab"}, + {file = "nox-2024.10.9.tar.gz", hash = "sha256:7aa9dc8d1c27e9f45ab046ffd1c3b2c4f7c91755304769df231308849ebded95"}, +] + +[[package]] +name = "nox" +version = "2024.10.9" +extras = ["uv"] +requires_python = ">=3.8" +summary = "Flexible test automation." +groups = ["dev"] +dependencies = [ + "nox==2024.10.9", + "uv>=0.1.6", +] +files = [ + {file = "nox-2024.10.9-py3-none-any.whl", hash = "sha256:1d36f309a0a2a853e9bccb76bbef6bb118ba92fa92674d15604ca99adeb29eab"}, + {file = "nox-2024.10.9.tar.gz", hash = "sha256:7aa9dc8d1c27e9f45ab046ffd1c3b2c4f7c91755304769df231308849ebded95"}, +] + +[[package]] +name = "numpy" +version = "2.1.3" +requires_python = ">=3.10" +summary = "Fundamental package for array computing in Python" +groups = ["default"] +files = [ + {file = "numpy-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c894b4305373b9c5576d7a12b473702afdf48ce5369c074ba304cc5ad8730dff"}, + {file = "numpy-2.1.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b47fbb433d3260adcd51eb54f92a2ffbc90a4595f8970ee00e064c644ac788f5"}, + {file = "numpy-2.1.3-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:825656d0743699c529c5943554d223c021ff0494ff1442152ce887ef4f7561a1"}, + {file = "numpy-2.1.3-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:6a4825252fcc430a182ac4dee5a505053d262c807f8a924603d411f6718b88fd"}, + {file = "numpy-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e711e02f49e176a01d0349d82cb5f05ba4db7d5e7e0defd026328e5cfb3226d3"}, + {file = "numpy-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78574ac2d1a4a02421f25da9559850d59457bac82f2b8d7a44fe83a64f770098"}, + {file = "numpy-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c7662f0e3673fe4e832fe07b65c50342ea27d989f92c80355658c7f888fcc83c"}, + {file = "numpy-2.1.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:fa2d1337dc61c8dc417fbccf20f6d1e139896a30721b7f1e832b2bb6ef4eb6c4"}, + {file = "numpy-2.1.3-cp310-cp310-win32.whl", hash = 
"sha256:72dcc4a35a8515d83e76b58fdf8113a5c969ccd505c8a946759b24e3182d1f23"}, + {file = "numpy-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:ecc76a9ba2911d8d37ac01de72834d8849e55473457558e12995f4cd53e778e0"}, + {file = "numpy-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4d1167c53b93f1f5d8a139a742b3c6f4d429b54e74e6b57d0eff40045187b15d"}, + {file = "numpy-2.1.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c80e4a09b3d95b4e1cac08643f1152fa71a0a821a2d4277334c88d54b2219a41"}, + {file = "numpy-2.1.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:576a1c1d25e9e02ed7fa5477f30a127fe56debd53b8d2c89d5578f9857d03ca9"}, + {file = "numpy-2.1.3-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:973faafebaae4c0aaa1a1ca1ce02434554d67e628b8d805e61f874b84e136b09"}, + {file = "numpy-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:762479be47a4863e261a840e8e01608d124ee1361e48b96916f38b119cfda04a"}, + {file = "numpy-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc6f24b3d1ecc1eebfbf5d6051faa49af40b03be1aaa781ebdadcbc090b4539b"}, + {file = "numpy-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:17ee83a1f4fef3c94d16dc1802b998668b5419362c8a4f4e8a491de1b41cc3ee"}, + {file = "numpy-2.1.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:15cb89f39fa6d0bdfb600ea24b250e5f1a3df23f901f51c8debaa6a5d122b2f0"}, + {file = "numpy-2.1.3-cp311-cp311-win32.whl", hash = "sha256:d9beb777a78c331580705326d2367488d5bc473b49a9bc3036c154832520aca9"}, + {file = "numpy-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:d89dd2b6da69c4fff5e39c28a382199ddedc3a5be5390115608345dec660b9e2"}, + {file = "numpy-2.1.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f55ba01150f52b1027829b50d70ef1dafd9821ea82905b63936668403c3b471e"}, + {file = "numpy-2.1.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:13138eadd4f4da03074851a698ffa7e405f41a0845a6b1ad135b81596e4e9958"}, + {file = "numpy-2.1.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:a6b46587b14b888e95e4a24d7b13ae91fa22386c199ee7b418f449032b2fa3b8"}, + {file = "numpy-2.1.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:0fa14563cc46422e99daef53d725d0c326e99e468a9320a240affffe87852564"}, + {file = "numpy-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8637dcd2caa676e475503d1f8fdb327bc495554e10838019651b76d17b98e512"}, + {file = "numpy-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2312b2aa89e1f43ecea6da6ea9a810d06aae08321609d8dc0d0eda6d946a541b"}, + {file = "numpy-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a38c19106902bb19351b83802531fea19dee18e5b37b36454f27f11ff956f7fc"}, + {file = "numpy-2.1.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:02135ade8b8a84011cbb67dc44e07c58f28575cf9ecf8ab304e51c05528c19f0"}, + {file = "numpy-2.1.3-cp312-cp312-win32.whl", hash = "sha256:e6988e90fcf617da2b5c78902fe8e668361b43b4fe26dbf2d7b0f8034d4cafb9"}, + {file = "numpy-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:0d30c543f02e84e92c4b1f415b7c6b5326cbe45ee7882b6b77db7195fb971e3a"}, + {file = "numpy-2.1.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:96fe52fcdb9345b7cd82ecd34547fca4321f7656d500eca497eb7ea5a926692f"}, + {file = "numpy-2.1.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f653490b33e9c3a4c1c01d41bc2aef08f9475af51146e4a7710c450cf9761598"}, + {file = "numpy-2.1.3-cp313-cp313-macosx_14_0_arm64.whl", hash = 
"sha256:dc258a761a16daa791081d026f0ed4399b582712e6fc887a95af09df10c5ca57"}, + {file = "numpy-2.1.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:016d0f6f5e77b0f0d45d77387ffa4bb89816b57c835580c3ce8e099ef830befe"}, + {file = "numpy-2.1.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c181ba05ce8299c7aa3125c27b9c2167bca4a4445b7ce73d5febc411ca692e43"}, + {file = "numpy-2.1.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5641516794ca9e5f8a4d17bb45446998c6554704d888f86df9b200e66bdcce56"}, + {file = "numpy-2.1.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ea4dedd6e394a9c180b33c2c872b92f7ce0f8e7ad93e9585312b0c5a04777a4a"}, + {file = "numpy-2.1.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b0df3635b9c8ef48bd3be5f862cf71b0a4716fa0e702155c45067c6b711ddcef"}, + {file = "numpy-2.1.3-cp313-cp313-win32.whl", hash = "sha256:50ca6aba6e163363f132b5c101ba078b8cbd3fa92c7865fd7d4d62d9779ac29f"}, + {file = "numpy-2.1.3-cp313-cp313-win_amd64.whl", hash = "sha256:747641635d3d44bcb380d950679462fae44f54b131be347d5ec2bce47d3df9ed"}, + {file = "numpy-2.1.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:996bb9399059c5b82f76b53ff8bb686069c05acc94656bb259b1d63d04a9506f"}, + {file = "numpy-2.1.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:45966d859916ad02b779706bb43b954281db43e185015df6eb3323120188f9e4"}, + {file = "numpy-2.1.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:baed7e8d7481bfe0874b566850cb0b85243e982388b7b23348c6db2ee2b2ae8e"}, + {file = "numpy-2.1.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:a9f7f672a3388133335589cfca93ed468509cb7b93ba3105fce780d04a6576a0"}, + {file = "numpy-2.1.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7aac50327da5d208db2eec22eb11e491e3fe13d22653dce51b0f4109101b408"}, + {file = "numpy-2.1.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4394bc0dbd074b7f9b52024832d16e019decebf86caf909d94f6b3f77a8ee3b6"}, + {file = "numpy-2.1.3-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:50d18c4358a0a8a53f12a8ba9d772ab2d460321e6a93d6064fc22443d189853f"}, + {file = "numpy-2.1.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:14e253bd43fc6b37af4921b10f6add6925878a42a0c5fe83daee390bca80bc17"}, + {file = "numpy-2.1.3-cp313-cp313t-win32.whl", hash = "sha256:08788d27a5fd867a663f6fc753fd7c3ad7e92747efc73c53bca2f19f8bc06f48"}, + {file = "numpy-2.1.3-cp313-cp313t-win_amd64.whl", hash = "sha256:2564fbdf2b99b3f815f2107c1bbc93e2de8ee655a69c261363a1172a79a257d4"}, + {file = "numpy-2.1.3-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:4f2015dfe437dfebbfce7c85c7b53d81ba49e71ba7eadbf1df40c915af75979f"}, + {file = "numpy-2.1.3-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:3522b0dfe983a575e6a9ab3a4a4dfe156c3e428468ff08ce582b9bb6bd1d71d4"}, + {file = "numpy-2.1.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c006b607a865b07cd981ccb218a04fc86b600411d83d6fc261357f1c0966755d"}, + {file = "numpy-2.1.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e14e26956e6f1696070788252dcdff11b4aca4c3e8bd166e0df1bb8f315a67cb"}, + {file = "numpy-2.1.3.tar.gz", hash = "sha256:aa08e04e08aaf974d4458def539dece0d28146d866a39da5639596f4921fd761"}, +] + +[[package]] +name = "packaging" +version = "24.2" +requires_python = ">=3.8" +summary = "Core utilities for Python packages" +groups = ["dev"] +files = [ + {file = "packaging-24.2-py3-none-any.whl", hash = 
"sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, + {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, +] + +[[package]] +name = "platformdirs" +version = "4.3.6" +requires_python = ">=3.8" +summary = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." +groups = ["dev"] +files = [ + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, +] + +[[package]] +name = "pluggy" +version = "1.5.0" +requires_python = ">=3.8" +summary = "plugin and hook calling mechanisms for python" +groups = ["dev"] +files = [ + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, +] + +[[package]] +name = "psutil" +version = "6.1.0" +requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +summary = "Cross-platform lib for process and system monitoring in Python." +groups = ["dev"] +files = [ + {file = "psutil-6.1.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6e2dcd475ce8b80522e51d923d10c7871e45f20918e027ab682f94f1c6351688"}, + {file = "psutil-6.1.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:0895b8414afafc526712c498bd9de2b063deaac4021a3b3c34566283464aff8e"}, + {file = "psutil-6.1.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9dcbfce5d89f1d1f2546a2090f4fcf87c7f669d1d90aacb7d7582addece9fb38"}, + {file = "psutil-6.1.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:498c6979f9c6637ebc3a73b3f87f9eb1ec24e1ce53a7c5173b8508981614a90b"}, + {file = "psutil-6.1.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d905186d647b16755a800e7263d43df08b790d709d575105d419f8b6ef65423a"}, + {file = "psutil-6.1.0-cp37-abi3-win32.whl", hash = "sha256:1ad45a1f5d0b608253b11508f80940985d1d0c8f6111b5cb637533a0e6ddc13e"}, + {file = "psutil-6.1.0-cp37-abi3-win_amd64.whl", hash = "sha256:a8fb3752b491d246034fa4d279ff076501588ce8cbcdbb62c32fd7a377d996be"}, + {file = "psutil-6.1.0.tar.gz", hash = "sha256:353815f59a7f64cdaca1c0307ee13558a0512f6db064e92fe833784f08539c7a"}, +] + +[[package]] +name = "pyproject-metadata" +version = "0.9.0" +requires_python = ">=3.7" +summary = "PEP 621 metadata parsing" +groups = ["dev"] +dependencies = [ + "packaging>=19.0", + "typing-extensions; python_version < \"3.8\"", +] +files = [ + {file = "pyproject_metadata-0.9.0-py3-none-any.whl", hash = "sha256:fc862aab066a2e87734333293b0af5845fe8ac6cb69c451a41551001e923be0b"}, + {file = "pyproject_metadata-0.9.0.tar.gz", hash = "sha256:8511c00a4cad96686af6a6b4143433298beb96105a9379afdc9b0328f4f260c9"}, +] + +[[package]] +name = "pytest" +version = "8.3.3" +requires_python = ">=3.8" +summary = "pytest: simple powerful testing with Python" +groups = ["dev"] +dependencies = [ + "colorama; sys_platform == \"win32\"", + "exceptiongroup>=1.0.0rc8; python_version < \"3.11\"", + "iniconfig", + "packaging", + "pluggy<2,>=1.5", + "tomli>=1; python_version < \"3.11\"", +] +files = [ + {file = "pytest-8.3.3-py3-none-any.whl", hash = 
"sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"}, + {file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"}, +] + +[[package]] +name = "pytest-lazy-fixtures" +version = "1.1.1" +requires_python = "<4.0,>=3.8" +summary = "Allows you to use fixtures in @pytest.mark.parametrize." +groups = ["dev"] +dependencies = [ + "pytest>=7", +] +files = [ + {file = "pytest_lazy_fixtures-1.1.1-py3-none-any.whl", hash = "sha256:a4b396a361faf56c6305535fd0175ce82902ca7cf668c4d812a25ed2bcde8183"}, + {file = "pytest_lazy_fixtures-1.1.1.tar.gz", hash = "sha256:0c561f0d29eea5b55cf29b9264a3241999ffdb74c6b6e8c4ccc0bd2c934d01ed"}, +] + +[[package]] +name = "pytest-randomly" +version = "3.16.0" +requires_python = ">=3.9" +summary = "Pytest plugin to randomly order tests and control random.seed." +groups = ["dev"] +dependencies = [ + "importlib-metadata>=3.6; python_version < \"3.10\"", + "pytest", +] +files = [ + {file = "pytest_randomly-3.16.0-py3-none-any.whl", hash = "sha256:8633d332635a1a0983d3bba19342196807f6afb17c3eef78e02c2f85dade45d6"}, + {file = "pytest_randomly-3.16.0.tar.gz", hash = "sha256:11bf4d23a26484de7860d82f726c0629837cf4064b79157bd18ec9d41d7feb26"}, +] + +[[package]] +name = "pytest-xdist" +version = "3.6.1" +requires_python = ">=3.8" +summary = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs" +groups = ["dev"] +dependencies = [ + "execnet>=2.1", + "pytest>=7.0.0", +] +files = [ + {file = "pytest_xdist-3.6.1-py3-none-any.whl", hash = "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7"}, + {file = "pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d"}, +] + +[[package]] +name = "pytest-xdist" +version = "3.6.1" +extras = ["psutil"] +requires_python = ">=3.8" +summary = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs" +groups = ["dev"] +dependencies = [ + "psutil>=3.0", + "pytest-xdist==3.6.1", +] +files = [ + {file = "pytest_xdist-3.6.1-py3-none-any.whl", hash = "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7"}, + {file = "pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d"}, +] + +[[package]] +name = "ruff" +version = "0.7.4" +requires_python = ">=3.7" +summary = "An extremely fast Python linter and code formatter, written in Rust." 
+groups = ["dev"] +files = [ + {file = "ruff-0.7.4-py3-none-linux_armv6l.whl", hash = "sha256:a4919925e7684a3f18e18243cd6bea7cfb8e968a6eaa8437971f681b7ec51478"}, + {file = "ruff-0.7.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:cfb365c135b830778dda8c04fb7d4280ed0b984e1aec27f574445231e20d6c63"}, + {file = "ruff-0.7.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:63a569b36bc66fbadec5beaa539dd81e0527cb258b94e29e0531ce41bacc1f20"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d06218747d361d06fd2fdac734e7fa92df36df93035db3dc2ad7aa9852cb109"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e0cea28d0944f74ebc33e9f934238f15c758841f9f5edd180b5315c203293452"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80094ecd4793c68b2571b128f91754d60f692d64bc0d7272ec9197fdd09bf9ea"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:997512325c6620d1c4c2b15db49ef59543ef9cd0f4aa8065ec2ae5103cedc7e7"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00b4cf3a6b5fad6d1a66e7574d78956bbd09abfd6c8a997798f01f5da3d46a05"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7dbdc7d8274e1422722933d1edddfdc65b4336abf0b16dfcb9dedd6e6a517d06"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e92dfb5f00eaedb1501b2f906ccabfd67b2355bdf117fea9719fc99ac2145bc"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3bd726099f277d735dc38900b6a8d6cf070f80828877941983a57bca1cd92172"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:2e32829c429dd081ee5ba39aef436603e5b22335c3d3fff013cd585806a6486a"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:662a63b4971807623f6f90c1fb664613f67cc182dc4d991471c23c541fee62dd"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:876f5e09eaae3eb76814c1d3b68879891d6fde4824c015d48e7a7da4cf066a3a"}, + {file = "ruff-0.7.4-py3-none-win32.whl", hash = "sha256:75c53f54904be42dd52a548728a5b572344b50d9b2873d13a3f8c5e3b91f5cac"}, + {file = "ruff-0.7.4-py3-none-win_amd64.whl", hash = "sha256:745775c7b39f914238ed1f1b0bebed0b9155a17cd8bc0b08d3c87e4703b990d6"}, + {file = "ruff-0.7.4-py3-none-win_arm64.whl", hash = "sha256:11bff065102c3ae9d3ea4dc9ecdfe5a5171349cdd0787c1fc64761212fc9cf1f"}, + {file = "ruff-0.7.4.tar.gz", hash = "sha256:cd12e35031f5af6b9b93715d8c4f40360070b2041f81273d0527683d5708fce2"}, +] + +[[package]] +name = "scikit-learn" +version = "1.5.2" +requires_python = ">=3.9" +summary = "A set of python modules for machine learning and data mining" +groups = ["default"] +dependencies = [ + "joblib>=1.2.0", + "numpy>=1.19.5", + "scipy>=1.6.0", + "threadpoolctl>=3.1.0", +] +files = [ + {file = "scikit_learn-1.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:299406827fb9a4f862626d0fe6c122f5f87f8910b86fe5daa4c32dcd742139b6"}, + {file = "scikit_learn-1.5.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:2d4cad1119c77930b235579ad0dc25e65c917e756fe80cab96aa3b9428bd3fb0"}, + {file = "scikit_learn-1.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c412ccc2ad9bf3755915e3908e677b367ebc8d010acbb3f182814524f2e5540"}, + {file = "scikit_learn-1.5.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:3a686885a4b3818d9e62904d91b57fa757fc2bed3e465c8b177be652f4dd37c8"}, + {file = "scikit_learn-1.5.2-cp310-cp310-win_amd64.whl", hash = "sha256:c15b1ca23d7c5f33cc2cb0a0d6aaacf893792271cddff0edbd6a40e8319bc113"}, + {file = "scikit_learn-1.5.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:03b6158efa3faaf1feea3faa884c840ebd61b6484167c711548fce208ea09445"}, + {file = "scikit_learn-1.5.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:1ff45e26928d3b4eb767a8f14a9a6efbf1cbff7c05d1fb0f95f211a89fd4f5de"}, + {file = "scikit_learn-1.5.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f763897fe92d0e903aa4847b0aec0e68cadfff77e8a0687cabd946c89d17e675"}, + {file = "scikit_learn-1.5.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8b0ccd4a902836493e026c03256e8b206656f91fbcc4fde28c57a5b752561f1"}, + {file = "scikit_learn-1.5.2-cp311-cp311-win_amd64.whl", hash = "sha256:6c16d84a0d45e4894832b3c4d0bf73050939e21b99b01b6fd59cbb0cf39163b6"}, + {file = "scikit_learn-1.5.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f932a02c3f4956dfb981391ab24bda1dbd90fe3d628e4b42caef3e041c67707a"}, + {file = "scikit_learn-1.5.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:3b923d119d65b7bd555c73be5423bf06c0105678ce7e1f558cb4b40b0a5502b1"}, + {file = "scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f60021ec1574e56632be2a36b946f8143bf4e5e6af4a06d85281adc22938e0dd"}, + {file = "scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:394397841449853c2290a32050382edaec3da89e35b3e03d6cc966aebc6a8ae6"}, + {file = "scikit_learn-1.5.2-cp312-cp312-win_amd64.whl", hash = "sha256:57cc1786cfd6bd118220a92ede80270132aa353647684efa385a74244a41e3b1"}, + {file = "scikit_learn-1.5.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9a702e2de732bbb20d3bad29ebd77fc05a6b427dc49964300340e4c9328b3f5"}, + {file = "scikit_learn-1.5.2-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:b0768ad641981f5d3a198430a1d31c3e044ed2e8a6f22166b4d546a5116d7908"}, + {file = "scikit_learn-1.5.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:178ddd0a5cb0044464fc1bfc4cca5b1833bfc7bb022d70b05db8530da4bb3dd3"}, + {file = "scikit_learn-1.5.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7284ade780084d94505632241bf78c44ab3b6f1e8ccab3d2af58e0e950f9c12"}, + {file = "scikit_learn-1.5.2-cp313-cp313-win_amd64.whl", hash = "sha256:b7b0f9a0b1040830d38c39b91b3a44e1b643f4b36e36567b80b7c6bd2202a27f"}, + {file = "scikit_learn-1.5.2.tar.gz", hash = "sha256:b4237ed7b3fdd0a4882792e68ef2545d5baa50aca3bb45aa7df468138ad8f94d"}, +] + +[[package]] +name = "scipy" +version = "1.14.1" +requires_python = ">=3.10" +summary = "Fundamental algorithms for scientific computing in Python" +groups = ["default"] +dependencies = [ + "numpy<2.3,>=1.23.5", +] +files = [ + {file = "scipy-1.14.1-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:b28d2ca4add7ac16ae8bb6632a3c86e4b9e4d52d3e34267f6e1b0c1f8d87e389"}, + {file = "scipy-1.14.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:d0d2821003174de06b69e58cef2316a6622b60ee613121199cb2852a873f8cf3"}, + {file = "scipy-1.14.1-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:8bddf15838ba768bb5f5083c1ea012d64c9a444e16192762bd858f1e126196d0"}, + {file = "scipy-1.14.1-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:97c5dddd5932bd2a1a31c927ba5e1463a53b87ca96b5c9bdf5dfd6096e27efc3"}, + {file = 
"scipy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ff0a7e01e422c15739ecd64432743cf7aae2b03f3084288f399affcefe5222d"}, + {file = "scipy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e32dced201274bf96899e6491d9ba3e9a5f6b336708656466ad0522d8528f69"}, + {file = "scipy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8426251ad1e4ad903a4514712d2fa8fdd5382c978010d1c6f5f37ef286a713ad"}, + {file = "scipy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:a49f6ed96f83966f576b33a44257d869756df6cf1ef4934f59dd58b25e0327e5"}, + {file = "scipy-1.14.1-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:2da0469a4ef0ecd3693761acbdc20f2fdeafb69e6819cc081308cc978153c675"}, + {file = "scipy-1.14.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:c0ee987efa6737242745f347835da2cc5bb9f1b42996a4d97d5c7ff7928cb6f2"}, + {file = "scipy-1.14.1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:3a1b111fac6baec1c1d92f27e76511c9e7218f1695d61b59e05e0fe04dc59617"}, + {file = "scipy-1.14.1-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:8475230e55549ab3f207bff11ebfc91c805dc3463ef62eda3ccf593254524ce8"}, + {file = "scipy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:278266012eb69f4a720827bdd2dc54b2271c97d84255b2faaa8f161a158c3b37"}, + {file = "scipy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fef8c87f8abfb884dac04e97824b61299880c43f4ce675dd2cbeadd3c9b466d2"}, + {file = "scipy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b05d43735bb2f07d689f56f7b474788a13ed8adc484a85aa65c0fd931cf9ccd2"}, + {file = "scipy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:716e389b694c4bb564b4fc0c51bc84d381735e0d39d3f26ec1af2556ec6aad94"}, + {file = "scipy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:631f07b3734d34aced009aaf6fedfd0eb3498a97e581c3b1e5f14a04164a456d"}, + {file = "scipy-1.14.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:af29a935803cc707ab2ed7791c44288a682f9c8107bc00f0eccc4f92c08d6e07"}, + {file = "scipy-1.14.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:2843f2d527d9eebec9a43e6b406fb7266f3af25a751aa91d62ff416f54170bc5"}, + {file = "scipy-1.14.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:eb58ca0abd96911932f688528977858681a59d61a7ce908ffd355957f7025cfc"}, + {file = "scipy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:30ac8812c1d2aab7131a79ba62933a2a76f582d5dbbc695192453dae67ad6310"}, + {file = "scipy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f9ea80f2e65bdaa0b7627fb00cbeb2daf163caa015e59b7516395fe3bd1e066"}, + {file = "scipy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:edaf02b82cd7639db00dbff629995ef185c8df4c3ffa71a5562a595765a06ce1"}, + {file = "scipy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:2ff38e22128e6c03ff73b6bb0f85f897d2362f8c052e3b8ad00532198fbdae3f"}, + {file = "scipy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1729560c906963fc8389f6aac023739ff3983e727b1a4d87696b7bf108316a79"}, + {file = "scipy-1.14.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:4079b90df244709e675cdc8b93bfd8a395d59af40b72e339c2287c91860deb8e"}, + {file = "scipy-1.14.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e0cf28db0f24a38b2a0ca33a85a54852586e43cf6fd876365c86e0657cfe7d73"}, + {file = "scipy-1.14.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = 
"sha256:0c2f95de3b04e26f5f3ad5bb05e74ba7f68b837133a4492414b3afd79dfe540e"}, + {file = "scipy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b99722ea48b7ea25e8e015e8341ae74624f72e5f21fc2abd45f3a93266de4c5d"}, + {file = "scipy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5149e3fd2d686e42144a093b206aef01932a0059c2a33ddfa67f5f035bdfe13e"}, + {file = "scipy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e4f5a7c49323533f9103d4dacf4e4f07078f360743dec7f7596949149efeec06"}, + {file = "scipy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:baff393942b550823bfce952bb62270ee17504d02a1801d7fd0719534dfb9c84"}, + {file = "scipy-1.14.1.tar.gz", hash = "sha256:5a275584e726026a5699459aa72f828a610821006228e841b94275c4a7c08417"}, +] + +[[package]] +name = "setuptools" +version = "75.5.0" +requires_python = ">=3.9" +summary = "Easily download, build, install, upgrade, and uninstall Python packages" +groups = ["default", "dev"] +files = [ + {file = "setuptools-75.5.0-py3-none-any.whl", hash = "sha256:87cb777c3b96d638ca02031192d40390e0ad97737e27b6b4fa831bea86f2f829"}, + {file = "setuptools-75.5.0.tar.gz", hash = "sha256:5c4ccb41111392671f02bb5f8436dfc5a9a7185e80500531b133f5775c4163ef"}, +] + +[[package]] +name = "threadpoolctl" +version = "3.5.0" +requires_python = ">=3.8" +summary = "threadpoolctl" +groups = ["default"] +files = [ + {file = "threadpoolctl-3.5.0-py3-none-any.whl", hash = "sha256:56c1e26c150397e58c4926da8eeee87533b1e32bef131bd4bf6a2f45f3185467"}, + {file = "threadpoolctl-3.5.0.tar.gz", hash = "sha256:082433502dd922bf738de0d8bcc4fdcbf0979ff44c42bd40f5af8a282f6fa107"}, +] + +[[package]] +name = "tomli" +version = "2.1.0" +requires_python = ">=3.8" +summary = "A lil' TOML parser" +groups = ["dev"] +marker = "python_version < \"3.11\"" +files = [ + {file = "tomli-2.1.0-py3-none-any.whl", hash = "sha256:a5c57c3d1c56f5ccdf89f6523458f60ef716e210fc47c4cfb188c5ba473e0391"}, + {file = "tomli-2.1.0.tar.gz", hash = "sha256:3f646cae2aec94e17d04973e4249548320197cfabdf130015d023de4b74d8ab8"}, +] + +[[package]] +name = "uv" +version = "0.5.2" +requires_python = ">=3.8" +summary = "An extremely fast Python package and project manager, written in Rust." 
+groups = ["dev"] +files = [ + {file = "uv-0.5.2-py3-none-linux_armv6l.whl", hash = "sha256:7bde66f13571e437fd45f32f5742ab53d5e011b4edb1c74cb74cb8b1cbb828b5"}, + {file = "uv-0.5.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:d0834c6b37750c045bbea80600d3ae3e95becc4db148f5c0d0bc3ec6a7924e8f"}, + {file = "uv-0.5.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:a8a9897dd7657258c53f41aecdbe787da99f4fc0775f19826ab65cc0a7136cbf"}, + {file = "uv-0.5.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.musllinux_1_1_aarch64.whl", hash = "sha256:15c7ffa08ae21abd221dbdf9ba25c8969235f587cec6df8035552434e5ca1cc5"}, + {file = "uv-0.5.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d1fe4e025dbb9ec5c9250bfc1231847b8487706538f94d10c769f0a54db3e0af"}, + {file = "uv-0.5.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfba5b0070652da4174083b78852f3ab3d262ba1c8b63a4d5ae497263b02b834"}, + {file = "uv-0.5.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:dfcd8275ff8cb59d5f26f826a44270b2fe8f38aa7188d7355c48d3e9b759d0c0"}, + {file = "uv-0.5.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71467545d51883d1af7094c8f6da69b55e7d49b742c2dc707d644676dcb66515"}, + {file = "uv-0.5.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5052758d374dd769efd0c70b4789ffb08439567eb114ad8fe728536bb5cc5299"}, + {file = "uv-0.5.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:374e9498e155fcaa8728a6770b84f03781106d705332f4ec059e1cc93c8f4d8a"}, + {file = "uv-0.5.2-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:675ca34829ceca3e9de395cf05e8f881334a24488f97dd923c463830270d52a7"}, + {file = "uv-0.5.2-py3-none-musllinux_1_1_armv7l.whl", hash = "sha256:c9795b990fb0b2a18d3a8cef8822e13c6a6f438bc16d34ccf01d931c76cfd5da"}, + {file = "uv-0.5.2-py3-none-musllinux_1_1_i686.whl", hash = "sha256:27d666da8fbb0f87d9df67abf9feea0da4ee1336730f2c4be29a11f3feaa0a29"}, + {file = "uv-0.5.2-py3-none-musllinux_1_1_ppc64le.whl", hash = "sha256:67776d34cba359c63919c5ad50331171261d2ec7a83fd07f032eb8cc22e22b8e"}, + {file = "uv-0.5.2-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:772b32d157ec8f27c0099ecac94cf5cd298bce72f1a1f512205591de4e9f0c5c"}, + {file = "uv-0.5.2-py3-none-win32.whl", hash = "sha256:2597e91be45b3f4458d0d16a5a1cda7e93af7d6dbfddf251aae5377f9187fa88"}, + {file = "uv-0.5.2-py3-none-win_amd64.whl", hash = "sha256:a4d4fdad03e6dc3e8216192b8a12bcf2c71c8b12046e755575c7f262cbb61924"}, + {file = "uv-0.5.2.tar.gz", hash = "sha256:89e60ad9601f35f187326de84f35e7517c6eb1438359da42ec85cfd9c1895957"}, +] + +[[package]] +name = "virtualenv" +version = "20.27.1" +requires_python = ">=3.8" +summary = "Virtual Python Environment builder" +groups = ["dev"] +dependencies = [ + "distlib<1,>=0.3.7", + "filelock<4,>=3.12.2", + "importlib-metadata>=6.6; python_version < \"3.8\"", + "platformdirs<5,>=3.9.1", +] +files = [ + {file = "virtualenv-20.27.1-py3-none-any.whl", hash = "sha256:f11f1b8a29525562925f745563bfd48b189450f61fb34c4f9cc79dd5aa32a1f4"}, + {file = "virtualenv-20.27.1.tar.gz", hash = "sha256:142c6be10212543b32c6c45d3d3893dff89112cc588b7d0879ae5a1ec03a47ba"}, +] diff --git a/pyproject.toml b/pyproject.toml index d99be3c..fa457d0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,23 +1,90 @@ [project] -name = "python-glmnet" -version = "2.2.2.post2" +name = "python_glmnet" description = "Python wrapper for glmnet" +authors = [ + {name="Civis Analytics Inc", email="opensource@replicahq.com"} +] 
+
+[tool.coverage.run]
+branch = true
+source = ["glmnet"]
+
+[tool.coverage.report]
+show_missing = true
+fail_under = 100
+
+[tool.pytest.ini_options]
+pythonpath = "."
+addopts = [
+    "--import-mode=importlib",
+]
+filterwarnings = [
+    "ignore::UserWarning",
+    "ignore::DeprecationWarning:.*(jupyter_client).*",
+]
+
+
+[tool.pdm]
+
+[tool.pdm.options]
+install = ["--no-isolation"]
+
+[tool.pdm.scripts]
+format = "ruff format src/"
+lint = "ruff check --fix src/"
+test = "nox -s tests"
+all = {composite = ["format", "lint", "test"]}
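+# The scripts above run through PDM, e.g.:
+#
+#   pdm run format   # ruff format src/
+#   pdm run lint     # ruff check --fix src/
+#   pdm run all      # composite: format, then lint, then test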
+
+[tool.pdm.build]
+includes = []
+
+[dependency-groups]
+dev = [
     "setuptools",
-    "wheel"
+    "meson-python>=0.16.0",
+    "ninja>=1.8.2",
+    "ruff>=0.7.3",
+    "coverage[toml]>=7.6.7",
+    "pytest>=8.3.3",
+    "nox[uv]>=2024.10.9",
+    "pytest-lazy-fixtures>=1.1.1",
+    "pytest-randomly>=3.16.0",
+    "pytest-xdist[psutil]>=3.6.1",
 ]
diff --git a/renovate.json b/renovate.json
new file mode 100644
index 0000000..39a2b6e
--- /dev/null
+++ b/renovate.json
@@ -0,0 +1,6 @@
+{
+  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+  "extends": [
+    "config:base"
+  ]
+}
diff --git a/ruff.toml b/ruff.toml
new file mode 100644
index 0000000..adedf29
--- /dev/null
+++ b/ruff.toml
@@ -0,0 +1,98 @@
+line-length = 120
+target-version = "py312"
+
+[lint]
+select = [
+    "A",
+    "ARG",
+    "B",
+    "C",
+    "DTZ",
+    "E",
+    "EM",
+    "F",
+    "FBT",
+    "I",
+    "I001",
+    "ICN",
+    "ISC",
+    "N",
+    "PLC",
+    "PLE",
+    "PLR",
+    "PLW",
+    "Q",
+    "RUF",
+    "S",
+    "T",
+    "TID",
+    "UP",
+    "W",
+    "YTT",
+]
+ignore = [
+    "B027",
+    "C901",
+    "D100",
+    "D103",
+    "D205",
+    "D212",
+    "D415",
+    "E203",
+    "E501",
+    "FBT001",
+    "FBT002",
+    "FBT003",
+    "ISC001",
+    "N802",
+    "N803",
+    "N806",
+    "PLR0911",
+    "PLR0912",
+    "PLR0913",
+    "PLR0915",
+    "S105",
+    "S106",
+    "S107",
+    "ARG001",
+    "UP007",
+]
+unfixable = [
+    "F401",
+    "UP007",
+]
+exclude = [
+    ".git",
+    ".hg",
+    "__pycache__",
+    "_bin/*",
+    "_build/*",
+    "_ig_fbcode_wheel/*",
+    "buck-out/*",
+    "third-party-buck/*",
+    "third-party2/*",
+    "dist",
+    ".venv",
+    ".nox",
+    ".mypy_cache",
+    ".pytype",
+    ".svn",
+    "__pypackages__",
+]
+
+[format]
+# Like Black, use double quotes for strings.
+quote-style = "double"
+
+# Like Black, indent with spaces, rather than tabs.
+indent-style = "space"
+
+# Like Black, respect magic trailing commas.
+skip-magic-trailing-comma = false
+
+# Like Black, automatically detect the appropriate line ending.
+line-ending = "auto"
+
+[lint.extend-per-file-ignores]
+# Stop ruff from complaining about "assert" in the unit tests
+"test*.py" = ["S101"]
\ No newline at end of file
diff --git a/setup.py b/setup.py
deleted file mode 100644
index 87f8ef2..0000000
--- a/setup.py
+++ /dev/null
@@ -1,99 +0,0 @@
-import sys
-import os
-
-# `Extension` from setuptools cannot compile Fortran code, so we have to use
-# the one from numpy. To do so, we also need to use the `setup` function
-# from numpy, not from setuptools.
-# Confusingly, we need to explicitly import setuptools *before* importing
-# from numpy, so that numpy can internally detect and use the `setup` function
-# from setuptools.
-# References: https://stackoverflow.com/a/51691203
-# and https://stackoverflow.com/a/55358607
-import setuptools  # noqa: F401
-try:
-    from numpy.distutils.core import Extension, setup
-except ImportError:
-    sys.exit("install requires: 'numpy'."
-             " use pip or easy_install."
-             " \n $ pip install numpy")
-
-
-_VERSION = "2.2.2.post2"
-
-f_compile_args = ['-ffixed-form', '-fdefault-real-8']
-
-
-def read(fname):
-    with open(os.path.join(os.path.dirname(__file__), fname)) as _in:
-        return _in.read()
-
-
-def get_lib_dir(dylib):
-    import subprocess
-    from os.path import realpath, dirname
-
-    p = subprocess.Popen("gfortran -print-file-name={}".format(dylib),
-                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                         shell=True)
-    retcode = p.wait()
-    if retcode != 0:
-        raise Exception("Failed to find {}".format(dylib))
-
-    libdir = dirname(realpath(p.communicate()[0].strip().decode('ascii')))
-
-    return libdir
-
-
-if sys.platform == 'darwin':
-    GFORTRAN_LIB = get_lib_dir('libgfortran.5.dylib')
-    QUADMATH_LIB = get_lib_dir('libquadmath.0.dylib')
-    ARGS = ["-Wl,-rpath,{}:{}".format(GFORTRAN_LIB, QUADMATH_LIB)]
-    f_compile_args += ARGS
-    library_dirs = [GFORTRAN_LIB, QUADMATH_LIB]
-else:
-    library_dirs = None
-
-
-glmnet_lib = Extension(name='_glmnet',
-                       sources=['glmnet/_glmnet.pyf',
-                                'glmnet/src/glmnet/glmnet5.f90'],
-                       extra_f90_compile_args=f_compile_args,
-                       library_dirs=library_dirs,
-                       )
-
-if __name__ == "__main__":
-    setup(name="glmnet",
-          version=_VERSION,
-          description="Python wrapper for glmnet",
-          long_description=read('README.rst'),
-          long_description_content_type="text/x-rst",
-          author="Civis Analytics Inc",
-          author_email="opensource@replicahq.com",
-          url="https://github.com/replicahq/python-glmnet",
-          install_requires=[
-              "numpy>=1.9.2",
-              "scikit-learn>=0.18.0",
-              "scipy>=0.14.1",
-              "joblib>=0.14.1",
-          ],
-          python_requires=">=3.9",
-          # We need pkg_resources, shipped with setuptools,
-          # for version numbering.
-          setup_requires=["setuptools"],
-          ext_modules=[glmnet_lib],
-          packages=['glmnet'],
-          classifiers=[
-              'Development Status :: 5 - Production/Stable',
-              'Environment :: Console',
-              'Programming Language :: Python',
-              'Programming Language :: Python :: 3',
-              'Programming Language :: Python :: 3.9',
-              'Programming Language :: Python :: 3.10',
-              'Programming Language :: Python :: 3.11',
-              'Programming Language :: Python :: 3 :: Only',
-              'Operating System :: OS Independent',
-              'Intended Audience :: Developers',
-              'Intended Audience :: Science/Research',
-              'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
-              'Topic :: Scientific/Engineering'
-          ])
diff --git a/glmnet/__init__.py b/src/glmnet/__init__.py
similarity index 80%
rename from glmnet/__init__.py
rename to src/glmnet/__init__.py
index da31314..02bd1fa 100644
--- a/glmnet/__init__.py
+++ b/src/glmnet/__init__.py
@@ -1,8 +1,8 @@
 import importlib.metadata
 
-from .logistic import LogitNet
 from .linear import ElasticNet
+from .logistic import LogitNet
 
-__all__ = ['LogitNet', 'ElasticNet']
+__all__ = ["LogitNet", "ElasticNet"]
 
 __version__ = importlib.metadata.version("python-glmnet")
diff --git a/glmnet/doc.py b/src/glmnet/doc.py
similarity index 100%
rename from glmnet/doc.py
rename to src/glmnet/doc.py
diff --git a/src/glmnet/errors.py b/src/glmnet/errors.py
new file mode 100644
index 0000000..5c97d88
--- /dev/null
+++ b/src/glmnet/errors.py
@@ -0,0 +1,66 @@
+# ruff: noqa: PLR2004
+import warnings
+
+
+def _check_error_flag(jerr):
+    """Check the glmnet solver error flag and issue warnings or raise
+    exceptions as appropriate.
+
+    The codes break down roughly as follows:
+
+        jerr == 0: everything is fine
+        jerr > 0: fatal errors such as memory allocation problems
+        jerr < 0: non-fatal errors such as convergence warnings
+    """
+    if jerr == 0:
+        return
+
+    if jerr > 0:
+        _fatal_errors(jerr)
+
+    if jerr < 0:
+        _convergence_errors(jerr)
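+
+
+# A quick illustration of the mapping above (not part of the public API):
+# _check_error_flag(0) returns silently, _check_error_flag(7777) raises
+# ValueError for the zero-variance case, and a small negative flag such as
+# _check_error_flag(-2) only emits a RuntimeWarning about a truncated
+# lambda path.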
{jerr}.", RuntimeWarning, stacklevel=1) + elif jerr > -10000: + warnings.warn( + "Model did not converge for smaller values of lambda, " + f"returning solution for the largest {-1 * (jerr - 1)} values.", + RuntimeWarning, + stacklevel=1, + ) diff --git a/glmnet/_glmnet.pyf b/src/glmnet/glmnet.pyf similarity index 100% rename from glmnet/_glmnet.pyf rename to src/glmnet/glmnet.pyf diff --git a/glmnet/linear.py b/src/glmnet/linear.py similarity index 71% rename from glmnet/linear.py rename to src/glmnet/linear.py index 30ae747..0a6bb26 100644 --- a/glmnet/linear.py +++ b/src/glmnet/linear.py @@ -1,19 +1,23 @@ -import numpy as np +from typing import Final -from scipy.sparse import issparse, csc_matrix +import numpy as np from scipy import stats - +from scipy.sparse import csc_matrix, issparse from sklearn.base import BaseEstimator from sklearn.metrics import r2_score -from sklearn.model_selection import KFold, GroupKFold +from sklearn.model_selection import GroupKFold, KFold from sklearn.utils import check_array, check_X_y -from .errors import _check_error_flag -from _glmnet import elnet, spelnet, solns -from glmnet.util import (_fix_lambda_path, - _check_user_lambda, - _interpolate_model, - _score_lambda_path) +from glmnet._glmnet import elnet, solns, spelnet +from glmnet.errors import _check_error_flag +from glmnet.util import ( + _check_user_lambda, + _fix_lambda_path, + _interpolate_model, + _score_lambda_path, +) + +THERE_ARE_AT_LEAST_THREE_SPLITS: Final[int] = 3 class ElasticNet(BaseEstimator): @@ -133,12 +137,26 @@ class ElasticNet(BaseEstimator): performs within cut_point * standard error of lambda_max_. """ - def __init__(self, alpha=1, n_lambda=100, min_lambda_ratio=1e-4, - lambda_path=None, standardize=True, fit_intercept=True, - lower_limits=-np.inf, upper_limits=np.inf, - cut_point=1.0, n_splits=3, scoring=None, n_jobs=1, tol=1e-7, - max_iter=100000, random_state=None, max_features=None, verbose=False): - + def __init__( + self, + alpha=1, + n_lambda=100, + min_lambda_ratio=1e-4, + lambda_path=None, + standardize=True, + fit_intercept=True, + lower_limits=-np.inf, + upper_limits=np.inf, + cut_point=1.0, + n_splits=3, + scoring=None, + n_jobs=1, + tol=1e-7, + max_iter=100000, + random_state=None, + max_features=None, + verbose=False, + ): self.alpha = alpha self.n_lambda = n_lambda self.min_lambda_ratio = min_lambda_ratio @@ -199,7 +217,7 @@ def fit(self, X, y, sample_weight=None, relative_penalties=None, groups=None): Returns self. 
""" - X, y = check_X_y(X, y, accept_sparse='csr', ensure_min_samples=2) + X, y = check_X_y(X, y, accept_sparse="csr", ensure_min_samples=2) if sample_weight is None: sample_weight = np.ones(X.shape[0]) else: @@ -208,39 +226,50 @@ def fit(self, X, y, sample_weight=None, relative_penalties=None, groups=None): if not np.isscalar(self.lower_limits): self.lower_limits = np.asarray(self.lower_limits) if len(self.lower_limits) != X.shape[1]: - raise ValueError("lower_limits must equal number of features") + msg = "lower_limits must equal number of features" + raise ValueError(msg) if not np.isscalar(self.upper_limits): self.upper_limits = np.asarray(self.upper_limits) if len(self.upper_limits) != X.shape[1]: - raise ValueError("upper_limits must equal number of features") + msg = "upper_limits must equal number of features" + raise ValueError(msg) if any(self.lower_limits > 0) if isinstance(self.lower_limits, np.ndarray) else self.lower_limits > 0: - raise ValueError("lower_limits must be non-positive") + msg = "lower_limits must be non-positive" + raise ValueError(msg) if any(self.upper_limits < 0) if isinstance(self.upper_limits, np.ndarray) else self.upper_limits < 0: - raise ValueError("upper_limits must be positive") + msg = "upper_limits must be positive" + raise ValueError(msg) if self.alpha > 1 or self.alpha < 0: - raise ValueError("alpha must be between 0 and 1") + msg = "alpha must be between 0 and 1" + raise ValueError(msg) - if self.n_splits > 0 and self.n_splits < 3: - raise ValueError("n_splits must be at least 3") + if self.n_splits > 0 and self.n_splits < THERE_ARE_AT_LEAST_THREE_SPLITS: + msg = "n_splits must be at least 3" + raise ValueError(msg) self._fit(X, y, sample_weight, relative_penalties) - if self.n_splits >= 3: + if self.n_splits >= THERE_ARE_AT_LEAST_THREE_SPLITS: if groups is None: self._cv = KFold(n_splits=self.n_splits, shuffle=True, random_state=self.random_state) else: self._cv = GroupKFold(n_splits=self.n_splits) - cv_scores = _score_lambda_path(self, X, y, groups, - sample_weight, - relative_penalties, - self.scoring, - n_jobs=self.n_jobs, - verbose=self.verbose) + cv_scores = _score_lambda_path( + self, + X, + y, + groups, + sample_weight, + relative_penalties, + self.scoring, + n_jobs=self.n_jobs, + verbose=self.verbose, + ) self.cv_mean_score_ = np.atleast_1d(np.mean(cv_scores, axis=0)) self.cv_standard_error_ = np.atleast_1d(stats.sem(cv_scores)) @@ -248,14 +277,16 @@ def fit(self, X, y, sample_weight=None, relative_penalties=None, groups=None): self.lambda_max_inx_ = np.argmax(self.cv_mean_score_) self.lambda_max_ = self.lambda_path_[self.lambda_max_inx_] - target_score = self.cv_mean_score_[self.lambda_max_inx_] -\ - self.cut_point * self.cv_standard_error_[self.lambda_max_inx_] + target_score = ( + self.cv_mean_score_[self.lambda_max_inx_] + - self.cut_point * self.cv_standard_error_[self.lambda_max_inx_] + ) self.lambda_best_inx_ = np.argwhere(self.cv_mean_score_ >= target_score)[0] self.lambda_best_ = self.lambda_path_[self.lambda_best_inx_] self.coef_ = self.coef_path_[..., self.lambda_best_inx_] - self.coef_ = self.coef_.squeeze(axis=self.coef_.ndim-1) + self.coef_ = self.coef_.squeeze(axis=self.coef_.ndim - 1) self.intercept_ = self.intercept_path_[..., self.lambda_best_inx_].squeeze() if self.intercept_.shape == (): # convert 0d array to scalar self.intercept_ = float(self.intercept_) @@ -263,7 +294,6 @@ def fit(self, X, y, sample_weight=None, relative_penalties=None, groups=None): return self def _fit(self, X, y, sample_weight, 
 
             self.coef_ = self.coef_path_[..., self.lambda_best_inx_]
-            self.coef_ = self.coef_.squeeze(axis=self.coef_.ndim-1)
+            self.coef_ = self.coef_.squeeze(axis=self.coef_.ndim - 1)
             self.intercept_ = self.intercept_path_[..., self.lambda_best_inx_].squeeze()
             if self.intercept_.shape == ():  # convert 0d array to scalar
                 self.intercept_ = float(self.intercept_)
 
@@ -263,7 +294,6 @@ def fit(self, X, y, sample_weight=None, relative_penalties=None, groups=None):
         return self
 
     def _fit(self, X, y, sample_weight, relative_penalties):
-
         if self.lambda_path is not None:
             n_lambda = len(self.lambda_path)
             min_lambda_ratio = 1.0
@@ -271,121 +301,111 @@
             n_lambda = self.n_lambda
             min_lambda_ratio = self.min_lambda_ratio
 
-        _y = y.astype(dtype=np.float64, order='F', copy=True)
-        _sample_weight = sample_weight.astype(dtype=np.float64, order='F',
-                                              copy=True)
+        _y = y.astype(dtype=np.float64, order="F", copy=True)
+        _sample_weight = sample_weight.astype(dtype=np.float64, order="F", copy=True)
 
         exclude_vars = 0
 
         if relative_penalties is None:
-            relative_penalties = np.ones(X.shape[1], dtype=np.float64,
-                                         order='F')
+            relative_penalties = np.ones(X.shape[1], dtype=np.float64, order="F")
 
-        coef_bounds = np.empty((2, X.shape[1]), dtype=np.float64, order='F')
+        coef_bounds = np.empty((2, X.shape[1]), dtype=np.float64, order="F")
         coef_bounds[0, :] = self.lower_limits
         coef_bounds[1, :] = self.upper_limits
 
-        if X.shape[1] > X.shape[0]:
-            # the glmnet docs suggest using a different algorithm for the case
-            # of p >> n
-            algo_flag = 2
-        else:
-            algo_flag = 1
-
+        algo_flag = 2 if X.shape[1] > X.shape[0] else 1
         # This is a stopping criterion (nx)
         # R defaults to nx = num_features, and ne = num_features + 1
-        if self.max_features is None:
-            max_features = X.shape[1]
-        else:
-            max_features = self.max_features
-
+        max_features = X.shape[1] if self.max_features is None else self.max_features
         if issparse(X):
             _x = csc_matrix(X, dtype=np.float64, copy=True)
 
-            (self.n_lambda_,
-             self.intercept_path_,
-             ca,
-             ia,
-             nin,
-             _,  # rsq
-             self.lambda_path_,
-             _,  # nlp
-             jerr) = spelnet(algo_flag,
-                             self.alpha,
-                             _x.shape[0],
-                             _x.shape[1],
-                             _x.data,
-                             _x.indptr + 1,  # Fortran uses 1-based indexing
-                             _x.indices + 1,
-                             _y,
-                             _sample_weight,
-                             exclude_vars,
-                             relative_penalties,
-                             coef_bounds,
-                             max_features,
-                             X.shape[1] + 1,
-                             min_lambda_ratio,
-                             self.lambda_path,
-                             self.tol,
-                             n_lambda,
-                             self.standardize,
-                             self.fit_intercept,
-                             self.max_iter)
+            (
+                self.n_lambda_,
+                self.intercept_path_,
+                ca,
+                ia,
+                nin,
+                _,  # rsq
+                self.lambda_path_,
+                _,  # nlp
+                jerr,
+            ) = spelnet(
+                algo_flag,
+                self.alpha,
+                _x.shape[0],
+                _x.shape[1],
+                _x.data,
+                _x.indptr + 1,  # Fortran uses 1-based indexing
+                _x.indices + 1,
+                _y,
+                _sample_weight,
+                exclude_vars,
+                relative_penalties,
+                coef_bounds,
+                max_features,
+                X.shape[1] + 1,
+                min_lambda_ratio,
+                self.lambda_path,
+                self.tol,
+                n_lambda,
+                self.standardize,
+                self.fit_intercept,
+                self.max_iter,
+            )
         else:
-            _x = X.astype(dtype=np.float64, order='F', copy=True)
-
-            (self.n_lambda_,
-             self.intercept_path_,
-             ca,
-             ia,
-             nin,
-             _,  # rsq
-             self.lambda_path_,
-             _,  # nlp
-             jerr) = elnet(algo_flag,
-                           self.alpha,
-                           _x,
-                           _y,
-                           _sample_weight,
-                           exclude_vars,
-                           relative_penalties,
-                           coef_bounds,
-                           X.shape[1] + 1,
-                           min_lambda_ratio,
-                           self.lambda_path,
-                           self.tol,
-                           max_features,
-                           n_lambda,
-                           self.standardize,
-                           self.fit_intercept,
-                           self.max_iter)
+            _x = X.astype(dtype=np.float64, order="F", copy=True)
+            (
+                self.n_lambda_,
+                self.intercept_path_,
+                ca,
+                ia,
+                nin,
+                _,  # rsq
+                self.lambda_path_,
+                _,  # nlp
+                jerr,
+            ) = elnet(
+                algo_flag,
+                self.alpha,
+                _x,
+                _y,
+                _sample_weight,
+                exclude_vars,
+                relative_penalties,
+                coef_bounds,
+                X.shape[1] + 1,
+                min_lambda_ratio,
+                self.lambda_path,
+                self.tol,
+                max_features,
+                n_lambda,
+                self.standardize,
+                self.fit_intercept,
+                self.max_iter,
+            )
 
         # raises ValueError or RuntimeError if self.jerr_ flags a fatal error
         self.jerr_ = jerr
         _check_error_flag(self.jerr_)
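+
+        # A negative (non-fatal) jerr means glmnet stopped early and filled
+        # results for only the first n_lambda_ values, which is why the
+        # pre-allocated outputs are trimmed below.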
self.lambda_path_ = self.lambda_path_[: self.n_lambda_] self.lambda_path_ = _fix_lambda_path(self.lambda_path_) # trim the pre-allocated arrays returned by glmnet to match the actual # number of values found for lambda - self.intercept_path_ = self.intercept_path_[:self.n_lambda_] - ca = ca[:, :self.n_lambda_] - nin = nin[:self.n_lambda_] + self.intercept_path_ = self.intercept_path_[: self.n_lambda_] + ca = ca[:, : self.n_lambda_] + nin = nin[: self.n_lambda_] self.coef_path_ = solns(_x.shape[1], ca, ia, nin) return self def decision_function(self, X, lamb=None): - lambda_best = None - if hasattr(self, 'lambda_best_'): - lambda_best = self.lambda_best_ - + lambda_best = self.lambda_best_ if hasattr(self, "lambda_best_") else None lamb = _check_user_lambda(self.lambda_path_, lambda_best, lamb) - coef, intercept = _interpolate_model(self.lambda_path_, - self.coef_path_, - self.intercept_path_, lamb) + coef, intercept = _interpolate_model(self.lambda_path_, self.coef_path_, self.intercept_path_, lamb) - X = check_array(X, accept_sparse='csr') + X = check_array(X, accept_sparse="csr") z = X.dot(coef) + intercept # drop last dimension (lambda path) when we are predicting for a diff --git a/glmnet/logistic.py b/src/glmnet/logistic.py similarity index 77% rename from glmnet/logistic.py rename to src/glmnet/logistic.py index cc8a190..6bf00ca 100644 --- a/glmnet/logistic.py +++ b/src/glmnet/logistic.py @@ -1,21 +1,29 @@ -import numpy as np +from typing import Final -from scipy.special import expit -from scipy.sparse import issparse, csc_matrix +import numpy as np from scipy import stats - +from scipy.sparse import csc_matrix, issparse +from scipy.special import expit from sklearn.base import BaseEstimator from sklearn.metrics import accuracy_score -from sklearn.model_selection import StratifiedKFold, GroupKFold +from sklearn.model_selection import GroupKFold, StratifiedKFold from sklearn.utils import check_array, check_X_y from sklearn.utils.multiclass import check_classification_targets +from glmnet._glmnet import lognet, lsolns, splognet +from glmnet.util import ( + _check_user_lambda, + _fix_lambda_path, + _interpolate_model, + _score_lambda_path, +) + from .errors import _check_error_flag -from _glmnet import lognet, splognet, lsolns -from glmnet.util import (_fix_lambda_path, - _check_user_lambda, - _interpolate_model, - _score_lambda_path) + +CORRECT_NUMBER_OF_INTERCEPT_DIMS: Final[int] = 2 +CORRECT_NUMBER_OF_COEF_DIMS: Final[int] = 3 +THERE_ARE_AT_LEAST_THREE_SPLITS: Final[int] = 3 +THERE_ARE_TWO_CLASSES: Final[int] = 2 class LogitNet(BaseEstimator): @@ -138,12 +146,26 @@ class LogitNet(BaseEstimator): performs within cut_point * standard error of lambda_max_. 
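
    Examples
    --------
    A minimal usage sketch (the synthetic data and printed shapes are
    illustrative assumptions of this example, not library guarantees):

    >>> from sklearn.datasets import make_classification
    >>> from glmnet import LogitNet
    >>> X, y = make_classification(n_samples=300, random_state=0)
    >>> m = LogitNet(n_splits=3).fit(X, y)
    >>> m.predict(X).shape
    (300,)
    >>> m.predict_proba(X).shape
    (300, 2)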
""" - def __init__(self, alpha=1, n_lambda=100, min_lambda_ratio=1e-4, - lambda_path=None, standardize=True, fit_intercept=True, - lower_limits=-np.inf, upper_limits=np.inf, - cut_point=1.0, n_splits=3, scoring=None, n_jobs=1, tol=1e-7, - max_iter=100000, random_state=None, max_features=None, verbose=False): - + def __init__( + self, + alpha=1, + n_lambda=100, + min_lambda_ratio=1e-4, + lambda_path=None, + standardize=True, + fit_intercept=True, + lower_limits=-np.inf, + upper_limits=np.inf, + cut_point=1.0, + n_splits=3, + scoring=None, + n_jobs=1, + tol=1e-7, + max_iter=100000, + random_state=None, + max_features=None, + verbose=False, + ): self.alpha = alpha self.n_lambda = n_lambda self.min_lambda_ratio = min_lambda_ratio @@ -203,51 +225,62 @@ def fit(self, X, y, sample_weight=None, relative_penalties=None, groups=None): self : object Returns self. """ - X, y = check_X_y(X, y, accept_sparse='csr', ensure_min_samples=2) + X, y = check_X_y(X, y, accept_sparse="csr", ensure_min_samples=2) if sample_weight is None: sample_weight = np.ones(X.shape[0]) else: sample_weight = np.asarray(sample_weight) if y.shape != sample_weight.shape: - raise ValueError('the shape of weights is not the same with the shape of y') + msg = "the shape of weights is not the same with the shape of y" + raise ValueError(msg) if not np.isscalar(self.lower_limits): self.lower_limits = np.asarray(self.lower_limits) if len(self.lower_limits) != X.shape[1]: - raise ValueError("lower_limits must equal number of features") + msg = "lower_limits must equal number of features" + raise ValueError(msg) if not np.isscalar(self.upper_limits): self.upper_limits = np.asarray(self.upper_limits) if len(self.upper_limits) != X.shape[1]: - raise ValueError("upper_limits must equal number of features") + msg = "upper_limits must equal number of features" + raise ValueError(msg) if any(self.lower_limits > 0) if isinstance(self.lower_limits, np.ndarray) else self.lower_limits > 0: - raise ValueError("lower_limits must be non-positive") + msg = "lower_limits must be non-positive" + raise ValueError(msg) if any(self.upper_limits < 0) if isinstance(self.upper_limits, np.ndarray) else self.upper_limits < 0: - raise ValueError("upper_limits must be positive") + msg = "upper_limits must be positive" + raise ValueError(msg) if self.alpha > 1 or self.alpha < 0: - raise ValueError("alpha must be between 0 and 1") + msg = "alpha must be between 0 and 1" + raise ValueError(msg) # fit the model self._fit(X, y, sample_weight, relative_penalties) # score each model on the path of lambda values found by glmnet and # select the best scoring - if self.n_splits >= 3: + if self.n_splits >= THERE_ARE_AT_LEAST_THREE_SPLITS: if groups is None: self._cv = StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=self.random_state) else: self._cv = GroupKFold(n_splits=self.n_splits) - cv_scores = _score_lambda_path(self, X, y, groups, - sample_weight, - relative_penalties, - self.scoring, - n_jobs=self.n_jobs, - verbose=self.verbose) + cv_scores = _score_lambda_path( + self, + X, + y, + groups, + sample_weight, + relative_penalties, + self.scoring, + n_jobs=self.n_jobs, + verbose=self.verbose, + ) self.cv_mean_score_ = np.atleast_1d(np.mean(cv_scores, axis=0)) self.cv_standard_error_ = np.atleast_1d(stats.sem(cv_scores)) @@ -255,14 +288,16 @@ def fit(self, X, y, sample_weight=None, relative_penalties=None, groups=None): self.lambda_max_inx_ = np.argmax(self.cv_mean_score_) self.lambda_max_ = self.lambda_path_[self.lambda_max_inx_] - target_score = 
self.cv_mean_score_[self.lambda_max_inx_] -\ - self.cut_point * self.cv_standard_error_[self.lambda_max_inx_] + target_score = ( + self.cv_mean_score_[self.lambda_max_inx_] + - self.cut_point * self.cv_standard_error_[self.lambda_max_inx_] + ) self.lambda_best_inx_ = np.argwhere(self.cv_mean_score_ >= target_score)[0] self.lambda_best_ = self.lambda_path_[self.lambda_best_inx_] self.coef_ = self.coef_path_[..., self.lambda_best_inx_] - self.coef_ = self.coef_.squeeze(axis=self.coef_.ndim-1) + self.coef_ = self.coef_.squeeze(axis=self.coef_.ndim - 1) self.intercept_ = self.intercept_path_[..., self.lambda_best_inx_].squeeze() if self.intercept_.shape == (): # convert 0d array to scalar self.intercept_ = float(self.intercept_) @@ -280,13 +315,13 @@ def _fit(self, X, y, sample_weight=None, relative_penalties=None): check_classification_targets(y) self.classes_ = np.unique(y) # the output of np.unique is sorted n_classes = len(self.classes_) - if n_classes < 2: - raise ValueError("Training data need to contain at least 2 " - "classes.") + if n_classes < THERE_ARE_TWO_CLASSES: + msg = "Training data need to contain at least 2 classes." + raise ValueError(msg) # glmnet requires the labels a one-hot-encoded array of # (n_samples, n_classes) - if n_classes == 2: + if n_classes == THERE_ARE_TWO_CLASSES: # Normally we use 1/0 for the positive and negative classes. Since # np.unique sorts the output, the negative class will be in the 0th # column. We want a model predicting the positive class, not the @@ -297,11 +332,11 @@ def _fit(self, X, y, sample_weight=None, relative_penalties=None): # "reshapes" y to (n_samples, n_classes) and self.classes_ to # (n_samples, n_classes) and performs an element-wise comparison # resulting in _y with shape (n_samples, n_classes). - _y = (y[:, None] != self.classes_).astype(np.float64, order='F') + _y = (y[:, None] != self.classes_).astype(np.float64, order="F") else: # multinomial case, glmnet uses the entire array so we can # keep the original order. - _y = (y[:, None] == self.classes_).astype(np.float64, order='F') + _y = (y[:, None] == self.classes_).astype(np.float64, order="F") # use sample weights, making sure all weights are positive # this is inspired by the R wrapper for glmnet, in lognet.R @@ -314,8 +349,7 @@ def _fit(self, X, y, sample_weight=None, relative_penalties=None): # we need some sort of "offset" array for glmnet # an array of shape (n_examples, n_classes) - offset = np.zeros((X.shape[0], n_classes), dtype=np.float64, - order='F') + offset = np.zeros((X.shape[0], n_classes), dtype=np.float64, order="F") # You should have thought of that before you got here. 
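+        # A tiny illustration of the label encoding above (hypothetical
+        # values, not executed here): with y = [0, 1, 1] and classes_ = [0, 1],
+        # (y[:, None] != classes_) broadcasts to [[0, 1], [1, 0], [1, 0]],
+        # so column 0 is 1 exactly where y equals the positive class, even
+        # though np.unique sorted the negative class first.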
exclude_vars = 0 @@ -327,14 +361,13 @@ def _fit(self, X, y, sample_weight=None, relative_penalties=None): # vignette: # http://web.stanford.edu/~hastie/glmnet/glmnet_alpha.html if relative_penalties is None: - relative_penalties = np.ones(X.shape[1], dtype=np.float64, - order='F') + relative_penalties = np.ones(X.shape[1], dtype=np.float64, order="F") - coef_bounds = np.empty((2, X.shape[1]), dtype=np.float64, order='F') + coef_bounds = np.empty((2, X.shape[1]), dtype=np.float64, order="F") coef_bounds[0, :] = self.lower_limits coef_bounds[1, :] = self.upper_limits - if n_classes == 2: + if n_classes == THERE_ARE_TWO_CLASSES: # binomial, tell glmnet there is only one class # otherwise we will get a coef matrix with two dimensions # where each pair are equal in magnitude and opposite in sign @@ -342,49 +375,48 @@ def _fit(self, X, y, sample_weight=None, relative_penalties=None): # returned coefficients would be one half of the proper values n_classes = 1 - # This is a stopping criterion (nx) # R defaults to nx = num_features, and ne = num_features + 1 - if self.max_features is None: - max_features = X.shape[1] - else: - max_features = self.max_features - + max_features = X.shape[1] if self.max_features is None else self.max_features # for documentation on the glmnet function lognet, see doc.py if issparse(X): _x = csc_matrix(X, dtype=np.float64, copy=True) - (self.n_lambda_, - self.intercept_path_, - ca, - ia, - nin, - _, # dev0 - _, # dev - self.lambda_path_, - _, # nlp - jerr) = splognet(self.alpha, - _x.shape[0], - _x.shape[1], - n_classes, - _x.data, - _x.indptr + 1, # Fortran uses 1-based indexing - _x.indices + 1, - _y, - offset, - exclude_vars, - relative_penalties, - coef_bounds, - max_features, - X.shape[1] + 1, - min_lambda_ratio, - self.lambda_path, - self.tol, - n_lambda, - self.standardize, - self.fit_intercept, - self.max_iter, - 0) + ( + self.n_lambda_, + self.intercept_path_, + ca, + ia, + nin, + _, # dev0 + _, # dev + self.lambda_path_, + _, # nlp + jerr, + ) = splognet( + self.alpha, + _x.shape[0], + _x.shape[1], + n_classes, + _x.data, + _x.indptr + 1, # Fortran uses 1-based indexing + _x.indices + 1, + _y, + offset, + exclude_vars, + relative_penalties, + coef_bounds, + max_features, + X.shape[1] + 1, + min_lambda_ratio, + self.lambda_path, + self.tol, + n_lambda, + self.standardize, + self.fit_intercept, + self.max_iter, + 0, + ) else: # not sparse # some notes: glmnet requires both x and y to be float64, the two # arrays @@ -393,35 +425,39 @@ def _fit(self, X, y, sample_weight=None, relative_penalties=None): # copy any arrays passed to a wrapped function if they are not in # the fortran layout, to avoid making extra copies, ensure x and y # are `F_CONTIGUOUS` prior to calling lognet. 
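+            # Illustrative note (an assumption about the usual input, not a
+            # check performed here): a freshly created C-ordered array has
+            # X.flags['F_CONTIGUOUS'] == False, so the astype(..., order="F")
+            # call below is what hands f2py a Fortran-layout copy directly,
+            # rather than letting the wrapper make a second, hidden copy.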
- _x = X.astype(dtype=np.float64, order='F', copy=True) - - (self.n_lambda_, - self.intercept_path_, - ca, - ia, - nin, - _, # dev0 - _, # dev - self.lambda_path_, - _, # nlp - jerr) = lognet(self.alpha, - n_classes, - _x, - _y, - offset, - exclude_vars, - relative_penalties, - coef_bounds, - X.shape[1] + 1, - min_lambda_ratio, - self.lambda_path, - self.tol, - max_features, - n_lambda, - self.standardize, - self.fit_intercept, - self.max_iter, - 0) + _x = X.astype(dtype=np.float64, order="F", copy=True) + + ( + self.n_lambda_, + self.intercept_path_, + ca, + ia, + nin, + _, # dev0 + _, # dev + self.lambda_path_, + _, # nlp + jerr, + ) = lognet( + self.alpha, + n_classes, + _x, + _y, + offset, + exclude_vars, + relative_penalties, + coef_bounds, + X.shape[1] + 1, + min_lambda_ratio, + self.lambda_path, + self.tol, + max_features, + n_lambda, + self.standardize, + self.fit_intercept, + self.max_iter, + 0, + ) # raises RuntimeError if self.jerr_ is nonzero self.jerr_ = jerr @@ -430,14 +466,14 @@ def _fit(self, X, y, sample_weight=None, relative_penalties=None): # glmnet may not return the requested number of lambda values, so we # need to trim the trailing zeros from the returned path so # len(lambda_path_) is equal to n_lambda_ - self.lambda_path_ = self.lambda_path_[:self.n_lambda_] + self.lambda_path_ = self.lambda_path_[: self.n_lambda_] # also fix the first value of lambda self.lambda_path_ = _fix_lambda_path(self.lambda_path_) - self.intercept_path_ = self.intercept_path_[:, :self.n_lambda_] + self.intercept_path_ = self.intercept_path_[:, : self.n_lambda_] # also trim the compressed coefficient matrix - ca = ca[:, :, :self.n_lambda_] + ca = ca[:, :, : self.n_lambda_] # and trim the array of n_coef per lambda (may or may not be non-zero) - nin = nin[:self.n_lambda_] + nin = nin[: self.n_lambda_] # decompress the coefficients returned by glmnet, see doc.py self.coef_path_ = lsolns(X.shape[1], ca, ia, nin) # coef_path_ has shape (n_features, n_classes, n_lambda), we should @@ -448,30 +484,25 @@ def _fit(self, X, y, sample_weight=None, relative_penalties=None): return self def decision_function(self, X, lamb=None): - lambda_best = None - if hasattr(self, 'lambda_best_'): - lambda_best = self.lambda_best_ - + lambda_best = self.lambda_best_ if hasattr(self, "lambda_best_") else None lamb = _check_user_lambda(self.lambda_path_, lambda_best, lamb) - coef, intercept = _interpolate_model(self.lambda_path_, - self.coef_path_, - self.intercept_path_, lamb) + coef, intercept = _interpolate_model(self.lambda_path_, self.coef_path_, self.intercept_path_, lamb) # coef must be (n_classes, n_features, n_lambda) - if coef.ndim != 3: + if coef.ndim != CORRECT_NUMBER_OF_COEF_DIMS: # we must be working with an intercept only model coef = coef[:, :, np.newaxis] # intercept must be (n_classes, n_lambda) - if intercept.ndim != 2: + if intercept.ndim != CORRECT_NUMBER_OF_INTERCEPT_DIMS: intercept = intercept[:, np.newaxis] - X = check_array(X, accept_sparse='csr') + X = check_array(X, accept_sparse="csr") # return (n_samples, n_classes, n_lambda) z = np.empty((X.shape[0], coef.shape[0], coef.shape[-1])) # well... 
sometimes we just need a for loop for c in range(coef.shape[0]): # all classes - for l in range(coef.shape[-1]): # all values of lambda - z[:, c, l] = X.dot(coef[c, :, l]) + for lamb in range(coef.shape[-1]): # all values of lambda + z[:, c, lamb] = X.dot(coef[c, :, lamb]) z += intercept # drop the last dimension (lambda) when we are predicting for a single @@ -510,7 +541,7 @@ def predict_proba(self, X, lamb=None): if z.shape[1] == 1: # binomial, for consistency and to match scikit-learn, add the # complement so z has shape (n_samples, 2, n_lambda) - z = np.concatenate((1-z, z), axis=1) + z = np.concatenate((1 - z, z), axis=1) else: # normalize for multinomial z /= np.expand_dims(z.sum(axis=1), axis=1) diff --git a/src/glmnet/meson.build b/src/glmnet/meson.build new file mode 100644 index 0000000..ba6e18c --- /dev/null +++ b/src/glmnet/meson.build @@ -0,0 +1,30 @@ +py3.extension_module('_glmnet', + [ + f2py_gen.process('glmnet.pyf'), + 'src/glmnet5.f90', + ], + include_directories: inc_np, + fortran_args: [fortran_ignore_warnings, _fflag_Wno_surprising], + dependencies: [py3_dep, fortranobject_dep], + link_with: fortranobject_lib, + link_language: 'fortran', + install : true, + subdir: 'glmnet' +) + +py3.install_sources( + [ + '__init__.py', + 'doc.py', + 'errors.py', + 'linear.py', + 'logistic.py', + 'scorer.py', + 'util.py', + ], + pure : false, + subdir : 'glmnet' +) + + +# subdir('_glmnet') \ No newline at end of file diff --git a/glmnet/scorer.py b/src/glmnet/scorer.py similarity index 73% rename from glmnet/scorer.py rename to src/glmnet/scorer.py index 1f93413..dfb188d 100644 --- a/glmnet/scorer.py +++ b/src/glmnet/scorer.py @@ -15,11 +15,19 @@ from functools import partial import numpy as np - -from sklearn.metrics import (r2_score, median_absolute_error, mean_absolute_error, - mean_squared_error, accuracy_score, f1_score, - roc_auc_score, average_precision_score, - precision_score, recall_score, log_loss) +from sklearn.metrics import ( + accuracy_score, + average_precision_score, + f1_score, + log_loss, + mean_absolute_error, + mean_squared_error, + median_absolute_error, + precision_score, + r2_score, + recall_score, + roc_auc_score, +) from sklearn.utils.multiclass import type_of_target @@ -34,12 +42,8 @@ def __call__(self, estimator, X, y, sample_weight=None): pass def __repr__(self): - kwargs_string = "".join([", %s=%s" % (str(k), str(v)) - for k, v in self._kwargs.items()]) - return ("make_scorer(%s%s%s%s)" - % (self._score_func.__name__, - "" if self._sign > 0 else ", greater_is_better=False", - self._factory_args(), kwargs_string)) + kwargs_string = "".join([f", {k!s}={v!s}" for k, v in self._kwargs.items()]) + return f"make_scorer({self._score_func.__name__}{'' if self._sign > 0 else ', greater_is_better=False'}{self._factory_args()}{kwargs_string})" def _factory_args(self): """Return non-default make_scorer arguments for repr.""" @@ -76,11 +80,16 @@ def __call__(self, estimator, X, y_true, sample_weight=None, lamb=None): """ y_pred = estimator.predict(X, lamb=lamb) if sample_weight is not None: - scores = np.apply_along_axis(lambda y_hat: self._score_func(y_true, y_hat, sample_weight=sample_weight, **self._kwargs), 0, y_pred) + scores = np.apply_along_axis( + lambda y_hat: self._score_func(y_true, y_hat, sample_weight=sample_weight, **self._kwargs), + 0, + y_pred, + ) else: scores = np.apply_along_axis(lambda y_hat: self._score_func(y_true, y_hat, **self._kwargs), 0, y_pred) return self._sign * scores + class _ProbaScorer(_BaseScorer): def __call__(self, clf, X, y_true, 
sample_weight=None, lamb=None):
        """Evaluate predicted probabilities for X relative to y_true.
@@ -112,13 +121,17 @@ def __call__(self, clf, X, y_true, sample_weight=None, lamb=None):
         y_pred = clf.predict_proba(X, lamb=lamb)
         # y_pred shape (n_samples, n_classes, n_lambda)
         if sample_weight is not None:
-            score_func = lambda y_hat: self._score_func(y_true, y_hat, sample_weight=sample_weight, **self._kwargs)
+
+            def score_func(y_hat):
+                return self._score_func(y_true, y_hat, sample_weight=sample_weight, **self._kwargs)
         else:
-            score_func = lambda y_hat: self._score_func(y_true, y_hat, **self._kwargs)
+
+            def score_func(y_hat):
+                return self._score_func(y_true, y_hat, **self._kwargs)

         scores = np.zeros(y_pred.shape[-1])
         for i in range(len(scores)):
-            scores[i] = score_func(y_pred[...,i])
+            scores[i] = score_func(y_pred[..., i])

         return self._sign * scores
@@ -158,11 +171,16 @@ def __call__(self, clf, X, y_true, sample_weight=None, lamb=None):
         """
         y_type = type_of_target(y_true)
         if y_type not in ("binary", "multilabel-indicator"):
-            raise ValueError("{0} format is not supported".format(y_type))
+            msg = f"{y_type} format is not supported"
+            raise ValueError(msg)

         y_pred = clf.decision_function(X, lamb=lamb)
         if sample_weight is not None:
-            scores = np.apply_along_axis(lambda y_hat: self._score_func(y_true, y_hat, sample_weight=sample_weight, **self._kwargs), 0, y_pred)
+            scores = np.apply_along_axis(
+                lambda y_hat: self._score_func(y_true, y_hat, sample_weight=sample_weight, **self._kwargs),
+                0,
+                y_pred,
+            )
         else:
             scores = np.apply_along_axis(lambda y_hat: self._score_func(y_true, y_hat, **self._kwargs), 0, y_pred)
         return self._sign * scores
@@ -175,10 +193,9 @@ def get_scorer(scoring):
     if isinstance(scoring, str):
         try:
             scorer = SCORERS[scoring]
-        except KeyError:
-            raise ValueError('%r is not a valid scoring value. '
-                             'Valid options are %s'
-                             % (scoring, sorted(SCORERS.keys())))
+        except KeyError as e:
+            msg = f"{scoring!r} is not a valid scoring value. Valid options are {sorted(SCORERS.keys())}"
+            raise ValueError(msg) from e
     else:
         scorer = scoring
     return scorer
@@ -215,23 +232,27 @@ def check_scoring(estimator, scoring=None, allow_none=False):
         ``scorer(estimator, X, y)``.
     """
     has_scoring = scoring is not None
-    if not hasattr(estimator, 'fit'):
-        raise TypeError("estimator should a be an estimator implementing "
-                        "'fit' method, %r was passed" % estimator)
+    if not hasattr(estimator, "fit"):
+        msg = f"estimator should be an estimator implementing a 'fit' method, {estimator!r} was passed"
+        raise TypeError(msg)
     elif has_scoring:
         return get_scorer(scoring)
-    elif hasattr(estimator, 'score'):
+    elif hasattr(estimator, "score"):
         return _passthrough_scorer
     elif allow_none:
         return None
     else:
-        raise TypeError(
-            "If no scoring is specified, the estimator passed should "
-            "have a 'score' method. The estimator %r does not." % estimator)
+        msg = f"If no scoring is specified, the estimator passed should have a 'score' method. The estimator {estimator!r} does not."
+        raise TypeError(msg)


-def make_scorer(score_func, greater_is_better=True, needs_proba=False,
-                needs_threshold=False, **kwargs):
+def make_scorer(
+    score_func,
+    greater_is_better=True,
+    needs_proba=False,
+    needs_threshold=False,
+    **kwargs,
+):
    """Make a scorer from a performance metric or loss function.
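
    For example, ``make_scorer(accuracy_score)`` builds the ``accuracy``
    scorer registered in the SCORERS table at the bottom of this module.
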
This factory function wraps scoring functions for use in GridSearchCV @@ -285,8 +306,8 @@ def make_scorer(score_func, greater_is_better=True, needs_proba=False, """ sign = 1 if greater_is_better else -1 if needs_proba and needs_threshold: - raise ValueError("Set either needs_proba or needs_threshold to True," - " but not both.") + msg = "Set either needs_proba or needs_threshold to True, but not both." + raise ValueError(msg) if needs_proba: cls = _ProbaScorer elif needs_threshold: @@ -298,41 +319,40 @@ def make_scorer(score_func, greater_is_better=True, needs_proba=False, # Standard regression scores r2_scorer = make_scorer(r2_score) -mean_squared_error_scorer = make_scorer(mean_squared_error, - greater_is_better=False) -mean_absolute_error_scorer = make_scorer(mean_absolute_error, - greater_is_better=False) -median_absolute_error_scorer = make_scorer(median_absolute_error, - greater_is_better=False) +mean_squared_error_scorer = make_scorer(mean_squared_error, greater_is_better=False) +mean_absolute_error_scorer = make_scorer(mean_absolute_error, greater_is_better=False) +median_absolute_error_scorer = make_scorer(median_absolute_error, greater_is_better=False) # Standard Classification Scores accuracy_scorer = make_scorer(accuracy_score) f1_scorer = make_scorer(f1_score) # Score functions that need decision values -roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True, - needs_threshold=True) -average_precision_scorer = make_scorer(average_precision_score, - needs_threshold=True) +roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True, needs_threshold=True) +average_precision_scorer = make_scorer(average_precision_score, needs_threshold=True) precision_scorer = make_scorer(precision_score) recall_scorer = make_scorer(recall_score) # Score function for probabilistic classification -log_loss_scorer = make_scorer(log_loss, greater_is_better=False, - needs_proba=True) - -SCORERS = dict(r2=r2_scorer, - median_absolute_error=median_absolute_error_scorer, - mean_absolute_error=mean_absolute_error_scorer, - mean_squared_error=mean_squared_error_scorer, - accuracy=accuracy_scorer, roc_auc=roc_auc_scorer, - average_precision=average_precision_scorer, - log_loss=log_loss_scorer) - -for name, metric in [('precision', precision_score), - ('recall', recall_score), ('f1', f1_score)]: +log_loss_scorer = make_scorer(log_loss, greater_is_better=False, needs_proba=True) + +SCORERS = { + "r2": r2_scorer, + "median_absolute_error": median_absolute_error_scorer, + "mean_absolute_error": mean_absolute_error_scorer, + "mean_squared_error": mean_squared_error_scorer, + "accuracy": accuracy_scorer, + "roc_auc": roc_auc_scorer, + "average_precision": average_precision_scorer, + "log_loss": log_loss_scorer, +} + +for name, metric in [ + ("precision", precision_score), + ("recall", recall_score), + ("f1", f1_score), +]: SCORERS[name] = make_scorer(metric) - for average in ['macro', 'micro', 'samples', 'weighted']: - qualified_name = '{0}_{1}'.format(name, average) - SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None, - average=average)) + for average in ["macro", "micro", "samples", "weighted"]: + qualified_name = f"{name}_{average}" + SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None, average=average)) diff --git a/glmnet/src/glmnet/DESCRIPTION b/src/glmnet/src/DESCRIPTION similarity index 100% rename from glmnet/src/glmnet/DESCRIPTION rename to src/glmnet/src/DESCRIPTION diff --git a/glmnet/src/glmnet/NOTICE b/src/glmnet/src/NOTICE similarity index 73% 
rename from glmnet/src/glmnet/NOTICE rename to src/glmnet/src/NOTICE index ed63f0b..861f17b 100644 --- a/glmnet/src/glmnet/NOTICE +++ b/src/glmnet/src/NOTICE @@ -1,3 +1,3 @@ The files glmnet5.f90 and DESCRIPTION were copied from the CRAN github mirror for the R package named glmnet (https://github.com/cran/glmnet) on June 1, 2016. -See DESCRIPTION for license and attribution information. +See DESCRIPTION for license and attribution information. \ No newline at end of file diff --git a/glmnet/src/glmnet/glmnet5.f90 b/src/glmnet/src/glmnet5.f90 similarity index 100% rename from glmnet/src/glmnet/glmnet5.f90 rename to src/glmnet/src/glmnet5.f90 diff --git a/glmnet/util.py b/src/glmnet/util.py similarity index 82% rename from glmnet/util.py rename to src/glmnet/util.py index c469fb8..e08759a 100644 --- a/glmnet/util.py +++ b/src/glmnet/util.py @@ -2,18 +2,15 @@ import warnings import numpy as np - +from joblib import Parallel, delayed from scipy.interpolate import interp1d - from sklearn.base import clone from sklearn.exceptions import UndefinedMetricWarning -from joblib import Parallel, delayed from glmnet.scorer import check_scoring -def _score_lambda_path(est, X, y, groups, sample_weight, relative_penalties, - scoring, n_jobs, verbose): +def _score_lambda_path(est, X, y, groups, sample_weight, relative_penalties, scoring, n_jobs, verbose): """Score each model found by glmnet using cross validation. Parameters @@ -60,19 +57,38 @@ def _score_lambda_path(est, X, y, groups, sample_weight, relative_penalties, # the scikit-learn metrics unhappy, so we are silencing these warnings. # Also note, catch_warnings is not thread safe. with warnings.catch_warnings(): - action = 'always' if verbose else 'ignore' + action = "always" if verbose else "ignore" warnings.simplefilter(action, UndefinedMetricWarning) - scores = Parallel(n_jobs=n_jobs, verbose=verbose, backend='threading')( - delayed(_fit_and_score)(est, scorer, X, y, sample_weight, relative_penalties, - est.lambda_path_, train_idx, test_idx) - for (train_idx, test_idx) in cv_split) + scores = Parallel(n_jobs=n_jobs, verbose=verbose, backend="threading")( + delayed(_fit_and_score)( + est, + scorer, + X, + y, + sample_weight, + relative_penalties, + est.lambda_path_, + train_idx, + test_idx, + ) + for (train_idx, test_idx) in cv_split + ) return scores -def _fit_and_score(est, scorer, X, y, sample_weight, relative_penalties, - score_lambda_path, train_inx, test_inx): +def _fit_and_score( + est, + scorer, + X, + y, + sample_weight, + relative_penalties, + score_lambda_path, + train_inx, + test_inx, +): """Fit and score a single model. Parameters @@ -119,10 +135,13 @@ def _fit_and_score(est, scorer, X, y, sample_weight, relative_penalties, return scorer(m, X[test_inx, :], y[test_inx], lamb=lamb) +MAX_LAMBDA_PATH_DIMS = 2 + + def _fix_lambda_path(lambda_path): """Replace the first value in lambda_path (+inf) with something more reasonable. 
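    Concretely, the first value is extrapolated one step back on the log
    scale, lambda_0 = exp(2 * log(lambda_1) - log(lambda_2)), so the gap from
    lambda_0 to lambda_1 repeats the log-scale gap from lambda_1 to lambda_2.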
    The method below matches what is done in the R/glmnet wrapper."""
-    if lambda_path.shape[0] > 2:
+    if lambda_path.shape[0] > MAX_LAMBDA_PATH_DIMS:
         lambda_0 = math.exp(2 * math.log(lambda_path[1]) - math.log(lambda_path[2]))
         lambda_path[0] = lambda_0
     return lambda_path
@@ -156,18 +175,18 @@ def _check_user_lambda(lambda_path, lambda_best=None, lamb=None):
     if lamb is None:
         if lambda_best is None:
-            raise ValueError("You must specify a value for lambda or run "
-                             "with cv_folds > 1 to select a value "
-                             "automatically.")
+            msg = "You must specify a value for lambda or run with n_splits >= 3 to select a value automatically."
+            raise ValueError(msg)
         lamb = lambda_best

     # ensure numpy math works later
-    lamb = np.array(lamb, ndmin=1)
+    lamb = np.array(lamb, ndmin=1, dtype=lambda_path.dtype)

     if np.any(lamb < lambda_path[-1]) or np.any(lamb > lambda_path[0]):
-        warnings.warn("Some values of lamb are outside the range of "
-                      "lambda_path_ [{}, {}]".format(lambda_path[-1],
-                                                     lambda_path[0]),
-                      RuntimeWarning)
+        warnings.warn(
+            f"Some values of lamb are outside the range of lambda_path_ [{lambda_path[-1]}, {lambda_path[0]}]",
+            RuntimeWarning,
+            stacklevel=1,
+        )
         np.clip(lamb, lambda_path[-1], lambda_path[0], lamb)

     return lamb
@@ -199,8 +218,9 @@ def _interpolate_model(lambda_path, coef_path, intercept_path, lamb):
         The interpolated path of intercepts.
     """
     if lambda_path.shape[0] == 1:
-        warnings.warn("lambda_path has a single value, this may be an "
-                      "intercept-only model.", RuntimeWarning)
+        warnings.warn(
+            "lambda_path has a single value, this may be an intercept-only model.", RuntimeWarning, stacklevel=1
+        )
         coef = np.take(coef_path, 0, axis=-1)
         intercept = np.take(intercept_path, 0, axis=-1)
     else:
diff --git a/glmnet/tests/__init__.py b/tests/__init__.py
similarity index 100%
rename from glmnet/tests/__init__.py
rename to tests/__init__.py
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..1aea497
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,47 @@
+import numpy as np
+import pytest
+
+
+@pytest.fixture
+def rng():
+    return np.random.default_rng()
+
+
+@pytest.fixture
+def random_int(rng):
+    return rng.integers(10000)
+
+
+@pytest.fixture
+def max_features():
+    return 5
+
+
+@pytest.fixture
+def min_acceptable_correlation():
+    return 0.90
+
+
+@pytest.fixture
+def min_acceptable_accuracy():
+    return 0.85
+
+
+@pytest.fixture
+def even_lower_acceptable_accuracy():
+    return 0.65
+
+
+@pytest.fixture(params=[0.0, 0.25, 0.50, 0.75, 1.0])
+def alphas(request):
+    return request.param
+
+
+@pytest.fixture(params=[-1, 0, 5])
+def n_splits(request):
+    return request.param
+
+
+@pytest.fixture(autouse=True)
+def record_numpy_version(record_property):
+    record_property("numpy_version", np.__version__)
\ No newline at end of file
diff --git a/tests/test_errors.py b/tests/test_errors.py
new file mode 100644
index 0000000..357f295
--- /dev/null
+++ b/tests/test_errors.py
@@ -0,0 +1,32 @@
+import pytest
+
+from glmnet.errors import _check_error_flag
+
+
+@pytest.mark.parametrize(
"error_code,error_type,message", + [ + (-76, RuntimeWarning, r"Model did not converge"), + (-20007, RuntimeWarning, r"Predicted probability close to 0 or 1 for lambda no. 7."), + ], +) +def test_warnings(error_code, error_type, message): + with pytest.warns(error_type, match=message): + _check_error_flag(error_code) diff --git a/tests/test_linear.py b/tests/test_linear.py new file mode 100644 index 0000000..ed7dea7 --- /dev/null +++ b/tests/test_linear.py @@ -0,0 +1,306 @@ +from copy import deepcopy + +import numpy as np +import numpy.testing as nptst +import pytest +from pytest_lazy_fixtures import lf +from scipy.sparse import csr_matrix +from sklearn.datasets import make_regression +from sklearn.metrics import r2_score + +# from sklearn.utils.estimator_checks import parametrize_with_checks +from glmnet import ElasticNet +from tests.util import sanity_check_regression + + +@pytest.fixture +def x_y(): + np.random.seed(488881) + return make_regression(n_samples=1000, random_state=561) + + +@pytest.fixture +def x(x_y): + return x_y[0] + + +@pytest.fixture +def y(x_y): + return x_y[1] + + +@pytest.fixture +def x_sparse(x_y): + return csr_matrix(x_y[0]) + + +@pytest.fixture +def x_y_wide(): + return make_regression(n_samples=100, n_features=150, random_state=1105) + + +@pytest.fixture +def x_wide(x_y_wide): + return x_y_wide[0] + + +@pytest.fixture +def y_wide(x_y_wide): + return x_y_wide[1] + + +@pytest.fixture +def x_wide_sparse(x_wide): + return csr_matrix(x_wide) + + +@pytest.fixture( + params=[ + (lf("x"), lf("y")), + (lf("x_sparse"), lf("y")), + (lf("x_wide"), lf("y_wide")), + (lf("x_wide_sparse"), lf("y_wide")), + ] +) +def x_y_inputs(request): + return request.param + + +@pytest.fixture( + params=[ + (lf("x"), lf("y")), + (lf("x_sparse"), lf("y")), + ] +) +def x_y_tall_inputs(request): + return request.param + + +@pytest.fixture( + params=[ + (lf("x_wide"), lf("y_wide")), + (lf("x_wide_sparse"), lf("y_wide")), + ] +) +def x_y_wide_inputs(request): + return request.param + + +# NOT creating a lot of models with specific seeds +# if it is important, we can try changing the seed +# per-func +@pytest.fixture +def m(): + return ElasticNet() + + +@pytest.fixture +def m_alphas(alphas): + return ElasticNet(alpha=alphas, random_state=2465) + + +@pytest.fixture +def m_nsplits(n_splits): + return ElasticNet(n_splits=n_splits, random_state=6601) + + +@pytest.fixture( + params=[ + "r2", + "mean_squared_error", + "mean_absolute_error", + "median_absolute_error", + ] +) +def scoring(request): + return request.param + + +@pytest.fixture +def m_scoring(scoring): + return ElasticNet(scoring=scoring) + + +# I don't think I understand what this test +# does enough to fix this right now? 
+# @pytest.mark.filterwarnings +# @parametrize_with_checks([ElasticNet()]) +# def test_sklearn_compatible_estimator(estimator, check): +# check(estimator) + + +@pytest.mark.parametrize("inputs", [(lf("x_y_inputs"))]) +def test_with_defaults(m, inputs): + # print(f"{meta_inputs=}") + x, y = inputs + m = m.fit(x, y) + sanity_check_regression(m, x) + + # check selection of lambda_best + assert m.lambda_best_inx_ <= m.lambda_max_inx_ + + # check full path predict + p = m.predict(x, lamb=m.lambda_path_) + assert p.shape[-1] == m.lambda_path_.size + + +@pytest.mark.parametrize("inputs", [(lf("x_y_inputs"))]) +def test_one_row_predict(m, inputs): + # Verify that predicting on one row gives only one row of output + X, y = inputs + m.fit(X, y) + p = m.predict(X[0].reshape((1, -1))) + assert p.shape == (1,) + + +@pytest.mark.parametrize("inputs", [(lf("x_y_inputs"))]) +def test_one_row_predict_with_lambda(m, inputs): + # One row to predict along with lambdas should give 2D output + X, y = inputs + m.fit(X, y) + p = m.predict(X[0].reshape((1, -1)), lamb=[20, 10]) + assert p.shape == (1, 2) + + +def test_with_single_var(m, min_acceptable_correlation): + x = np.random.rand(500, 1) + y = (1.3 * x).ravel() + + m = m.fit(x, y) + score = r2_score(y, m.predict(x)) + assert score >= min_acceptable_correlation + + +def test_with_no_predictor_variance(m): + x = np.ones((500, 1)) + y = np.random.rand(500) + + with pytest.raises(ValueError, match=r".*7777.*"): + m.fit(x, y) + + +@pytest.mark.parametrize("inputs", [(lf("x_y_inputs"))]) +def test_relative_penalties(m, inputs): + x, y = inputs + m1 = m + m2 = deepcopy(m1) + p = x.shape[1] + + # m1 no relative penalties applied + m1.fit(x, y) + + # find the nonzero indices from LASSO + nonzero = np.nonzero(m1.coef_) + + # unpenalize those nonzero coefs + penalty = np.repeat(1, p) + penalty[nonzero] = 0 + + # refit the model with the unpenalized coefs + m2.fit(x, y, relative_penalties=penalty) + + # verify that the unpenalized coef ests exceed the penalized ones + # in absolute value + assert np.all(np.abs(m1.coef_) <= np.abs(m2.coef_)) + + +@pytest.mark.parametrize("m_alpha", [(lf("m_alphas"))]) +def test_alphas(x, y, m_alpha, min_acceptable_correlation): + m_alpha = m_alpha.fit(x, y) + score = r2_score(y, m_alpha.predict(x)) + assert score >= min_acceptable_correlation + + +@pytest.fixture +def m_with_limits(x): + return ElasticNet(lower_limits=np.repeat(-1, x.shape[1]), upper_limits=0, alpha=0) + + +# TODO I think it should be possible to merge the tall and wide +# tests here, I just haven't figured exactly how yet +def test_coef_limits(m_with_limits, x, y): + m_with_limits = m_with_limits.fit(x, y) + assert np.all(m_with_limits.coef_ >= -1) + assert np.all(m_with_limits.coef_ <= 0) + + +@pytest.mark.parametrize("inputs,m_score", [(lf("x_y_inputs"), lf("m_scoring"))]) +def test_cv_scoring(inputs, m_score, min_acceptable_correlation): + x, y = inputs + m_score = m_score.fit(x, y) + score = r2_score(y, m_score.predict(x)) + assert score >= min_acceptable_correlation + + +@pytest.fixture +def m_nosplits(): + return ElasticNet(n_splits=0) + + +# @pytest.mark.parametrize("inputs", [(lf("x_y_inputs"))]) +def test_predict_without_cv(x_y, m_nosplits): + x, y = x_y + m_nosplits = m_nosplits.fit(x, y) + + # should not make prediction unless value is passed for lambda + with pytest.raises(ValueError): + m_nosplits.predict(x) + + +@pytest.mark.xfail +def test_coef_interpolation(x_y, m_nosplits): + x, y = x_y + m_nosplits = m_nosplits.fit(x, y) + + # predict for a value of 
lambda between two values on the computed path + lamb_lo = m_nosplits.lambda_path_[1] + lamb_hi = m_nosplits.lambda_path_[2] + + # a value not equal to one on the computed path + lamb_mid = (lamb_lo + lamb_hi) / 2.0 + + pred_lo = m_nosplits.predict(x, lamb=lamb_lo) + pred_hi = m_nosplits.predict(x, lamb=lamb_hi) + pred_mid = m_nosplits.predict(x, lamb=lamb_mid) + + nptst.assert_allclose(pred_lo, pred_mid) + nptst.assert_allclose(pred_hi, pred_mid) + + +def test_lambda_clip_warning(x_y, m_nosplits): + x, y = x_y + m_nosplits = m_nosplits.fit(x, y) + + # we should get a warning when we ask for predictions at values of + # lambda outside the range of lambda_path_ + with pytest.warns(RuntimeWarning): + # note, lambda_path_ is in decreasing order + m_nosplits.predict(x, lamb=m_nosplits.lambda_path_[0] + 1) + + with pytest.warns(RuntimeWarning): + m_nosplits.predict(x, lamb=m_nosplits.lambda_path_[-1] - 1) + + +@pytest.fixture +def m_random(random_int): + return ElasticNet(random_state=random_int) + + +def test_random_state_cv(m_random, random_int, x_y): + x, y = x_y + m_random.fit(x, y) + # print(dir(m_random._cv)) + assert m_random._cv.random_state == random_int + + +@pytest.fixture +def m_3_splits(max_features): + return ElasticNet(n_splits=3, random_state=42, max_features=max_features) + + +@pytest.mark.parametrize("inputs", [(lf("x_y_wide_inputs"))]) +def test_max_features(inputs, m_3_splits, max_features): + x, y = inputs + m_3_splits = m_3_splits.fit(x, y) + num_features = np.count_nonzero(m_3_splits.coef_) + assert num_features <= max_features diff --git a/tests/test_logistic.py b/tests/test_logistic.py new file mode 100644 index 0000000..9d5578c --- /dev/null +++ b/tests/test_logistic.py @@ -0,0 +1,489 @@ +from copy import deepcopy + +import numpy as np +import numpy.testing as nptst +import pytest +from pytest_lazy_fixtures import lf +from scipy.sparse import csr_matrix +from sklearn.datasets import make_classification +from sklearn.metrics import accuracy_score, f1_score +from sklearn.utils import class_weight + +# from sklearn.utils.estimator_checks import parametrize_with_checks +from glmnet import LogitNet + +# from tests.conftest import min_acceptable_accuracy +from tests.util import sanity_check_logistic + + +@pytest.fixture +def bn_x_y(): + np.random.seed(488881) + return make_classification(n_samples=300, random_state=6601) + + +@pytest.fixture +def bn_x(bn_x_y): + return bn_x_y[0] + + +@pytest.fixture +def bn_y(bn_x_y): + return bn_x_y[1] + + +@pytest.fixture +def bn_x_sparse(bn_x_y): + return csr_matrix(bn_x_y[0]) + + +@pytest.fixture +def bn_x_y_wide(): + return make_classification(n_samples=100, n_features=150, random_state=1105) + + +@pytest.fixture +def bn_x_wide(bn_x_y_wide): + return bn_x_y_wide[0] + + +@pytest.fixture +def bn_y_wide(bn_x_y_wide): + return bn_x_y_wide[1] + + +@pytest.fixture +def bn_x_wide_sparse(bn_x_wide): + return csr_matrix(bn_x_wide) + + +@pytest.fixture( + params=[ + (lf("bn_x"), lf("bn_y")), + (lf("bn_x_sparse"), lf("bn_y")), + (lf("bn_x_wide"), lf("bn_y_wide")), + (lf("bn_x_wide_sparse"), lf("bn_y_wide")), + ] +) +def binomial_inputs(request): + return request.param + + +@pytest.fixture +def mul_x_y(): + np.random.seed(488881) + return make_classification( + n_samples=400, + n_classes=3, + n_informative=15, + n_features=25, + random_state=10585, + ) + + +@pytest.fixture +def mul_x(mul_x_y): + return mul_x_y[0] + + +@pytest.fixture +def mul_y(mul_x_y): + return mul_x_y[1] + + +@pytest.fixture +def mul_x_sparse(mul_x_y): + return 
csr_matrix(mul_x_y[0]) + + +@pytest.fixture +def mul_x_y_wide(): + return make_classification( + n_samples=400, + n_classes=3, + n_informative=15, + n_features=500, + random_state=15841, + ) + + +@pytest.fixture +def mul_x_wide(mul_x_y_wide): + return mul_x_y_wide[0] + + +@pytest.fixture +def mul_y_wide(mul_x_y_wide): + return mul_x_y_wide[1] + + +@pytest.fixture +def mul_x_wide_sparse(mul_x_wide): + return csr_matrix(mul_x_wide) + + +@pytest.fixture( + params=[ + (lf("mul_x"), lf("mul_y")), + (lf("mul_x_sparse"), lf("mul_y")), + (lf("mul_x_wide"), lf("mul_y_wide")), + (lf("mul_x_wide_sparse"), lf("mul_y_wide")), + ] +) +def multinomial_inputs(request): + return request.param + + +@pytest.fixture(params=[0.0, 0.25, 0.50, 0.75, 1.0]) +def alphas(request): + return request.param + + +@pytest.fixture( + params=[ + "accuracy", + "roc_auc", + "average_precision", + "log_loss", + "precision_macro", + "precision_micro", + "precision_weighted", + "f1_micro", + "f1_macro", + "f1_weighted", + ] +) +def scoring(request): + return request.param + + +@pytest.fixture( + params=[ + "accuracy", + "log_loss", + "precision_macro", + "precision_micro", + "precision_weighted", + "f1_micro", + "f1_macro", + "f1_weighted", + ] +) +def multinomial_scoring(request): + return request.param + + +@pytest.fixture +def mutinomial_score_list(): + return [ + "accuracy", + "log_loss", + "precision_macro", + "precision_micro", + "precision_weighted", + "f1_micro", + "f1_macro", + "f1_weighted", + ] + + +# I don't think I understand what this test +# does enough to fix this right now? +# @pytest.mark.filterwarnings +# @parametrize_with_checks([LogitNet()]) +# def test_estimator_interface(estimator, check): +# check(estimator) + + +@pytest.fixture +def m(): + return LogitNet() + + +@pytest.mark.parametrize("inputs", [(lf("binomial_inputs")), (lf("multinomial_inputs"))]) +def test_with_defaults(m, inputs): + x, y = inputs + m = m.fit(x, y) + sanity_check_logistic(m, x) + + # check selection of lambda_best + assert m.lambda_best_inx_ <= m.lambda_max_inx_ + + # check full path predict + p = m.predict(x, lamb=m.lambda_path_) + assert p.shape[-1] == m.lambda_path_.size + + +# TODO: could probably parametrize predict/predict_proba +# but I don't want to get into that territory yet +@pytest.mark.parametrize("inputs", [(lf("binomial_inputs")), (lf("multinomial_inputs"))]) +def test_one_row_predict(m, inputs): + # Verify that predicting on one row gives only one row of output + X, y = inputs + m.fit(X, y) + p = m.predict(X[0].reshape((1, -1))) + assert p.shape == (1,) + + +@pytest.mark.parametrize("inputs", [(lf("binomial_inputs")), (lf("multinomial_inputs"))]) +def test_one_row_predict_proba(m, inputs): + # Verify that predict_proba on one row gives 2D output + X, y = inputs + m.fit(X, y) + p = m.predict_proba(X[0].reshape((1, -1))) + assert p.shape == (1, len(np.unique(y))) + + +@pytest.mark.parametrize("inputs", [(lf("binomial_inputs")), (lf("multinomial_inputs"))]) +def test_one_row_predict_with_lambda(m, inputs): + # One row to predict along with lambdas should give 2D output + lamb = [0.01, 0.02, 0.04, 0.1] + X, y = inputs + m.fit(X, y) + p = m.predict(X[0].reshape((1, -1)), lamb=lamb) + assert p.shape == (1, len(lamb)) + + +@pytest.fixture +def lamb(): + return [0.01, 0.02, 0.04, 0.1] + + +@pytest.mark.parametrize("inputs", [(lf("binomial_inputs")), (lf("multinomial_inputs"))]) +def test_one_row_predict_proba_with_lambda(m, inputs, lamb): + # One row to predict_proba along with lambdas should give 3D output + X, y = inputs + 
m.fit(X, y)
+    p = m.predict_proba(X[0].reshape((1, -1)), lamb=lamb)
+    assert p.shape == (1, len(np.unique(y)), len(lamb))
+
+
+@pytest.fixture()
+def malphas(alphas):
+    return LogitNet(alpha=alphas, random_state=41041)
+
+
+@pytest.mark.parametrize("malpha", [(lf("malphas"))])
+def test_alphas(malpha, bn_x, bn_y, min_acceptable_accuracy):
+    malpha = malpha.fit(bn_x, bn_y)
+    score = accuracy_score(bn_y, malpha.predict(bn_x))
+    assert score > min_acceptable_accuracy
+
+
+@pytest.fixture
+def lower_limits(bn_x):
+    return np.repeat(-1, bn_x.shape[1])
+
+
+@pytest.fixture
+def upper_limits():
+    return 0
+
+
+@pytest.fixture
+def m_coef_limits(lower_limits, upper_limits):
+    return LogitNet(
+        lower_limits=lower_limits,
+        upper_limits=upper_limits,
+        random_state=69265,
+        alpha=0,
+    )
+
+
+def test_coef_limits(bn_x, bn_y, m_coef_limits):
+    m_coef_limits = m_coef_limits.fit(bn_x, bn_y)
+    assert np.all(m_coef_limits.coef_ >= -1)
+    assert np.all(m_coef_limits.coef_ <= 0)
+
+
+@pytest.fixture
+def m_alpha_1():
+    return LogitNet(alpha=1)
+
+
+def test_relative_penalties(bn_x, bn_y, m_alpha_1):
+    p = bn_x.shape[1]
+
+    # m_alpha_1: no relative penalties applied
+    m_alpha_2 = deepcopy(m_alpha_1)
+    m_alpha_1.fit(bn_x, bn_y)
+
+    # find the nonzero indices from LASSO
+    nonzero = np.nonzero(m_alpha_1.coef_[0])
+
+    # unpenalize those nonzero coefs
+    penalty = np.repeat(1, p)
+    penalty[nonzero] = 0
+
+    # refit the model with the unpenalized coefs
+    m_alpha_2.fit(bn_x, bn_y, relative_penalties=penalty)
+
+    # verify that the unpenalized coef ests exceed the penalized ones
+    # in absolute value
+    assert np.all(np.abs(m_alpha_1.coef_[0]) <= np.abs(m_alpha_2.coef_[0]))
+
+
+@pytest.fixture
+def min_n_splits():
+    return 3
+
+
+@pytest.fixture
+def msplits(n_splits):
+    return LogitNet(n_splits=n_splits, random_state=41041)
+
+
+def test_n_splits(msplits, bn_x, bn_y, n_splits, min_n_splits):
+    if n_splits > 0 and n_splits < min_n_splits:
+        with pytest.raises(ValueError, match="n_splits must be at least 3"):
+            msplits = msplits.fit(bn_x, bn_y)
+    else:
+        msplits = msplits.fit(bn_x, bn_y)
+        sanity_check_logistic(msplits, bn_x)
+
+
+@pytest.fixture
+def m_scoring(scoring):
+    return LogitNet(scoring=scoring)
+
+
+def test_cv_scoring(m_scoring, bn_x, bn_y, min_acceptable_accuracy):
+    m_scoring = m_scoring.fit(bn_x, bn_y)
+    score = accuracy_score(bn_y, m_scoring.predict(bn_x))
+    assert score > min_acceptable_accuracy
+
+
+@pytest.fixture
+def multiscoring(multinomial_scoring):
+    return LogitNet(scoring=multinomial_scoring)
+
+
+def test_cv_scoring_multinomial(m_scoring, mul_x, mul_y, mutinomial_score_list, even_lower_acceptable_accuracy):
+    if m_scoring.scoring in mutinomial_score_list:
+        m_scoring = m_scoring.fit(mul_x, mul_y)
+        score = accuracy_score(mul_y, m_scoring.predict(mul_x))
+        assert score >= even_lower_acceptable_accuracy
+    else:
+        with pytest.raises(ValueError, match=r".*multiclass.*"):
+            m_scoring.fit(mul_x, mul_y)
+
+
+@pytest.fixture
+def m_no_splits():
+    return LogitNet(n_splits=0)
+
+
+def test_predict_without_cv(m_no_splits, bn_x, bn_y):
+    m_no_splits = m_no_splits.fit(bn_x, bn_y)
+
+    # should not make prediction unless value is passed for lambda
+    with pytest.raises(ValueError):
+        m_no_splits.predict(bn_x)
+
+
+@pytest.mark.xfail
+def test_coef_interpolation(bn_x, bn_y, m_no_splits):
+    m_no_splits = m_no_splits.fit(bn_x, bn_y)
+
+    # predict for a value of lambda between two values on the computed path
+    lamb_lo = m_no_splits.lambda_path_[1]
+    lamb_hi = m_no_splits.lambda_path_[2]
+
+    # a value not equal to one on the computed path
+    lamb_mid = (lamb_lo + lamb_hi) / 2.0
+
+    pred_lo = m_no_splits.predict_proba(bn_x, lamb=lamb_lo)
+    pred_hi = m_no_splits.predict_proba(bn_x, lamb=lamb_hi)
+    pred_mid = m_no_splits.predict_proba(bn_x, lamb=lamb_mid)
+
+    nptst.assert_allclose(pred_lo, pred_mid)
+    nptst.assert_allclose(pred_hi, pred_mid)
+
+
+def test_lambda_clip_warning(bn_x, bn_y, m_no_splits):
+    m_no_splits = m_no_splits.fit(bn_x, bn_y)
+
+    with pytest.warns(RuntimeWarning):
+        m_no_splits.predict(bn_x, lamb=m_no_splits.lambda_path_[0] + 1)
+
+    with pytest.warns(RuntimeWarning):
+        m_no_splits.predict(bn_x, lamb=m_no_splits.lambda_path_[-1] - 1)
+
+
+@pytest.fixture
+def ones_like_y(bn_y):
+    return np.ones_like(bn_y)
+
+
+def test_single_class_exception(m, bn_x, ones_like_y):
+    with pytest.raises(ValueError, match="Training data need to contain at least 2 classes."):
+        m.fit(bn_x, ones_like_y)
+
+
+@pytest.fixture
+def m_random(random_int):
+    return LogitNet(random_state=random_int)
+
+
+def test_random_state_cv(m_random, bn_x, bn_y, random_int):
+    m_random.fit(bn_x, bn_y)
+    assert m_random._cv.random_state == random_int
+
+
+@pytest.fixture
+def m_maxfeatures(max_features):
+    return LogitNet(max_features=max_features)
+
+
+def test_max_features(m_maxfeatures, mul_x_wide_sparse, mul_y_wide, max_features):
+    m_maxfeatures = m_maxfeatures.fit(mul_x_wide_sparse, mul_y_wide)
+    num_features = np.count_nonzero(m_maxfeatures.coef_, axis=1)
+    assert np.all(num_features <= max_features)
+
+
+@pytest.fixture
+def m_f1_micro():
+    return LogitNet(scoring="f1_micro")
+
+
+@pytest.fixture
+def to_keep(mul_y):
+    class_0_idx = np.where(mul_y == 0)
+    to_drop = class_0_idx[0][:-3]
+    to_keep = np.ones(len(mul_y), dtype=bool)
+    to_keep[to_drop] = False
+    return to_keep
+
+
+@pytest.fixture
+def kept_y(mul_y, to_keep):
+    return mul_y[to_keep]
+
+
+@pytest.fixture
+def kept_x(mul_x_wide_sparse, to_keep):
+    return mul_x_wide_sparse[to_keep]
+
+
+@pytest.fixture
+def sample_weight(kept_y):
+    sample_weight = class_weight.compute_sample_weight("balanced", kept_y)
+    sample_weight[0] = 0.0
+    return sample_weight
+
+
+@pytest.fixture
+def unweighted_acc(m_f1_micro, kept_x, kept_y, sample_weight):
+    m_f1_micro = m_f1_micro.fit(kept_x, kept_y)
+    return f1_score(kept_y, m_f1_micro.predict(kept_x), sample_weight=sample_weight, average="micro")
+
+
+@pytest.fixture
+def weighted_acc(m_f1_micro, kept_x, kept_y, sample_weight):
+    m_f1_micro = m_f1_micro.fit(kept_x, kept_y, sample_weight)
+    return f1_score(kept_y, m_f1_micro.predict(kept_x), sample_weight=sample_weight, average="micro")
+
+
+def test_use_sample_weights(weighted_acc, unweighted_acc):
+    assert weighted_acc >= unweighted_acc
diff --git a/tests/test_pandas.py b/tests/test_pandas.py
new file mode 100644
index 0000000..a5eb974
--- /dev/null
+++ b/tests/test_pandas.py
@@ -0,0 +1,45 @@
+# What is the point of this?
+# Pandas isn't required, nor is it an
+# optional dependency.
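+# (In practice they serve as optional smoke tests: the find_spec-based
+# skipif guards below run them only when pandas happens to be installed,
+# without making pandas a hard dependency.)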
+from importlib.util import find_spec + +import pytest +from sklearn.datasets import make_classification, make_regression + +from glmnet import ElasticNet, LogitNet + +from .util import sanity_check_logistic, sanity_check_regression + + +@pytest.fixture +def elastic_net_model(): + return ElasticNet(n_splits=3, random_state=123) + + +@pytest.fixture +def logit_net_model(): + return LogitNet(n_splits=3, random_state=123) + + +@pytest.mark.skipif(not find_spec("pandas"), reason="Pandas is required") +def test_elasticnet_pandas(elastic_net_model): + import pandas as pd + + x, y = make_regression(random_state=561) + df = pd.DataFrame(x) + df["y"] = y + + elastic_net_model = elastic_net_model.fit(df.drop(["y"], axis=1), df.y) + sanity_check_regression(elastic_net_model, x) + + +@pytest.mark.skipif(not find_spec("pandas"), reason="Pandas is required") +def test_logitnet_pandas(logit_net_model): + import pandas as pd + + x, y = make_classification(random_state=1105) + df = pd.DataFrame(x) + df["y"] = y + + logit_net_model = logit_net_model.fit(df.drop(["y"], axis=1), df.y) + sanity_check_logistic(logit_net_model, x) diff --git a/tests/test_util.py b/tests/test_util.py new file mode 100644 index 0000000..b819f26 --- /dev/null +++ b/tests/test_util.py @@ -0,0 +1,31 @@ +import numpy as np +import pytest + +from glmnet.util import _interpolate_model + + +@pytest.fixture +def lambda_path(): + return np.array((0.99,)) + + +@pytest.fixture +def coef_path(rng): + return rng.random(size=(5, 1)) + + +@pytest.fixture +def intercept_path(rng): + return rng.random(size=(1,)) + + +def test_interpolate_model_intercept_only(lambda_path, coef_path, intercept_path): + # would be nice to use assertWarnsRegex to check the message, but this + # fails due to http://bugs.python.org/issue20484 + with pytest.warns(RuntimeWarning, match="lambda_path has a single value.*"): + _interpolate_model( + lambda_path, + coef_path, + intercept_path, + 0.99, + ) diff --git a/glmnet/tests/util.py b/tests/util.py similarity index 57% rename from glmnet/tests/util.py rename to tests/util.py index 4b5e73d..f3a7c46 100644 --- a/glmnet/tests/util.py +++ b/tests/util.py @@ -1,30 +1,36 @@ +from typing import Final + import numpy as np +WRONG_SIZE_FOR_COEF_PATH: Final[int] = 2 +WRONG_NUMBER_OF_DIMS_FOR_LOGISTIC_COEF_PATH: Final[int] = 3 +WRONG_NUMBER_OF_DIMS_FOR_LOGISTIC_INTERCEPT: Final[int] = 2 + def sanity_check_logistic(m, x): sanity_check_model_attributes(m) sanity_check_cv_attrs(m, is_clf=True) assert m.classes_ is not None - assert m.coef_path_.ndim == 3, "wrong number of dimensions for coef_path_" + assert m.coef_path_.ndim == WRONG_NUMBER_OF_DIMS_FOR_LOGISTIC_COEF_PATH, "wrong number of dimensions for coef_path_" - n_classes = len(m.classes_) - if len(m.classes_) == 2: # binomial is a special case - n_classes = 1 + n_classes = 1 if len(m.classes_) == WRONG_SIZE_FOR_COEF_PATH else len(m.classes_) assert m.coef_path_.shape[0] == n_classes, "wrong size for coef_path_" - assert m.intercept_path_.ndim == 2, "wrong number of dimensions for intercept_path_" + assert ( + m.intercept_path_.ndim == WRONG_NUMBER_OF_DIMS_FOR_LOGISTIC_INTERCEPT + ), "wrong number of dimensions for intercept_path_" # check preds at random value of lambda - l = np.random.choice(m.lambda_path_) - p = m.predict(x, lamb=l) + lam = np.random.choice(m.lambda_path_) + p = m.predict(x, lamb=lam) check_logistic_predict(m, x, p) - p = m.predict_proba(x, lamb=l) + p = m.predict_proba(x, lamb=lam) check_logistic_predict_proba(m, x, p) # if cv ran, check default behavior of 
predict and predict_proba - if m.n_splits >= 3: + if m.n_splits >= TOO_MANY_SPLITS: p = m.predict(x) check_logistic_predict(m, x, p) @@ -33,30 +39,38 @@ def sanity_check_logistic(m, x): def check_logistic_predict(m, x, p): - assert p.shape[0] == x.shape[0], "%r != %r" % (p.shape[0], x.shape[0]) - assert np.all(np.in1d(np.unique(p),m.classes_)) + assert p.shape[0] == x.shape[0], f"{p.shape[0]!r} != {x.shape[0]!r}" + assert np.all(np.isin(np.unique(p), m.classes_)) def check_logistic_predict_proba(m, x, p): assert p.shape[0] == x.shape[0] assert p.shape[1] == len(m.classes_) - assert np.all(p >= 0) and np.all(p <= 1.), "predict_proba values outside [0,1]" + assert np.all(p >= 0) and np.all(p <= 1.0), "predict_proba values outside [0,1]" + + +WRONG_NUMBER_OF_DIMS_FOR_REGRESSION_COEF_PATH: Final[int] = 2 +WRONG_NUMBER_OF_DIMS_FOR_REGRESSION_INTERCEPT: Final[int] = 1 def sanity_check_regression(m, x): sanity_check_model_attributes(m) sanity_check_cv_attrs(m) - assert m.coef_path_.ndim == 2, "wrong number of dimensions for coef_path_" - assert m.intercept_path_.ndim == 1, "wrong number of dimensions for intercept_path_" + assert ( + m.coef_path_.ndim == WRONG_NUMBER_OF_DIMS_FOR_REGRESSION_COEF_PATH + ), "wrong number of dimensions for coef_path_" + assert ( + m.intercept_path_.ndim == WRONG_NUMBER_OF_DIMS_FOR_REGRESSION_INTERCEPT + ), "wrong number of dimensions for intercept_path_" # check predict at random value of lambda - l = np.random.choice(m.lambda_path_) - p = m.predict(x, lamb=l) + lam = np.random.choice(m.lambda_path_) + p = m.predict(x, lamb=lam) assert p.shape[0] == x.shape[0] # if cv ran, check default behavior of predict - if m.n_splits >= 3: + if m.n_splits >= TOO_MANY_SPLITS: p = m.predict(x) assert p.shape[0] == x.shape[0] @@ -69,8 +83,11 @@ def sanity_check_model_attributes(m): assert m.jerr_ == 0, "jerr is non-zero" +TOO_MANY_SPLITS: Final[int] = 3 + + def sanity_check_cv_attrs(m, is_clf=False): - if m.n_splits >= 3: + if m.n_splits >= TOO_MANY_SPLITS: if is_clf: assert m.coef_.shape[-1] == m.coef_path_.shape[1], "wrong size for coef_" else: @@ -79,8 +96,4 @@ def sanity_check_cv_attrs(m, is_clf=False): assert m.cv_mean_score_.size == m.n_lambda_, "wrong size for cv_mean_score_" assert m.cv_standard_error_.size == m.n_lambda_, "wrong size for cv_standard_error_" assert m.lambda_max_ is not None, "lambda_max_ is not set" - assert (m.lambda_max_inx_ >= 0 and m.lambda_max_inx_ < m.n_lambda_, - "lambda_max_inx_ is outside bounds of lambda_path_") assert m.lambda_best_ is not None, "lambda_best_ is not set" - assert (m.lambda_best_inx_ >= 0 and m.lambda_best_inx_ < m.n_lambda_, - "lambda_best_inx_ is outside bounds of lambda_path_") diff --git a/tools/generate_f2pymod.py b/tools/generate_f2pymod.py new file mode 100644 index 0000000..b6bc02e --- /dev/null +++ b/tools/generate_f2pymod.py @@ -0,0 +1,298 @@ +#!/usr/bin/env python3 +""" +Process f2py template files (`filename.pyf.src` -> `filename.pyf`) + +Usage: python generate_pyf.py filename.pyf.src -o filename.pyf +""" + +import argparse +import os +import re +import subprocess + + +# START OF CODE VENDORED FROM `numpy.distutils.from_template` +############################################################# +""" +process_file(filename) + + takes templated file .xxx.src and produces .xxx file where .xxx + is .pyf .f90 or .f using the following template rules: + + '<..>' denotes a template. + + All function and subroutine blocks in a source file with names that + contain '<..>' will be replicated according to the rules in '<..>'. 
+ + The number of comma-separated words in '<..>' will determine the number of + replicates. + + '<..>' may have two different forms, named and short. For example, + + named: +
<p=d,s,z,c> where anywhere inside a block '<p>' will be replaced with
+   'd', 's', 'z', and 'c' for each replicate of the block.
+
+   <_c>  is already defined: <_c=s,d,c,z>
+   <_t>  is already defined: <_t=real,double precision,complex,double complex>
+
+  short:
+   <s,d,c,z>, a short form of the named, useful when no <p> appears inside
+   a block.
+
+  In general, '<..>' contains a comma separated list of arbitrary
+  expressions. If these expression must contain a comma|leftarrow|rightarrow,
+  then prepend the comma|leftarrow|rightarrow with a backslash.
+
+ If an expression matches '\\