diff --git a/.copier-answers.yml b/.copier-answers.yml new file mode 100644 index 0000000..b6c46c7 --- /dev/null +++ b/.copier-answers.yml @@ -0,0 +1,16 @@ +# Changes here will be overwritten by Copier +_commit: v1.4.0 +_src_path: gh:lincc-frameworks/python-project-template +author_email: lincc-frameworks-team@lists.lsst.org +author_name: LINCC Frameworks +create_example_module: false +custom_install: true +include_docs: true +include_notebooks: true +mypy_type_checking: basic +package_name: regionsearch +preferred_linter: black +project_license: BSD +project_name: kbmod-regionsearch +use_gitlfs: none +use_isort: true diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000..76e043c --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,63 @@ + + +## Change Description + +- [ ] My PR includes a link to the issue that I am addressing + + + +## Solution Description + + + + +## Code Quality +- [ ] I have read the Contribution Guide +- [ ] My code follows the code style of this project +- [ ] My code builds (or compiles) cleanly without any errors or warnings +- [ ] My code contains relevant comments and necessary documentation + +## Project-Specific Pull Request Checklists + + +### Bug Fix Checklist +- [ ] My fix includes a new test that breaks as a result of the bug (if possible) +- [ ] My change includes a breaking change + - [ ] My change includes backwards compatibility and deprecation warnings (if possible) + +### New Feature Checklist +- [ ] I have added or updated the docstrings associated with my feature using the [NumPy docstring format](https://numpydoc.readthedocs.io/en/latest/format.html) +- [ ] I have updated the tutorial to highlight my new feature (if appropriate) +- [ ] I have added unit/End-to-End (E2E) test cases to cover my new feature +- [ ] My change includes a breaking change + - [ ] My change includes backwards compatibility and deprecation warnings (if possible) + +### Documentation Change Checklist +- [ ] Any updated docstrings use the [NumPy docstring format](https://numpydoc.readthedocs.io/en/latest/format.html) + +### Build/CI Change Checklist +- [ ] If required or optional dependencies have changed (including version numbers), I have updated the README to reflect this +- [ ] If this is a new CI setup, I have added the associated badge to the README + + + +### Other Change Checklist +- [ ] Any new or updated docstrings use the [NumPy docstring format](https://numpydoc.readthedocs.io/en/latest/format.html). +- [ ] I have updated the tutorial to highlight my new feature (if appropriate) +- [ ] I have added unit/End-to-End (E2E) test cases to cover any changes +- [ ] My change includes a breaking change + - [ ] My change includes backwards compatibility and deprecation warnings (if possible) diff --git a/.github/workflows/build-documentation.yml b/.github/workflows/build-documentation.yml new file mode 100644 index 0000000..22fa54a --- /dev/null +++ b/.github/workflows/build-documentation.yml @@ -0,0 +1,34 @@ +# This workflow will install Python dependencies, build the package and then build the documentation. 
+ +name: Build documentation + + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + +jobs: + build: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.11 + uses: actions/setup-python@v4 + with: + python-version: '3.11' + - name: Install dependencies + run: | + sudo apt-get update + python -m pip install --upgrade pip + if [ -f docs/requirements.txt ]; then pip install -r docs/requirements.txt; fi + pip install . + - name: Install notebook requirements + run: | + sudo apt-get install pandoc + - name: Build docs + run: | + sphinx-build -T -E -b html -d docs/build/doctrees ./docs docs/build/html diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml new file mode 100644 index 0000000..0a24705 --- /dev/null +++ b/.github/workflows/linting.yml @@ -0,0 +1,36 @@ +# This workflow will install Python dependencies, then perform static linting analysis. +# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: Lint + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ['3.8', '3.9', '3.10', '3.11'] + + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + sudo apt-get update + python -m pip install --upgrade pip + pip install . + pip install .[dev] + if [ -f requirements.txt ]; then pip install -r requirements.txt; fi + - name: Analyze code with linter + + uses: psf/black@stable + with: + src: ./src diff --git a/.github/workflows/publish-to-pypi.yml b/.github/workflows/publish-to-pypi.yml new file mode 100644 index 0000000..5ea43bc --- /dev/null +++ b/.github/workflows/publish-to-pypi.yml @@ -0,0 +1,39 @@ +# This workflow will upload a Python Package using Twine when a release is created +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries + +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. + +name: Upload Python Package + +on: + release: + types: [published] + +permissions: + contents: read + +jobs: + deploy: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.11' + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install build + - name: Build package + run: python -m build + - name: Publish package + uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 + with: + user: __token__ + password: ${{ secrets.PYPI_API_TOKEN }} diff --git a/.github/workflows/smoke-test.yml b/.github/workflows/smoke-test.yml new file mode 100644 index 0000000..c19a82c --- /dev/null +++ b/.github/workflows/smoke-test.yml @@ -0,0 +1,38 @@ +# This workflow will run daily at 06:45. +# It will install Python dependencies and run tests with a variety of Python versions. 
+# See documentation for help debugging smoke test issues: +# https://lincc-ppt.readthedocs.io/en/latest/practices/ci_testing.html#version-culprit + +name: Unit test smoke test + +on: + schedule: + - cron: 45 6 * * * + +jobs: + build: + + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ['3.8', '3.9', '3.10', '3.11'] + + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + sudo apt-get update + python -m pip install --upgrade pip + pip install . + pip install .[dev] + if [ -f requirements.txt ]; then pip install -r requirements.txt; fi + - name: List dependencies + run: | + pip list + - name: Run unit tests with pytest + run: | + python -m pytest tests diff --git a/.github/workflows/testing-and-coverage.yml b/.github/workflows/testing-and-coverage.yml new file mode 100644 index 0000000..6d0649a --- /dev/null +++ b/.github/workflows/testing-and-coverage.yml @@ -0,0 +1,37 @@ +# This workflow will install Python dependencies, run tests and report code coverage with a variety of Python versions +# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: Unit test and code coverage + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + +jobs: + build: + + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ['3.8', '3.9', '3.10', '3.11'] + + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + sudo apt-get update + python -m pip install --upgrade pip + pip install . + pip install .[dev] + if [ -f requirements.txt ]; then pip install -r requirements.txt; fi + - name: Run unit tests with pytest + run: | + python -m pytest tests --cov=regionsearch --cov-report=xml + - name: Upload coverage report to codecov + uses: codecov/codecov-action@v3 diff --git a/.github/workflows/type-checking.yml b/.github/workflows/type-checking.yml new file mode 100644 index 0000000..d5f80e4 --- /dev/null +++ b/.github/workflows/type-checking.yml @@ -0,0 +1,35 @@ +# This workflow will install Python dependencies, then perform static type checking analysis. +# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: mypy Type checking + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ['3.8', '3.9', '3.10', '3.11'] + + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + sudo apt-get update + python -m pip install --upgrade pip + pip install . 
+ pip install .[dev] + if [ -f requirements.txt ]; then pip install -r requirements.txt; fi + - name: Analyze code with mypy + + run: | + mypy ./src ./tests --ignore-missing-imports diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..523b316 --- /dev/null +++ b/.gitignore @@ -0,0 +1,140 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST +_version.py + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ +_readthedocs/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# vscode +.vscode/ + +# dask +dask-worker-space/ + +# tmp directory +tmp/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..5e0330e --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,126 @@ +repos: + + # Compare the local template version to the latest remote template version + # This hook should always pass. It will print a message if the local version + # is out of date. + - repo: https://github.com/lincc-frameworks/pre-commit-hooks + rev: v0.1.1 + hooks: + - id: check-lincc-frameworks-template-version + name: Check template version + description: Compare current template version against latest + verbose: true + + # Clear output from jupyter notebooks so that only the input cells are committed. + - repo: local + hooks: + - id: jupyter-nb-clear-output + name: Clear output from Jupyter notebooks + description: Clear output from Jupyter notebooks. + files: \.ipynb$ + stages: [commit] + language: system + entry: jupyter nbconvert --clear-output + + # Run unit tests, verify that they pass. Note that coverage is run against + # the ./src directory here because that is what will be committed. 
In the
+  # github workflow script, the coverage is run against the installed package
+  # and uploaded to Codecov by calling pytest like so:
+  # `python -m pytest --cov=regionsearch --cov-report=xml`
+  - repo: local
+    hooks:
+      - id: pytest-check
+        name: Run unit tests
+        description: Run unit tests with pytest.
+        entry: bash -c "if python -m pytest --co -qq; then python -m pytest --cov=./src --cov-report=html; fi"
+        language: system
+        pass_filenames: false
+        always_run: true
+
+  # prevents committing directly to branches named 'main' and 'master'.
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.4.0
+    hooks:
+      - id: no-commit-to-branch
+        name: Prevent main branch commits
+        description: Prevent the user from committing directly to the primary branch.
+      - id: check-added-large-files
+        name: Check for large files
+        description: Prevent the user from committing very large files.
+        args: ['--maxkb=500']
+
+  # verify that pyproject.toml is well formed
+  - repo: https://github.com/abravalheri/validate-pyproject
+    rev: v0.13
+    hooks:
+      - id: validate-pyproject
+        name: Validate pyproject.toml
+        description: Verify that pyproject.toml adheres to the established schema.
+
+  # Automatically sort the imports used in .py files
+  - repo: https://github.com/pycqa/isort
+    rev: 5.12.0
+    hooks:
+      - id: isort
+        name: isort (python files in src/ and tests/)
+        description: Sort and organize imports in .py files.
+        types: [python]
+        files: ^(src|tests)/
+
+  # Analyze the code style and report code that doesn't adhere.
+  - repo: https://github.com/psf/black
+    rev: 23.3.0
+    hooks:
+      - id: black
+        types: [python]
+        files: ^(src|tests)/
+        # It is recommended to specify the latest version of Python
+        # supported by your project here, or alternatively use
+        # pre-commit's default_language_version, see
+        # https://pre-commit.com/#top_level-default_language_version
+        language_version: python3.10
+
+  # Analyze type hints and report errors.
+  - repo: local
+    hooks:
+      - id: mypy
+        name: mypy (python files in src/ and tests/)
+        entry: mypy
+        language: system
+        types: [python]
+        files: ^(src|tests)/
+        args:
+          [
+            "--ignore-missing-imports", # Ignore imports without type hints
+          ]
+
+  # Make sure Sphinx can build the documentation while explicitly omitting
+  # notebooks from the docs, so users don't have to wait through the execution
+  # of each notebook on each commit. By default, these will be checked in the
+  # GitHub workflows.
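+  # For reference, the hook below is roughly equivalent to running the
+  # following command manually (paths taken from the args list that follows):
+  #   sphinx-build -M html ./docs ./_readthedocs -T -E -d ./docs/_build/doctrees -D exclude_patterns=notebooks/*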
+ - repo: local + hooks: + - id: sphinx-build + name: Build documentation with Sphinx + entry: sphinx-build + language: system + always_run: true + exclude_types: [file, symlink] + args: + [ + "-M", # Run sphinx in make mode, so we can use -D flag later + # Note: -M requires next 3 args to be builder, source, output + "html", # Specify builder + "./docs", # Source directory of documents + "./_readthedocs", # Output directory for rendered documents + "-T", # Show full trace back on exception + "-E", # Don't use saved env; always read all files + "-d", # Flag for cached environment and doctrees + "./docs/_build/doctrees", # Directory + "-D", # Flag to override settings in conf.py + "exclude_patterns=notebooks/*", # Exclude our notebooks from pre-commit + ] diff --git a/.prepare_project.sh b/.prepare_project.sh new file mode 100644 index 0000000..17b1b5e --- /dev/null +++ b/.prepare_project.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +echo "Initializing local git repository" +{ + gitversion=( $(git version | sed 's/^.* //;s/\./ /g') ) + if let "${gitversion[0]}<2"; then + # manipulate directly + git init . && echo 'ref: refs/heads/main' >.git/HEAD + elif let "${gitversion[0]}==2 & ${gitversion[1]}<34"; then + # rename master to main + git init . && { git branch -m master main 2>/dev/null || true; }; + else + # set the initial branch name to main + git init --initial-branch=main >/dev/null + fi +} > /dev/null + +echo "Installing package and runtime dependencies in local environment" +pip install -e . > /dev/null + +echo "Installing developer dependencies in local environment" +pip install -e .'[dev]' > /dev/null + +echo "Installing pre-commit" +pre-commit install > /dev/null diff --git a/.readthedocs.yml b/.readthedocs.yml new file mode 100644 index 0000000..79bfc27 --- /dev/null +++ b/.readthedocs.yml @@ -0,0 +1,22 @@ +# .readthedocs.yml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +build: + os: ubuntu-22.04 + tools: + python: "3.10" + +# Build documentation in the docs/ directory with Sphinx +sphinx: + configuration: docs/conf.py + +# Optionally declare the Python requirements required to build your docs +python: + install: + - requirements: docs/requirements.txt + - method: pip + path: . diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..aee3ea7 --- /dev/null +++ b/LICENSE @@ -0,0 +1,28 @@ +BSD 3-Clause License + +Copyright (c) 2023, LINCC Frameworks + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000..a5622f1 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,31 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= -T -E -d _build/doctrees -D language=en +EXCLUDENB ?= -D exclude_patterns="notebooks/*","_build","**.ipynb_checkpoints" +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = ../_readthedocs/ + +.PHONY: help clean Makefile no-nb no-notebooks + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +# Build all Sphinx docs locally, except the notebooks +no-nb no-notebooks: + @$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(EXCLUDENB) $(O) + +# Cleans up files generated by the build process +clean: + rm -r "_build/doctrees" + rm -r "$(BUILDDIR)" + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 0000000..26ec313 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,47 @@ +# Configuration file for the Sphinx documentation builder. 
+#
+# For the full list of built-in configuration values, see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+
+import os
+import sys
+
+import autoapi
+from importlib.metadata import version
+
+# Define path to the code to be documented **relative to where conf.py (this file) is kept**
+sys.path.insert(0, os.path.abspath('../src/'))
+
+# -- Project information -----------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
+
+project = "kbmod-regionsearch"
+copyright = "2023, LINCC Frameworks"
+author = "LINCC Frameworks"
+release = version("kbmod-regionsearch")
+# for example take major/minor
+version = ".".join(release.split(".")[:2])
+
+# -- General configuration ---------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
+
+extensions = ["sphinx.ext.mathjax", "sphinx.ext.napoleon", "sphinx.ext.viewcode"]
+
+extensions.append("autoapi.extension")
+extensions.append("nbsphinx")
+
+templates_path = []
+exclude_patterns = ['_build', '**.ipynb_checkpoints']
+
+master_doc = "index"  # This assumes that sphinx-build is called from the root directory
+html_show_sourcelink = False  # Remove 'view source code' from top of page (for html, not python)
+add_module_names = False  # Remove namespaces from class/method signatures
+
+autoapi_type = "python"
+autoapi_dirs = ["../src"]
+autoapi_ignore = ["*/__main__.py", "*/_version.py"]
+autoapi_add_toc_tree_entry = False
+autoapi_member_order = "bysource"
+
+html_theme = "sphinx_rtd_theme"
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 0000000..daec648
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,13 @@
+.. regionsearch documentation main file.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Welcome to regionsearch's documentation!
+========================================================================================
+
+.. toctree::
+   :hidden:
+
+   Home page <self>
+   API Reference <autoapi/index>
+   Notebooks <notebooks>
diff --git a/docs/notebooks.rst b/docs/notebooks.rst
new file mode 100644
index 0000000..acfca2f
--- /dev/null
+++ b/docs/notebooks.rst
@@ -0,0 +1,5 @@
+Notebooks
+========================================================================================
+
+..
toctree:: + diff --git a/docs/notebooks/README.md b/docs/notebooks/README.md new file mode 100644 index 0000000..a521ae1 --- /dev/null +++ b/docs/notebooks/README.md @@ -0,0 +1 @@ +Put your Jupyter notebooks here :) diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 0000000..5c0f7d8 --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,9 @@ +sphinx==6.1.3 +sphinx-rtd-theme==1.2.0 +sphinx-autoapi==2.0.1 +nbsphinx +ipython +jupytext +jupyter +matplotlib +numpy diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..ade8e49 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,71 @@ +[project] +name = "kbmod-regionsearch" +license = {file = "LICENSE"} +readme = "README.md" +authors = [ + { name = "LINCC Frameworks", email = "lincc-frameworks-team@lists.lsst.org" } +] +classifiers = [ + "Development Status :: 4 - Beta", + "License :: OSI Approved :: BSD License", + "Intended Audience :: Developers", + "Intended Audience :: Science/Research", + "Operating System :: OS Independent", + "Programming Language :: Python", +] +dynamic = ["version"] +dependencies = [ + "ipykernel", # Support for Jupyter notebooks + "astropy", + "jplephem", +] + +# On a mac, install optional dependencies with `pip install '.[dev]'` (include the single quotes) +[project.optional-dependencies] +dev = [ + "pytest", + "pytest-cov", # Used to report total code coverage + "pre-commit", # Used to run checks before finalizing a git commit + "sphinx==6.1.3", # Used to automatically generate documentation + "sphinx-rtd-theme==1.2.0", # Used to render documentation + "sphinx-autoapi==2.0.1", # Used to automatically generate api documentation + "black", # Used for static linting of files + "mypy", # Used for static type checking of files + # if you add dependencies here while experimenting in a notebook and you + # want that notebook to render in your documentation, please add the + # dependencies to ./docs/requirements.txt as well. + "nbconvert", # Needed for pre-commit check to clear output from Python notebooks + "nbsphinx", # Used to integrate Python notebooks into Sphinx documentation + "ipython", # Also used in building notebooks into Sphinx + "matplotlib", # Used in sample notebook intro_notebook.ipynb + "numpy", # Used in sample notebook intro_notebook.ipynb +] + +[build-system] +requires = [ + "setuptools>=62", # Used to build and package the Python project + "setuptools_scm>=6.2", # Gets release version from git. 
Makes it available programmatically
+]
+build-backend = "setuptools.build_meta"
+
+[tool.setuptools_scm]
+write_to = "src/kbmod/regionsearch/_version.py"
+
+[tool.pytest.ini_options]
+testpaths = [
+    "tests",
+]
+
+[tool.black]
+line-length = 110
+
+[tool.isort]
+profile = "black"
+
+[tool.setuptools.package-data]
+regionsearch = ["py.typed"]
+
+[tool.setuptools.packages.find]
+where = ["src"]
+include = ["kbmod"]
+namespaces = true
diff --git a/src/kbmod/__init__.py b/src/kbmod/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/kbmod/regionsearch/__init__.py b/src/kbmod/regionsearch/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/kbmod/regionsearch/abstractions.py b/src/kbmod/regionsearch/abstractions.py
new file mode 100644
index 0000000..a6c31dc
--- /dev/null
+++ b/src/kbmod/regionsearch/abstractions.py
@@ -0,0 +1,120 @@
+"""
+Abstract classes declaring the methods required for composing a backend and an observation indexer implementation
+into a region search implementation that can be used to perform region searches on a source of observations.
+The observation sources could be a database, a file, or a simulation. They must provide at least a unique
+observation identifier, sky position (for example: right ascension, declination), time, observation location,
+and field of view.
+"""
+
+from abc import ABC, abstractmethod
+
+import numpy
+from astropy.coordinates import Angle, EarthLocation, SkyCoord  # type: ignore
+from astropy.time import Time  # type: ignore
+
+from kbmod.regionsearch.region_search import Filter
+
+
+class Backend(ABC):
+    """
+    An abstract mixin class with a method that returns observation identifiers that satisfy constraints provided in a filter.
+
+    Developers should subclass Backend for accessing specific observation sources.
+    The subclass will implement the `region_search` method and then be composed with
+    an implementation of ObservationIndexer to create a concrete class that can be used to
+    perform region searches.
+
+    A backend has access to a set of pointings that have at least a unique observation identifier, position, time, observation location, and field of view.
+    A backend may use an ObservationIndexer implementation to assign cluster indices to the pointings.
+    A backend will implement the region_search method, which returns observation identifiers that match a filter.
+    The filter is a set of constraints on the pointings known to the backend.
+
+    Parameters
+    ----------
+    kwargs
+        Keyword arguments to pass to the super class. The implementation should
+        extract any keyword arguments it needs and pass the rest to the super class.
+        It is important for composition that the implementation use `**kwargs` in the signature
+        and pass `**kwargs` to the super class. It is also important that the implementation
+        and the ObservationIndexer implementation do not use the same keyword arguments or there will
+        be a conflict and only one of them will have access to the keyword argument. Which one
+        wins depends on the order of the superclasses in the most derived class.
+    """
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    @abstractmethod
+    def region_search(self, filter: Filter) -> numpy.ndarray:
+        """Returns observation identifiers of pointings that match the filter.
+
+        Parameters
+        ----------
+        filter : Filter
+            The filter to use for the search.
+            The filter is a set of constraints on the pointings known to the backend.
+            The filter meaning is defined by the backend and typically specifies a region
+            of the sky given as a right ascension, declination, a field of view and a distance.
+
+        Returns
+        -------
+        numpy.ndarray
+            Observation identifiers of pointings that match the given filter.
+        """
+        if not hasattr(self, "observations_to_indices"):
+            raise NotImplementedError("region_search requires an implementation of observations_to_indices")
+        return numpy.array([])
+
+
+class ObservationIndexer(ABC):
+    """
+    Abstract mixin class with a method that assigns cluster indices to pointings. A complete
+    implementation may assign cluster indices to source observations to improve region search
+    performance.
+
+    Developers should subclass ObservationIndexer, implement the `observations_to_indices`
+    method to assign grouping indexes to all the pointings, and then compose the subclass
+    with an implementation of Backend to create a concrete class that can be used to
+    perform region searches.
+
+    Parameters
+    ----------
+    kwargs
+        Keyword arguments to pass to the super class. The implementation should
+        extract any keyword arguments it needs and pass the rest to the super class.
+        It is important for composition that the implementation use `**kwargs` in the signature
+        and pass `**kwargs` to the super class. It is also important that the implementation
+        and the Backend implementation do not use the same keyword arguments or there will
+        be a conflict and only one of them will have access to the keyword argument. Which one
+        wins depends on the order of the superclasses in the most derived class.
+    """
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    @abstractmethod
+    def observations_to_indices(
+        self, pointing: SkyCoord, time: Time, fov: Angle, location: EarthLocation
+    ) -> numpy.ndarray:
+        """Returns a numpy.ndarray of cluster indices for each of the observations in the arguments.
+        The interpretation of the cluster indices is up to the implementation. The indices should
+        be integers and should be unique for each cluster. The indices should be the same for
+        observations that are in the same cluster. An observation could be in multiple clusters.
+        Each argument should be of equal length or must be broadcastable to equal length.
+
+        Parameters
+        ----------
+        pointing : astropy.coordinates.SkyCoord
+            The pointing of each observation.
+        time : astropy.time.Time
+            The time of each observation.
+        fov : astropy.coordinates.Angle
+            The field of view of each observation. The field of view is the radius of the
+            circular region that is centered on the pointing and contains all the data in the observation.
+        location : astropy.coordinates.EarthLocation
+            The location of each observation.
+
+        Returns
+        -------
+        numpy.ndarray
+            Array of cluster indices for each of the observations.
+        """
+        pass
diff --git a/src/kbmod/regionsearch/backend.py b/src/kbmod/regionsearch/backend.py
new file mode 100644
index 0000000..5ecf3e0
--- /dev/null
+++ b/src/kbmod/regionsearch/backend.py
@@ -0,0 +1,104 @@
+"""
+Provides implementations of ``abstractions.Backend`` that may be composed with an implementation of
+``abstractions.ObservationIndexer`` to provide a complete region search.
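+
+A typical composition in user code is sketched below; the class name is illustrative
+and the pattern mirrors ``tests/regionsearch/test_backend.py``::
+
+    from kbmod.regionsearch import backend, indexers
+
+    class RegionSearch(backend.ObservationList, indexers.PartitionIndexer):
+        pass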
+""" + +from dataclasses import dataclass + +import numpy as np +from astropy import coordinates as coord # type: ignore +from astropy import time as time +from astropy import units as units + +from kbmod.regionsearch.abstractions import Backend +from kbmod.regionsearch.region_search import Filter + + +@dataclass(init=False) +class ObservationList(Backend): + """A backend for sources provided explicitily to instance in lists of ra, dec, time, observation location and field of view. + + Attributes + ---------- + observation_ra : coord.Angle + The right ascension of the observations. + observation_dec : coord.Angle + The declination of the observations. + observation_time : time.Time + The time of the observations. + observation_location : coord.EarthLocation + The location of the observations. This should be the location of the telescope. + observation_fov : coord.Angle + The field of view of the observations. This should enclose any imagery that is associated with the observation. + observation_identifier : np.ndarray + The observation identifier. This is an array of values that uniquely identifies each observation. + + Notes + ----- + The attributes observation_ra, observation_dec, observation_time, observation_location and observation_fov store lists of values comprising each observation. + They must all have the same shape. + """ + + observation_ra: coord.Angle + observation_dec: coord.Angle + observation_time: time.Time + observation_location: coord.EarthLocation + observation_fov: coord.Angle + observation_identifier: np.ndarray + + def __init__( + self, + observation_ra: coord.Angle, + observation_dec: coord.Angle, + observation_time: time.Time, + observation_location: coord.EarthLocation, + observation_fov: coord.Angle, + observation_identifier: np.ndarray, + **kwargs + ) -> None: + super().__init__(**kwargs) + + self.observation_ra = observation_ra + self.observation_dec = observation_dec + self.observation_time = observation_time + self.observation_location = observation_location + self.observation_fov = observation_fov + self.observation_identifier = observation_identifier + if ( + observation_dec.shape != observation_ra.shape + or observation_time.shape != observation_ra.shape + or observation_location.shape != observation_ra.shape + or observation_fov.shape != observation_ra.shape + or observation_identifier.shape != observation_ra.shape + ): + raise ValueError( + "observation_ra, observation_dec, observation_time, observation_location, observation_fov and observation_identifier must have the same shape" + ) + + def region_search(self, filter: Filter) -> np.ndarray: + """ + Returns a numpy.ndarray of observation identifiers that match the filter. + The filter must have attributes search_ra, search_dec, search_time, search_location, and search_fov. + + Parameters + ---------- + filter : Filter + The filter to use for the search. + The filter must have attributes search_ra, search_dec, search_time, search_location, and search_fov. + + Returns + ------- + numpy.ndarray[int] + A list of matching indices. 
+        """
+        matching_observation_identifier = np.array([], dtype=self.observation_identifier.dtype)
+        if not hasattr(self, "observations_to_indices"):
+            raise NotImplementedError("region_search requires an implementation of observations_to_indices")
+        pointing = coord.SkyCoord(filter.search_ra, filter.search_dec)
+        matching_index = self.observations_to_indices(pointing, None, filter.search_fov, None)  # type: ignore
+        pointing = coord.SkyCoord(self.observation_ra, self.observation_dec)
+        self.observation_index = self.observations_to_indices(  # type: ignore
+            pointing, self.observation_time, self.observation_fov, self.observation_location
+        )
+        index_list = np.nonzero(self.observation_index == matching_index)[0]
+        matching_observation_identifier = self.observation_identifier[index_list]
+        return matching_observation_identifier
diff --git a/src/kbmod/regionsearch/indexers.py b/src/kbmod/regionsearch/indexers.py
new file mode 100644
index 0000000..a627ea9
--- /dev/null
+++ b/src/kbmod/regionsearch/indexers.py
@@ -0,0 +1,106 @@
+"""
+Provides implementations of ``abstractions.ObservationIndexer`` that may be composed with an implementation of
+``abstractions.Backend`` to provide a complete region search.
+"""
+
+import numpy
+from astropy import coordinates  # type: ignore
+from astropy import units as units
+from astropy.coordinates import (  # type: ignore
+    EarthLocation,
+    SkyCoord,
+    solar_system_ephemeris,
+)
+from astropy.time import Time  # type: ignore
+
+from kbmod.regionsearch.abstractions import ObservationIndexer
+
+
+class PartitionIndexer(ObservationIndexer):
+    """
+    Partitions the observations into those that intersect a configured cone and those that do not.
+    The cone is defined by an ra, dec and field of view angle with an origin at the solar system barycenter.
+    The observation cones are similarly defined by an ra, dec, and field of view angle with an origin given by the observation location and time.
+    The observations are assigned an index of is_in_index if they intersect the cone and an index of is_out_index if they do not.
+    """
+
+    def __init__(
+        self,
+        search_ra: coordinates.Longitude,
+        search_dec: coordinates.Latitude,
+        search_distance: coordinates.Distance,
+        search_fov: coordinates.Angle,
+        is_in_index: int,
+        is_out_index: int,
+        **kwargs
+    ):
+        """Initializes the PartitionIndexer object.
+
+        Parameters
+        ----------
+        search_ra : astropy.coordinates.Longitude
+            The right ascension of the center of the sphere.
+        search_dec : astropy.coordinates.Latitude
+            The declination of the center of the sphere.
+        search_distance : astropy.coordinates.Distance
+            The distance from the barycenter to the center of the sphere.
+        search_fov : astropy.coordinates.Angle
+            The angle subtended by the sphere at the barycenter.
+        is_in_index : int
+            The index to assign to observations that intersect the sphere.
+        is_out_index : int
+            The index to assign to observations that do not intersect the sphere.
+
+        Notes
+        -----
+        The partition sphere has a center specified by an ra, dec and distance from the barycenter.
+        The sphere radius is specified by an angle subtended by the sphere at the barycenter.
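+
+        Examples
+        --------
+        A minimal construction sketch; the values are illustrative and mirror the
+        unit tests::
+
+            indexer = PartitionIndexer(
+                search_ra=coordinates.Longitude(45.0, "deg"),
+                search_dec=coordinates.Latitude(-30.0, "deg"),
+                search_distance=coordinates.Distance(30.0, "au"),
+                search_fov=coordinates.Angle(2.0, "deg"),
+                is_in_index=1,
+                is_out_index=0,
+            )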
+        """
+        super().__init__(**kwargs)
+        self.obssky = coordinates.SkyCoord(search_ra, search_dec, distance=search_distance, frame="icrs")
+        self.fov = search_fov
+        self.is_in_index = is_in_index
+        self.is_out_index = is_out_index
+
+    def observations_to_indices(
+        self, pointing: SkyCoord, time: Time, fov: coordinates.Angle, location: EarthLocation
+    ) -> numpy.ndarray:
+        """
+        Returns a numpy array of indices for each observation specified by pointing, time, fov and location.
+
+        Parameters
+        ----------
+        pointing : astropy.coordinates.SkyCoord
+            The pointing of the observations.
+        time : astropy.time.Time
+            The time of the observations.
+        fov : astropy.coordinates.Angle
+            The field of view of the observations.
+        location : astropy.coordinates.EarthLocation
+            The location of the observations. This should be the location of the telescope.
+
+        Returns
+        -------
+        numpy.ndarray
+            A numpy array of indices for each observation specified by pointing, time, fov and location.
+            An index of is_in_index is assigned to observations with a cone that intersects the sphere.
+            An index of is_out_index is assigned to observations with a cone that does not intersect the sphere.
+
+        Notes
+        -----
+        The observation cone has an apex at the location of the telescope at the time of the observation,
+        an axis that points in the direction of the pointing, and an opening angle of fov.
+        """
+        if time is None:
+            separation = self.obssky.separation(pointing)
+        else:
+            if location is None:
+                location = time.location
+
+            with solar_system_ephemeris.set("de432s"):
+                obs_pos_itrs = location.get_itrs(obstime=time)
+                # the next line is slow
+                observer_to_target = self.obssky.transform_to(obs_pos_itrs).gcrs
+                separation = observer_to_target.separation(pointing)
+        indices = numpy.where(separation < (self.fov + fov) * 0.5, self.is_in_index, self.is_out_index)
+        return indices
diff --git a/src/kbmod/regionsearch/py.typed b/src/kbmod/regionsearch/py.typed
new file mode 100644
index 0000000..e69de29
diff --git a/src/kbmod/regionsearch/region_search.py b/src/kbmod/regionsearch/region_search.py
new file mode 100644
index 0000000..881d320
--- /dev/null
+++ b/src/kbmod/regionsearch/region_search.py
@@ -0,0 +1,55 @@
+"""Region search facility for kbmod."""
+
+from dataclasses import dataclass
+
+import astropy.units as u  # type: ignore
+from astropy.coordinates import Angle, Distance  # type: ignore
+
+
+@dataclass(init=False)
+class Filter:
+    """A class for specifying region search filters."""
+
+    search_ra: Angle = None
+    search_dec: Angle = None
+    search_distance: Distance = None
+    search_fov: Angle = None
+
+    def __init__(
+        self,
+        search_ra: Angle = None,
+        search_dec: Angle = None,
+        search_distance: Distance = None,
+        search_fov: Angle = None,
+    ):
+        self.with_ra(search_ra)
+        self.with_dec(search_dec)
+        self.with_distance(search_distance)
+        self.with_fov(search_fov)
+
+    def __repr__(self):
+        return f"Filter(ra={self.search_ra}, dec={self.search_dec}, search_distance={self.search_distance}, fov={self.search_fov})"
+
+    def with_ra(self, search_ra: Angle):
+        """Sets the right ascension of the center of the search region."""
+        if search_ra is not None:
+            self.search_ra = Angle(search_ra, unit=u.deg)
+        return self
+
+    def with_dec(self, search_dec: Angle):
+        """Sets the declination of the center of the search region."""
+        if search_dec is not None:
+            self.search_dec = Angle(search_dec, unit=u.deg)
+        return self
+
+    def with_distance(self, search_distance: Distance):
+        """Sets the minimum distance from the barycenter to the search region."""
+        if search_distance is not None:
+            self.search_distance = Distance(search_distance, unit=u.au)
+        return self
+
+    def with_fov(self, search_fov: Angle):
+        """Sets the field of view of the search region."""
+        if search_fov is not None:
+            self.search_fov = Angle(search_fov, unit=u.deg)
+        return self
diff --git a/src/kbmod/regionsearch/utilities.py b/src/kbmod/regionsearch/utilities.py
new file mode 100644
index 0000000..e852010
--- /dev/null
+++ b/src/kbmod/regionsearch/utilities.py
@@ -0,0 +1,317 @@
+import os
+import random
+import typing
+
+import astropy.table  # type: ignore
+import numpy as np
+from astropy import time
+from astropy import units as u
+from astropy.coordinates import (  # type: ignore
+    EarthLocation,
+    SkyCoord,
+    solar_system_ephemeris,
+)
+
+
+class RegionSearchClusterData(object):
+    """Randomly generated cluster data for testing the region search.
+
+    Each instance of this class contains lists of pointings, observation times, reference pointings and distances,
+    arranged so that the pointings grouped by a reference pointing and distance all view the position given by that
+    reference pointing and distance from the barycenter.
+
+    Attributes
+    ----------
+    version : int
+        The version of the data. This is manually incremented whenever a material change is made to the data generation. It appears in the filename and the
+        metadata and is used to check that data in a file is compatible with the code.
+    seed : int
+        The seed for the random number generator. This appears in the filename. It is used to generate the data idempotently.
+    clustercnt : int
+        The number of clusters to generate. This appears in the filename.
+    samplespercluster : int
+        The number of samples per cluster to generate. This appears in the filename.
+    data_loaded : bool
+        True if the data have been loaded from a file.
+    data_generated : bool
+        True if the data have been generated.
+    """
+
+    def __init__(
+        self,
+        basename: str = "clustered-data",
+        suffix: str = ".ecsv",
+        format: str = "ascii.ecsv",
+        seed: int = 0,
+        clustercnt: int = 10,
+        samplespercluster: int = 1000,
+        removecache: bool = False,
+    ) -> None:
+        """
+        Parameters
+        ----------
+        basename : str
+            The base name of the file to read or write. This is the first part of the file name.
+            The default value is "clustered-data".
+            The full name includes the directory, basename, seed, clustercnt, samplespercluster, version, section name, and suffix.
+        suffix : str
+            The suffix of the file to read or write. This is the last part of the file name.
+            The default value is ".ecsv" and seems the best choice for astropy tables.
+        format : str
+            The format of the file to read or write. This is coordinated with the suffix.
+            The default value is "ascii.ecsv" and seems the best choice for astropy tables.
+        seed : int
+            The seed for the random number generator. This is included in the filename.
+        clustercnt : int
+            The number of clusters to generate. This is included in the filename.
+        samplespercluster : int
+            The number of samples per cluster to generate. This is included in the filename.
+        removecache : bool
+            If True, remove the cache files and regenerate the data.
+            If False, use the cache files if they exist.
+
+        Notes
+        -----
+        The data are generated if they do not exist.
+        If the data are generated, they are saved in the tmp directory if that exists.
+        If the filename is found in the directory "data" then that will be used.
+        The files in "tmp" are ignored by git. The files in "data" are controlled by git.
+        The full name includes the directory, basename, seed, clustercnt, samplespercluster, version, section name, and suffix.
+        The version is checked when reading the file.
+        """
+        # See https://docs.astropy.org/en/stable/io/unified.html#table-serialization-methods
+        self.version = "1"
+        # seed for random number generator
+        self.seed = seed
+        # number of clusters in test
+        self.clustercnt = clustercnt
+        # number of samples per cluster
+        self.samplespercluster = samplespercluster
+        # data are not loaded
+        self.data_loaded = False
+        # data are not generated
+        self.data_generated = False
+
+        sections = [
+            ["samples", ["bary_to_target", "observer_to_target", "observation_geolocation", "cluster_id"]],
+            ["clusters", ["clusters", "clusterdistances"]],
+        ]
+
+        # tmp data overrides data. Only tmp gets written. Only data is controlled in git.
+        # Only remove cache in tmp. This also implies generating data in tmp.
+        if removecache:
+            for section in sections:
+                filename = self.__filename("tmp", basename, section[0], suffix)
+                if os.path.exists(filename):
+                    os.remove(filename)
+            self.generate_and_save(basename, suffix, format, sections)
+        else:
+            for dirname in ["tmp", "data"]:
+                # sections must be grouped together
+                if not self.data_loaded:
+                    data_loaded_count = 0
+                    for section in sections:
+                        filename = self.__filename(dirname, basename, section[0], suffix)
+                        if os.path.exists(filename) and self.read_table(filename, format, list(section[1])):
+                            data_loaded_count += 1
+                        else:
+                            # early out since this is not a complete set of data
+                            break
+                    if data_loaded_count == len(sections):
+                        self.data_loaded = True
+                        # early out since this is a complete set of data
+                        break
+
+            # if data are not loaded, generate them
+            if not self.data_loaded:
+                self.generate_and_save(basename, suffix, format, sections)
+
+        if not self.data_loaded:
+            raise Exception("Could not read or generate data")
+
+    def generate_and_save(self, basename, suffix, format, sections):
+        """
+        Generate the data and save them in the tmp directory.
+
+        Parameters
+        ----------
+        basename : str
+            The base name of the file to read or write.
+        suffix : str
+            The suffix of the file to read or write.
+        format : str
+            The format of the file to read or write.
+        sections : list
+            The list of [section name, column names] pairs to write.
+        """
+        if self._generate_data():
+            if os.path.isdir("tmp"):
+                for section in sections:
+                    filename = self.__filename("tmp", basename, section[0], suffix)
+                    self.write_table(filename, format, section[1])
+                self.data_loaded = True
+                self.data_generated = True
+
+    def __filename(self, dirname, basename, sectionname, suffix):
+        """
+        Return the full filename.
+
+        Parameters
+        ----------
+        dirname : str
+            The directory name. This is "tmp" or "data".
+        basename : str
+            The base name of the file to read or write.
+        sectionname : str
+            The section name.
+        suffix : str
+            The suffix of the file to read or write. This is the last part of the file name.
+
+        Returns
+        -------
+        str
+            The full filename.
+        """
+        return f"{dirname}/{basename}-{self.seed}-{self.clustercnt}-{self.samplespercluster}-{self.version}-{sectionname}{suffix}"
+
+    def read_table(self, filename: str, format: str, colnames: typing.List[str]):
+        """
+        Read the table from the file and validate the version.
+
+        Parameters
+        ----------
+        filename : str
+            The full filename.
+        format : str
+            The format of the file to read or write.
+        colnames : typing.List[str]
+            The list of column names to read from the file. The file may contain other columns but it must have these columns.
+
+        Returns
+        -------
+        bool
+            True if the table was read and the version is valid and all of the columns are present.
+ False if the table was not read or the version is invalid or any of the columns are missing. + """ + hold_version = self.version + try: + table = astropy.table.Table.read(filename, format=format) + for metakey in table.meta.keys(): + setattr(self, metakey, table.meta[metakey]) + if self.version != hold_version: + self.version = hold_version + raise Exception(f"Version mismatch: {self.version} != {table.meta['version']}") + for column in colnames: + if column not in table.colnames: + raise Exception(f"Column {column} not found in {filename}") + setattr(self, column, table[column]) + finally: + self.version = hold_version + return True + + def write_table(self, filename: str, format: str, colnames: typing.List[str]): + """ + Write the table to the file including all the columns in colnames. + + Parameters + ---------- + filename : str + The full filename. + format : str + The format of the file to read or write. + colnames : typing.List[str] + The list of column names to write to the file. + """ + if os.path.exists(filename): + os.remove(filename) + with open(filename, "w") as f: + coldata = [getattr(self, column) for column in colnames] + table = astropy.table.Table( + data=coldata, + names=colnames, + ) + table.meta["version"] = self.version + table.write(f, format=format, overwrite=True) + return True + + def _generate_data(self): + """ + Generate the data for the test. + """ + if self.data_loaded: + return + + random.seed(self.seed) + # timerange to assign to samples. The time of a sample is not related to the cluster or sample in cluster. + self.timerange = time.Time( + ["2022-06-01T00:00:00.000", "2023-06-01T00:00:00.000"], format="isot", scale="utc" + ) + self.timerange.format = "mjd" + # the nominal distances for each cluster. + if self.clustercnt <= 1: + self.clusterdistances = [random.randrange(2, 10000)] + else: + self.clusterdistances = [2] + self.clusterdistances.extend([random.randrange(2, 10000) for _ in range(self.clustercnt - 1)]) + + clusters = [[random.uniform(0.0, 360.0), random.uniform(0, 180.0)] for _ in range(self.clustercnt)] + + self.clusters = [[i[0] - 180.0, i[1] - 90.0] for i in clusters] + + # the number of rows in the test dataset + self.rowcnt = self.clustercnt * self.samplespercluster + self.cluster_id = np.array([i for i in range(self.clustercnt) for _ in range(self.samplespercluster)]) + + baryra = [ + (i[0] + random.uniform(-1, 1)) % 360.0 - 180.0 + for i in clusters + for _ in range(self.samplespercluster) + ] * u.deg + barydec = [ + (i[1] + random.uniform(-1, 1)) % 180.0 - 90.0 + for i in clusters + for _ in range(self.samplespercluster) + ] * u.deg + bary_distance = [ + self.clusterdistances[i] for i in range(self.clustercnt) for _ in range(self.samplespercluster) + ] * u.au + + # Vera Rubin Observatory for all samples + # see https://www.lsst.org/scientists/keynumbers + self.observation_geolocation = EarthLocation.from_geodetic( + [-70.749417] * self.rowcnt * u.deg, [-30.244633] * self.rowcnt * u.deg, [2647] * self.rowcnt * u.m + ) + + valmjd = np.array([random.uniform(*self.timerange.value) for _ in range(self.rowcnt)]) + valobservation_time = time.Time( + # round to millisecond in lieu of modifying file format for column. 
+            val=valmjd,
+            format="mjd",
+            scale="utc",
+            # location=self.observation_geolocation,
+        )
+        valobservation_time.format = "isot"
+        # convert to exact isot representation so read after write serialization is exact
+        valisot = valobservation_time.value
+        observation_time = time.Time(
+            val=valisot,
+            format="isot",
+            scale="utc",
+            location=self.observation_geolocation,
+        )
+
+        with solar_system_ephemeris.set("de432s"):
+            self.bary_to_target = SkyCoord(ra=baryra, dec=barydec, distance=bary_distance)
+            obs_pos_itrs = observation_time.location.get_itrs(obstime=observation_time)
+            # the next line is slow
+            # the transform_to changes the angular units in the frame from degrees to radians. Change them back for cache read after write consistency.
+            self.observer_to_target = SkyCoord(
+                self.bary_to_target.transform_to(obs_pos_itrs).gcrs.spherical, obstime=observation_time
+            )
+        return True
+
+    @property
+    def observation_pointing(self):
+        return SkyCoord(ra=self.observer_to_target.ra, dec=self.observer_to_target.dec)
+
+    @property
+    def observation_time(self):
+        return self.observer_to_target.obstime
+
+    @property
+    def observation_distance(self):
+        return self.observer_to_target.distance
diff --git a/tests/regionsearch/conftest.py b/tests/regionsearch/conftest.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/regionsearch/test_backend.py b/tests/regionsearch/test_backend.py
new file mode 100644
index 0000000..0ba803a
--- /dev/null
+++ b/tests/regionsearch/test_backend.py
@@ -0,0 +1,132 @@
+"""Tests for the Backend classes."""
+
+import numpy as np
+import pytest
+from astropy import units as u  # type: ignore
+from astropy.coordinates import (  # type: ignore
+    Angle,
+    Distance,
+    EarthLocation,
+    Latitude,
+    Longitude,
+)
+from astropy.time import Time  # type: ignore
+
+from kbmod.regionsearch import abstractions, backend, indexers, utilities
+from kbmod.regionsearch.region_search import Filter
+
+
+def test_backend_abstract():
+    """Tests that the backend raises an exception when region_search is called without an indexer."""
+
+    class TestBackend(abstractions.Backend):
+        def __init__(self, **kwargs):
+            super().__init__(**kwargs)
+
+        def region_search(self, filter: Filter) -> np.ndarray:
+            return super().region_search(filter)
+
+    b = TestBackend()
+    assert b is not None
+    with pytest.raises(NotImplementedError):
+        b.region_search(Filter())
+        assert False, "expected NotImplementedError when calling region_search without an indexer"
+
+
+def test_observationlist_init():
+    """Tests the ObservationList backend's constructor."""
+    data = utilities.RegionSearchClusterData(clustercnt=5, samplespercluster=10, removecache=True)
+
+    ra = data.observation_pointing.ra
+    dec = data.observation_pointing.dec
+    time = data.observation_time
+    location = data.observation_geolocation
+    fov = np.ones([data.rowcnt]) * Angle(1, "deg")
+    observation_identifier = data.cluster_id
+    b = backend.ObservationList(ra, dec, time, location, fov, observation_identifier)
+    assert all(b.observation_ra == ra)
+    assert all(b.observation_dec == dec)
+    assert all(b.observation_time == time)
+    assert all(b.observation_location == location)
+    assert all(b.observation_fov == fov)
+    assert all(b.observation_identifier == observation_identifier)
+
+
+def test_observationlist_consistency():
+    """Tests that the ObservationList backend's constructor raises an error if all elements are not equal in length."""
+    data = utilities.RegionSearchClusterData(clustercnt=5, samplespercluster=10, removecache=True)
+
+    ra = data.observation_pointing.ra
+    dec = data.observation_pointing.dec
+    time = data.observation_time
+    location = data.observation_geolocation
+    fov = Angle(1, "deg")
+    observation_identifier = data.cluster_id
+    with pytest.raises(ValueError):
+        backend.ObservationList(ra, dec, time, location, fov, observation_identifier)
+
+
+def test_observationlist_missing_observation_to_indices():
+    """Tests that the backend raises an exception when region_search is called without an indexer."""
+    data = utilities.RegionSearchClusterData(clustercnt=5, samplespercluster=10, removecache=True)
+
+    ra = data.observation_pointing.ra
+    dec = data.observation_pointing.dec
+    time = data.observation_time
+    location = data.observation_geolocation
+    fov = np.ones([data.rowcnt]) * Angle(1, "deg")
+    observation_identifier = data.cluster_id
+    b = backend.ObservationList(ra, dec, time, location, fov, observation_identifier)
+    with pytest.raises(NotImplementedError):
+        b.region_search(Filter())
+        assert (
+            False
+        ), "Expect NotImplementedError when region_search is called without an observation_to_indices method (missing ObservationIndexer)."
+    assert True
+
+
+def test_observationlist_partition():
+    """Tests the ObservationList backend with partition indexer."""
+
+    # Compose a region search
+    class TestRegionSearchList(backend.ObservationList, indexers.PartitionIndexer):
+        """A test class for the ObservationList Backend with Partition indexer."""
+
+        def __init__(self, **kwargs) -> None:
+            super().__init__(**kwargs)
+
+    data = utilities.RegionSearchClusterData(clustercnt=5, samplespercluster=10, removecache=True)
+    ra = data.observation_pointing.ra
+    dec = data.observation_pointing.dec
+    time = data.observation_time
+    location = data.observation_geolocation
+    fov = np.ones([data.rowcnt]) * Angle(2, "deg")
+    observation_identifier = np.array([f"file:epyc/observations/{i:04d}" for i in range(data.rowcnt)])
+    clusteri = 0
+    search_ra = (data.clusters[clusteri][0]) * u.deg
+    search_dec = (data.clusters[clusteri][1]) * u.deg
+    search_distance = data.clusterdistances[clusteri] * u.au
+    search_fov = Angle(4.0, "deg")
+    regionsearch = TestRegionSearchList(
+        observation_ra=ra,
+        observation_dec=dec,
+        observation_time=time,
+        observation_location=location,
+        observation_fov=fov,
+        observation_identifier=observation_identifier,
+        search_ra=search_ra,
+        search_dec=search_dec,
+        search_distance=search_distance,
+        search_fov=search_fov,
+        is_in_index=clusteri,
+        is_out_index=~clusteri,
+    )
+    assert regionsearch is not None
+    searchresults = regionsearch.region_search(
+        Filter(
+            search_ra=search_ra, search_dec=search_dec, search_distance=search_distance, search_fov=search_fov
+        )
+    )
+    assert searchresults is not None
+    checkresults = observation_identifier[np.nonzero(data.cluster_id == clusteri)[0]]
+    assert np.all(searchresults == checkresults)
diff --git a/tests/regionsearch/test_filter.py b/tests/regionsearch/test_filter.py
new file mode 100644
index 0000000..62741df
--- /dev/null
+++ b/tests/regionsearch/test_filter.py
@@ -0,0 +1,62 @@
+"""Test the `Filter` class for region search"""
+from astropy import units as u
+
+from kbmod.regionsearch.region_search import Filter
+
+
+def test_filter_bareargs() -> None:
+    """Verify the output of the `Filter` class using bare quantities"""
+    output = Filter(42, -28, 30)
+    assert output.search_ra == 42 * u.deg
+    assert output.search_dec == -28 * u.deg
+    assert output.search_distance == 30 * u.au
+
+
+def test_filter_namedargs() -> None:
+
+
+def test_filter_namedargs() -> None:
+    """Verify the output of the `Filter` class using
+    named arguments with bare (unitless) values"""
+    output = Filter(search_ra=42, search_dec=-28, search_distance=30)
+    assert output.search_ra == 42 * u.deg
+    assert output.search_dec == -28 * u.deg
+    assert output.search_distance == 30 * u.au
+
+
+def test_filter_unitargs() -> None:
+    """Verify the output of the `Filter` class using quantities with units"""
+    output = Filter(42 * u.deg, -28 * u.deg, 30 * u.au, 1 * u.deg)
+    assert output.search_ra == 42 * u.deg
+    assert output.search_dec == -28 * u.deg
+    assert output.search_distance == 30 * u.au
+    assert output.search_fov == 1 * u.deg
+
+
+def test_filter_none() -> None:
+    """Verify the output of the `Filter` class with no arguments"""
+    output = Filter()
+    assert output.search_ra is None
+    assert output.search_dec is None
+    assert output.search_distance is None
+    assert output.search_fov is None
+
+
+def test_filter_literate() -> None:
+    """Verify the output of the `Filter` class set using fluent (literate) setter methods"""
+    output = Filter()
+    output.with_ra(0 * u.deg)
+    output.with_dec(0 * u.deg)
+    output.with_distance(30 * u.au)
+    assert output.search_ra == 0 * u.deg
+    assert output.search_dec == 0 * u.deg
+    assert output.search_distance == 30 * u.au
+
+
+def test_filter_literate2() -> None:
+    """Verify the output of the `Filter` class set by chaining fluent (literate) setter methods"""
+    # fmt: off
+    output = Filter()\
+        .with_ra(0 * u.deg)\
+        .with_dec(0 * u.deg)\
+        .with_distance(30 * u.au)
+    # fmt: on
+    assert output.search_ra == 0 * u.deg
+    assert output.search_dec == 0 * u.deg
+    assert output.search_distance == 30 * u.au
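+
+
+# A minimal sketch combining the constructor with a fluent override; it assumes
+# the with_* setters mutate and return the same Filter instance, as the chained
+# calls in test_filter_literate2 rely on.
+def test_filter_constructor_then_fluent() -> None:
+    """Verify (assumed behavior) that a fluent setter overrides a constructor value"""
+    output = Filter(42, -28, 30).with_ra(7 * u.deg)
+    assert output.search_ra == 7 * u.deg
+    assert output.search_dec == -28 * u.deg
+    assert output.search_distance == 30 * u.au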
diff --git a/tests/regionsearch/test_indexer.py b/tests/regionsearch/test_indexer.py
new file mode 100644
index 0000000..2b704b3
--- /dev/null
+++ b/tests/regionsearch/test_indexer.py
@@ -0,0 +1,31 @@
+"""Tests for indexer classes."""
+
+import astropy.units as u
+import numpy
+
+from kbmod.regionsearch.indexers import PartitionIndexer
+from kbmod.regionsearch.utilities import RegionSearchClusterData
+
+
+def test_partition_indexer():
+    """Test the PartitionIndexer"""
+    data = RegionSearchClusterData(clustercnt=2, samplespercluster=5, removecache=True)
+    clusteri = 0
+    # is_in_index labels observations inside the search region; is_out_index
+    # (~clusteri, i.e. -1 for cluster 0) labels those outside it.
+    indexer = PartitionIndexer(
+        search_ra=(data.clusters[clusteri][0]) * u.deg,
+        search_dec=(data.clusters[clusteri][1]) * u.deg,
+        search_distance=data.clusterdistances[clusteri] * u.au,
+        search_fov=2.0 * u.deg,
+        is_in_index=clusteri,
+        is_out_index=~clusteri,
+    )
+    assert indexer is not None
+    indices = indexer.observations_to_indices(
+        pointing=data.observation_pointing,
+        time=data.observation_time,
+        fov=2.0 * u.deg,
+        location=data.observation_geolocation,
+    )
+    assert indices is not None
+    indexref = numpy.where(data.cluster_id == clusteri, clusteri, ~clusteri)
+    assert numpy.all(indexref == indices)
diff --git a/tests/regionsearch/test_utilities.py b/tests/regionsearch/test_utilities.py
new file mode 100644
index 0000000..076e0d9
--- /dev/null
+++ b/tests/regionsearch/test_utilities.py
@@ -0,0 +1,45 @@
+"""Tests for utilities."""
+
+import numpy as np
+
+from kbmod.regionsearch.utilities import RegionSearchClusterData
+
+
+def test_cache_write_read():
+    """Test that the cache write/read round trip is idempotent."""
+    data1 = RegionSearchClusterData(samplespercluster=5, removecache=True)
+    assert data1 is not None
+    assert data1.data_loaded
+    assert data1.data_generated
+    # Check that the written files can be read back.
+    data2 = RegionSearchClusterData(samplespercluster=5, removecache=False)
+    assert data2 is not None
+    assert data2.data_loaded
+    assert not data2.data_generated
+    assert all(data1.observation_pointing == data2.observation_pointing)
+    # The times will likely differ by a few nanoseconds after the round trip, so exact equality is not expected.
+    assert all(data1.observation_time - data2.observation_time < 1e-8)
+    assert all(data1.observation_geolocation == data2.observation_geolocation)
+    assert all(data1.cluster_id == data2.cluster_id)
+    assert all(data1.clusterdistances == data2.clusterdistances)
+    assert all(data1.bary_to_target == data2.bary_to_target)
+    assert all(data1.observer_to_target.ra == data2.observer_to_target.ra)
+    assert all(data1.observer_to_target.dec == data2.observer_to_target.dec)
+    assert all(data1.observer_to_target.distance == data2.observer_to_target.distance)
+    assert all(data1.observer_to_target.obstime.location == data2.observer_to_target.obstime.location)
+    assert all(data1.observer_to_target.obstime == data2.observer_to_target.obstime)
+    # Check frame equality explicitly: special care must be taken to make the
+    # units in frame._data match, so this comparison could regress.
+    assert all(data1.observer_to_target.frame == data2.observer_to_target.frame)
+    assert all(data1.observer_to_target.cartesian == data2.observer_to_target.cartesian)
+    assert all(data1.observer_to_target == data2.observer_to_target)
+
+
+def test_clusterdata_stats():
+    """Verify that the cluster data groups by cluster as expected."""
+    clustercnt = 100
+    samplespercluster = 5
+    data = RegionSearchClusterData(clustercnt=clustercnt, samplespercluster=samplespercluster)
+    assert data is not None
+    for clusteri in range(data.clustercnt):
+        clustersamples = len(np.nonzero(clusteri == data.cluster_id)[0])
+        assert clustersamples == samplespercluster
diff --git a/tmp/.githold b/tmp/.githold
new file mode 100644
index 0000000..e69de29