diff --git a/.github/workflows/python-docs-only.yml b/.github/workflows/build_docs.yaml similarity index 54% rename from .github/workflows/python-docs-only.yml rename to .github/workflows/build_docs.yaml index 9220a934b..9511f6f1b 100644 --- a/.github/workflows/python-docs-only.yml +++ b/.github/workflows/build_docs.yaml @@ -1,59 +1,67 @@ -name: Build and Push Documentation +name: Build and Upload Docs -on: - release: - types: - - created +"on": + push: + tags: + - "*" + branches: + - "main" + pull_request: {} workflow_dispatch: jobs: - Docs: - name: Build and push documentation - runs-on: "ubuntu-latest" + build_sphinx_docs: + name: Build and upload documentation + runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: conda-incubator/setup-miniconda@v2 with: auto-update-conda: true - python-version: "3.10" - channels: conda-forge,defaults + python-version: "3.11" + channels: conda-forge channel-priority: strict show-channel-urls: true - - name: configure and install conda and requirements + + - name: configure and install requirements and documenteer shell: bash -l {0} run: | conda config --set always_yes yes conda install --quiet --file=requirements.txt - conda install lsst-documenteer-pipelines + conda install --quiet pip + pip install "documenteer[guide]" + - name: install rubin_sim shell: bash -l {0} run: | echo `pwd` ls ${{ github.workspace }} python -m pip install . + - name: download rubin_sim_data components needed to set up metrics for metricList shell: bash -l {0} run: | export RUBIN_SIM_DATA_DIR=${{ github.workspace }} rs_download_data --tdqm_disable -d 'maf,throughputs' - - name: conda list + + - name: check conda and documenteer shell: bash -l {0} - run: conda list - - name: build documentation + run: | + conda list + + - name: build docs shell: bash -l {0} run: | export RUBIN_SIM_DATA_DIR=${{ github.workspace }} cd doc python metric_list.py - make html - - name: Install LTD Conveyor - shell: bash -l {0} - run: | - python -m pip install ltd-conveyor + package-docs build + - name: upload documentation - shell: bash -l {0} - env: - LTD_PASSWORD: ${{ secrets.LTD_PASSWORD }} - LTD_USERNAME: ${{ secrets.LTD_USERNAME }} - run: | - ltd upload --gh --dir doc/_build/html --product rubin-sim + uses: lsst-sqre/ltd-upload@v1 + with: + project: "rubin-sim" + dir: "docs/_build/html" + username: ${{ secrets.ltd_username }} + password: ${{ secrets.ltd_password }} + diff --git a/.github/workflows/build_pypi.yml b/.github/workflows/build_pypi.yml new file mode 100644 index 000000000..73b878982 --- /dev/null +++ b/.github/workflows/build_pypi.yml @@ -0,0 +1,89 @@ +name: Build and Publish PyPI + +on: + push: + branches: + - main + tags: + - "*" + +jobs: + tests: + name: Check Tests (${{ matrix.python-version }}, ${{ matrix.os }}) + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: ["ubuntu-latest"] + python-version: ["3.11"] + steps: + - uses: actions/checkout@v4 + - uses: conda-incubator/setup-miniconda@v2 + with: + python-version: ${{ matrix.python-version }} + auto-update-conda: true + channels: conda-forge,defaults + miniforge-variant: Mambaforge + use-mamba: true + channel-priority: strict + show-channel-urls: true + + - name: configure conda and install requirements + shell: bash -l {0} + run: | + mamba config --set always_yes yes + mamba install --quiet --file=requirements.txt + mamba install --quiet --file=test-requirements.txt + + - name: install rubin_sim + shell: bash -l {0} + run: | + echo `pwd` + python -m pip 
install . + + - name: conda list + shell: bash -l {0} + run: conda list + + - name: run unit tests + shell: bash -l {0} + run: | + pytest -r a -v --cov=rubin_sim --cov=tests --cov-report=xml --cov-report=term --cov-branch + + - name: Upload coverage to codecov + uses: codecov/codecov-action@v2 + with: + file: coverage.xml + + pypi: + name: Build and upload to PyPI + runs-on: ubuntu-latest + needs: [tests] + if: startsWith(github.ref, 'refs/tags/') + + steps: + - uses: actions/checkout@v4 + with: + # Need to clone everything to embed the version. + fetch-depth: 0 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: 3.11 + cache: "pip" + + - name: Install dependencies for build + run: | + python -m pip install --upgrade pip + pip install --upgrade setuptools build + + - name: Build and create distribution + run: | + python -m build --skip-dependency-check + + - name: Upload to lsst-sp PyPI + uses: pypa/gh-action-pypi-publish@release/v1 + with: + user: __token__ + password: ${{ secrets.SP_PYPI_UPLOADS }} diff --git a/.github/workflows/ruff.yaml b/.github/workflows/ruff.yaml new file mode 100644 index 000000000..b7e2b3e68 --- /dev/null +++ b/.github/workflows/ruff.yaml @@ -0,0 +1,24 @@ +name: Ruff and iSort +on: + # Trigger the workflow on push (to main) or pull request + push: + branches: + - main + pull_request: + branches: + - main + workflow_dispatch: + +jobs: + isort: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: isort/isort-action@v1 + with: + requirements-files: "requirements.txt test-requirements.txt" + ruff: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: chartboost/ruff-action@v1 \ No newline at end of file diff --git a/.github/workflows/python-all-tests.yml b/.github/workflows/run_all_tests.yaml similarity index 69% rename from .github/workflows/python-all-tests.yml rename to .github/workflows/run_all_tests.yaml index 3346c6443..34f69d353 100644 --- a/.github/workflows/python-all-tests.yml +++ b/.github/workflows/run_all_tests.yaml @@ -1,4 +1,4 @@ -name: Run All Tests +name: Run All of the Tests on: push: @@ -16,39 +16,50 @@ jobs: fail-fast: True matrix: os: ["ubuntu-latest-8-cores"] - python-version: ["3.10"] + python-version: ["3.11"] steps: - - uses: actions/checkout@v3 - + - uses: actions/checkout@v4 - uses: conda-incubator/setup-miniconda@v2 with: auto-update-conda: true python-version: ${{ matrix.python-version }} channels: conda-forge,defaults + miniforge-variant: Mambaforge + use-mamba: true channel-priority: strict show-channel-urls: true + - name: configure conda and install requirements shell: bash -l {0} run: | - conda config --set always_yes yes - conda install --quiet --file=requirements.txt - conda install --quiet --file=test-requirements.txt + mamba config --set always_yes yes + mamba install --quiet --file=requirements.txt + mamba install --quiet --file=test-requirements.txt + - name: install rubin_sim shell: bash -l {0} run: | echo `pwd` ls ${{ github.workspace }} python -m pip install . 
+ - name: download rubin_sim_data components needed for unit tests shell: bash -l {0} run: | export RUBIN_SIM_DATA_DIR=${{ github.workspace }}/data_dir rs_download_data --force --tdqm_disable + - name: conda list shell: bash -l {0} run: conda list + - name: run tests shell: bash -l {0} run: | export RUBIN_SIM_DATA_DIR=${{ github.workspace }}/data_dir - pytest + pytest -r a -v --cov=rubin_sim --cov=tests --cov-report=xml --cov-report=term --cov-branch + + - name: Upload coverage to codecov + uses: codecov/codecov-action@v2 + with: + file: coverage.xml \ No newline at end of file diff --git a/.github/workflows/python-tests-doc.yml b/.github/workflows/run_tests_docs.yaml similarity index 88% rename from .github/workflows/python-tests-doc.yml rename to .github/workflows/run_tests_docs.yaml index 8672e8650..a331a54f0 100644 --- a/.github/workflows/python-tests-doc.yml +++ b/.github/workflows/run_tests_docs.yaml @@ -18,54 +18,60 @@ jobs: fail-fast: false matrix: os: ["ubuntu-latest", "macos-latest"] - python-version: ["3.10"] + python-version: ["3.11"] steps: - - uses: actions/checkout@v3 - + - uses: actions/checkout@v4 - uses: conda-incubator/setup-miniconda@v2 with: auto-update-conda: true python-version: ${{ matrix.python-version }} channels: conda-forge,defaults + miniforge-variant: Mambaforge + use-mamba: true channel-priority: strict show-channel-urls: true + - name: configure conda and install requirements shell: bash -l {0} run: | - conda config --set always_yes yes - conda install --quiet --file=requirements.txt - conda install --quiet --file=test-requirements.txt + mamba config --set always_yes yes + mamba install --quiet --file=requirements.txt + mamba install --quiet --file=test-requirements.txt + - name: install rubin_sim shell: bash -l {0} run: | echo `pwd` ls ${{ github.workspace }} python -m pip install . + - name: download rubin_sim_data components needed for unit tests shell: bash -l {0} run: | export RUBIN_SIM_DATA_DIR=${{ github.workspace }}/data_dir rs_download_data --tdqm_disable -d 'site_models,throughputs,skybrightness,skybrightness_pre' rs_download_data --tdqm_disable -d tests --force + - name: conda list shell: bash -l {0} run: conda list + - name: run tests shell: bash -l {0} run: | export RUBIN_SIM_DATA_DIR=${{ github.workspace }}/data_dir - pytest + pytest -r a -v Docs: name: Build and push documentation needs: Tests runs-on: "macos-latest" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: conda-incubator/setup-miniconda@v2 with: auto-update-conda: true - python-version: "3.10" + python-version: "3.11" channels: conda-forge,defaults channel-priority: strict show-channel-urls: true diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e9676c85c..fd37f258c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,5 +1,27 @@ repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: check-yaml + - id: end-of-file-fixer + - id: trailing-whitespace + - id: check-toml - repo: https://github.com/psf/black rev: 23.7.0 hooks: - id: black + # It is recommended to specify the latest version of Python + # supported by your project here, or alternatively use + # pre-commit's default_language_version, see + # https://pre-commit.com/#top_level-default_language_version + language_version: python3.11 + - repo: https://github.com/pycqa/isort + rev: 5.12.0 + hooks: + - id: isort + name: isort (python) + - repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. 
+ rev: v0.0.278 + hooks: + - id: ruff \ No newline at end of file diff --git a/README.md b/README.md index c2daf7018..1bb256f1d 100644 --- a/README.md +++ b/README.md @@ -11,28 +11,43 @@ Scheduler, survey strategy analysis, and other simulation tools for Rubin Observ # Installation -Prerequisites: A working [conda installation ](https://www.anaconda.com/products/individual) - ### Conda Installation ### If you are only running `rubin_sim` code and not making changes. If you will be editing the code or need the very latest verison, use the pip instructions below. ``` -conda create -n rubin-sim -c conda-forge rubin_sim # Create a new environment +conda create -n rubin-sim -c conda-forge rubin_sim ## Create a new environment and install rubin_sim conda activate rubin-sim -rs_download_data # Downloads ~2Gb of data to $RUBIN_SIM_DATA_DIR (~/rubin_sim_data if unset) -conda install -c conda-forge jupyter # Optional install of jupyter +rs_download_data ## Downloads a few of data to $RUBIN_SIM_DATA_DIR (~/rubin_sim_data if unset) +conda install -c conda-forge jupyter ## Optional install of jupyter ``` Note that this is not the best option for developers working on their own metrics - a pip installation from their own fork of the repo may work better. -### Pip Installation ### +### Pip installation ### + +``` +pip install rubin_sim +``` + +Please note that the pip installation of pyoorb does not come with the necessary data files. +To actually use pyoorb, the data files are most easily installable via conda with + ``` + conda install -c conda-forge openorb-data + conda install -c conda-forge openorb-data-de405 + ``` +The pip installation of `rubin_sim` will install the pip version of `pyoorb` which is +more up-to-date compared to the conda-forge version of `openorb`. For the purposes of +`rubin_sim`, the functionality is essentially the same however. + + +### Developer Installation ### To install `rubin_sim` from source using pip, with all required dependencies: ``` -git clone https://github.com/lsst/rubin_sim.git ; cd rubin_sim # clone and cd into repo -conda create -n rubin-sim ; conda activate rubin-sim # optional (but recommended) new conda env -conda install -c conda-forge --file=all_req.txt # substitute mamba for conda if you like +git clone https://github.com/lsst/rubin_sim.git ; cd rubin_sim ## clone and cd into repo +conda create -n rubin-sim ; conda activate rubin-sim ## optional (but recommended) new conda env +conda install -c conda-forge --file=all_req.txt ## substitute mamba for conda if you like pip install -e . -rs_download_data # Downloads ~2Gb of data to $RUBIN_SIM_DATA_DIR (~/rubin_sim_data if unset) +rs_download_data ## Downloads a few GB of data to $RUBIN_SIM_DATA_DIR (~/rubin_sim_data if unset) ``` Note that external collaborators will likely want to follow similar directions, using a fork of our rubin_sim github repo first (and then clone from there). @@ -41,8 +56,8 @@ Note that external collaborators will likely want to follow similar directions, **Optional: Set $RUBIN_SIM_DATA_DIR data directory.** By default, `rubin_sim` will download needed data files to `$HOME/rubin_sim_data`. If you would like the data to save elsewhere, you should set the `RUBIN_SIM_DATA_DIR` environment variable. In bash `export RUBIN_SIM_DATA_DIR="/my/preferred/data/path"` (note, always make sure this is set before trying to run `rubin_sim` packages, so put in your .bashrc or whatnot). Another possibility is to set the location via sym-link, `ln -s /my/preferred/data/path ~/rubin_sim_data`. 
``` -export RUBIN_SIM_DATA_DIR=$HOME/rubin_sim_data # Optional. Set the data directory path via env variable -rs_download_data # Downloads ~2Gb of data to $RUBIN_SIM_DATA_DIR +export RUBIN_SIM_DATA_DIR=$HOME/rubin_sim_data ## Optional. Set the data directory path via env variable +rs_download_data ## Downloads a few GB of data to $RUBIN_SIM_DATA_DIR ``` If you are only interested in a subset of the data, you can specify which directories to download, e.g. ``` @@ -57,22 +72,25 @@ If you have a previous installation of rubin_sim or wish to update your data dow git clone https://github.com/lsst/rubin_sim_notebooks.git cd rubin_sim_notebooks # Example: make a plot of the number of visits per pointing -jupyter notebook maf/tutorial/Survey\ Footprint.ipynb +jupyter notebook maf/tutorial/Survey_footprint.ipynb ``` -### Additional installation and download options ### +### Downloading additional skybrightness_pre skybrightness files ### -Optional dependencies used by some of the more esoteric MAF functions: -``` -conda install -c conda-forge sncosmo sympy george -``` +The default skybrightness_pre directory downloaded above contains only one month of pre-calculated skybrightness files. +If you wish to run the scheduler for a longer time period, or need this information outside of the span of that month period, +you will need to download a larger set of pre-computed sky data. -Optional download all the (43 Gb) of pre-computed sky data. Only needed if you are planning to run full 10 year scheduler simulations. Not needed for MAF, etc.: +To download the entire optional set all the (43 Gb) of pre-computed sky data. ``` rs_download_sky ``` - +Note that subsets of this data can get downloaded via http directly from +``` +https://s3df.slac.stanford.edu/data/rubin/sim-data/sims_skybrightness_pre/h5_2023_09_12/ +``` +(the file names reflect the range of MJD covered within each data file). # Documentation @@ -90,76 +108,3 @@ make html ## Getting Help ## Questions about `rubin_sim` can be posted on the [sims slack channel](https://lsstc.slack.com/archives/C2LQ5JW9W), or on https://community.lsst.org/ (tag @yoachim and/or @ljones so we get notifications about it). - -# Mix and match data files - -If someone finds themselves in a situation where they want to use the latest code, but an older version of the data files, one could mix and match by: -``` -git checkout -rs_download_data --force -git checkout master -``` -And viola, one has the current version of the code, but the data files from a previous version. - - -# Notes on installing/running on hyak (and other clusters) - -A new anaconda install is around 11 GB (and hyak has a home dir quota of 10GB), so ensure your anaconda dir and the rubin_sim_data dir are not in your home directory. Helpful link to the anaconda linux install instructions: https://docs.anaconda.com/anaconda/install/linux/ - -The `conda activate` command fails in a bash script. One must first `source ~/anaconda3/etc/profile.d/conda.sh -` (replace with path to your anaconda install if different), then `conda activate rubin`. - -The conda create command failed a few times. It looks like creating the conda environment and then installing dependencies in 3-4 batches can be a work-around. - -Handy command to get a build node on hyak `srun -p build --time=2:00:00 --mem=20G --pty /bin/bash` - - -# Developer Guide - -If you have push permissions to rubin_sim, you can make changes to the code by checking out a new branch, making edits, push and then make a pull request. 
-However, we do expect many users who wish to contribute metrics will not have these permissions -- for these contributors the easiest way to do development on rubin_sim may be the following: - - create a fork of rubin_sim - - pip install the fork copy as above (but git clone your own fork, and then use this copy of rubin_sim) - - edit the code in your fork of rubin_sim, test it, etc. - - issue a PR from your fork to our original lsst/rubin_sim repository - -When contributing code, metrics for MAF can be placed into either rubin_sim/rubin_sim/maf/metrics or rubin_sim/rubin_sim/maf/mafContrib (preferably rubin_sim/maf/metrics). Adding a unit test in the appropriate rubin_sim/tests directory is desirable. For unit tests, all filename should start with `test_` so py.test can automatically find them. An example notebook can be contributed to lsst/rubin_sim_notebooks. - -When contributing to the package, make sure you reformat the code with `black` before commiting. -The package ships with a `pre-commit` configuration file, which allows developers to install a git hook that will reformat the code before commiting. -Most IDEs also contains `black` reformat add-ons. - -To install the `pre-commit` hook first install the `pre-commit` package with: -``` -conda install -c conda-forge pre-commit -``` - -Then, install the hook with: -``` -pre-commit install -``` - -## Updating data files - -(This must be done by project developers only at this time). -To update the source contents of the data files: - -* Update the files in your local installation -* If you are updating the baseline sim, create a symlink of the new database to baseline.db -* Create a new tar file with a new name, e.g., `tar -chvzf maf_2021_06_01.tgz maf` (no `-h` if symlinks should stay as symlinks) -* Copy your new tar file to S3DF USDF s3dflogin.slac.stanford.edu:/sdf/group/rubin/web_data/sim-data/rubin_sim_data/ -* You can check that it is uploaded here: https://s3df.slac.stanford.edu/data/rubin/sim-data/rubin_sim_data/ -* Update `rubin_sim/data/rs_download_data.py` so the `data_dict` function uses your new filename -* Push and merge the change to `bin/rs_download_data` -* Add a new tag, with a message indicating how the data package was changed. - -## Updating throughputs - -Process for updating pre-computed files if system throughputs change. - -1) update rubin_sim_data/throughputs files -2) update rubin_sim/rubin_sim/utils/sys_eng_vals.py -3) recompute sky brightness files with rubin_sim.skybrightness.recalc_mags -4) remake skybrightness_pre files with rubin_sim/rubin_sim/skybrightness_pre/data/generate_hdf5.py -5) remake dark sky map with rubin_sim/rubin_sim/skybrightness_pre/data/generate_dark_sky.py -6) tar and update files at SDF (throughputs, skybrightness, skybrightness_pre) diff --git a/README_dev.md b/README_dev.md new file mode 100644 index 000000000..e7eeb1d5f --- /dev/null +++ b/README_dev.md @@ -0,0 +1,62 @@ +# Notes on installing/running on hyak (and other clusters) + +A new anaconda install is around 11 GB (and hyak has a home dir quota of 10GB), so ensure your anaconda dir and the rubin_sim_data dir are not in your home directory. Helpful link to the anaconda linux install instructions: https://docs.anaconda.com/anaconda/install/linux/ + +The `conda activate` command fails in a bash script. One must first `source ~/anaconda3/etc/profile.d/conda.sh +` (replace with path to your anaconda install if different), then `conda activate rubin`. + +The conda create command failed a few times. 
It looks like creating the conda environment and then installing dependencies in 3-4 batches can be a work-around. + +Handy command to get a build node on hyak `srun -p build --time=2:00:00 --mem=20G --pty /bin/bash` + + +# Developer Guide + +If you have push permissions to rubin_sim, you can make changes to the code by checking out a new branch, making edits, push and then make a pull request. +However, we do expect many users who wish to contribute metrics will not have these permissions -- for these contributors the easiest way to do development on rubin_sim may be the following: + - create a fork of rubin_sim + - pip install the fork copy as above (but git clone your own fork, and then use this copy of rubin_sim) + - edit the code in your fork of rubin_sim, test it, etc. + - issue a PR from your fork to our original lsst/rubin_sim repository + +When contributing code, metrics for MAF can be placed into either rubin_sim/rubin_sim/maf/metrics or rubin_sim/rubin_sim/maf/mafContrib (preferably rubin_sim/maf/metrics). Adding a unit test in the appropriate rubin_sim/tests directory is desirable. For unit tests, all filename should start with `test_` so py.test can automatically find them. An example notebook can be contributed to lsst/rubin_sim_notebooks. + +When contributing to the package, make sure you reformat the code with `black` before commiting. +The package ships with a `pre-commit` configuration file, which allows developers to install a git hook that will reformat the code before commiting. +Most IDEs also contains `black` reformat add-ons. + +To install the `pre-commit` hook first install the `pre-commit` package with: +``` +conda install -c conda-forge pre-commit +``` + +Then, install the hook with: +``` +pre-commit install +``` + +## Updating data files + +(This must be done by project developers only at this time). +To update the source contents of the data files: + +* Update the files in your local installation +* If you are updating the baseline sim, create a symlink of the new database to baseline.db +* Create a new tar file with a new name, e.g., `tar -chvzf maf_2021_06_01.tgz maf` (no `-h` if symlinks should stay as symlinks) +* Copy your new tar file to S3DF USDF s3dflogin.slac.stanford.edu:/sdf/group/rubin/web_data/sim-data/rubin_sim_data/ +* You can check that it is uploaded here: https://s3df.slac.stanford.edu/data/rubin/sim-data/rubin_sim_data/ +* Update `rubin_sim/data/rs_download_data.py` so the `data_dict` function uses your new filename +* Push and merge the change to `bin/rs_download_data` +* Add a new tag, with a message indicating how the data package was changed. + +## Updating throughputs + +Process for updating pre-computed files if system throughputs change. 
+ +1) update rubin_sim_data/throughputs files +2) update rubin_sim/rubin_sim/utils/sys_eng_vals.py +3) recompute sky brightness files with rubin_sim.skybrightness.recalc_mags +4) remake skybrightness_pre files with rubin_sim/rubin_sim/skybrightness_pre/data/generate_hdf5.py +5) remake dark sky map with rubin_sim/rubin_sim/skybrightness_pre/data/generate_dark_sky.py +6) tar and update files at SDF (throughputs, skybrightness, skybrightness_pre) + diff --git a/all_req.txt b/all_req.txt index 7c2157f83..2a90dcb28 100644 --- a/all_req.txt +++ b/all_req.txt @@ -7,9 +7,7 @@ pandas numexpr palpy scipy -sqlite sqlalchemy -six astropy pytables h5py @@ -24,10 +22,9 @@ requests shapely skyfield tqdm -jupyter pytest -pytest-flake8 pytest-cov pytest-black -black>=22.3.0 -flake8 +black +ruff +jupyter diff --git a/pyproject.toml b/pyproject.toml index 22ac1bd98..a31d41530 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,10 +1,13 @@ [build-system] -requires = [ "setuptools", "setuptools_scm" ] +requires = [ + "setuptools", + "setuptools-scm<8.0"] build-backend = "setuptools.build_meta" [project] name = "rubin-sim" description = "Scheduler, survey strategy analysis, and other simulation tools for Rubin Observatory." +readme = "README.md" license = { text = "GPL" } classifiers = [ "Intended Audience :: Science/Research", @@ -12,10 +15,46 @@ classifiers = [ "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", "Topic :: Scientific/Engineering :: Astronomy", ] urls = {documentation = "https://rubin-sim.lsst.io", repository = "https://github.com/lsst/rubin_sim" } dynamic = [ "version" ] +dependencies = [ + "numpy", + "matplotlib", + "healpy", + "pandas", + "numexpr", + "palpy", + "scipy", + "sqlalchemy", + "astropy", + "tables", + "h5py", + "astroplan", + "colorcet", + "cycler", + "george", + "scikit-learn", + "requests", + "shapely", + "skyfield", + "pyoorb", + "tqdm", +] + +[project.optional-dependencies] +test = [ + "pytest", + "black", + "ruff", + "pytest-cov", + "pytest-black", +] +dev = [ + "documenteer[pipelines]", +] [project.scripts] rs_download_data = "rubin_sim.data.rs_download_data:rs_download_data" @@ -34,10 +73,6 @@ scimaf_dir = "rubin_sim.maf.scimaf_dir:scimaf_dir" show_maf = "rubin_sim.maf.show_maf:show_maf" make_lsst_obs = "rubin_sim.moving_objects.make_lsst_obs:make_lsst_obs" -[project.optional-dependencies] -dev = [ - "documenteer[pipelines]", -] [tool.setuptools.dynamic] version = { attr = "setuptools_scm.get_version" } @@ -46,6 +81,7 @@ version = { attr = "setuptools_scm.get_version" } where = [ "" ] [tool.setuptools_scm] +#version_file = "rubin_sim/version.py" write_to = "rubin_sim/version.py" write_to_template = """ # Generated by setuptools_scm @@ -55,10 +91,6 @@ __version__ = "{version}" [tool.pytest.ini_options] addopts = "--black --ignore-glob=*/version.py --ignore-glob=*data_dir/*" -flake8-ignore = ["E133", "E203", "E226", "E228", "N802", "N803", "N806", "N812", "N813", "N815", "N816", "W503"] -flake8-max-line-length = 110 -flake8-max-doc-length = 79 -asyncio_mode = "auto" [tool.mypy] disallow_untyped_defs = "True" @@ -67,7 +99,7 @@ exclude = "version.py" [tool.black] line-length = 110 -target-version = ["py310"] +target-version = ["py311"] [tool.isort] profile = "black" @@ -93,6 +125,7 @@ ignore = [ "D200", "D205", "D400", + "E712", ] line-length = 110 select = [ @@ -100,9 +133,8 @@ select = [ "F", # pyflakes "N", # pep8-naming "W", # pycodestyle - #"D", # pydocstyle ] 
-target-version = "py310" +target-version = "py311" extend-select = [ "RUF100", # Warn about unused noqa ] diff --git a/requirements.txt b/requirements.txt index f798a6c50..f8a47b6e8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,9 +7,7 @@ pandas numexpr palpy scipy -sqlite sqlalchemy -six astropy pytables h5py diff --git a/rubin_sim/data/__init__.py b/rubin_sim/data/__init__.py index ffdedc40a..052dbdd30 100644 --- a/rubin_sim/data/__init__.py +++ b/rubin_sim/data/__init__.py @@ -1 +1 @@ -from .data_sets import * +from .data_sets import * # noqa: F403 diff --git a/rubin_sim/data/rs_download_data.py b/rubin_sim/data/rs_download_data.py index f5f7821db..7511de3cb 100644 --- a/rubin_sim/data/rs_download_data.py +++ b/rubin_sim/data/rs_download_data.py @@ -127,7 +127,6 @@ def rs_download_data(): dirs = [key for key in dirs if "orbits_precompute" not in key] # See if base URL is alive - s = requests.Session() url_base = args.url_base try: r = requests.get(url_base) diff --git a/rubin_sim/data/rs_download_sky.py b/rubin_sim/data/rs_download_sky.py index 406389705..178737973 100644 --- a/rubin_sim/data/rs_download_sky.py +++ b/rubin_sim/data/rs_download_sky.py @@ -34,8 +34,8 @@ def handle_starttag(self, tag, attrs): tag : `str` The name of the tag converted to lower case. attrs : `list` - A list of (name, value) pairs containing the attributes found inside the - tag’s <> brackets + A list of (name, value) pairs containing the attributes + found inside the tag’s <> brackets """ try: self.filenames diff --git a/rubin_sim/maf/batches/altaz_batch.py b/rubin_sim/maf/batches/altaz_batch.py index 73649d449..5be86856d 100644 --- a/rubin_sim/maf/batches/altaz_batch.py +++ b/rubin_sim/maf/batches/altaz_batch.py @@ -32,26 +32,26 @@ def altazHealpix( extraInfoLabel=None, metric_name="NVisits Alt/Az", ): - """Generate a set of metrics measuring the number visits as a function of alt/az - plotted on a HealpixSkyMap. + """ + Generate a set of metrics measuring the number visits as a function + of alt/az plotted on a HealpixSkyMap. Parameters ---------- - colmap : dict, optional - A dictionary with a mapping of column names. Default will use OpsimV4 column names. - run_name : str, optional - The name of the simulated survey. Default is "opsim". - extraSql : str, optional - Additional constraint to add to any sql constraints (e.g. 'propId=1' or 'fieldID=522'). - Default None, for no additional constraints. - extraInfoLabel : str, optional - Additional info_label to add before any below (i.e. "WFD"). Default is None. - metric_name : str, optional + colmap : `dict`, optional + A dictionary with a mapping of column names. + run_name : `str`, optional + The name of the simulated survey. + extraSql : `str`, optional + Additional constraint to add to any sql constraints. + extraInfoLabel : `str`, optional + Additional info_label to add before any below (i.e. "WFD"). + metric_name : `str`, optional Unique name to assign to metric Returns ------- - metric_bundleDict + metric_bundleDict : `dict` of {`str`: `maf.MetricBundle`} """ colmap, slicer, metric = basicSetup(metric_name=metric_name, colmap=colmap) @@ -101,26 +101,26 @@ def altazLambert( extraInfoLabel=None, metric_name="Nvisits as function of Alt/Az", ): - """Generate a set of metrics measuring the number visits as a function of alt/az - plotted on a LambertSkyMap. + """ + Generate a set of metrics measuring the number visits as a + function of alt/az plotted on a LambertSkyMap. 
Parameters ---------- - colmap : dict, optional - A dictionary with a mapping of column names. Default will use OpsimV4 column names. - runName : str, optional - The name of the simulated survey. Default is "opsim". - extraSql : str, optional - Additional constraint to add to any sql constraints (e.g. 'propId=1' or 'fieldID=522'). - Default None, for no additional constraints. - extraInfoLabel : str, optional - Additional info_label to add before any below (i.e. "WFD"). Default is None. - metric_name : str, optional + colmap : `dict`, optional + A dictionary with a mapping of column names. + runName : `str`, optional + The name of the simulated survey. + extraSql : `str`, optional + Additional constraint to add to any sql constraints. + extraInfoLabel : `str`, optional + Additional info_label to add before any below (i.e. "WFD"). + metric_name : `str`, optional Unique name to assign to metric Returns ------- - metric_bundleDict + metric_bundleDict : `dict` of {`str`: `maf.MetricBundle`} """ colmap, slicer, metric = basicSetup(metric_name=metric_name, colmap=colmap) diff --git a/rubin_sim/maf/batches/col_map_dict.py b/rubin_sim/maf/batches/col_map_dict.py index f243b8af8..b46747554 100644 --- a/rubin_sim/maf/batches/col_map_dict.py +++ b/rubin_sim/maf/batches/col_map_dict.py @@ -1,24 +1,4 @@ -__all__ = ("get_col_map", "col_map_dict") - - -def get_col_map(opsdb): - """Get the colmap dictionary, if you already have a database object. - - Parameters - ---------- - opsdb : rubin_sim.maf.db.Database or rubin_sim.maf.db.OpsimDatabase - - Returns - ------- - dictionary - """ - try: - version = opsdb.opsimVersion - version = "opsim" + version.lower() - except AttributeError: - version = "fbs" - colmap = col_map_dict(version) - return colmap +__all__ = ("col_map_dict",) def col_map_dict(dict_name=None): @@ -217,6 +197,6 @@ def col_map_dict(dict_name=None): col_map["metadataAngleList"] = ["rotSkyPos"] else: - raise ValueError(f"No built in column dict with name {dictMap}") + raise ValueError(f"No built in column dict with name {dict_name}") return col_map diff --git a/rubin_sim/maf/batches/common.py b/rubin_sim/maf/batches/common.py index b1fa67be5..e438c8c97 100644 --- a/rubin_sim/maf/batches/common.py +++ b/rubin_sim/maf/batches/common.py @@ -35,19 +35,20 @@ def filter_list(all=True, extra_sql=None, extra_info_label=None): Parameters ---------- all : `bool`, optional - Include 'all' in the list of filters and as part of the colors/orders dictionaries. - Default True. - extra_sql : str, optional - Additional sql constraint to add to sqlconstraints returned per filter. - Default None. - extra_info_label : str, optional + Include 'all' in the list of filters and as part of the + colors/orders dictionaries. + extra_sql : `str`, optional + Additional sql constraint to add to constraints returned per filter. + extra_info_label : `str`, optional Substitute info_label to add to info_label strings composed per band. - Default None. 
Returns ------- - list, dict, dict - List of filter names, dictionary of colors (for plots), dictionary of orders (for display) + filterlist : `list` [`str] + colors : `dict` {`str`: `str`} + orders : `dict` {`str`: int} + sqls : `dict` {`str`: `str`} + info_labels : `dict` {`str`: `str} """ if all: filterlist = ("all", "u", "g", "r", "i", "z", "y") @@ -83,8 +84,21 @@ def filter_list(all=True, extra_sql=None, extra_info_label=None): return filterlist, colors, orders, sqls, info_labels -def standard_summary(withCount=True): - """A set of standard summary metrics, to calculate Mean, RMS, Median, #, Max/Min, and # 3-sigma outliers.""" +def standard_summary(with_count=True): + """A set of standard summary metrics, to calculate + Mean, RMS, Median, #, Max/Min, and # 3-sigma outliers. + + Parameters + ---------- + with_count : `bool`, optional + Include the "Count" metric in the set of summary metrics or not. + + Returns + ------- + standardSummary : `list` [`maf.BaseMetric`] + List of metrics appropriate to use to summarize the results from + another metric. + """ standardSummary = [ metrics.MeanMetric(), metrics.RmsMetric(), @@ -94,15 +108,21 @@ def standard_summary(withCount=True): metrics.NoutliersNsigmaMetric(metric_name="N(+3Sigma)", n_sigma=3), metrics.NoutliersNsigmaMetric(metric_name="N(-3Sigma)", n_sigma=-3.0), ] - if withCount: + if with_count: standardSummary += [metrics.CountMetric()] return standardSummary def extended_summary(): - """An extended set of summary metrics, to calculate all that is in the standard summary stats, - plus 25/75 percentiles.""" + """An extended set of summary metrics, to calculate all that is in + the standard summary stats, plus 25/75 percentiles. + Returns + -------- + extendedSummary : `list` [`maf.BaseMetric`] + List of metrics appropriate to use to summarize the results + from another metric. + """ extendedStats = standard_summary() extendedStats += [ metrics.PercentileMetric(metric_name="25th%ile", percentile=25), @@ -123,21 +143,24 @@ def lightcurve_summary(): def standard_metrics(colname, replace_colname=None): - """A set of standard simple metrics for some quantity. Typically would be applied with unislicer. + """A set of standard simple metrics for some quantity. + Typically would be applied with unislicer. Parameters ---------- - colname : str + colname : `str` The column name to apply the metrics to. - replace_colname: str or None, optional + replace_colname: `str` or None, optional Value to replace colname with in the metric_name. - i.e. if replace_colname='' then metric name is Mean, instead of Mean Airmass, or - if replace_colname='seeingGeom', then metric name is Mean seeingGeom instead of Mean seeingFwhmGeom. - Default is None, which does not alter the metric name. + i.e. if replace_colname='' then metric name is Mean, + instead of Mean Airmass, or + if replace_colname='seeingGeom', then metric name is + Mean seeingGeom instead of Mean seeingFwhmGeom. Returns ------- - List of configured metrics. + standardMetrics : `list` [`maf.BaseMetric`] + List of appropriate MAF metrics to evaluate a distribution. """ standardMetrics = [ metrics.MeanMetric(colname), @@ -155,21 +178,24 @@ def standard_metrics(colname, replace_colname=None): def extended_metrics(colname, replace_colname=None): - """An extended set of simple metrics for some quantity. Typically applied with unislicer. + """An extended set of simple metrics for some quantity. + Typically applied with unislicer. 
Parameters ---------- - colname : str + colname : `str` The column name to apply the metrics to. - replace_colname: str or None, optional + replace_colname: `str` or None, optional Value to replace colname with in the metric_name. - i.e. if replace_colname='' then metric name is Mean, instead of Mean Airmass, or - if replace_colname='seeingGeom', then metric name is Mean seeingGeom instead of Mean seeingFwhmGeom. - Default is None, which does not alter the metric name. + i.e. if replace_colname='' then metric name is Mean, + instead of Mean Airmass, or + if replace_colname='seeingGeom', then metric name is + Mean seeingGeom instead of Mean seeingFwhmGeom. Returns ------- - List of configured metrics. + extendedMetrics : `list` [`maf.BaseMetric`] + List of appropriate MAF metrics to evaluate a distribution. """ extendedMetrics = standard_metrics(colname, replace_colname=None) extendedMetrics += [ @@ -190,21 +216,23 @@ def extended_metrics(colname, replace_colname=None): def standard_angle_metrics(colname, replace_colname=None): - """A set of standard simple metrics for some quantity which is a wrap-around angle. + """A set of standard simple metrics for a wrap-around angle quantity. Parameters ---------- - colname : str + colname : `str` The column name to apply the metrics to. - replace_colname: str or None, optional + replace_colname: `str` or None, optional Value to replace colname with in the metric_name. - i.e. if replace_colname='' then metric name is Mean, instead of Mean Airmass, or - if replace_colname='seeingGeom', then metric name is Mean seeingGeom instead of Mean seeingFwhmGeom. - Default is None, which does not alter the metric name. + i.e. if replace_colname='' then metric name is Mean, + instead of Mean Airmass, or + if replace_colname='seeingGeom', then metric name is + Mean seeingGeom instead of Mean seeingFwhmGeom. Returns ------- - List of configured metrics. + standardAngleMetrics : `list` [`maf.BaseMetric`] + List of appropriate MAF metrics for angle distributions. """ standardAngleMetrics = [ metrics.MeanAngleMetric(colname), @@ -223,22 +251,26 @@ def standard_angle_metrics(colname, replace_colname=None): def summary_completeness_at_time(times, h_val, h_index=0.33): - """A simple list of summary metrics to be applied to the Discovery_Time or PreviouslyKnown metrics. - (can be used with any moving object metric which returns the time of discovery). + """A simple list of summary metrics to be applied to the Discovery_Time + or PreviouslyKnown metrics. + (can be used with any moving object metric which returns the + time of discovery). Parameters ---------- - times : np.ndarray or list + times : `np.ndarray `or list` [`float`] The times at which to evaluate the completeness @ Hval. - h_val : float - The H value at which to evaluate the completeness (cumulative and differential). - h_index : float, optional - The index of the power law to integrate H over (for cumulative completeness). - Default is 0.33. + h_val : `float` + The H value at which to evaluate the completeness + (cumulative and differential). + h_index : `float`, optional + The index of the power law to integrate H over + (for cumulative completeness). Returns ------- - List of moving object MoCompletenessAtTime metrics (cumulative and differential) + summaryMetrics : `list` [`maf.MoCompletenessAtTimeMetric`] + List of completeness metrics to be evaluated at the specified times. 
""" summaryMetrics = [ metrics.MoCompletenessAtTimeMetric(times=times, hval=h_val, hindex=h_index, cumulative=False), @@ -248,19 +280,23 @@ def summary_completeness_at_time(times, h_val, h_index=0.33): def summary_completeness_over_h(requiredChances=1, Hindex=0.33): - """A simple list of summary metrics to be applied to the Discovery_N_Chances metric. + """A simple list of summary metrics to be applied to the + Discovery_N_Chances metric. Parameters ---------- - requiredChances : int, optional - Number of discovery opportunities required to consider an object 'discovered'. - Hindex : float, optional - The index of the power law to integrate H over (for cumulative completeness). - Default is 0.33. + requiredChances : `int`, optional + Number of discovery opportunities required to consider an + object 'discovered'. + Hindex : `float`, optional + The index of the power law to integrate H over + (for cumulative completeness). Returns ------- - List of moving object MoCompleteness metrics (cumulative and differential) + summaryMetrics : `list` [`maf.MoCompletenessMetric`] + List of moving object MoCompleteness metrics + (cumulative and differential) """ summaryMetrics = [ metrics.MoCompletenessMetric(threshold=requiredChances, cumulative=False, hindex=Hindex), @@ -270,19 +306,25 @@ def summary_completeness_over_h(requiredChances=1, Hindex=0.33): def fraction_population_at_threshold(thresholds, optnames=None): - """Creates a list of summary metrics to be applied to any moving object metric - which reports a float value, calculating the fraction of the population above X. + """Creates a list of summary metrics to be applied to any moving object + metric which reports a float value, calculating the fraction of the + population above X. Parameters ---------- - thresholds : list of float - The thresholds at which to calculate what fraction of the population exceeds these values. - optnames : list of str, optional - If provided, these names will be used instead of the threshold values when constructing - the metric names. This allows more descriptive summary statistic names. + thresholds : `list` [`float`] + The thresholds at which to calculate what fraction of the population + exceeds these values. + optnames : `list` [`str`], optional + If provided, these names will be used instead of the threshold values + when constructing the metric names. + This allows more descriptive summary statistic names. + Returns ------- - List of moving object MoCompleteness metrics (differential fractions of the population). + fracMetrics : `list` [`maf.MoCompletenessMetric`] + List of moving object MoCompleteness metrics + (differential fractions of the population). """ fracMetrics = [] for i, threshold in enumerate(thresholds): @@ -298,6 +340,25 @@ def fraction_population_at_threshold(thresholds, optnames=None): def microlensing_summary(metric_type, npts_required=10, Fisher_sigmatE_tE_cutoff=0.1): + """Calculate summary metrics for the microlensing population metrics. + + Parameters + ----------- + metric_type : `str` + Identify whether the metric is "Npts" or Fisher" + npts_required : `int`, optional + Count the fraction of microlensing events with more than npts_required + observations + Fisher_sigmatE_tE_cutoff : `float`, optional + Count the fraction of microlensing events with characterization + uncertainty less than this + + Returns + ------- + microlensingSummary : `list` [`maf.BaseMetric`] + List of appropriate MAF metrics for this type of microlensing + metric with the specified threshold values. 
+ """ if metric_type != "Npts" and metric_type != "Fisher": raise Exception('metric_type must be "Npts" or "Fisher"') if metric_type == "Npts": diff --git a/rubin_sim/maf/batches/ddf_batch.py b/rubin_sim/maf/batches/ddf_batch.py index 4cd7e6939..dc964dad2 100644 --- a/rubin_sim/maf/batches/ddf_batch.py +++ b/rubin_sim/maf/batches/ddf_batch.py @@ -18,22 +18,34 @@ def ddfBatch( extra_info_label=None, ): """ + A set of metrics to evaluate DDF fields. + Parameters ---------- - nside : `int` (512) - The HEALpix nside to run most of the metrics on. default 512. - radius : `float` (2.5) - The radius to select around each ddf (degrees). Default 2.5. Note that - Going too large will result in more background being selected, which - can throw off things like the median number of visits. But going too - small risks missing some DDF area on the double Euclid field, or a regular - field with large dithers. - nside_sne : `int` (128) - The HEALpix nside to use with the SNe metric. - extra_sql : `str` (None) - Additional sql constraint (such as night<=365) to add to the necessary sql constraints below - extra_info_label : `str` (None) + run_name : `str`, optional + The name of the simulation (for plot titles and file outputs). + nside : `int`, optional + The HEALpix nside to run most of the metrics on. + radius : `float` + The radius to select around each ddf (degrees). + The default value of 2.5 degrees has been chosen to balance + selecting a large enough area to ensure gathering all of the double + Euclid field or a run with large dithers, + while not including too much background area (which can + skew metrics of the median number of visits, etc.). + nside_sne : `int`, optional + The HEALpix nside to use with the SNe metric. The default is lower + than the default nside for other metrics, as the SNe metric is + more computationally expensive. + extra_sql : `str`, optional + Additional sql constraint (such as night<=365) to add to the + necessary sql constraints for each metric. 
+ extra_info_label : `str`, optional Additional description information to add (alongside the extra_sql) + + Returns + ------- + metric_bundleDict : `dict` of `maf.MetricBundle` """ bundle_list = [] @@ -51,7 +63,7 @@ def ddfBatch( } del ddfs["EDFS_a"] del ddfs["EDFS_b"] - # Let's include an arbitrary point that should be in the WFD for comparision + # Let's include an arbitrary point that should be in the WFD for comparison ddfs["WFD"] = {"ra": 0, "dec": -20.0} ra, dec = hpid2_ra_dec(nside, np.arange(hp.nside2npix(nside))) @@ -122,7 +134,8 @@ def ddfBatch( daymax_step=3, coadd_night=True, gamma_name="gamma_DDF.hdf5", - metric_name=f"SNNSNMetric {fieldname}", # have to add here, as must be in reduceDict key + # have to add field name here, to avoid reduceDict key collissions + metric_name=f"SNNSNMetric {fieldname}", ) bundle_list.append( maf.MetricBundle( @@ -219,7 +232,9 @@ def ddfBatch( ) # Weak lensing visits - # XXX-bad magic numbers everywhere + # The "magic numbers" here scale the final depth into + # approximately consistent visits per year - final depth is + # determined by arbitrary definition of 'good sample' lim_ebv = 0.2 offset = 0.1 mag_cuts = { @@ -450,8 +465,9 @@ def ddfBatch( ) # Now to compute some things at just the center of the DDF - # For these metrics, add a requirement that the 'note' label match the DDF, - # to avoid WFD visits skewing the results (we want to exclude these) + # For these metrics, add a requirement that the 'note' label + # match the DDF, to avoid WFD visits skewing the results + # (we want to exclude non-DD visits), ptslicer = maf.UserPointsSlicer(np.mean(ddfs[ddf]["ra"]), np.mean(ddfs[ddf]["dec"])) displayDict["group"] = "Cadence" @@ -563,7 +579,8 @@ def ddfBatch( # Histogram of the season lengths, all filters def rfunc(simdata): - # Sometimes number of seasons is 10, sometimes 11 (depending on where survey starts/end) + # Sometimes number of seasons is 10, sometimes 11 + # (depending on where survey starts/end) # so normalize it so there's always 11 values if len(simdata) < 11: simdata = np.concatenate([simdata, np.array([0], float)]) diff --git a/rubin_sim/maf/batches/filterchange_batch.py b/rubin_sim/maf/batches/filterchange_batch.py index ddaf30a50..08686c5b7 100644 --- a/rubin_sim/maf/batches/filterchange_batch.py +++ b/rubin_sim/maf/batches/filterchange_batch.py @@ -58,25 +58,25 @@ def setupMetrics(colmap, wholesurvey=False): def filtersPerNight(colmap=None, runName="opsim", nights=1, extraSql=None, extraInfoLabel=None): - """Generate a set of metrics measuring the number and rate of filter changes over a given span of nights. + """Generate a set of metrics measuring the number and rate of filter + changes over a given span of nights. Parameters ---------- - colmap : dict, optional - A dictionary with a mapping of column names. Default will use OpsimV4 column names. - run_name : str, optional - The name of the simulated survey. Default is "opsim". - nights : int, optional - Size of night bin to use when calculating metrics. Default is 1. - extraSql : str, optional - Additional constraint to add to any sql constraints (e.g. 'propId=1' or 'fieldID=522'). - Default None, for no additional constraints. - extraInfoLabel : str, optional - Additional info_label to add before any below (i.e. "WFD"). Default is None. + colmap : `dict`, optional + A dictionary with a mapping of column names. + run_name : `str`, optional + The name of the simulated survey. + nights : `int`, optional + Size of night bin to use when calculating metrics. 
+ extraSql : `str`, optional + Additional constraint to add to any sql constraints. + extraInfoLabel : `str`, optional + Additional info_label to add before any below (i.e. "WFD"). Returns ------- - metric_bundleDict + metric_bundleDict : `dict` of `maf.MetricBundle` """ if colmap is None: @@ -125,23 +125,23 @@ def filtersPerNight(colmap=None, runName="opsim", nights=1, extraSql=None, extra def filtersWholeSurvey(colmap=None, runName="opsim", extraSql=None, extraInfoLabel=None): - """Generate a set of metrics measuring the number and rate of filter changes over the entire survey. + """Generate a set of metrics measuring the number and rate of filter + changes over the entire survey. Parameters ---------- - colmap : dict, optional - A dictionary with a mapping of column names. Default will use OpsimV4 column names. - run_name : str, optional - The name of the simulated survey. Default is "opsim". - extraSql : str, optional - Additional constraint to add to any sql constraints (e.g. 'propId=1' or 'fieldID=522'). - Default None, for no additional constraints. - extraInfoLabel : str, optional - Additional info_label to add before any below (i.e. "WFD"). Default is None. + colmap : `dict`, optional + A dictionary with a mapping of column names. + run_name : `str`, optional + The name of the simulated survey. + extraSql : `str`, optional + Additional constraint to add to any sql constraints. + extraInfoLabel : `str`, optional + Additional info_label to add before any below (i.e. "WFD"). Returns ------- - metric_bundleDict + metric_bundleDict : `dict` of `maf.MetricBundle` """ if colmap is None: colmap = col_map_dict() diff --git a/rubin_sim/maf/batches/glance_batch.py b/rubin_sim/maf/batches/glance_batch.py index 8779952bd..4edf1fd01 100644 --- a/rubin_sim/maf/batches/glance_batch.py +++ b/rubin_sim/maf/batches/glance_batch.py @@ -26,33 +26,34 @@ def glanceBatch( sql_constraint=None, slicer_camera="LSST", ): - """Generate a handy set of metrics that give a quick overview of how well a survey performed. + """Generate a handy set of metrics that give a quick overview + of how well a survey performed. This is a meta-set of other batches, to some extent. Parameters ---------- - colmap : dict, optional - A dictionary with a mapping of column names. Default will use OpsimV4 column names. - run_name : str, optional - The name of the simulated survey. Default is "opsim". - nside : int, optional - The nside for the healpix slicers. Default 64. - filternames : list of str, optional + colmap : `dict`, optional + A dictionary with a mapping of column names. + run_name : `str`, optional + The name of the simulated survey. + nside : `int`, optional + The nside for the healpix slicers. + filternames : `list` of `str`, optional The list of individual filters to use when running metrics. - Default is ('u', 'g', 'r', 'i', 'z', 'y'). There is always an all-visits version of the metrics run as well. - nyears : int (10) + nyears : `int`, optional How many years to attempt to make hourglass plots for - pairnside : int (32) - nside to use for the pair fraction metric (it's slow, so nice to use lower resolution) - sql_constraint : str or None, optional + pairnside : `int`, optional + nside to use for the pair fraction metric + (it's slow, so nice to use lower resolution) + sql_constraint : `str` or None, optional Additional SQL constraint to apply to all metrics. - slicer_camera : str ('LSST') + slicer_camera : `str` Sets which spatial slicer to use. 
options are 'LSST' and 'ComCam' Returns ------- - metric_bundleDict + metric_bundleDict : `dict` of `maf.MetricBundle` """ if isinstance(colmap, str): raise ValueError("colmap must be a dictionary, not a string") @@ -290,7 +291,6 @@ def glanceBatch( metrics.AreaSummaryMetric(decreasing=False, metric_name="best18k"), metrics.PercentileMetric(col="metricdata", percentile=90), ] - # Maybe parallax and proper motion, fraction of visits in a good pair for SS displayDict["caption"] = r"Parallax precision of an $r=20$ flat SED star" metric = metrics.ParallaxMetric( m5_col=colmap["fiveSigmaDepth"], diff --git a/rubin_sim/maf/batches/hourglass_batch.py b/rubin_sim/maf/batches/hourglass_batch.py index eb5994fb2..877e7985b 100644 --- a/rubin_sim/maf/batches/hourglass_batch.py +++ b/rubin_sim/maf/batches/hourglass_batch.py @@ -14,16 +14,20 @@ def hourglassPlots(colmap=None, runName="opsim", nyears=10, extraSql=None, extra Parameters ---------- - colmap : dict, optional - A dictionary with a mapping of column names. Default will use OpsimV4 column names. - run_name : str, optional - The name of the simulated survey. Default is "opsim". - nyears : int (10), optional - How many years to attempt to make hourglass plots for. Default is 10. - extraSql : str, optional - Add an extra sql constraint before running metrics. Default None. - extraInfoLabel : str, optional - Add an extra piece of info_label before running metrics. Default None. + colmap : `dict`, optional + A dictionary with a mapping of column names. + run_name : `str`, optional + The name of the simulated survey. + nyears : `int`, optional + How many years to attempt to make hourglass plots for. + extraSql : `str`, optional + Add an extra sql constraint before running metrics. + extraInfoLabel : `str`, optional + Add an extra piece of info_label before running metrics. + + Returns + ------- + metric_bundleDict : `dict` of `maf.MetricBundle` """ if colmap is None: colmap = col_map_dict() @@ -31,7 +35,7 @@ def hourglassPlots(colmap=None, runName="opsim", nyears=10, extraSql=None, extra sql = "" info_label = "" - # Add additional sql constraint (such as wfdWhere) and info_label, if provided. 
+ # Add additional sql constraint (such as wfdWhere) and info_label if (extraSql is not None) and (len(extraSql) > 0): sql = extraSql if extraInfoLabel is None: diff --git a/rubin_sim/maf/batches/info_batch.py b/rubin_sim/maf/batches/info_batch.py index 7151df124..0ecaf0104 100644 --- a/rubin_sim/maf/batches/info_batch.py +++ b/rubin_sim/maf/batches/info_batch.py @@ -1,11 +1,11 @@ -__all__ = ("metadata_bundle_dicts",) +__all__ = ("info_bundle_dicts",) import numpy as np import rubin_sim.maf.batches as batches -def metadata_bundle_dicts(allsky_slicer, wfd_slicer, opsim="opsim", colmap=batches.col_map_dict()): +def info_bundle_dicts(allsky_slicer, wfd_slicer, opsim="opsim", colmap=batches.col_map_dict()): # Set up the bundle dicts # Some of these metrics are reproduced in other scripts - srd and cadence bdict = {} @@ -47,7 +47,7 @@ def metadata_bundle_dicts(allsky_slicer, wfd_slicer, opsim="opsim", colmap=batch bdict.update(batches.nvisitsM5Maps(colmap, opsim, slicer=slicer, extraInfoLabel=tag)) bdict.update(batches.tEffMetrics(colmap, opsim, slicer=slicer, extraInfoLabel=tag)) - # And number of visits for the first year and then halfway through the survey + # And number of visits for the first year and halfway through the survey bdict.update( batches.nvisitsM5Maps( colmap, diff --git a/rubin_sim/maf/batches/metadata_batch.py b/rubin_sim/maf/batches/metadata_batch.py index f7ad58ad4..4f849cca8 100644 --- a/rubin_sim/maf/batches/metadata_batch.py +++ b/rubin_sim/maf/batches/metadata_batch.py @@ -33,38 +33,44 @@ def metadataBasics( extraInfoLabel=None, slicer=None, ): - """Calculate basic metrics on visit metadata 'value' (e.g. airmass, normalized airmass, seeing..). - Calculates this around the sky (HealpixSlicer), makes histograms of all visits (OneDSlicer), - and calculates statistics on all visits (UniSlicer) for the quantity in all visits and per filter. + """Calculate basic metrics on visit metadata 'value' + (e.g. airmass, normalized airmass, seeing..). + Calculates this around the sky (HealpixSlicer), + makes histograms of all visits (OneDSlicer), + and calculates statistics on all visits (UniSlicer) for the quantity, + in all visits and per filter. Currently have a hack for HA & normairmass. Parameters ---------- - value : str - The column name for the quantity to evaluate. (column name in the database or created by a stacker). - colmap : dict or None, optional - A dictionary with a mapping of column names. Default will use OpsimV4 column names. - runName : str, optional - The name of the simulated survey. Default is "opsim". - valueName : str, optional - The name of the value to be reported in the results_db and added to the metric. - This is intended to help standardize metric comparison between sim versions. + value : `str` + The column name for the quantity to evaluate. + (column name in the database or created by a stacker). + colmap : `dict` or None, optional + A dictionary with a mapping of column names. + runName : `str`, optional + The name of the simulated survey. + valueName : `str`, optional + The name of the value to be reported in the results_db and + added to the metric. + This is intended to help standardize metric comparison between + sim versions. value = name as it is in the database (seeingFwhmGeom, etc). - valueName = name to be recorded ('seeingGeom', etc.). Default is None, which will match 'value'. - groupName : str, optional - The group name for this quantity in the display_dict. Default is the same as 'valueName', capitalized. 
- extraSql : str, optional - Additional constraint to add to any sql constraints (e.g. 'propId=1' or 'fieldID=522'). - Default None, for no additional constraints. - extraInfoLabel : str, optional - Additional info_labels to add before any below (i.e. "WFD"). Default is None. + valueName = name to be recorded ('seeingGeom', etc.). + groupName : `str`, optional + The group name for this quantity in the display_dict. + None will default to the same as 'valueName', capitalized. + extraSql : `str`, optional + Additional constraint to add to any sql constraints. + extraInfoLabel : `str`, optional + Additional info_labels to add before any below (i.e. "WFD"). slicer : `rubin_sim.maf.slicers.BaseSlicer`, optional Optionally use a different slicer than an nside=64 healpix slicer. Returns ------- - metric_bundleDict + metric_bundleDict : `dict` of `maf.MetricBundle` """ if colmap is None: colmap = col_map_dict() @@ -99,7 +105,7 @@ def metadataBasics( else: skyslicer = slicers.HealpixSlicer(nside=64, lon_col=raCol, lat_col=decCol, lat_lon_deg=degrees) - # Hack to make HA work, but really I need to account for any stackers/colmaps. + # Hack to make HA work, but really need to account for any stackers if value == "HA": stackerList = [stackers.HourAngleStacker(lst_col=colmap["lst"], ra_col=raCol, degrees=degrees)] elif value == "normairmass": @@ -107,7 +113,8 @@ def metadataBasics( else: stackerList = None - # Summarize values over all and per filter (min/mean/median/max/percentiles/outliers/rms). + # Summarize values over all and per filter + # (min/mean/median/max/percentiles/outliers/rms). slicer = slicers.UniSlicer() for f in filterlist: for m in extended_metrics(value, replace_colname=valueName): @@ -142,7 +149,8 @@ def metadataBasics( ) bundleList.append(bundle) - # Make maps of min/median/max for all and per filter, per RA/Dec, with standard summary stats. + # Make maps of min/median/max for all and per filter, + # per RA/Dec, with standard summary stats. plotDict = {"percentile_clip": 98} mList = [] mList.append(metrics.MinMetric(value, metric_name="Min %s" % (valueName))) @@ -184,37 +192,42 @@ def metadataBasicsAngle( extraInfoLabel=None, slicer=None, ): - """Calculate basic metrics on visit metadata 'value', where value is a wrap-around angle. + """Calculate basic metrics on visit metadata 'value', + where value is a wrap-around angle. - Calculates extended standard metrics (with unislicer) on the quantity (all visits and per filter), + Calculates extended standard metrics (with unislicer) on the quantity + (all visits and per filter), makes histogram of the value (all visits and per filter), Parameters ---------- value : `str` - The column name for the quantity to evaluate. (column name in the database or created by a stacker). + The column name for the quantity to evaluate. + (column name in the database or created by a stacker). colmap : `dict` or None, optional - A dictionary with a mapping of column names. Default will use OpsimV4 column names. + A dictionary with a mapping of column names. runName : `str`, optional - The name of the simulated survey. Default is "opsim". + The name of the simulated survey. valueName : `str`, optional - The name of the value to be reported in the results_db and added to the metric. - This is intended to help standardize metric comparison between sim versions. + The name of the value to be reported in the results_db + and added to the metric. + This is intended to help standardize metric comparison + between sim versions. 
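A minimal sketch of calling metadataBasics with the value / valueName distinction described in the docstring above; the run name and sql constraint are illustrative, and the import path assumes the batch is exported from rubin_sim.maf.batches:

    from rubin_sim.maf.batches import metadataBasics

    # Evaluate the 'seeingFwhmGeom' column but record results as 'seeingGeom',
    # so metric names stay comparable across simulation versions.
    bdict = metadataBasics(
        "seeingFwhmGeom",
        runName="my_opsim",
        valueName="seeingGeom",
        extraSql="night < 365",
        extraInfoLabel="Yr1",
    )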
value = name as it is in the database (seeingFwhmGeom, etc). - valueName = name to be recorded ('seeingGeom', etc.). Default is None, which will match 'value'. + valueName = name to be recorded ('seeingGeom', etc.). groupName : `str`, optional - The group name for this quantity in the display_dict. Default is the same as 'valueName', capitalized. + The group name for this quantity in the display_dict. + None will default to the same as 'valueName', capitalized. extraSql : `str`, optional - Additional constraint to add to any sql constraints (e.g. 'propId=1' or 'fieldID=522'). - Default None, for no additional constraints. + Additional constraint to add to any sql constraints. extraInfoLabel : `str`, optional - Additional info_label to add before any below (i.e. "WFD"). Default is None. + Additional info_label to add before any below (i.e. "WFD"). slicer : `rubin_sim.maf.slicer.BaseSlicer` or None, optional Returns ------- - metric_bundleDict + metric_bundleDict : `dict` of `maf.MetricBundle` """ if colmap is None: colmap = col_map_dict() @@ -281,7 +294,8 @@ def metadataBasicsAngle( ) bundleList.append(bundle) - # Make maps of min/median/max for all and per filter, per RA/Dec, with standard summary stats. + # Make maps of min/median/max for all and per filter, + # per RA/Dec, with standard summary stats. mList = [] mList.append(metrics.MeanAngleMetric(value, metric_name="AngleMean %s" % (valueName))) mList.append(metrics.FullRangeAngleMetric(value, metric_name="AngleRange %s" % (valueName))) @@ -314,26 +328,26 @@ def metadataBasicsAngle( def allMetadata(colmap=None, runName="opsim", extraSql=None, extraInfoLabel=None, slicer=None): """Generate a large set of metrics about the metadata of each visit - - distributions of airmass, normalized airmass, seeing, sky brightness, single visit depth, - hour angle, distance to the moon, and solar elongation. - The exact metadata which is analyzed is set by the colmap['metadataList'] value. + distributions of airmass, normalized airmass, seeing, sky brightness, + single visit depth, hour angle, distance to the moon, and solar elongation. + The exact metadata which is analyzed is set by the colmap['metadataList']. Parameters ---------- colmap : `dict` or None, optional - A dictionary with a mapping of column names. Default will use OpsimV4 column names. + A dictionary with a mapping of column names. runName : `str`, optional - The name of the simulated survey. Default is "opsim". + The name of the simulated survey. extraSql : `str`, optional - Sql constraint (such as WFD only). Default is None. + Sql constraint (such as WFD only). extraInfoLabel : `str`, optional - Metadata to identify the sql constraint (such as WFD). Default is None. + Metadata to identify the sql constraint (such as WFD). slicer : `rubin_sim.maf.slicer.BaseSlicer` or None, optional Optionally use something other than an nside=64 healpix slicer. Returns ------- - metric_bundleDict + metric_bundleDict : `dict` of `maf.MetricBundle` """ if colmap is None: @@ -384,34 +398,37 @@ def metadataMaps( extraInfoLabel=None, slicer=None, ): - """Calculate 25/50/75 percentile values on maps across sky for a single metadata value. + """Calculate 25/50/75 percentile values on maps across sky + for a single metadata value. Parameters ---------- value : `str` - The column name for the quantity to evaluate. (column name in the database or created by a stacker). + The column name for the quantity to evaluate. + (column name in the database or created by a stacker). 
colmap : `dict` or None, optional - A dictionary with a mapping of column names. Default will use OpsimV4 column names. + A dictionary with a mapping of column names. runName : `str`, optional - The name of the simulated survey. Default is "opsim". + The name of the simulated survey. valueName : `str`, optional - The name of the value to be reported in the results_db and added to the metric. - This is intended to help standardize metric comparison between sim versions. + The name of the value to be reported in the results_db + and added to the metric. + This is intended to help standardize metric comparison + between sim versions. value = name as it is in the database (seeingFwhmGeom, etc). - valueName = name to be recorded ('seeingGeom', etc.). Default is None, which will match 'value'. + valueName = name to be recorded ('seeingGeom', etc.). groupName : `str`, optional - The group name for this quantity in the display_dict. Default is the same as 'valueName', capitalized. + The group name for this quantity in the display_dict. extraSql : `str`, optional - Additional constraint to add to any sql constraints (e.g. 'propId=1' or 'fieldID=522'). - Default None, for no additional constraints. + Additional constraint to add to any sql constraints. extraInfoLabel : `str`, optional - Additional info_label to add before any below (i.e. "WFD"). Default is None. + Additional info_label to add before any below (i.e. "WFD"). slicer : `rubin_sim.maf.slicer.BaseSlicer` or None, optional Optionally use something other than an nside=64 HealpixSlicer Returns ------- - metric_bundleDict + metric_bundleDict : `dict` of `maf.MetricBundle` """ if colmap is None: colmap = col_map_dict() @@ -432,15 +449,15 @@ def metadataMaps( displayDict = {"group": groupName, "subgroup": subgroup} - raCol = colmap["ra"] - decCol = colmap["dec"] + ra_col = colmap["ra"] + dec_col = colmap["dec"] degrees = colmap["raDecDeg"] # Set up basic all and per filter sql constraints. filterlist, colors, orders, sqls, info_label = filter_list( all=True, extra_sql=extraSql, extra_info_label=extraInfoLabel ) - # Hack to make HA work, but really I need to account for any stackers/colmaps. + # Hack to make HA work, but really need to account for any stackers if value == "HA": stackerList = [stackers.HourAngleStacker(lst_col=colmap["lst"], ra_col=ra_col, degrees=degrees)] elif value == "normairmass": @@ -448,7 +465,8 @@ def metadataMaps( else: stackerList = None - # Make maps of 25/median/75 for all and per filter, per RA/Dec, with standard summary stats. + # Make maps of 25/median/75 for all and per filter, + # per RA/Dec, with standard summary stats. mList = [] mList.append( metrics.PercentileMetric(value, percentile=25, metric_name="25thPercentile %s" % (valueName)) @@ -458,7 +476,7 @@ def metadataMaps( metrics.PercentileMetric(value, percentile=75, metric_name="75thPercentile %s" % (valueName)) ) if slicer is None: - slicer = slicers.HealpixSlicer(nside=64, lat_col=decCol, lon_col=raCol, lat_lon_deg=degrees) + slicer = slicers.HealpixSlicer(nside=64, lat_col=dec_col, lon_col=ra_col, lat_lon_deg=degrees) for f in filterlist: for m in mList: @@ -486,25 +504,26 @@ def metadataMaps( def firstYearMetadata(colmap=None, runName="opsim", extraSql=None, extraInfoLabel=None, slicer=None): - """Measure the distribution of some basic metadata in the first year of operations - + """Measure the distribution of some basic metadata in the first year + of operations - distributions of airmass, seeing, sky brightness, single visit depth. 
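A short sketch of the metadataMaps batch above; passing value="HA" relies on the HourAngleStacker hack noted in the code, and the run name is illustrative:

    from rubin_sim.maf.batches import metadataMaps

    # 25th/50th/75th percentile sky maps of hour angle over a healpix grid.
    bdict = metadataMaps("HA", runName="my_opsim")
    # The same batch works for any visit column, e.g. airmass for a WFD subset.
    bdict.update(metadataMaps("airmass", runName="my_opsim", extraInfoLabel="WFD"))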
Parameters ---------- colmap : `dict` or None, optional - A dictionary with a mapping of column names. Default will use OpsimV4 column names. + A dictionary with a mapping of column names. runName : `str`, optional - The name of the simulated survey. Default is "opsim". + The name of the simulated survey. extraSql : `str`, optional - Sql constraint (such as WFD only). Default is None. + Sql constraint (such as WFD only). extraInfoLabel : `str`, optional - Metadata to identify the sql constraint (such as WFD). Default is None. + Metadata to identify the sql constraint (such as WFD). slicer : `rubin_sim.maf.slicer.BaseSlicer` or None, optional Optionally use something other than an nside=64 healpix slicer. Returns ------- - metric_bundleDict + metric_bundleDict : `dict` of `maf.MetricBundle` """ if colmap is None: diff --git a/rubin_sim/maf/batches/moving_objects_batch.py b/rubin_sim/maf/batches/moving_objects_batch.py index 6afcd46ab..5769b16fc 100644 --- a/rubin_sim/maf/batches/moving_objects_batch.py +++ b/rubin_sim/maf/batches/moving_objects_batch.py @@ -302,7 +302,7 @@ def discovery_batch( magStacker = stackers.MoMagStacker(loss_col="dmag_trail", magtype=magtype) detection_losses = " trailing loss" else: - # This is SNR losses, plus additional loss due to detecting with stellar PSF. + # SNR losses, plus additional loss due to detecting with stellar PSF. magStacker = stackers.MoMagStacker(loss_col="dmag_detect", magtype=magtype) detection_losses = " detection loss" @@ -649,30 +649,33 @@ def _configure_child_bundles(parentBundle): def run_completeness_summary(bdict, h_mark, times, out_dir, results_db): """ - Calculate completeness and create completeness bundles from all N_Chances and Time (child) metrics - of the (discovery) bundles in bdict, and write completeness at h_mark to results_db, save bundle to disk. + Calculate completeness and create completeness bundles from all + N_Chances and Time (child) metrics of the (discovery) bundles in bdict, + and write completeness at h_mark to results_db, save bundle to disk. This should be done after combining any sub-sets of the metric results. Parameters ---------- - bdict : dict of metric_bundles + bdict : `dict` of `maf.MetricBundle` Dict containing ~rubin_sim.maf.MoMetricBundles, including bundles we're expecting to contain completeness. - h_mark : float + h_mark : `float` h_mark value to add to completeness plotting dict. - If not defined (None), then the h_mark from the plotdict from the metric will be used if available. - If None and h_mark not in plot_dict, then median of h_range value will be used. - times : np.ndarray + If not defined (None), then the h_mark from the plotdict from the + metric bundle will be used if available. + If None and h_mark not in plot_dict, then median of h_range values + will be used. + times : `np.ndarray` The times at which to calculate completeness (over time). - out_dir : str + out_dir : `str` Output directory to save completeness bundles to disk. - results_db : ~rubin_sim.maf.db.results_db + results_db : `maf.ResultsDb` Results database to save information about completeness bundle. Returns ------- - dict of metric_bundles + metricDict : `dict` of `maf.MetricBundles` A dictionary of the new completeness bundles. Keys match original keys, with additions of "[Differential,Cumulative]Completeness@Time" and "[Differential,Cumulative]Completeness" to distinguish new entries. 
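A minimal sketch of driving run_completeness_summary per the docstring above, assuming bdict and results_db come from a prior moving-object discovery run; the yearly time grid is illustrative:

    import numpy as np
    from rubin_sim.maf.batches import run_completeness_summary

    # Completeness evaluated once per year and reported at H <= 22.0.
    times = np.arange(0, 10 * 365.25 + 1, 365.25)
    comp = run_completeness_summary(
        bdict, h_mark=22.0, times=times, out_dir="ss_out", results_db=results_db
    )
    # New keys carry the "[Differential,Cumulative]Completeness" (and "@Time")
    # suffixes described above.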
@@ -691,7 +694,8 @@ def _compbundles(b, bundle, h_mark, results_db): summaryTimeMetrics2 = summary_completeness_at_time(times, h_val=h_mark - 2, h_index=0.33) summaryHMetrics = summary_completeness_over_h(requiredChances=1, Hindex=0.33) comp = {} - # Bundle = single metric bundle. Add differential and cumulative completeness. + # Bundle = single metric bundle. + # Add differential and cumulative completeness. if "Time" in bundle.metric.name: for metric in summaryTimeMetrics: newkey = b + " " + metric.name @@ -763,7 +767,8 @@ def plot_completeness( fig_format="pdf", ): """Plot a minor subset of the completeness results.""" - # Separate some subsets to plot together - first just the simple 15 and 30 night detection loss metrics. + # Separate some subsets to plot together - + # first just the simple 15 and 30 night detection loss metrics. keys = [ "3_pairs_in_30_nights_detection_loss", "3_pairs_in_15_nights_detection_loss", @@ -783,7 +788,8 @@ def plot_completeness( elif "Cumulative" in k: plotComp[k] = bdictCompleteness[k] - # Add plot dictionaries to code 30 nights red, 15 nights blue, differentials dotted. + # Add plot dictionaries to code 30 nights red, 15 nights blue, + # differentials dotted. def _codePlot(key): plotDict = {} if "Differential" in k: @@ -811,7 +817,8 @@ def _codePlot(key): figroot = run_name display_dict = deepcopy(first.display_dict) - # Plot completeness as a function of time. Make custom plot, then save it with PlotHandler. + # Plot completeness as a function of time. + # Make custom plot, then save it with PlotHandler. fig = plt.figure(figsize=(8, 6)) for k in plotTimes: plt.plot( @@ -1264,34 +1271,37 @@ def characterization_outer_batch( def run_fraction_summary(bdict, h_mark, out_dir, results_db): """ - Calculate fractional completeness of the population for color and lightcurve metrics. + Calculate fractional completeness of the population for + color and lightcurve metrics. This should be done after combining any sub-sets of the metric results. Parameters ---------- - bdict : dict of metric_bundles - Dict containing ~rubin_sim.maf.MoMetricBundles, - including bundles we're expecting to contain lightcurve/color evaluations. - h_mark : float + bdict : `dict` of `maf.MoMetricBundle` + Dict of bundles containing lightcurve/color evaluations. + h_mark : `float` h_mark value to add to completeness plotting dict. - If defined, this value is used. If None, but h_mark in plot_dict for metric, then this value (-2) is - used. If h_mark not in plotdict, then the median h_range value - 2 is used. - times : np.ndarray + If defined, this value is used. + If None, but h_mark is in the metric's plot_dict, then that value - 2 is + used. If h_mark is not in the plot_dict, the median h_range value - 2 is used. + times : `np.ndarray` The times at which to calculate completeness (over time). - out_dir : str + out_dir : `str` Output directory to save completeness bundles to disk. - results_db : ~rubin_sim.maf.db.results_db + results_db : `maf.ResultsDb` Results database to save information about completeness bundle. Returns ------- - dict of metric_bundles - Dictionary of the metric bundles for the fractional evaluation of the population. + metricDict : `dict` of `maf.MetricBundle` + Dictionary of the metric bundles for the fractional evaluation + of the population. """ fractions = {} - # Look for metrics from asteroid or outer solar system color/lightcurve metrics. + # Look for metrics from asteroid or outer solar system + # color/lightcurve metrics.
inversionSummary = fraction_population_at_threshold([1], ["Lightcurve Inversion"]) asteroidColorSummary = fraction_population_at_threshold( [4, 3, 2, 1], @@ -1319,7 +1329,7 @@ def run_fraction_summary(bdict, h_mark, out_dir, results_db): h_mark = bundle.plot_dict["h_mark"] - 2 if h_mark is None: h_mark = np.median(bundle.slicer.slice_points["H"]) - 2 - # Make sure we didn't push h_mark outside the range of H values for metric + # Make sure we didn't push h_mark outside the range of H values if h_mark < bundle.slicer.slice_points["H"].min(): h_mark = bundle.slicer.slice_points["H"].min() for k in asteroidSummaryMetrics: @@ -1337,7 +1347,7 @@ def run_fraction_summary(bdict, h_mark, out_dir, results_db): bundle, summary_metric, h_mark=h_mark, results_db=results_db ) - # Write the fractional populations bundles to disk, so we can re-read them later. + # Write fractional populations bundles to disk, so we can re-read later. for b, bundle in fractions.items(): bundle.write(out_dir=out_dir, results_db=results_db) return fractions @@ -1479,7 +1489,7 @@ def plot_activity(bdict, figroot=None, results_db=None, out_dir=".", fig_format= outfile_root=figroot + "_activityDays", ) if len(activity_deg) > 0: - # Plot (mean) likelihood of detection of activity over X amount of orbit + # Plot mean likelihood of detection of activity over X amount of orbit ph = plots.PlotHandler(fig_format=fig_format, results_db=results_db, out_dir=out_dir) ph.set_metric_bundles(activity_deg) ph.joint_metric_names = "Chances of detecting activity covering X deg" @@ -1493,30 +1503,34 @@ def plot_activity(bdict, figroot=None, results_db=None, out_dir=".", fig_format= def read_and_combine(orbitRoot, baseDir, splits, metricfile): - """Read and combine the metric results from split locations, returning a single bundle. + """Read and combine the metric results from split locations, + returning a single bundle. This will read the files from baseDir/orbitRoot_[split]/metricfile - where split = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], etc. (the subsets the original orbit file was split into). + where split = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], etc. + (the subsets the original orbit file was split into). Parameters ---------- - orbitRoot: str + orbitRoot : `str` The root of the orbit file - l7_5k, mbas_5k, etc. - baseDir: str + baseDir : `str` The root directory containing the subset directories. (e.g. '.' often) - splits: np.ndarray or list of ints - The integers describing the split directories (e.g. [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - metricfile: str + splits : `np.ndarray` or `list` of `int` + The integers describing the split directories + (e.g. [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + metricfile : `str` The metric filename. Returns ------- - ~rubin_sim.maf.bundle - A single metric bundle containing the combined data from each of the subsets. + metric_bundle : `~rubin_sim.maf.MoMetricBundle` + A single metric bundle containing the combined data from the subsets. - Note that this won't work for particularly complex metric values, such as the parent Discovery metrics. - However, you can read and combine their child metrics, as for these we can propagate the data masks. + Note that this won't work for particularly complex metric values, + such as the parent Discovery metrics. However, you can read and combine + their child metrics, as for these we can propagate the data masks. """ subsets = {} for i in splits: @@ -1548,7 +1562,8 @@ def combine_subsets(mbSubsets): # Join metric values.
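A small sketch of read_and_combine for results split across ten orbit-file subsets, per the docstring above; the metric filename is a placeholder:

    from rubin_sim.maf.batches import read_and_combine

    # Reads ./mbas_5k_0/ ... ./mbas_5k_9/ and joins them into one bundle.
    bundle = read_and_combine(
        "mbas_5k", ".", list(range(10)), "my_opsim_Discovery_Time_MBA_MOOB.npz"
    )

Per the note above, this is intended for child metrics (or other simple metric values) rather than the parent Discovery metrics.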
joint.slicer = slicer joint.metric = first.metric - # Don't just use the slicer shape to define the metric_values, because of CompletenessBundles. + # Don't just use the slicer shape to define the metric_values, + # because of CompletenessBundles. metric_values = np.zeros(first.metric_values.shape, float) metric_values_mask = np.zeros(first.metric_values.shape, bool) for i in mbSubsets: diff --git a/rubin_sim/maf/batches/openshutter_batch.py b/rubin_sim/maf/batches/openshutter_batch.py index a81410612..6919d8912 100644 --- a/rubin_sim/maf/batches/openshutter_batch.py +++ b/rubin_sim/maf/batches/openshutter_batch.py @@ -15,15 +15,18 @@ def openshutterFractions(colmap=None, runName="opsim", extraSql=None, extraInfoL Parameters ---------- - colmap : dict, optional - A dictionary with a mapping of column names. Default will use OpsimV4 column names. - runName : str, optional - The name of the simulated survey. Default is "opsim". - extraSql : str, optional + colmap : `dict`, optional + A dictionary with a mapping of column names. + runName : `str`, optional + The name of the simulated survey. + extraSql : `str`, optional Additional constraint to add to any sql constraints (e.g. 'night<365') - Default None, for no additional constraints. - extraInfoLabel : str, optional - Additional info_label to add before any below (i.e. "WFD"). Default is None. + extraInfoLabel : `str`, optional + Additional info_label to add before any below (i.e. "WFD"). + + Returns + ------- + metric_bundleDict : `dict` of `maf.MetricBundle` """ if colmap is None: colmap = col_map_dict() @@ -55,7 +58,8 @@ def openshutterFractions(colmap=None, runName="opsim", extraSql=None, extraInfoL slicer = slicers.UniSlicer() bundle = mb.MetricBundle(metric, slicer, extraSql, info_label=subgroup, display_dict=displayDict) bundleList.append(bundle) - # Count the number of nights total in the survey (start to finish of observations). + # Count the number of nights total in the survey + # (start to finish of observations). displayDict["caption"] = "Number of nights from start to finish of survey, %s." % subgroup.lower() metric = metrics.FullRangeMetric(colmap["night"]) slicer = slicers.UniSlicer() diff --git a/rubin_sim/maf/batches/radar_limited.py b/rubin_sim/maf/batches/radar_limited.py index 7ef9f952a..9a37d9774 100644 --- a/rubin_sim/maf/batches/radar_limited.py +++ b/rubin_sim/maf/batches/radar_limited.py @@ -23,12 +23,12 @@ def radar_limited( srd_only=False, mjd0=None, ): - """A batch of metrics for looking at survey performance relative to the SRD and the main - science drivers of LSST. + """A batch of metrics for looking at survey performance + relative to the SRD and the main science drivers of LSST. Parameters ---------- - run_name : `str`, optional + runName : `str`, optional The simulation run name that should appear as plot titles. benchmarkArea : `float`, optional The area to use for SRD metrics (sq degrees) @@ -37,11 +37,16 @@ def radar_limited( minNvisits : `int`, optional The minimum number of visits to use for SRD metrics. long_microlensing : `bool`, optional - Add the longer running microlensing metrics to the batch (subset of crossing times only) + Add the longer running microlensing metrics to the batch + (a subset of crossing times only) srd_only : `bool`, optional Only return the SRD metrics mjd0 : float, optional The modified Julian date start date of the survey. 
+ + Returns + ------- + metric_bundleDict : `dict` of `maf.MetricBundle` """ bundleList = [] @@ -55,9 +60,10 @@ def radar_limited( allfilterinfo_label, ) = filter_list(all=True) - standardStats = standard_summary(withCount=False) + standardStats = standard_summary(with_count=False) - # This is the default slicer for most purposes in this batch. Note that the cache is off - + # This is the default slicer for most purposes in this batch. + # Note that the cache is on - # if the metric requires a dust map, this is not the right slicer to use. healpixslicer = slicers.HealpixSlicer(nside=nside, use_cache=True) subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()] @@ -219,7 +225,7 @@ def radar_limited( bundleList.append(bundle) displayDict["order"] += 1 - # Parallax normalized to 'best possible' if all visits separated by 6 months. + # Parallax normalized to 'best possible'. # This separates the effect of cadence from depth. for rmag in rmags_para: metric = metrics.ParallaxMetric( @@ -252,7 +258,8 @@ def radar_limited( ) bundleList.append(bundle) displayDict["order"] += 1 - # Parallax problems can be caused by HA and DCR degeneracies. Check their correlation. + # Parallax problems can be caused by HA and DCR degeneracies. + # Check their correlation. for rmag in rmags_para: metric = metrics.ParallaxDcrDegenMetric( metric_name="Parallax-DCR degeneracy @ %.1f" % (rmag), rmag=rmag @@ -334,7 +341,7 @@ def radar_limited( plot_dict=plotDict, plot_funcs=subsetPlots, display_dict=displayDict, - summary_metrics=standard_summary(withCount=False), + summary_metrics=standard_summary(with_count=False), ) bundleList.append(bundle) @@ -355,15 +362,17 @@ def radar_limited( plot_dict=plotDict, plot_funcs=subsetPlots, display_dict=displayDict, - summary_metrics=standard_summary(withCount=False), + summary_metrics=standard_summary(with_count=False), ) bundleList.append(bundle) displayDict["order"] += 1 - # Better version of the rapid revisit requirements: require a minimum number of visits between - # dtMin and dtMax, but also a minimum number of visits between dtMin and dtPair (the typical pair time). + # Better version of the rapid revisit requirements: + # require a minimum number of visits between + # dtMin and dtMax, but also a minimum number of visits + # between dtMin and dtPair (the typical pair time). # 1 means the healpix met the requirements (0 means did not). - dTmin = 40.0 / 60.0 # (minutes) 40s minumum for rapid revisit range + dTmin = 40.0 / 60.0 # (minutes) 40s minimum for rapid revisit range dTpairs = 20.0 # minutes (time when pairs should start kicking in) dTmax = 30.0 # 30 minute maximum for rapid revisit range nOne = 82 # Number of revisits between 40s-30m required @@ -454,7 +463,8 @@ def radar_limited( ######################### ######################### - # Run this per filter, to look at variations in counts of galaxies in blue bands? + # Run this per filter, to look at variations in + # counts of galaxies in blue bands? displayDict = { "group": "Galaxies", "subgroup": "Galaxy Counts", @@ -502,7 +512,8 @@ def radar_limited( displayDict["subgroup"] = f"{subgroupCount}: Static Science" ## Static Science - # Calculate the static science metrics - effective survey area, mean/median coadded depth, stdev of + # Calculate the static science metrics - effective survey area, + # mean/median coadded depth, stdev of # coadded depth and the 3x2ptFoM emulator. 
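The rapid-revisit comment earlier in this batch describes the rule: enough revisits between dTmin and dTmax, plus enough between dTmin and dTpairs. A plain-numpy sketch of that counting logic for the visit times at a single healpix point (an illustration, not the MAF metric implementation; n_two is a placeholder because the second threshold is not shown in this excerpt):

    import numpy as np

    def meets_rapid_revisit(mjd, n_one=82, n_two=28):
        # Thresholds from above, converted from 40 s / 20 min / 30 min to days.
        d_tmin = 40.0 / 60.0 / 60.0 / 24.0
        d_tpairs = 20.0 / 60.0 / 24.0
        d_tmax = 30.0 / 60.0 / 24.0
        dt = np.diff(np.sort(mjd))
        n_range = np.sum((dt >= d_tmin) & (dt <= d_tmax))
        n_pairs = np.sum((dt >= d_tmin) & (dt <= d_tpairs))
        # 1 if this point met the requirements, 0 if not.
        return int((n_range >= n_one) & (n_pairs >= n_two))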
dustmap = maps.DustMap(nside=nside, interp=False) @@ -555,7 +566,8 @@ def radar_limited( bundleList.append(bundle) ## LSS Science - # The only metric we have from LSS is the NGals metric - which is similar to the GalaxyCountsExtended + # The only metric we have from LSS is the NGals metric - + # which is similar to the GalaxyCountsExtended # metric, but evaluated only on the depth/dust cuts footprint. subgroupCount += 1 displayDict["subgroup"] = f"{subgroupCount}: LSS" @@ -563,7 +575,8 @@ def radar_limited( plotDict = {"n_ticks": 5} ## WL metrics - # Calculates the number of visits per pointing, after removing parts of the footprint due to dust/depth + # Calculates the number of visits per pointing, after removing + # parts of the footprint due to dust/depth # Count visits in gri bands. subgroupCount += 1 displayDict["subgroup"] = f"{subgroupCount}: WL" @@ -893,14 +906,13 @@ def radar_limited( displayDict["group"] = "Variables/Transients" displayDict["subgroup"] = "KNe" n_events = 10000 - displayDict[ - "caption" - ] = f"KNe metric, injecting {n_events} lightcurves over the entire sky, GW170817-like only. Ignoring DDF observations." + caption = f"KNe metric, injecting {n_events} lightcurves over the entire sky, GW170817-like only." + caption += " Ignoring DDF observations." + displayDict["caption"] = caption displayDict["order"] = 0 # Kilonova parameters inj_params_list = [ {"mej_dyn": 0.005, "mej_wind": 0.050, "phi": 30, "theta": 25.8}, - # {"mej_dyn": 0.005, "mej_wind": 0.050, "phi": 30, "theta": 0.0}, # no longer preferred ] filename = maf.get_kne_filename(inj_params_list) kneslicer = maf.generate_kn_pop_slicer(n_events=n_events, n_files=len(filename), d_min=10, d_max=600) @@ -922,9 +934,9 @@ def radar_limited( bundleList.append(bundle) n_events = 10000 - displayDict[ - "caption" - ] = f"KNe metric, injecting {n_events} lightcurves over the entire sky, entire model population. Ignoring DDF observations." + caption = f"KNe metric, injecting {n_events} lightcurves over the entire sky, entire model population." + caption += " Ignoring DDF observations." + displayDict["caption"] = caption displayDict["order"] = 1 # Kilonova parameters filename = maf.get_kne_filename(None) diff --git a/rubin_sim/maf/batches/science_radar_batch.py b/rubin_sim/maf/batches/science_radar_batch.py index 0c74cfa4b..e8a1c1f74 100644 --- a/rubin_sim/maf/batches/science_radar_batch.py +++ b/rubin_sim/maf/batches/science_radar_batch.py @@ -24,15 +24,33 @@ def science_radar_batch( srd_only=False, mjd0=None, ): - """A batch of metrics for looking at survey performance relative to the SRD and the main - science drivers of LSST. + """A batch of metrics for looking at survey performance relative to the + SRD and the main science drivers of LSST. Parameters ---------- - long_microlensing : `bool` (True) - Add the longer running microlensing metrics to the batch (subset of crossing times only) - srd_only : `bool` (False) + runName : `str`, optional + The name of the opsim run. + nside : `int`, optional + The nside to use for the HealpixSlicer (where relevant). + benchmarkArea : `float`, optional + The benchmark value to use for fO_Area comparison. + benchmarkNvisits : `float`, optional + The benchmark value to use for fO_Nvisits comparisons. + minNvisits : `float`, optional + The value to use to establish the area covered by at least minNvis. 
+ long_microlensing : `bool`, optional + Add the longer running microlensing metrics to the batch + (at a subset of crossing times only) + srd_only : `bool` , optional Only return the SRD metrics + mjd0 : `float`, optional + Set the start time for the survey, for metrics which need + this information. + + Returns + ------- + metric_bundleDict : `dict` of `maf.MetricBundle` """ bundleList = [] @@ -46,10 +64,11 @@ def science_radar_batch( allfilterinfo_label, ) = filter_list(all=True) - standardStats = standard_summary(withCount=False) + standardStats = standard_summary(with_count=False) - # This is the default slicer for most purposes in this batch. Note that the cache is off - - # if the metric requires a dust map, this is not the right slicer to use. + # This is the default slicer for most purposes in this batch. + # Note that the cache is on - if the metric requires a dust map, + # this is not the right slicer to use. healpixslicer = slicers.HealpixSlicer(nside=nside, use_cache=True) subsetPlots = [plots.HealpixSkyMap(), plots.HealpixHistogram()] @@ -210,7 +229,7 @@ def science_radar_batch( bundleList.append(bundle) displayDict["order"] += 1 - # Parallax normalized to 'best possible' if all visits separated by 6 months. + # Parallax normalized to 'best possible'. # This separates the effect of cadence from depth. for rmag in rmags_para: metric = metrics.ParallaxMetric( @@ -243,7 +262,8 @@ def science_radar_batch( ) bundleList.append(bundle) displayDict["order"] += 1 - # Parallax problems can be caused by HA and DCR degeneracies. Check their correlation. + # Parallax problems can be caused by HA and DCR degeneracies. + # Check their correlation. for rmag in rmags_para: metric = metrics.ParallaxDcrDegenMetric( metric_name="Parallax-DCR degeneracy @ %.1f" % (rmag), rmag=rmag @@ -325,7 +345,7 @@ def science_radar_batch( plot_dict=plotDict, plot_funcs=subsetPlots, display_dict=displayDict, - summary_metrics=standard_summary(withCount=False), + summary_metrics=standard_summary(with_count=False), ) bundleList.append(bundle) @@ -346,15 +366,17 @@ def science_radar_batch( plot_dict=plotDict, plot_funcs=subsetPlots, display_dict=displayDict, - summary_metrics=standard_summary(withCount=False), + summary_metrics=standard_summary(with_count=False), ) bundleList.append(bundle) displayDict["order"] += 1 - # Better version of the rapid revisit requirements: require a minimum number of visits between - # dtMin and dtMax, but also a minimum number of visits between dtMin and dtPair (the typical pair time). + # Better version of the rapid revisit requirements: + # require a minimum number of visits between + # dtMin and dtMax, but also a minimum number of visits + # between dtMin and dtPair (the typical pair time). # 1 means the healpix met the requirements (0 means did not). - dTmin = 40.0 / 60.0 # (minutes) 40s minumum for rapid revisit range + dTmin = 40.0 / 60.0 # (minutes) 40s minimum for rapid revisit range dTpairs = 20.0 # minutes (time when pairs should start kicking in) dTmax = 30.0 # 30 minute maximum for rapid revisit range nOne = 82 # Number of revisits between 40s-30m required @@ -445,7 +467,8 @@ def science_radar_batch( ######################### ######################### - # Run this per filter, to look at variations in counts of galaxies in blue bands? + # Run this per filter, to look at variations in counts + # of galaxies in blue bands? 
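The science_radar_batch docstring above lists its main knobs; a minimal call sketch, with an illustrative run name and keyword names taken from that docstring:

    from rubin_sim.maf.batches import science_radar_batch

    # Just the SRD subset, skipping the long-running microlensing metrics.
    bdict = science_radar_batch(
        runName="baseline_v3_10yrs", srd_only=True, long_microlensing=False
    )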
displayDict = { "group": "Galaxies", "subgroup": "Galaxy Counts", @@ -528,8 +551,9 @@ def science_radar_batch( displayDict["subgroup"] = f"{subgroupCount}: Static Science" ## Static Science - # Calculate the static science metrics - effective survey area, mean/median coadded depth, stdev of - # coadded depth and the 3x2ptFoM emulator. + # Calculate the static science metrics - effective survey area, + # mean/median coadded depth, stdev of coadded depth and + # the 3x2ptFoM emulator. dustmap = maps.DustMap(nside=nside, interp=False) pix_area = hp.nside2pixarea(nside, degrees=True) @@ -581,13 +605,14 @@ def science_radar_batch( bundleList.append(bundle) ## LSS Science - # The only metric we have from LSS is the NGals metric - which is similar to the GalaxyCountsExtended + # The only metric we have from LSS is the NGals metric - + # which is similar to the GalaxyCountsExtended # metric, but evaluated only on the depth/dust cuts footprint. subgroupCount += 1 displayDict["subgroup"] = f"{subgroupCount}: LSS" displayDict["order"] = 0 plotDict = {"n_ticks": 5} - # Have to include all filters in query, so that we check for all-band coverage. + # Have to include all filters in query to check for filter coverage. # Galaxy numbers calculated using 'bandpass' images only though. sqlconstraint = 'note not like "DD%"' info_label = f"{bandpass} band galaxies non-DD" @@ -623,7 +648,8 @@ def science_radar_batch( bundleList.append(bundle) ## WL metrics - # Calculates the number of visits per pointing, after removing parts of the footprint due to dust/depth + # Calculates the number of visits per pointing, + # after removing parts of the footprint due to dust/depth. # Count visits in gri bands. subgroupCount += 1 displayDict["subgroup"] = f"{subgroupCount}: WL" @@ -866,7 +892,8 @@ def science_radar_batch( ) # Calculate the expected AGN structure function error - # These agn test magnitude values are determined by looking at the baseline median m5 depths + # These agn test magnitude values are determined by + # looking at the baseline median m5 depths # For v1.7.1 these values are: agn_m5 = {"u": 22.89, "g": 23.94, "r": 23.5, "i": 22.93, "z": 22.28, "y": 21.5} # And the expected medians SF error at those values is about 0.04 @@ -984,7 +1011,8 @@ def science_radar_batch( tdc_plots = [plots.HealpixSkyMap(), plots.HealpixHistogram()] plotDict = {"x_min": 0.01, "color_min": 0.01, "percentile_clip": 70, "n_ticks": 5} tdc_summary = [metrics.MeanMetric(), metrics.MedianMetric(), metrics.RmsMetric()] - # Ideally need a way to do better on calculating the summary metrics for the high accuracy area. + # Ideally need a way to do better on calculating the summary metrics + # for the high accuracy area. slicer = slicers.HealpixSlicer(nside=nside_tdc, use_cache=False) tdcMetric = metrics.TdcMetric(metric_name="TDC") dustmap = maps.DustMap(nside=nside_tdc, interp=False) @@ -1312,14 +1340,13 @@ def science_radar_batch( displayDict["group"] = "Variables/Transients" displayDict["subgroup"] = "KNe" n_events = 500000 - displayDict[ - "caption" - ] = f"KNe metric, injecting {n_events} lightcurves over the entire sky, GW170817-like only. Ignoring DDF observations." + caption = f"KNe metric, injecting {n_events} lightcurves over the entire sky, GW170817-like only." + caption += " Ignoring DDF observations." 
+ displayDict["caption"] = caption displayDict["order"] = 0 # Kilonova parameters inj_params_list = [ {"mej_dyn": 0.005, "mej_wind": 0.050, "phi": 30, "theta": 25.8}, - # {"mej_dyn": 0.005, "mej_wind": 0.050, "phi": 30, "theta": 0.0}, # no longer preferred ] filename = maf.get_kne_filename(inj_params_list) kneslicer = maf.generate_kn_pop_slicer(n_events=n_events, n_files=len(filename), d_min=10, d_max=600) @@ -1341,9 +1368,9 @@ def science_radar_batch( bundleList.append(bundle) n_events = 500000 - displayDict[ - "caption" - ] = f"KNe metric, injecting {n_events} lightcurves over the entire sky, entire model population. Ignoring DDF observations." + caption = f"KNe metric, injecting {n_events} lightcurves over the entire sky, entire model population." + caption += " Ignoring DDF observations." + displayDict["caption"] = caption displayDict["order"] = 1 # Kilonova parameters filename = maf.get_kne_filename(None) @@ -1626,7 +1653,8 @@ def science_radar_batch( "Number of stars in %s band with an measurement uncertainty due to crowding " "of less than 0.2 mag" % f ) - # Configure the NstarsMetric - note 'filtername' refers to the filter in which to evaluate crowding + # Configure the NstarsMetric - note 'filtername' refers to the + # filter in which to evaluate crowding metric = metrics.NstarsMetric( crowding_error=0.2, filtername=f, @@ -1656,7 +1684,8 @@ def science_radar_batch( "Number of stars in %s band with an measurement uncertainty " "of less than 0.2 mag, not considering crowding" % f ) - # Configure the NstarsMetric - note 'filtername' refers to the filter in which to evaluate crowding + # Configure the NstarsMetric - note 'filtername' refers to the + # filter in which to evaluate crowding metric = metrics.NstarsMetric( crowding_error=0.2, filtername=f, diff --git a/rubin_sim/maf/batches/skycoverage.py b/rubin_sim/maf/batches/skycoverage.py index 4b55c2a14..a6caa0688 100644 --- a/rubin_sim/maf/batches/skycoverage.py +++ b/rubin_sim/maf/batches/skycoverage.py @@ -16,15 +16,18 @@ def meanRADec(colmap=None, runName="opsim", extraSql=None, extraInfoLabel=None): Parameters ---------- - colmap : dict, optional - A dictionary with a mapping of column names. Default will use OpsimV4 column names. - runName : str, optional - The name of the simulated survey. Default is "opsim". - extraSql : str, optional + colmap : `dict`, optional + A dictionary with a mapping of column names. + runName : `str`, optional + The name of the simulated survey. + extraSql : `str`, optional Additional constraint to add to any sql constraints (e.g. 'night<365') - Default None, for no additional constraints. - extraInfoLabel : str, optional - Additional info_label to add before any below (i.e. "WFD"). Default is None. + extraInfoLabel : `str`, optional + Additional info_label to add before any below (i.e. "WFD"). + + Returns + ------- + metric_bundleDict : `dict` of `maf.MetricBundle` """ if colmap is None: colmap = col_map_dict() @@ -79,15 +82,18 @@ def eastWestBias(colmap=None, runName="opsim", extraSql=None, extraInfoLabel=Non Parameters ---------- - colmap : dict, optional - A dictionary with a mapping of column names. Default will use OpsimV4 column names. - runName : str, optional - The name of the simulated survey. Default is "opsim". - extraSql : str, optional + colmap : `dict`, optional + A dictionary with a mapping of column names. + runName : `str`, optional + The name of the simulated survey. + extraSql : `str`, optional Additional constraint to add to any sql constraints (e.g. 
'night<365') - Default None, for no additional constraints. - extraInfoLabel : str, optional - Additional info_label to add before any below (i.e. "WFD"). Default is None. + extraInfoLabel : `str`, optional + Additional info_label to add before any below (i.e. "WFD"). + + Returns + ------- + metric_bundleDict : `dict` of `maf.MetricBundle` """ if colmap is None: colmap = col_map_dict() diff --git a/rubin_sim/maf/batches/slew_batch.py b/rubin_sim/maf/batches/slew_batch.py index fbe4d8a4d..4aa505a14 100644 --- a/rubin_sim/maf/batches/slew_batch.py +++ b/rubin_sim/maf/batches/slew_batch.py @@ -14,20 +14,19 @@ def slewBasics(colmap=None, run_name="opsim", sql_constraint=None): """Generate a simple set of statistics about the slew times and distances. - These slew statistics can be run on the summary or default tables. Parameters ---------- - colmap : dict or None, optional - A dictionary with a mapping of column names. Default will use OpsimV4 column names. - runName : str, optional - The name of the simulated survey. Default is "opsim". - sqlConstraint : str or None, optional - SQL constraint to add to metrics. (note this runs on summary table). + colmap : `dict` or None, optional + A dictionary with a mapping of column names. + runName : `str`, optional + The name of the simulated survey. + sqlConstraint : `str` or None, optional + SQL constraint to add to metrics. Returns ------- - metric_bundleDict + metric_bundleDict : `dict` of `maf.MetricBundle` """ if colmap is None: @@ -49,7 +48,7 @@ def slewBasics(colmap=None, run_name="opsim", sql_constraint=None): } # Add total number of slews. metric = metrics.CountMetric(colmap["slewtime"], metric_name="Slew Count") - displayDict["caption"] = "Total number of slews recorded in summary table." + displayDict["caption"] = "Total number of slews recorded." displayDict["order"] += 1 bundle = mb.MetricBundle(metric, slicer, sql_constraint, info_label=info_label, display_dict=displayDict) bundleList.append(bundle) diff --git a/rubin_sim/maf/batches/srd_batch.py b/rubin_sim/maf/batches/srd_batch.py index da39515c7..335b175ce 100644 --- a/rubin_sim/maf/batches/srd_batch.py +++ b/rubin_sim/maf/batches/srd_batch.py @@ -33,20 +33,21 @@ def fOBatch( Parameters ---------- colmap : `dict` or None, optional - A dictionary with a mapping of column names. Default will use OpsimV4 column names. + A dictionary with a mapping of column names. runName : `str`, optional - The name of the simulated survey. Default is "opsim". + The name of the simulated survey. extraSql : `str` or None, optional Additional sql constraint to apply to all metrics. extraInfoLabel : `str` or None, optional Additional info_label to apply to all results. slicer : `rubin_sim.maf.slicer.HealpixSlicer` or None, optional - This must be a HealpixSlicer or some kind, although could be a HealpixSubsetSlicer. - Default is a HealpixSlicer with nside=64. + This must be a HealpixSlicer or some kind, + although could be a HealpixSubsetSlicer. + None will default to HealpixSlicer with nside=64. Returns ------- - metric_bundleDict + metric_bundleDict : `dict` of `maf.MetricBundle` """ if colmap is None: colmap = col_map_dict() @@ -55,7 +56,7 @@ def fOBatch( sql = "" info_label = "All visits" - # Add additional sql constraint (such as wfdWhere) and info_label, if provided. 
+ # Add additional sql constraint (such as wfdWhere) and info_label if (extraSql is not None) and (len(extraSql) > 0): sql = extraSql if extraInfoLabel is None: @@ -168,9 +169,9 @@ def astrometryBatch( Parameters ---------- colmap : `dict` or None, optional - A dictionary with a mapping of column names. Default will use OpsimV4 column names. + A dictionary with a mapping of column names. runName : `str`, optional - The name of the simulated survey. Default is "opsim". + The name of the simulated survey. extraSql : `str` or None, optional Additional sql constraint to apply to all metrics. extraInfoLabel : `str` or None, optional @@ -180,7 +181,7 @@ def astrometryBatch( Returns ------- - metric_bundleDict + metric_bundleDict : `dict` of `maf.MetricBundle` """ if colmap is None: colmap = col_map_dict() @@ -188,7 +189,7 @@ def astrometryBatch( sql = "" info_label = "All visits" - # Add additional sql constraint (such as wfdWhere) and info_label, if provided. + # Add additional sql constraint (such as wfdWhere) and info_label if (extraSql is not None) and (len(extraSql) > 0): sql = extraSql if extraInfoLabel is None: @@ -273,7 +274,7 @@ def astrometryBatch( bundleList.append(bundle) displayDict["order"] += 1 - # Parallax normalized to 'best possible' if all visits separated by 6 months. + # Parallax normalized to 'best possible' # This separates the effect of cadence from depth. for rmag in rmags_para: metric = metrics.ParallaxMetric( @@ -318,7 +319,8 @@ def astrometryBatch( ) bundleList.append(bundle) displayDict["order"] += 1 - # Parallax problems can be caused by HA and DCR degeneracies. Check their correlation. + # Parallax problems can be caused by HA and DCR degeneracies. + # Check their correlation. for rmag in rmags_para: metric = metrics.ParallaxDcrDegenMetric( metric_name="Parallax-DCR degeneracy @ %.1f" % (rmag), @@ -479,19 +481,20 @@ def rapidRevisitBatch( Parameters ---------- colmap : `dict` or None, optional - A dictionary with a mapping of column names. Default will use OpsimV4 column names. + A dictionary with a mapping of column names. runName : `str`, optional - The name of the simulated survey. Default is "opsim". + The name of the simulated survey. extraSql : `str` or None, optional Additional sql constraint to apply to all metrics. extraInfoLabel : `str` or None, optional Additional info_label to apply to all results. slicer : `rubin_sim_maf.slicers.HealpixSlicer` or None, optional - Optionally, specify something other than an nside=64 healpix slicer. (must be a healpix slicer) + Optionally, specify something other than an nside=64 healpix slicer. + (must be a healpix slicer) Returns ------- - metric_bundleDict + metric_bundleDict : `dict` of `maf.MetricBundle` """ if colmap is None: colmap = col_map_dict() @@ -499,7 +502,7 @@ def rapidRevisitBatch( sql = "" info_label = "All visits" - # Add additional sql constraint (such as wfdWhere) and info_label, if provided. + # Add additional sql constraint (such as wfdWhere) and info_label. 
if (extraSql is not None) and (len(extraSql) > 0): sql = extraSql if extraInfoLabel is None: @@ -554,15 +557,17 @@ def rapidRevisitBatch( plot_funcs=subsetPlots, info_label=info_label, display_dict=displayDict, - summary_metrics=standard_summary(withCount=False), + summary_metrics=standard_summary(with_count=False), ) bundleList.append(bundle) displayDict["order"] += 1 - # Better version of the rapid revisit requirements: require a minimum number of visits between - # dtMin and dtMax, but also a minimum number of visits between dtMin and dtPair (the typical pair time). + # Better version of the rapid revisit requirements: + # require a minimum number of visits between + # dtMin and dtMax, but also a minimum number of visits + # between dtMin and dtPair (the typical pair time). # 1 means the healpix met the requirements (0 means did not). - dTmin = 40.0 / 60.0 # (minutes) 40s minumum for rapid revisit range + dTmin = 40.0 / 60.0 # (minutes) 40s minimum for rapid revisit range dTpairs = 20.0 # minutes (time when pairs should start kicking in) dTmax = 30.0 # 30 minute maximum for rapid revisit range nOne = 82 # Number of revisits between 40s-30m required diff --git a/rubin_sim/maf/batches/time_batch.py b/rubin_sim/maf/batches/time_batch.py index dda2aa69f..f1a5ee73c 100644 --- a/rubin_sim/maf/batches/time_batch.py +++ b/rubin_sim/maf/batches/time_batch.py @@ -23,26 +23,27 @@ def intraNight( display_group="IntraNight", subgroup="Pairs", ): - """Generate a set of statistics about the pair/triplet/etc. rate within a night. + """Generate a set of statistics about the pair/triplet/etc. rate + within a night. Parameters ---------- - colmap : dict or None, optional - A dictionary with a mapping of column names. Default will use OpsimV4 column names. - runName : str, optional - The name of the simulated survey. Default is "opsim". - nside : int, optional - Nside for the healpix slicer. Default 64. - extraSql : str or None, optional + colmap : `dict` or None, optional + A dictionary with a mapping of column names. + runName : `str`, optional + The name of the simulated survey. + nside : `int`, optional + Nside for the healpix slicer. + extraSql : `str` or None, optional Additional sql constraint to apply to all metrics. - extraInfoLabel : str or None, optional + extraInfoLabel : `str` or None, optional Additional info_label to apply to all results. - slicer : slicer object (None) + slicer : `maf.BaseSlicer` or None Optionally use something other than a HealpixSlicer Returns ------- - metric_bundleDict + metric_bundleDict : `dict` of `maf.MetricBundle` """ if colmap is None: @@ -63,7 +64,8 @@ def intraNight( if slicer is None: slicer = slicers.HealpixSlicer(nside=nside, lat_col=decCol, lon_col=raCol, lat_lon_deg=degrees) - # Look for the fraction of visits in gri where there are pairs within dtMin/dtMax. + # Look for the fraction of visits in gri where there are pairs + # within dtMin/dtMax. displayDict = { "group": display_group, "subgroup": subgroup, @@ -131,7 +133,7 @@ def intraNight( ) bundleList.append(bundle) - # Look at the fraction of visits which have another visit within dtMax, gri. + # Look at the fraction of visits which have another visit within dtMax gri. 
dtMax = 60.0 metric = metrics.NRevisitsMetric( mjd_col=colmap["mjd"], @@ -180,7 +182,8 @@ def intraNight( bundleList.append(bundle) # Max Timespans (in each night) - # Run in all filters, u+g, g+r, r+i, i+z and z+y filters, and individual filters + # Run in all filters, u+g, g+r, r+i, i+z and z+y filters, + # and individual filters metric = metrics.NightTimespanMetric(percentile=75, night_col=colmap["night"], mjd_col=colmap["mjd"]) displayDict["caption"] = "75th percentile value of the maximum intra-night timespan, on each night" @@ -315,26 +318,27 @@ def interNight( display_group="InterNight", subgroup="Night gaps", ): - """Generate a set of statistics about the spacing between nights with observations. + """Generate a set of statistics about the spacing between nights + with observations. Parameters ---------- - colmap : dict or None, optional - A dictionary with a mapping of column names. Default will use OpsimV4 column names. - runName : str, optional - The name of the simulated survey. Default is "opsim". - nside : int, optional - Nside for the healpix slicer. Default 64. - extraSql : str or None, optional + colmap : `dict` or None, optional + A dictionary with a mapping of column names. + runName : `str`, optional + The name of the simulated survey. + nside : `int`, optional + Nside for the healpix slicer. + extraSql : `str` or None, optional Additional sql constraint to apply to all metrics. - extraInfoLabel : str or None, optional + extraInfoLabel : `str` or None, optional Additional info_label to use for all outputs. - slicer : slicer object (None) + slicer : `maf.BaseSlicer` or None Optionally use something other than a HealpixSlicer Returns ------- - metric_bundleDict + metric_bundleDict : `dict` of `maf.MetricBundle` """ if colmap is None: @@ -420,7 +424,8 @@ def interNight( ) bundleList.append(bundle) - # 20th percentile inter-night gap (each and all filters) - aimed at active rolling years + # 20th percentile inter-night gap (each and all filters) - + # aimed at active rolling years def rfunc(simdata): return np.percentile(simdata, 20) @@ -479,16 +484,17 @@ def timeGaps( display_group="Time Coverage", subgroup=None, ): - """Generate a set of statistics about the spacing between nights with observations. + """Generate a set of statistics about the spacing + between nights with observations. Parameters ---------- colmap : `dict`, optional - A dictionary with a mapping of column names. Default will use FBS column names. + A dictionary with a mapping of column names. runName : `str`, optional - The name of the simulated survey. Default is "opsim". + The name of the simulated survey. nside : `int`, optional - Nside for the healpix slicer. Default 64. + Nside for the healpix slicer. extraSql : `str`, optional Additional sql constraint to apply to all metrics. extraInfoLabel : `str`, optional @@ -498,7 +504,7 @@ def timeGaps( Returns ------- - bundle_dict: `dict` of `rubin_sim.maf.MetricBundle` + bundle_dict : `dict` of `rubin_sim.maf.MetricBundle` """ if colmap is None: @@ -612,22 +618,22 @@ def seasons( Parameters ---------- - colmap : dict or None, optional - A dictionary with a mapping of column names. Default will use OpsimV4 column names. - runName : str, optional - The name of the simulated survey. Default is "opsim". - nside : int, optional - Nside for the healpix slicer. Default 64. - extraSql : str or None, optional + colmap : `dict` or None, optional + A dictionary with a mapping of column names. + runName : `str`, optional + The name of the simulated survey. 
+ nside : `int`, optional + Nside for the healpix slicer. + extraSql : `str` or None, optional Additional sql constraint to apply to all metrics. - extraInfoLabel : str or None, optional + extraInfoLabel : `str` or None, optional Additional info_label to use for all outputs. - slicer : slicer object (None) + slicer : `maf.BaseSlicer` or None Optionally use something other than a HealpixSlicer Returns ------- - metric_bundleDict + metric_bundleDict : `dict` of `maf.MetricBundle` """ if colmap is None: @@ -684,7 +690,8 @@ def seasons( ) bundleList.append(bundle) - # 80th percentile season length - aimed at finding season length during rolling or long years + # 80th percentile season length - + # aimed at finding season length during rolling or long years def rfunc(simdata): return np.percentile(simdata, 80) diff --git a/rubin_sim/maf/batches/time_sci_batch.py b/rubin_sim/maf/batches/time_sci_batch.py index 9e900f99e..2fb45c3f6 100644 --- a/rubin_sim/maf/batches/time_sci_batch.py +++ b/rubin_sim/maf/batches/time_sci_batch.py @@ -18,24 +18,24 @@ def phaseGap( extraSql=None, extraInfoLabel=None, ): - """Generate a set of statistics about the pair/triplet/etc. rate within a night. + """Generate a set of statistics about period coverage and phase gaps. Parameters ---------- - colmap : dict or None, optional - A dictionary with a mapping of column names. Default will use OpsimV4 column names. - runName : str, optional - The name of the simulated survey. Default is "opsim". - nside : int, optional - Nside for the healpix slicer. Default 64. - extraSql : str or None, optional + colmap : `dict` or None, optional + A dictionary with a mapping of column names. + runName : `str`, optional + The name of the simulated survey. + nside : `int`, optional + Nside for the healpix slicer. + extraSql : `str` or None, optional Additional sql constraint to apply to all metrics. - extraInfoLabel : str or None, optional + extraInfoLabel : `str` or None, optional Additional info_label to apply to all results. Returns ------- - metric_bundleDict + metric_bundleDict : `dict` of `maf.MetricBundle` """ if colmap is None: diff --git a/rubin_sim/maf/batches/visitdepth_batch.py b/rubin_sim/maf/batches/visitdepth_batch.py index 871dd6bba..ed5082e5b 100644 --- a/rubin_sim/maf/batches/visitdepth_batch.py +++ b/rubin_sim/maf/batches/visitdepth_batch.py @@ -1,4 +1,5 @@ -"""Sets of metrics to look at general sky coverage - nvisits/coadded depth/Teff. +"""Sets of metrics to look at general sky coverage - +nvisits/coadded depth/Teff. """ __all__ = ("nvisitsM5Maps", "tEffMetrics", "nvisitsPerNight", "nvisitsPerSubset") @@ -12,7 +13,7 @@ import rubin_sim.maf.stackers as stackers import rubin_sim.maf.utils as mafUtils -from .col_map_dict import col_map_dict, get_col_map +from .col_map_dict import col_map_dict from .common import filter_list, standard_summary @@ -44,7 +45,7 @@ def nvisitsM5Maps( Returns ------- - metric_bundleDict + metric_bundleDict : `dict` of `maf.MetricBundle` """ if colmap is None: colmap = col_map_dict() @@ -214,12 +215,12 @@ def tEffMetrics( Additional constraint to add to any sql constraints. extraInfoLabel : `str`, optional Additional info_label to add before any below (i.e. "WFD").
- slicer : `rubin_sim.maf.slicer` or None, optional + slicer : `rubin_sim.maf.BaseSlicer` or None, optional Optionally, use something other than an nside=64 healpix slicer Returns ------- - metric_bundleDict + metric_bundleDict : `dict` of `maf.MetricBundle` """ if colmap is None: colmap = col_map_dict() @@ -340,7 +341,7 @@ def nvisitsPerNight( Returns ------- - metric_bundleDict + metric_bundleDict : `dict` of `maf.MetricBundle` """ if colmap is None: colmap = col_map_dict() @@ -400,7 +401,7 @@ def nvisitsPerSubset( colmap : `dict` or None, optional A dictionary with a mapping of column names. runName : `str`, optional - The name of the simulated survey. Default is "opsim". + The name of the simulated survey. binNights : `int`, optional Number of nights to count in each bin. constraint : `str` or None, optional @@ -418,7 +419,7 @@ def nvisitsPerSubset( metric_bundleDict : `dict` of `rubin_sim.maf.MetricBundle` """ if colmap is None: - colmap = get_col_map("FBS") + colmap = col_map_dict() bdict = {} bundleList = [] diff --git a/rubin_sim/maf/db/__init__.py b/rubin_sim/maf/db/__init__.py index 075e9fdf1..b0ab5c014 100644 --- a/rubin_sim/maf/db/__init__.py +++ b/rubin_sim/maf/db/__init__.py @@ -1,2 +1,2 @@ -from .results_db import * -from .tracking_db import * +from .results_db import * # noqa: F403 +from .tracking_db import * # noqa: F403 diff --git a/rubin_sim/maf/db/results_db.py b/rubin_sim/maf/db/results_db.py index 40dc6de9a..b46aef7c4 100644 --- a/rubin_sim/maf/db/results_db.py +++ b/rubin_sim/maf/db/results_db.py @@ -109,7 +109,9 @@ class PlotRow(Base): """ Define contents and format of plot list table. - (Table to list all plots, link them to relevant metrics in MetricList, and provide info on filename). + (Table to list all plots, + link them to relevant metrics in MetricList, + and provide info on filename). """ __tablename__ = "plots" @@ -133,8 +135,8 @@ class SummaryStatRow(Base): """ Define contents and format of the summary statistics table. - (Table to list and link summary stats to relevant metrics in MetricList, and provide summary stat name, - value and potentially a comment). + (Table to list and link summary stats to relevant metrics in MetricList, + and provide summary stat name, value and potentially a comment). """ __tablename__ = "summarystats" @@ -155,18 +157,19 @@ def __repr__(self): class ResultsDb: - """The ResultsDb is a sqlite database containing information on the metrics run via MAF, - the plots created, the display information (such as captions), and any summary statistics output. + """ResultsDb is a sqlite database containing information on the metrics + run via MAF, the plots created, the display information (such as captions), + and any summary statistics output. """ def __init__(self, out_dir=None, database=None, verbose=False): """ - Instantiate the results database, creating metrics, plots and summarystats tables. + Set up the resultsDb database. """ - # We now require results_db to be a sqlite file (for simplicity). Leaving as attribute though. + # We now require results_db to be a sqlite file (for simplicity). + # Leaving as attribute though. self.driver = "sqlite" - # Connect to database - # for sqlite, connecting to non-existent database creates it automatically + # connecting to non-existent database creates it automatically if database is None: # Using default value for database name, should specify directory. 
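Aside: the sqlite auto-creation behavior relied on here is easy to verify in isolation. A rough sketch using the same sqlalchemy pieces (url.URL.create plus create_engine); the file name is purely illustrative:

    import os
    from sqlalchemy import create_engine
    from sqlalchemy.engine import url

    database = os.path.join(os.getcwd(), "example_resultsDb_sqlite.db")
    # For the sqlite driver, connecting to a non-existent file creates it.
    db_address = url.URL.create("sqlite", database=database)
    engine = create_engine(db_address)
    with engine.connect():
        pass
    print(os.path.isfile(database))  # True after the first connection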
if out_dir is None: @@ -190,7 +193,8 @@ def __init__(self, out_dir=None, database=None, verbose=False): # If this is a new file, then we should record date and version later. needs_version = not os.path.isfile(self.database) - # Connect to the specified file; this will create the database if it doesn't exist. + # Connect to the specified file; + # this will create the database if it doesn't exist. already_file = os.path.isfile(self.database) db_address = url.URL.create(self.driver, database=self.database) @@ -207,7 +211,8 @@ def __init__(self, out_dir=None, database=None, verbose=False): % (self.driver, self.database) ) self.slen = 1024 - # Check if we have a database matching this schema (with metric_info_label) + # Check if we have a database matching this schema + # (with metric_info_label) query = text("select * from metrics limit 1") cols = self.session.execute(query)._metadata.keys if "sql_constraint" not in cols: @@ -225,10 +230,13 @@ def __init__(self, out_dir=None, database=None, verbose=False): def update_database(self): """Update the results_db from 'metricMetaData' to 'metric_info_label' - and now also changing the camel case to snake case (metricId to metric_id, etc.). + and now also changing the camel case to snake case + (metricId to metric_id, etc.). - This updates results_db to work with the current version of MAF, including RunComparison and showMaf. - There is also a 'downgrade_database' to revert to the older style with 'metricMetadata. + This updates results_db to work with the current version of MAF, + including RunComparison and showMaf. + There is also a 'downgrade_database' to revert to the older style + with 'metricMetadata. """ warnings.warn( "Updating database to match new schema." @@ -241,7 +249,8 @@ def update_database(self): query = text("alter table metrics rename column metricMetadata to metric_info_label") self.session.execute(query) self.session.commit() - # Check for metricId vs. metric_id separately, as this change happened independently. + # Check for metricId vs. metric_id separately, + # as this change happened independently. if "metricId" in cols: for table in ["metrics", "summarystats", "plots", "displays"]: query = text(f"alter table {table} rename column metricId to metric_id") @@ -305,7 +314,8 @@ def update_database(self): def downgrade_database(self): """ Downgrade resultsDb to work with v0.10 0: @@ -517,8 +530,9 @@ def update_plot(self, metric_id, plot_type, plot_file, overwrite=False): plot_file : `str` The filename for this plot overwrite : `bool` - Replaces existing row with the same metric_id and plot_type, if True. - Default False, in which case additional plot is added to output (e.g. with different range) + If True, replaces existing row. + If False, an additional plot is added to the output (e.g. with + a different range of color values, etc). """ self.open() plotinfo = self.session.query(PlotRow).filter_by(metric_id=metric_id, plot_type=plot_type).all() @@ -534,11 +548,12 @@ def update_summary_stat(self, metric_id, summary_name, summary_value, ntry=3, pa """ Add a row to or update a row in the summary statistic table. - Most summary statistics will be a simple name (string) + value (float) pair. - For special summary statistics which must return multiple values, the base name - can be provided as 'name', together with a np recarray as 'value', where the - recarray also has 'name' and 'value' columns (and each name/value pair is then saved - as a summary statistic associated with this same metric_id). 
+ Most summary statistics will be a simple name (string) + value (float) + pair. For special summary statistics which must return multiple values, + the base name can be provided as 'name', together with a np.ndarray as + 'value', where the array also has 'name' and 'value' columns + (and each name/value pair is then saved as a summary statistic + associated with this same metric_id). Parameters ---------- @@ -546,17 +561,17 @@ def update_summary_stat(self, metric_id, summary_name, summary_value, ntry=3, pa The metric Id of this metric bundle summary_name : `str` The name of this summary statistic - summary_value: : `float` or `numpy.ndarray` + summary_value: : `float` or `np.ndarray` The value for this summary statistic. - If this is a numpy recarray, then it should also have 'name' and 'value' columns to save - each value to rows in the summary statistic table. + If this is a np.ndarray, then it should also have 'name' and + 'value' columns to save each value to rows in the summary stats. ntry : `int`, opt - The number of times to retry if database is locked. Default 3 times. + The number of times to retry if database is locked. pause_time : `int`, opt - Time to wait until trying again. Default 100s. + Time to wait until trying again. """ - # Allow for special summary statistics which return data in a np structured array with - # 'name' and 'value' columns. (specificially needed for TableFraction summary statistic). + # Allow for special summary statistics which return data in a + # np.ndarray with 'name' and 'value' columns. self.open() tries = 0 if isinstance(summary_value, np.ndarray): @@ -573,9 +588,9 @@ def update_summary_stat(self, metric_id, summary_name, summary_value, ntry=3, pa summary_value=value["value"], ) success = False - # This can hit a locked database if running jobs in parallel - # have it try a few times before actually failing since nothing should - # be writing for a long time. + # This can hit a locked database if running in parallel + # have it try a few times before actually failing + # since nothing should be writing for a long time. while (not success) & (tries < ntry): try: self.session.add(summarystat) @@ -647,7 +662,8 @@ def get_metric_id_like( metric_info_label_like=None, run_name=None, ): - """Find metric bundle Ids from the metric table, but search for names 'like' the values. + """Find metric bundle Ids from the metric table, + but search for names 'like' the values. (instead of a strict match from get_metric_id). Parameters @@ -701,7 +717,8 @@ def get_all_metric_ids(self): @staticmethod def build_summary_name(metric_name, metric_info_label, slicer_name, summary_stat_name=None): - """Standardize a complete summary metric name, combining the metric + slicer + summary + info_label""" + """Standardize a complete summary metric name, + combining the metric + slicer + summary + info_label.""" if metric_info_label is None: metric_info_label = "" if slicer_name is None: @@ -727,7 +744,8 @@ def get_summary_stats( """ Get the summary stats (optionally for metric_id list). Optionally, also specify the summary metric name. - Returns a numpy array of the metric information + summary statistic information. + Returns a numpy array of the metric information + + summary statistic information. Parameters ---------- @@ -745,7 +763,7 @@ def get_summary_stats( Returns ------- summarystats : `np.recarray` - Numpy recarray containing the selected summary statistic information. + Numpy recarray containing the selected summary stat information. 
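To make the multi-valued case concrete: a special summary statistic can be passed as a numpy structured array with 'name' and 'value' fields, and each row is then stored as its own summary-stat entry. A minimal sketch (the field contents are invented):

    import numpy as np

    # Structured array with 'name' and 'value' fields, as used by special
    # summaries such as TableFraction.
    summary_value = np.array(
        [("fraction_0", 0.12), ("fraction_1", 0.55), ("fraction_2", 0.33)],
        dtype=[("name", "U32"), ("value", float)],
    )

    # Each row becomes a separate (name, value) pair tied to the same metric_id.
    for row in summary_value:
        print(row["name"], row["value"])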
""" if metric_id is not None: if not hasattr(metric_id, "__iter__"): @@ -754,7 +772,8 @@ def get_summary_stats( ] summarystats = [] self.open() - # Join the metric table and the summarystat table, based on the metricID (the second filter) + # Join the metric table and the summarystat table, + # based on the metricID (the second filter) query = self.session.query(MetricRow, SummaryStatRow).filter( MetricRow.metric_id == SummaryStatRow.metric_id ) @@ -805,8 +824,8 @@ def get_summary_stats( def get_plot_files(self, metric_id=None, with_sim_name=False): """ - Return the metric_id, name, info_label, and all plot info (optionally for metric_id list). - Returns a numpy array of the metric information + plot file names. + Find the metric_id, name, info_label, and all plot info + (optionally for metric_id list). Parameters ---------- @@ -824,7 +843,8 @@ def get_plot_files(self, metric_id=None, with_sim_name=False): ``metric_name`` The metric name ``metric_info_label`` - info_label extracted from the sql constraint (usually the filter) + info_label extracted from the sql constraint + (usually the filter) ``plot_type`` The plot type ``plot_file`` @@ -843,14 +863,16 @@ def get_plot_files(self, metric_id=None, with_sim_name=False): self.open() plotfiles = [] for mid in metric_id: - # Join the metric table and the plot table based on the metricID (the second filter does the join) + # Join the metric table and the plot table based on the metricID + # (the second filter does the join) query = ( self.session.query(MetricRow, PlotRow) .filter(MetricRow.metric_id == mid) .filter(MetricRow.metric_id == PlotRow.metric_id) ) for m, p in query: - # The plot_file typically ends with .pdf (but the rest of name can have '.' or '_') + # The plot_file typically ends with .pdf + # (but the rest of name can have '.' or '_') thumb_file = "thumb." + ".".join(p.plot_file.split(".")[:-1]) + ".png" plot_file_fields = ( m.metric_id, @@ -926,7 +948,8 @@ def get_metric_info(self, metric_id=None, with_sim_name=False): ``sql_constraint`` The full sql constraint used in the bundleGroup ``metric_info_label`` - Metadata extracted from the `sql_constraint` (usually the filter) + Metadata extracted from the `sql_constraint` + (usually the filter) ``metric_datafile`` The file name of the file with the metric data itself. ``run_name`` @@ -942,7 +965,7 @@ def get_metric_info(self, metric_id=None, with_sim_name=False): self.open() metricInfo = [] for mId in metric_id: - # Query for all rows in metrics and displays that match any of the metric_ids. + # Query for all rows in metrics and displays that match metric_ids query = self.session.query(MetricRow).filter(MetricRow.metric_id == mId) for m in query: base_metric_name = m.metric_name.split("_")[0] @@ -978,13 +1001,12 @@ def get_metric_info(self, metric_id=None, with_sim_name=False): def get_metric_display_info(self, metric_id=None): """ - Get the contents of the metrics and displays table, together with the 'basemetric_name' - (optionally, for metric_id list). + Get the contents of the metrics and displays table, + together with the 'basemetric_name' (optionally, for metric_id list). Returns a numpy array of the metric information + display information. - One underlying assumption here is that all metrics have some display info. - In newer batches, this may not be the case, as the display info gets auto-generated when the - metric is plotted. + An underlying assumption here is that all metrics have some + display info. This may not always be the case. 
""" if metric_id is None: metric_id = self.get_all_metric_ids() @@ -995,7 +1017,7 @@ def get_metric_display_info(self, metric_id=None): self.open() metricInfo = [] for mId in metric_id: - # Query for all rows in metrics and displays that match any of the metric_ids. + # Query for all rows in metrics and displays that match metric_ids. query = ( self.session.query(MetricRow, DisplayRow) .filter(MetricRow.metric_id == mId) @@ -1038,7 +1060,9 @@ def get_metric_display_info(self, metric_id=None): return metricInfo def get_run_name(self): - """Return a list of the run_names for the metric bundles in the database.""" + """Return a list of the run_names for the metric bundles in + the database. + """ self.open() query = self.session.query(MetricRow.run_name.distinct()).all() run_name = [] diff --git a/rubin_sim/maf/db/tracking_db.py b/rubin_sim/maf/db/tracking_db.py index 48888b507..c4f27dd59 100644 --- a/rubin_sim/maf/db/tracking_db.py +++ b/rubin_sim/maf/db/tracking_db.py @@ -1,7 +1,6 @@ __all__ = ("TrackingDb", "add_run_to_database") import os -import warnings from sqlalchemy import Column, Integer, String, create_engine from sqlalchemy.engine import url @@ -17,7 +16,8 @@ class RunRow(Base): """ Define contents and format of run list table. - Table to list all available MAF results, along with their opsim run and some comment info. + Table to list all available MAF results, + along with their opsim run and some comment info. """ __tablename__ = "runs" @@ -57,16 +57,17 @@ def __repr__(self): class TrackingDb: - """Sqlite database to track MAF output runs and their locations, for show_maf""" + """Sqlite database to track MAF output runs and their locations, + for show_maf + """ def __init__(self, database=None, trackingDbverbose=False): """ - Instantiate the results database, creating metrics, plots and summarystats tables. + Set up the Tracking Database. """ self.verbose = trackingDbverbose self.driver = "sqlite" - # Connect to database - # for sqlite, connecting to non-existent database creates it automatically + # connecting to non-existent database creates it automatically if database is None: # Default is a file in the current directory. self.database = os.path.join(os.getcwd(), "trackingDb_sqlite.db") @@ -108,33 +109,35 @@ def add_run( Parameters ---------- - run_group : str, optional + run_group : `str`, optional Set a name to group this run with (eg. "Tier 1, 2016"). - run_name : str, optional + run_name : `str`, optional Set a name for the opsim run. - run_comment : str, optional + run_comment : `str`, optional Set a comment describing the opsim run. - run_version : str, optional + run_version : `str`, optional Set the version of opsim. - run_date : str, optional + run_date : `str`, optional Set the date the opsim run was created. - maf_comment : str, optional + maf_comment : `str`, optional Set a comment to describe the MAF analysis. - maf_version : str, optional + maf_version : `str`, optional Set the version of MAF used for analysis. - maf_date : str, optional + maf_date : `str`, optional Set the date the MAF analysis was run. - maf_dir : str, optional + maf_dir : `str`, optional The relative path to the MAF directory. - db_file : str, optional + db_file : `str`, optional The relative path to the Opsim SQLite database file. - maf_run_id : int, optional - The maf_run_id to assign to this record in the database (note this is a primary key!). - If this run (ie the maf_dir) exists in the database already, this will be ignored. 
+ maf_run_id : `int`, optional + The maf_run_id to assign to this record in the database + (note this is a primary key!). + If this run (ie the maf_dir) exists in the database already, + this will be ignored. Returns ------- - int + maf_run_id : `int` The maf_run_id stored in the database. """ if run_group is None: @@ -258,20 +261,19 @@ def add_run_to_database( maf_dir : `str` Path to the directory where the MAF results are located. tracking_db_file : `str` - Full filename (+path) to the tracking database storing the MAF run information. + Full filename (+path) to the tracking database to use. run_group: `str`, optional - Name to use to group this run with other opsim runs. Default None. + Name to use to group this run with other opsim runs. run_name : `str`, optional - Name of the opsim run. If not provided, will attempt to use run_name from configSummary.txt. + Name of the opsim run. run_comment : `str`, optional - Comment about the opsim run. If not provided, will attempt to use runComment from configSummary.txt. + Comment about the opsim run. run_version : `str`, optional - Value to use for the opsim version information. If not provided, will attempt to use the value from - configSummary.txt + Value to use for the opsim version information. maf_comment : `str`, optional - Comment about the MAF analysis. If not provided, no comment will be recorded. + Comment about the MAF analysis. db_file : `str`, optional - Relative path + name of the opsim database file. If not provided, no location will be recorded. + Relative path + name of the opsim database file. """ maf_dir = os.path.abspath(maf_dir) if not os.path.isdir(maf_dir): @@ -281,25 +283,22 @@ def add_run_to_database( # Connect to resultsDb for additional information if available if os.path.isfile(os.path.join(maf_dir, "resultsDb_sqlite.db")) and not skip_extras: - try: - resdb = ResultsDb(maf_dir) - if run_name is None: - run_name = resdb.get_run_name() - if len(run_name) > 1: - run_name = 0 - else: - run_name = run_name[0] - if maf_version is None or maf_date is None: - resdb.open() - query = resdb.session.query(VersionRow).all() - for v in query: - if maf_version is None: - maf_version = v.version - if maf_date is None: - maf_date = v.run_date - resdb.close() - except: - warnings.warn(f"Could not pull run information from resultsDb file for {maf_dir}") + resdb = ResultsDb(maf_dir) + if run_name is None: + run_name = resdb.get_run_name() + if len(run_name) > 1: + run_name = 0 + else: + run_name = run_name[0] + if maf_version is None or maf_date is None: + resdb.open() + query = resdb.session.query(VersionRow).all() + for v in query: + if maf_version is None: + maf_version = v.version + if maf_date is None: + maf_date = v.run_date + resdb.close() print("Adding to tracking database at %s:" % (tracking_db_file)) print(" Maf_dir = %s" % (maf_dir)) diff --git a/rubin_sim/maf/ddf_dir.py b/rubin_sim/maf/ddf_dir.py index 03087b005..57a272c89 100755 --- a/rubin_sim/maf/ddf_dir.py +++ b/rubin_sim/maf/ddf_dir.py @@ -33,7 +33,6 @@ def ddf_dir(): for filename, name in zip(db_files, run_names): if os.path.isdir(name + "_ddf"): shutil.rmtree(name + "_ddf") - colmap = batches.col_map_dict() bdict = {} bdict.update(batches.ddfBatch(run_name=name, nside=args.nside)) diff --git a/rubin_sim/maf/maf_contrib/__init__.py b/rubin_sim/maf/maf_contrib/__init__.py index 35abd63af..d1dcc956f 100644 --- a/rubin_sim/maf/maf_contrib/__init__.py +++ b/rubin_sim/maf/maf_contrib/__init__.py @@ -12,7 +12,6 @@ from .periodic_metric import * from .periodic_star_metric 
import * from .periodic_star_modulation_metric import * -from .phot_prec_metrics import * from .presto_color_kne_pop_metric import * from .star_count_mass_metric import * from .star_count_metric import * diff --git a/rubin_sim/maf/maf_contrib/cadence_over_visibility_window_metric.py b/rubin_sim/maf/maf_contrib/cadence_over_visibility_window_metric.py index cf43a1dc2..2775e1b56 100644 --- a/rubin_sim/maf/maf_contrib/cadence_over_visibility_window_metric.py +++ b/rubin_sim/maf/maf_contrib/cadence_over_visibility_window_metric.py @@ -2,14 +2,11 @@ # from astropy.visualization import astropy_mpl_style # plt.style.use(astropy_mpl_style) -import astropy.units as u -import matplotlib.pylab as plt import numpy as np from astropy.time import Time, TimeDelta import rubin_sim.maf.db as db import rubin_sim.maf.metricBundles as metricBundles -import rubin_sim.maf.metrics as metrics import rubin_sim.maf.slicers as slicers from rubin_sim.maf.metrics import BaseMetric diff --git a/rubin_sim/maf/maf_contrib/calc_expected_visits.py b/rubin_sim/maf/maf_contrib/calc_expected_visits.py index 9e7198255..f4257a59a 100644 --- a/rubin_sim/maf/maf_contrib/calc_expected_visits.py +++ b/rubin_sim/maf/maf_contrib/calc_expected_visits.py @@ -9,10 +9,6 @@ import numpy as np -import rubin_sim.maf.db as db -import rubin_sim.maf.metricBundles as metricBundles -import rubin_sim.maf.metrics as metrics -import rubin_sim.maf.slicers as slicers from rubin_sim.maf.metrics import BaseMetric from .calculate_lsst_field_visibility_astropy import calculate_lsst_field_visibility diff --git a/rubin_sim/maf/maf_contrib/calculate_lsst_field_visibility_astropy.py b/rubin_sim/maf/maf_contrib/calculate_lsst_field_visibility_astropy.py index 257ca1358..507f8c60a 100644 --- a/rubin_sim/maf/maf_contrib/calculate_lsst_field_visibility_astropy.py +++ b/rubin_sim/maf/maf_contrib/calculate_lsst_field_visibility_astropy.py @@ -140,7 +140,7 @@ def plot_visibility(ts, target_alts, sun_alts, hrs_visible_per_night, min_alt): idx = np.where(target_alts > -1e5) ax1.plot((ts - 2450000)[idx], target_alts[idx], "b-", label="Target altitude") ax1.set_xlabel("JD") - ax1.set_ylabel("Maximum altitude [$^{\circ}$]", color="b") + ax1.set_ylabel(r"Maximum altitude [$^{\circ}$]", color="b") ax1.xaxis.label.set_fontsize(18) ax1.yaxis.label.set_fontsize(18) for label in ax1.get_xticklabels(): diff --git a/rubin_sim/maf/maf_contrib/example_new_stacker.py b/rubin_sim/maf/maf_contrib/example_new_stacker.py deleted file mode 100644 index 467b47843..000000000 --- a/rubin_sim/maf/maf_contrib/example_new_stacker.py +++ /dev/null @@ -1,52 +0,0 @@ -# Example of a stacker added to the repo. -# ljones@astro.washington.edu - -__all__ = ("YearlyDitherStacker",) - -import numpy as np - -from rubin_sim.maf.stackers import BaseStacker, wrapRADec - - -class YearlyDitherStacker(BaseStacker): - """Add a dither of half the FOV depending on the year of the survey.""" - - def __init__(self, exp_mjd_col="expMJD", ra_col="fieldRA", dec_col="fieldDec"): - # Names of columns we want to add. - self.cols_added = ["yearlyDitherRA", "yearlyDitherDec"] - # Names of columns we need from database. - self.cols_req = [exp_mjd_col, ra_col, dec_col] - # List of units for our new columns. - self.units = ["rad", "rad"] - # Set our dither offset. - self.dither_offset = 1.75 / 180.0 * np.pi - # And save the column names. - self.exp_mjd_col = exp_mjd_col - self.ra_col = ra_col - self.dec_col = dec_col - - def _run(self, sim_data): - # What 'year' is each visit in? 
- year = np.floor((sim_data[self.exp_mjd_col] - sim_data[self.exp_mjd_col][0]) / 365.25) - # Define dither based on year. - dither_ra = np.zeros(len(sim_data[self.ra_col])) - dither_dec = np.zeros(len(sim_data[self.dec_col])) - # In y1, y3, y5, y6, y8 & y10 ra dither = 0. - # In y2 & y7, ra dither = +ditherOffset - # In y4 & y9, ra dither = -ditherOffset - condition = (year == 2) | (year == 7) - dither_ra[condition] = self.dither_offset - condition = (year == 4) | (year == 9) - dither_ra[condition] = -1.0 * self.dither_offset - # In y1, y2, y4, y6, y7 & y9, dec dither = 0 - # In y3 & y8, dec dither = -ditherOffset - # In y5 & y10, dec dither = ditherOffset - condition = (year == 3) | (year == 8) - dither_dec[condition] = -1.0 * self.dither_offset - condition = (year == 5) | (year == 10) - dither_dec[condition] = self.dither_offset - # Calculate actual RA/Dec and wrap into appropriate range. - sim_data["yearlyDitherRA"], sim_data["yearlyDitherDec"] = wrapRADec( - sim_data[self.ra_col] + dither_ra, sim_data[self.dec_col] + dither_dec - ) - return sim_data diff --git a/rubin_sim/maf/maf_contrib/grb_transient_metric.py b/rubin_sim/maf/maf_contrib/grb_transient_metric.py index 7b4a80015..e7a1f77fe 100644 --- a/rubin_sim/maf/maf_contrib/grb_transient_metric.py +++ b/rubin_sim/maf/maf_contrib/grb_transient_metric.py @@ -157,7 +157,6 @@ def run(self, data_slice, slice_point=None): # Total number of transients that could go off back-to-back n_trans_max = np.floor(self.survey_duration / (self.trans_duration / 365.25)) tshifts = np.arange(self.n_phase_check) * self.trans_duration / float(self.n_phase_check) - n_detected = 0 n_trans_max = 0 for tshift in tshifts: # Compute the total number of back-to-back transients are possible to detect diff --git a/rubin_sim/maf/maf_contrib/kne_metrics.py b/rubin_sim/maf/maf_contrib/kne_metrics.py index 19c7353ac..1d7606ba9 100644 --- a/rubin_sim/maf/maf_contrib/kne_metrics.py +++ b/rubin_sim/maf/maf_contrib/kne_metrics.py @@ -19,7 +19,7 @@ def get_kne_filename(inj_params_list=None): Parameters ---------- - inj_params_list : list of dict + inj_params_list : `list` [`dict`] parameters for the kilonova model such as mass of the dynamical ejecta (mej_dyn), mass of the disk wind ejecta (mej_wind), semi opening angle of the cylindrically-symmetric ejecta @@ -37,7 +37,8 @@ def get_kne_filename(inj_params_list=None): if inj_params_list is None or len(inj_params_list) == 0: return file_list - # Otherwise find the parameters for each file and then find the relevant matches. + # Otherwise find the parameters for each file and + # then find the relevant matches. params = {} matched_files = [] for filename in file_list: @@ -84,7 +85,7 @@ class KnLc: Parameters ---------- - file_list : list of str (None) + file_list : `list` [`str`] or None List of file paths to load. If None, loads up all the files from data/bns/ """ @@ -113,12 +114,17 @@ def interp(self, t, filtername, lc_indx=0): Parameters ---------- - t : array of floats + t : `np.ndarray`, (N,) The times to interpolate the light curve to. - filtername : str + filtername : `str` The filter. one of ugrizy - lc_index : int (0) -   Which file to use. + lc_index : `int`, optional + Which file to use. 
+ + Returns + ------- + result : `np.ndarray`, (N,) + Array of lightcurve brightnesses at the times of t.= """ result = np.interp( @@ -172,8 +178,8 @@ def __init__( ) def _multi_detect(self, around_peak): - """ - Simple detection criteria: detect at least a certain number of times + """Simple detection criteria: + detect at least a certain number of times """ result = 1 # Detected data points @@ -195,30 +201,29 @@ def _ztfrest_simple( select_red=False, select_blue=False, ): - """ - Selection criteria based on rise or decay rate; simplified version of - the methods employed by the ZTFReST project + """Selection criteria based on rise or decay rate; + simplified version of the methods employed by the ZTFReST project (Andreoni & Coughlin et al., 2021) Parameters ---------- - around_peak : array + around_peak : `np.ndarray`, (N,) indexes corresponding to 5sigma detections - mags : array + mags : `np.ndarray`, (N,) magnitudes obtained interpolating models on the data_slice - t : array + t : `np.ndarray`, (N,) relative times - filters : array + filters : `np.ndarray`, (N,) filters in which detections happened - min_dt : float + min_dt : `float` minimum time gap between first and last detection in a given band - min_fade : float + min_fade : `float` fade rate threshold (positive, mag/day) - max_rise : float + max_rise : `float` rise rate threshold (negative, mag/day) - select_red : bool + select_red : `bool` if True, only red 'izy' filters will be considered - select_blue : bool + select_blue : `bool` if True, only blue 'ugr' filters will be considered Examples @@ -242,9 +247,9 @@ def _ztfrest_simple( fil = [] # Check time gaps and rise or fade rate for each band for f in set(filters): - if select_red is True and not (f in "izy"): + if select_red is True and f not in "izy": continue - elif select_blue is True and not (f in "ugr"): + elif select_blue is True and f not in "ugr": continue times_f = t[around_peak][np.where(filters == f)[0]] mags_f = mags[around_peak][np.where(filters == f)[0]] @@ -278,9 +283,8 @@ def _ztfrest_simple( return result def _multi_color_detect(self, filters): - """ - Color-based simple detection criteria: detect at least twice, - with at least two filters + """Color-based simple detection criteria: + detect at least twice, with at least two filters """ result = 1 # detected in at least two filters @@ -290,14 +294,13 @@ def _multi_color_detect(self, filters): return result def _red_color_detect(self, filters, min_det=4): - """ - Detected at least min_det times in either izy colors + """Detected at least min_det times in either izy colors Parameters ---------- - filters : array + filters : `np.ndarray`, (N,) filters in which detections happened - min_det : float or int + min_det : `float` or `int` minimum number of detections required in izy bands """ result = 1 @@ -314,14 +317,13 @@ def _red_color_detect(self, filters, min_det=4): return result def _blue_color_detect(self, filters, min_det=4): - """ - Detected at least min_det times in either ugr colors + """Detected at least min_det times in either ugr colors Parameters ---------- - filters : array + filters : `np.ndarray`, (N,) filters in which detections happened - min_det : float or int + min_det : `float` or `int` minimum number of detections required in ugr bands """ result = 1 @@ -426,27 +428,27 @@ def generate_kn_pop_slicer( Parameters ---------- - t_start : float (1) + t_start : `float`, optional The night to start kilonova events on (days) - t_end : float (3652) + t_end : `float`, optional The final night of 
kilonova events - n_events : int (10000) + n_events : `int`, optional The number of kilonova events to generate - seed : float + seed : `float`, optional The seed passed to np.random - n_files : int (308) + n_files : `int`, optional The number of different kilonova lightcurves to use This should match the length of the filenames list passed to the KNePopMetric directly. - d_min : float or int (10) + d_min : `float` or `int`, optional Minimum luminosity distance (Mpc) - d_max : float or int (300) + d_max : `float` or `int`, optional Maximum luminosity distance (Mpc) - ra, dec : np.array (None) + ra, dec : `np.ndarray`, (N,) or None The ra and dec to use for event positions. Generates uniformly on the spehere if None. (degrees) """ def rndm(a, b, g, size=1): - """Power-law gen for pdf(x)\propto x^{g-1} for a<=x<=b""" + """Power-law gen for pdf(x) \propto x^{g-1} for a<=x<=b""" r = np.random.random(size=size) ag, bg = a**g, b**g return (ag + (bg - ag) * r) ** (1.0 / g) diff --git a/rubin_sim/maf/maf_contrib/lss_obs_strategy/__init__.py b/rubin_sim/maf/maf_contrib/lss_obs_strategy/__init__.py index 5021be46c..d8a5dcc0a 100644 --- a/rubin_sim/maf/maf_contrib/lss_obs_strategy/__init__.py +++ b/rubin_sim/maf/maf_contrib/lss_obs_strategy/__init__.py @@ -6,4 +6,3 @@ from .galaxy_counts_with_pixel_calibration import * from .masking_algorithm_generalized import * from .os_bias_analysis import * -from .save_bundle_data_npz_format import * diff --git a/rubin_sim/maf/maf_contrib/lss_obs_strategy/artificial_structure_calculation.py b/rubin_sim/maf/maf_contrib/lss_obs_strategy/artificial_structure_calculation.py index 7bf240ace..bfb5c0d1f 100644 --- a/rubin_sim/maf/maf_contrib/lss_obs_strategy/artificial_structure_calculation.py +++ b/rubin_sim/maf/maf_contrib/lss_obs_strategy/artificial_structure_calculation.py @@ -67,7 +67,6 @@ from rubin_sim.maf.maf_contrib.lss_obs_strategy.masking_algorithm_generalized import ( masking_algorithm_generalized, ) -from rubin_sim.maf.maf_contrib.lss_obs_strategy.save_bundle_data_npz_format import save_bundle_data_npz_format from rubin_sim.maf.metrics import CountMetric as NumObsMetric @@ -534,19 +533,8 @@ def artificial_structure_calculation( save_early=False, ) b_group.run_all() - # ------------------------------------------------------------------------ + b_group.write_all() - # save the raw num_gal data. - if save_raw_num_gal_data: - out_dir_new = "numGalData_beforeMasking_before0pt" - if not os.path.exists("%s%s/%s" % (path, out_dir, out_dir_new)): - os.makedirs("%s%s/%s" % (path, out_dir, out_dir_new)) - save_bundle_data_npz_format( - "%s%s/%s" % (path, out_dir, out_dir_new), - my_bundles, - "numGalData_unmasked_no0pt", - filter_band, - ) # ------------------------------------------------------------------------ # print out tot(num_gal) associated with each strategy # write to the readme as well @@ -591,14 +579,9 @@ def artificial_structure_calculation( # save the num_gal data. 
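The rndm helper above is a standard inverse-transform sampler for pdf(x) proportional to x**(g-1) on [a, b]. A self-contained sketch; the choice g=3 (a constant volumetric rate in distance) is only an illustration, not necessarily the exponent used by the slicer:

    import numpy as np

    def rndm(a, b, g, size=1):
        # Inverse transform: draw u ~ Uniform(0, 1), then invert the CDF of
        # pdf(x) proportional to x**(g-1) on [a, b].
        r = np.random.random(size=size)
        ag, bg = a**g, b**g
        return (ag + (bg - ag) * r) ** (1.0 / g)

    # Illustration: luminosity distances between 10 and 300 Mpc, uniform in volume.
    print(rndm(10, 300, 3, size=5))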
if save_num_gal_data_after_masking: out_dir_new = "numGalData_afterBorderMasking" - if not os.path.exists("%s%s/%s" % (path, out_dir, out_dir_new)): - os.makedirs("%s%s/%s" % (path, out_dir, out_dir_new)) - save_bundle_data_npz_format( - "%s%s/%s" % (path, out_dir, out_dir_new), - my_bundles, - "numGalData_masked", - filter_band, - ) + for b in my_bundles: + my_bundles[b].write(out_dir=out_dir_new) + # ------------------------------------------------------------------------ # print out tot(num_gal) associated with each strategy # write to the readme as well @@ -972,14 +955,9 @@ def artificial_structure_calculation( # save the raw num_gal data. if save_num_gal_data_after0pt: out_dir_new = "numGalData_afterBorderMasking_after0pt" - if not os.path.exists("%s%s/%s" % (path, out_dir, out_dir_new)): - os.makedirs("%s%s/%s" % (path, out_dir, out_dir_new)) - save_bundle_data_npz_format( - "%s%s/%s" % (path, out_dir, out_dir_new), - my_bundles, - "numGalData_masked_with0pt", - filter_band, - ) + for b in my_bundles: + my_bundles[b].write(out_dir=out_dir_new) + # ------------------------------------------------------------------------ # print out tot(num_gal) associated with each strategy # add to the read me as well @@ -1020,14 +998,9 @@ def artificial_structure_calculation( # save the num_gal data. if saveNumGalDataAfterPoisson: out_dir_new = "numGalData_afterBorderMasking_after0pt_afterPoisson" - if not os.path.exists("%s%s/%s" % (path, out_dir, out_dir_new)): - os.makedirs("%s%s/%s" % (path, out_dir, out_dir_new)) - save_bundle_data_npz_format( - "%s%s/%s" % (path, out_dir, out_dir_new), - my_bundles, - "numGalData_masked_with0pt_withPoisson", - filter_band, - ) + for b in my_bundles: + my_bundles[b].write(out_dir=out_dir_new) + # ------------------------------------------------------------------------ # print out tot(num_gal) associated with each strategy # add to the read me as well @@ -1089,14 +1062,9 @@ def artificial_structure_calculation( # save the deltaN/N data if save_delta_n_by_n_data: out_dir_new = "deltaNByNData" - if not os.path.exists("%s%s/%s" % (path, out_dir, out_dir_new)): - os.makedirs("%s%s/%s" % (path, out_dir, out_dir_new)) - save_bundle_data_npz_format( - "%s%s/%s" % (path, out_dir, out_dir_new), - my_bundles, - "deltaNByNData_masked", - filter_band, - ) + for b in my_bundles: + my_bundles[b].write(out_dir=out_dir_new) + # ------------------------------------------------------------------------ # Calculate total power # add to the read me as well diff --git a/rubin_sim/maf/maf_contrib/lss_obs_strategy/coadd_m5_analysis.py b/rubin_sim/maf/maf_contrib/lss_obs_strategy/coadd_m5_analysis.py index 62820cd81..9e58b1761 100644 --- a/rubin_sim/maf/maf_contrib/lss_obs_strategy/coadd_m5_analysis.py +++ b/rubin_sim/maf/maf_contrib/lss_obs_strategy/coadd_m5_analysis.py @@ -28,7 +28,6 @@ from rubin_sim.maf.maf_contrib.lss_obs_strategy.masking_algorithm_generalized import ( masking_algorithm_generalized, ) -from rubin_sim.maf.maf_contrib.lss_obs_strategy.save_bundle_data_npz_format import save_bundle_data_npz_format def coadd_m5_analysis( @@ -468,14 +467,8 @@ def coadd_m5_analysis( # save the unmasked data? 
if saveun_masked_coadd_data: out_dir_new = "unmaskedCoaddData" - if not os.path.exists("%s%s/%s" % (path, out_dir, out_dir_new)): - os.makedirs("%s%s/%s" % (path, out_dir, out_dir_new)) - save_bundle_data_npz_format( - "%s%s/%s" % (path, out_dir, out_dir_new), - coadd_bundle, - "coaddM5Data_unmasked", - filter_band, - ) + for b in coadd_bundle: + coadd_bundle[b].write(out_dir=out_dir_new) # ------------------------------------------------------------------------ # mask the edges @@ -518,14 +511,8 @@ def coadd_m5_analysis( # save the masked data? if save_masked_coadd_data and (pixel_radius_for_masking > 0): out_dir_new = "maskedCoaddData" - if not os.path.exists("%s%s/%s" % (path, out_dir, out_dir_new)): - os.makedirs("%s%s/%s" % (path, out_dir, out_dir_new)) - save_bundle_data_npz_format( - "%s%s/%s" % (path, out_dir, out_dir_new), - coadd_bundle, - "coaddM5Data_masked", - filter_band, - ) + for b in coadd_bundle: + coadd_bundle[b].write(out_dir=out_dir_new) # ------------------------------------------------------------------------ # plot comparison plots diff --git a/rubin_sim/maf/maf_contrib/lss_obs_strategy/new_dither_stackers.py b/rubin_sim/maf/maf_contrib/lss_obs_strategy/new_dither_stackers.py deleted file mode 100644 index 46902452b..000000000 --- a/rubin_sim/maf/maf_contrib/lss_obs_strategy/new_dither_stackers.py +++ /dev/null @@ -1,1168 +0,0 @@ -############################################################################################################## -# Purpose: implement new dithering strategies. - -# The stackers here follow the naming scheme: [Pattern]Dither[Field]Per[Timescale]. The absence of the -# keyword 'Field' implies dither assignment to all fields. - -# Dithers are restricted to the hexagon inscribed in the circle with radius maxDither, where maxDither is the -# required dither offset (generally taken to be radius of the FOV). - -# Humna Awan: humna.awan@rutgers.edu -# Last updated: 06/11/16 -############################################################################################################### -__all__ = ( - "RepulsiveRandomDitherPerNightStacker", - "FermatSpiralDitherPerNightStacker", - "PentagonDitherPerSeasonStacker", - "PentagonDiamondDitherPerSeasonStacker", - "SpiralDitherPerSeasonStacker", -) - -# deprecated -# "RepulsiveRandomDitherFieldPerVisitStacker", -# "FermatSpiralDitherFieldPerVisitStacker", -# "RepulsiveRandomDitherFieldPerNightStacker", -# "FermatSpiralDitherFieldPerNightStacker", -# "PentagonDiamondDitherFieldPerSeasonStacker", - -import numpy as np - -from rubin_sim.maf.stackers import BaseStacker, SpiralDitherFieldPerVisitStacker, polygon_coords, wrap_ra_dec -from rubin_sim.utils import calc_season - - -class RepulsiveRandomDitherFieldPerVisitStacker(BaseStacker): - """ - Repulsive-randomly dither the RA and Dec pointings up to max_dither degrees from center, - different offset per visit for each field. - - Note: dithers are confined to the hexagon inscribed in the circle with radius max_dither. - - Parameters - ------------------- - ra_col: str - name of the RA column in the data. Default: 'fieldRA'. - dec_col : str - name of the Dec column in the data. Default: 'fieldDec'. - field_id_col : str - name of the fieldID column in the data. Default: 'field_id_col'. - max_dither: float - radius of the maximum dither offset, in degrees. Default: 1.75 - random_seed: int - random seed for the numpy random number generation for the dither offsets. - Default: None. 
- print_info: `bool` - set to True to print out information about the number of squares considered, - number of points chosen, and the filling factor. Default: False - """ - - def __init__( - self, - ra_col="fieldRA", - dec_col="fieldDec", - field_id_col="fieldID", - max_dither=1.75, - random_seed=None, - print_info=False, - ): - # Instantiate the RandomDither object and set internal variables. - self.ra_col = ra_col - self.dec_col = dec_col - self.field_id_col = field_id_col - # Convert max_dither from degrees (internal units for ra/dec are radians) - self.max_dither = np.radians(max_dither) - self.random_seed = random_seed - self.print_info = print_info - # self.units used for plot labels - self.units = ["rad", "rad"] - # Values required for framework operation: this specifies the names of the new columns. - self.cols_added = [ - "repulsiveRandomDitherFieldPerVisitRa", - "repulsiveRandomDitherFieldPerVisitDec", - ] - # Values required for framework operation: this specifies the data columns required from the database. - self.cols_req = [self.ra_col, self.dec_col, self.field_id_col] - - def _generate_rep_random_offsets(self, noffsets, num_tiles): - # Goal: Tile the circumscribing square with squares. Discard those that fall outside the hexagon. - # Then choose a square repulsive-randomly (i.e. choose without replacement), and choose a random - # point from the chosen square. - noffsets = int(noffsets) - num_tiles = int(num_tiles) - - square_side = self.max_dither * 2 # circumscribing square. center at (0,0) - tile_side = square_side / np.sqrt(num_tiles) - - x_center = np.zeros(num_tiles) # x-coords of the tiles' center - y_center = np.zeros(num_tiles) # y-coords of the tiles' center - - # fill in x-coordinates - k = 0 - x_center[k] = -tile_side * ((np.sqrt(num_tiles) / 2.0) - 0.5) # far left x-coord - - temp_xarr = [] - temp_xarr.append(x_center[k]) - while k < (np.sqrt(num_tiles) - 1): - # fill xCoords for squares right above the x-axis - k += 1 - x_center[k] = x_center[k - 1] + tile_side - temp_xarr.append(x_center[k]) - - # fill in the rest of the x_center array - indices = np.arange(k + 1, len(x_center)) - indices = indices % len(temp_xarr) - temp_xarr = np.array(temp_xarr) - x_center[k + 1 : num_tiles] = temp_xarr[indices] - - # fill in the y-coords - i = 0 - temp = np.empty(len(temp_xarr)) - while i < num_tiles: - # the highest y-center coord above the x-axis - if i == 0: - temp.fill(tile_side * ((np.sqrt(num_tiles) / 2.0) - 0.5)) - # y-centers below the top one - else: - temp.fill(y_center[i - 1] - tile_side) - - y_center[i : i + len(temp)] = temp - i += len(temp) - - # set up the hexagon - b = np.sqrt(3.0) * self.max_dither - m = np.sqrt(3.0) - h = self.max_dither * np.sqrt(3.0) / 2.0 - - # find the points that are inside hexagon - inside_hex = np.where( - (y_center < m * x_center + b) - & (y_center > m * x_center - b) - & (y_center < -m * x_center + b) - & (y_center > -m * x_center - b) - & (y_center < h) - & (y_center > -h) - )[0] - - num_points_inside_hex = len(inside_hex) - if self.print_info: - print("NumPointsInsideHexagon: ", num_points_inside_hex) - print("Total squares chosen: ", len(x_center)) - print( - "Filling factor for repRandom (Number of points needed/Number of points in hexagon): ", - float(noffsets) / num_points_inside_hex, - ) - - # keep only the points that are inside the hexagon - temp_x = x_center.copy() - temp_y = y_center.copy() - x_center = list(temp_x[inside_hex]) - y_center = list(temp_y[inside_hex]) - x_center_copy = list(np.array(x_center).copy()) # 
in case need to reuse the squares - y_center_copy = list(np.array(y_center).copy()) # in case need to reuse the squares - - # initiate the offsets' array - x_off = np.zeros(noffsets) - y_off = np.zeros(noffsets) - # randomly select a point from the inside_hex points. assign a random offset from within that square and - # then delete it from inside_hex array - for q in range(0, noffsets): - rand_num = np.random.rand() - rand_index_for_squares = int(np.floor(rand_num * num_points_inside_hex)) - - if rand_index_for_squares > len(x_center): - while rand_index_for_squares > len(x_center): - rand_num = np.random.rand() - rand_index_for_squares = int(np.floor(rand_num * num_points_inside_hex)) - rand_nums = np.random.rand(2) - rand_x_offset = (rand_nums[0] - 0.5) * (tile_side / 2.0) # subtract 0.5 to get +/- delta - rand_y_offset = (rand_nums[1] - 0.5) * (tile_side / 2.0) - - new_x = x_center[rand_index_for_squares] + rand_x_offset - new_y = y_center[rand_index_for_squares] + rand_y_offset - - # make sure the offset is within the hexagon - good_condition = ( - (new_y <= m * new_x + b) - & (new_y >= m * new_x - b) - & (new_y <= -m * new_x + b) - & (new_y >= -m * new_x - b) - & (new_y <= h) - & (new_y >= -h) - ) - if not (good_condition): - while not (good_condition): - rand_nums = np.random.rand(2) - rand_x_offset = (rand_nums[0] - 0.5) * (tile_side / 2.0) # subtract 0.5 to get +/- delta - rand_y_offset = (rand_nums[1] - 0.5) * (tile_side / 2.0) - - new_x = x_center[rand_index_for_squares] + rand_x_offset - new_y = y_center[rand_index_for_squares] + rand_y_offset - - good_condition = ( - (new_y <= m * new_x + b) - & (new_y >= m * new_x - b) - & (new_y <= -m * new_x + b) - & (new_y >= -m * new_x - b) - & (new_y <= h) - & (new_y >= -h) - ) - - x_off[q] = x_center[rand_index_for_squares] + rand_x_offset - y_off[q] = y_center[rand_index_for_squares] + rand_y_offset - - if len(x_center) == 0: - # have used all the squares ones - print("Starting reuse of the squares inside the hexagon") - x_center = x_center_copy.copy() - y_center = y_center_copy.copy() - x_center.pop(rand_index_for_squares) - y_center.pop(rand_index_for_squares) - num_points_inside_hex -= 1 - - self.x_off = x_off - self.y_off = y_off - - def _run(self, sim_data, cols_present=False): - # Generate random numbers for dither, using defined seed value if desired. - if self.random_seed is not None: - np.random.seed(self.random_seed) - - # analysis is simplified if deal with each field separately. - for fieldid in np.unique(sim_data[self.field_id_col]): - # Identify observations of this field. - match = np.where(sim_data[self.field_id_col] == fieldid)[0] - noffsets = len(match) - num_tiles = np.ceil(np.sqrt(noffsets) * 1.5) ** 2 # number of tiles must be a perfect square. - # arbitarily chosen factor of 1.5 to have more than necessary tiles inside hexagon. - self._generate_rep_random_offsets(noffsets, num_tiles) - # Add to RA and dec values. - sim_data["repulsiveRandomDitherFieldPerVisitRa"][match] = sim_data[self.ra_col][ - match - ] + self.x_off / np.cos(sim_data[self.dec_col][match]) - sim_data["repulsiveRandomDitherFieldPerVisitDec"][match] = ( - sim_data[self.dec_col][match] + self.y_off - ) - - # Wrap back into expected range. 
- ( - sim_data["repulsiveRandomDitherFieldPerVisitRa"], - sim_data["repulsiveRandomDitherFieldPerVisitDec"], - ) = wrap_ra_dec( - sim_data["repulsiveRandomDitherFieldPerVisitRa"], - sim_data["repulsiveRandomDitherFieldPerVisitDec"], - ) - return sim_data - - -class RepulsiveRandomDitherFieldPerNightStacker(RepulsiveRandomDitherFieldPerVisitStacker): - """ - Repulsive-randomly dither the RA and Dec pointings up to max_dither degrees from center, one dither offset - per new night of observation of a field. - - Note: dithers are confined to the hexagon inscribed in the circle with with radius max_dither - - Parameters - ------------------- - ra_col: str - name of the RA column in the data. Default: 'fieldRA'. - dec_col : str - name of the Dec column in the data. Default: 'fieldDec'. - field_id_col : str - name of the fieldID column in the data. Default: 'field_id_col'. - night_col : str - name of the night column in the data. Default: 'night'. - max_dither: float - radius of the maximum dither offset, in degrees. Default: 1.75 - random_seed: int - random seed for the numpy random number generation for the dither offsets. - Default: None. - print_info: `bool` - set to True to print out information about the number of squares considered, - number of points chosen, and the filling factor. Default: False - """ - - def __init__( - self, - ra_col="fieldRA", - dec_col="fieldDec", - field_id_col="fieldID", - night_col="night", - max_dither=1.75, - random_seed=None, - print_info=False, - ): - # Instantiate the RandomDither object and set internal variables. - super(RepulsiveRandomDitherFieldPerNightStacker, self).__init__( - ra_col=ra_col, - dec_col=dec_col, - field_id_col=field_id_col, - max_dither=max_dither, - random_seed=random_seed, - print_info=print_info, - ) - self.night_col = night_col - # Values required for framework operation: this specifies the names of the new columns. - self.cols_added = [ - "repulsiveRandomDitherFieldPerNightRa", - "repulsiveRandomDitherFieldPerNightDec", - ] - # Values required for framework operation: this specifies the data columns required from the database. - self.cols_req.append(self.night_col) - - def _run(self, sim_data, cols_present=False): - # Generate random numbers for dither, using defined seed value if desired. - if self.random_seed is not None: - np.random.seed(self.random_seed) - - for fieldid in np.unique(sim_data[self.field_id_col]): - # Identify observations of this field. - match = np.where(sim_data[self.field_id_col] == fieldid)[0] - - noffsets = len(match) - num_tiles = np.ceil(np.sqrt(noffsets) * 1.5) ** 2 - self._generate_rep_random_offsets(noffsets, num_tiles) - - # Apply dithers, increasing each night. - vertex_idxs = np.arange(0, len(match), 1) - nights = sim_data[self.night_col][match] - vertex_idxs = np.searchsorted(np.unique(nights), nights) - vertex_idxs = vertex_idxs % len(self.x_off) - - sim_data["repulsiveRandomDitherFieldPerNightRa"][match] = sim_data[self.ra_col][ - match - ] + self.x_off[vertex_idxs] / np.cos(sim_data[self.dec_col][match]) - sim_data["repulsiveRandomDitherFieldPerNightDec"][match] = ( - sim_data[self.dec_col][match] + self.y_off[vertex_idxs] - ) - # Wrap into expected range. 
- ( - sim_data["repulsiveRandomDitherFieldPerNightRa"], - sim_data["repulsiveRandomDitherFieldPerNightDec"], - ) = wrap_ra_dec( - sim_data["repulsiveRandomDitherFieldPerNightRa"], - sim_data["repulsiveRandomDitherFieldPerNightDec"], - ) - return sim_data - - -class RepulsiveRandomDitherPerNightStacker(RepulsiveRandomDitherFieldPerVisitStacker): - """ - Repulsive-randomly dither the RA and Dec pointings up to max_dither degrees from center, one dither offset - per night for all the fields. - - Note: dithers are confined to the hexagon inscribed in the circle with with radius max_dither - - Parameters - ------------------- - ra_col: str - name of the RA column in the data. Default: 'fieldRA'. - dec_col : str - name of the Dec column in the data. Default: 'fieldDec'. - night_col : str - name of the night column in the data. Default: 'night'. - max_dither: float - radius of the maximum dither offset, in degrees. Default: 1.75 - random_seed: int - random seed for the numpy random number generation for the dither offsets. - Default: None. - print_info: `bool` - set to True to print out information about the number of squares considered, - number of points chosen, and the filling factor. Default: False - """ - - def __init__( - self, - ra_col="fieldRA", - dec_col="fieldDec", - night_col="night", - max_dither=1.75, - random_seed=None, - print_info=False, - ): - # Instantiate the RepulsiveRandomDitherFieldPerVisitStacker object and set internal variables. - super(RepulsiveRandomDitherPerNightStacker, self).__init__( - ra_col=ra_col, - dec_col=dec_col, - max_dither=max_dither, - random_seed=random_seed, - print_info=print_info, - ) - self.night_col = night_col - # Values required for framework operation: this specifies the names of the new columns. - self.cols_added = [ - "repulsiveRandomDitherPerNightRa", - "repulsiveRandomDitherPerNightDec", - ] - # Values required for framework operation: this specifies the data columns required from the database. - self.cols_req.append(self.night_col) - - def _run(self, sim_data, cols_present=False): - # Generate random numbers for dither, using defined seed value if desired. - if self.random_seed is not None: - np.random.seed(self.random_seed) - - # Generate the random dither values, one per night. - nights = np.unique(sim_data[self.night_col]) - num_nights = len(nights) - num_tiles = np.ceil(np.sqrt(num_nights) * 1.5) ** 2 - self._generate_rep_random_offsets(num_nights, num_tiles) - - # Add to RA and dec values. - for n, x, y in zip(nights, self.x_off, self.y_off): - match = np.where(sim_data[self.night_col] == n)[0] - sim_data["repulsiveRandomDitherPerNightRa"][match] = sim_data[self.ra_col][match] + x / np.cos( - sim_data[self.dec_col][match] - ) - sim_data["repulsiveRandomDitherPerNightDec"][match] = sim_data[self.dec_col][match] + y - # Wrap RA/Dec into expected range. - ( - sim_data["repulsiveRandomDitherPerNightRa"], - sim_data["repulsiveRandomDitherPerNightDec"], - ) = wrap_ra_dec( - sim_data["repulsiveRandomDitherPerNightRa"], - sim_data["repulsiveRandomDitherPerNightDec"], - ) - return sim_data - - -class FermatSpiralDitherFieldPerVisitStacker(BaseStacker): - """ - Offset along a Fermat's spiral with num_points, out to a maximum radius of max_dither. - Sequential offset for each visit to a field. - - Note: dithers are confined to the hexagon inscribed in the circle with radius max_dither - Note: Fermat's spiral is defined by r= c*sqrt(n), theta= n*angle, n= integer - - Parameters - ------------------- - ra_col: str - name of the RA column in the data. 
Default: 'fieldRA'. - dec_col : str - name of the Dec column in the data. Default: 'fieldDec'. - field_id_col : str - name of the fieldID column in the data. Default: 'fieldID'. - num_points: int - number of points in the spiral. Default: 60 - max_dither: float - radius of the maximum dither offset, in degrees. Default: 1.75 - gold_angle: float - angle in degrees defining the spiral: theta= multiple of gold_angle - Default: 137.508 - - """ - - def __init__( - self, - ra_col="fieldRA", - dec_col="fieldDec", - field_id_col="fieldID", - num_points=60, - max_dither=1.75, - gold_angle=137.508, - ): - self.ra_col = ra_col - self.dec_col = dec_col - self.field_id_col = field_id_col - # Convert max_dither from degrees (internal units for ra/dec are radians) - self.num_points = num_points - self.gold_angle = gold_angle - self.max_dither = np.radians(max_dither) - # self.units used for plot labels - self.units = ["rad", "rad"] - # Values required for framework operation: this specifies the names of the new columns. - self.cols_added = [ - "fermatSpiralDitherFieldPerVisitRa", - "fermatSpiralDitherFieldPerVisitDec", - ] - # Values required for framework operation: this specifies the data columns required from the database. - self.cols_req = [self.ra_col, self.dec_col, self.field_id_col] - - def _generate_fermat_spiral_offsets(self): - # Fermat's spiral: r= c*sqrt(n), theta= n*angle - # Golden spiral: r= c*sqrt(n), theta= n*137.508degrees - n = np.arange(0, self.num_points) - theta = np.radians(n * self.gold_angle) - rmax = np.sqrt(theta.max() / np.radians(self.gold_angle)) - scaling_factor = 0.8 * self.max_dither / rmax - r = scaling_factor * np.sqrt(n) - - self.x_off = r * np.cos(theta) - self.y_off = r * np.sin(theta) - - def _run(self, sim_data, cols_present=False): - # Generate the spiral offset vertices. - self._generate_fermat_spiral_offsets() - # Now apply to observations. - for fieldid in np.unique(sim_data[self.field_id_col]): - match = np.where(sim_data[self.field_id_col] == fieldid)[0] - # Apply sequential dithers, increasing with each visit. - vertex_idxs = np.arange(0, len(match), 1) - vertex_idxs = vertex_idxs % self.num_points - sim_data["fermatSpiralDitherFieldPerVisitRa"][match] = sim_data[self.ra_col][match] + self.x_off[ - vertex_idxs - ] / np.cos(sim_data[self.dec_col][match]) - sim_data["fermatSpiralDitherFieldPerVisitDec"][match] = ( - sim_data[self.dec_col][match] + self.y_off[vertex_idxs] - ) - # Wrap into expected range. - ( - sim_data["fermatSpiralDitherFieldPerVisitRa"], - sim_data["fermatSpiralDitherFieldPerVisitDec"], - ) = wrap_ra_dec( - sim_data["fermatSpiralDitherFieldPerVisitRa"], - sim_data["fermatSpiralDitherFieldPerVisitDec"], - ) - return sim_data - - -class FermatSpiralDitherFieldPerNightStacker(FermatSpiralDitherFieldPerVisitStacker): - """ - Offset along a Fermat's spiral with num_points, out to a maximum radius of max_dither. - one dither offset per new night of observation of a field. - - Note: dithers are confined to the hexagon inscribed in the circle with with radius max_dither - Note: Fermat's spiral is defined by r= c*sqrt(n), theta= n*angle, n= integer - - Parameters - ----------- - ra_col: str - name of the RA column in the data. Default: 'fieldRA'. - dec_col : str - name of the Dec column in the data. Default: 'fieldDec'. - field_id_col : str - name of the fieldID column in the data. Default: 'fieldID'. - night_col : str - name of the night column in the data. Default: 'night'. - num_points: int - number of points in the spiral. 
Default: 60 - max_dither: float - radius of the maximum dither offset, in degrees. Default: 1.75 - gold_angle: float - angle in degrees defining the spiral: theta= multiple of gold_angle. Default: 137.508 - """ - - def __init__( - self, - ra_col="fieldRA", - dec_col="fieldDec", - field_id_col="fieldID", - night_col="night", - num_points=60, - max_dither=1.75, - gold_angle=137.508, - ): - super(FermatSpiralDitherFieldPerNightStacker, self).__init__( - ra_col=ra_col, - dec_col=dec_col, - field_id_col=field_id_col, - num_points=num_points, - max_dither=max_dither, - gold_angle=gold_angle, - ) - self.night_col = night_col - # Values required for framework operation: this specifies the names of the new columns. - self.cols_added = [ - "fermatSpiralDitherFieldPerNightRa", - "fermatSpiralDitherFieldPerNightDec", - ] - # Values required for framework operation: this specifies the data columns required from the database. - self.cols_req.append(self.night_col) - - def _run(self, sim_data, cols_present=False): - # Generate the spiral offset vertices. - self._generate_fermat_spiral_offsets() - # Now apply to observations. - for fieldid in np.unique(sim_data[self.field_id_col]): - match = np.where(sim_data[self.field_id_col] == fieldid)[0] - # Apply sequential dithers, increasing with each visit. - vertex_idxs = np.arange(0, len(match), 1) - nights = sim_data[self.night_col][match] - vertex_idxs = np.searchsorted(np.unique(nights), nights) - vertex_idxs = vertex_idxs % self.num_points - sim_data["fermatSpiralDitherFieldPerNightRa"][match] = sim_data[self.ra_col][match] + self.x_off[ - vertex_idxs - ] / np.cos(sim_data[self.dec_col][match]) - sim_data["fermatSpiralDitherFieldPerNightDec"][match] = ( - sim_data[self.dec_col][match] + self.y_off[vertex_idxs] - ) - # Wrap into expected range. - ( - sim_data["fermatSpiralDitherFieldPerNightRa"], - sim_data["fermatSpiralDitherFieldPerNightDec"], - ) = wrap_ra_dec( - sim_data["fermatSpiralDitherFieldPerNightRa"], - sim_data["fermatSpiralDitherFieldPerNightDec"], - ) - return sim_data - - -class FermatSpiralDitherPerNightStacker(FermatSpiralDitherFieldPerVisitStacker): - """ - Offset along a Fermat's spiral with num_points, out to a maximum radius of max_dither. - Sequential offset per night for all fields. - - Note: dithers are confined to the hexagon inscribed in the circle with with radius max_dither - Note: Fermat's spiral is defined by r= c*sqrt(n), theta= n*angle, n= integer - - Parameters - ---------- - ra_col: str - name of the RA column in the data. Default: 'fieldRA'. - dec_col : str - name of the Dec column in the data. Default: 'fieldDec'. - field_id_col : str - name of the fieldID column in the data. Default: 'fieldID'. - night_col : str - name of the night column in the data. Default: 'night'. - num_points: int - number of points in the spiral. Default: 60 - max_dither: float - radius of the maximum dither offset, in degrees. Default: 1.75 - gold_angle: float - angle in degrees defining the spiral: theta= multiple of gold_angle - Default: 137.508 - """ - - def __init__( - self, - ra_col="fieldRA", - dec_col="fieldDec", - field_id_col="fieldID", - night_col="night", - num_points=60, - max_dither=1.75, - gold_angle=137.508, - ): - super(FermatSpiralDitherPerNightStacker, self).__init__( - ra_col=ra_col, - dec_col=dec_col, - field_id_col=field_id_col, - num_points=num_points, - max_dither=max_dither, - gold_angle=gold_angle, - ) - self.night_col = night_col - # Values required for framework operation: this specifies the names of the new columns. 
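The golden-angle spiral behind these Fermat stackers is easy to reproduce in isolation. Below is a minimal, self-contained sketch of the offset generation, assuming only numpy; the function name is illustrative, and the 0.8 scaling mirrors the _generate_fermat_spiral_offsets helper shown above.

import numpy as np

def fermat_spiral_offsets(num_points=60, max_dither_deg=1.75, gold_angle=137.508):
    # Fermat's spiral: r = c * sqrt(n), theta = n * gold_angle
    max_dither = np.radians(max_dither_deg)
    n = np.arange(0, num_points)
    theta = np.radians(n * gold_angle)
    # Scale so the outermost point sits at ~0.8 * max_dither
    rmax = np.sqrt(theta.max() / np.radians(gold_angle))
    r = 0.8 * max_dither / rmax * np.sqrt(n)
    return r * np.cos(theta), r * np.sin(theta)

x_off, y_off = fermat_spiral_offsets()
# All offsets stay inside 0.8 * max_dither
assert np.hypot(x_off, y_off).max() <= 0.8 * np.radians(1.75) + 1e-12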
- self.cols_added = [ - "fermatSpiralDitherPerNightRa", - "fermatSpiralDitherPerNightDec", - ] - # Values required for framework operation: this specifies the data columns required from the database. - self.cols_req.append(self.night_col) - - def _run(self, sim_data, cols_present=False): - # Generate the spiral offset vertices. - self._generate_fermat_spiral_offsets() - - vertex_id = 0 - nights = np.unique(sim_data[self.night_col]) - for n in nights: - match = np.where(sim_data[self.night_col] == n)[0] - vertex_id = vertex_id % self.num_points - - sim_data["fermatSpiralDitherPerNightRa"][match] = sim_data[self.ra_col][match] + self.x_off[ - vertex_id - ] / np.cos(sim_data[self.dec_col][match]) - sim_data["fermatSpiralDitherPerNightDec"][match] = ( - sim_data[self.dec_col][match] + self.y_off[vertex_id] - ) - vertex_id += 1 - - # Wrap into expected range. - ( - sim_data["fermatSpiralDitherPerNightRa"], - sim_data["fermatSpiralDitherPerNightDec"], - ) = wrap_ra_dec( - sim_data["fermatSpiralDitherPerNightRa"], - sim_data["fermatSpiralDitherPerNightDec"], - ) - return sim_data - - -class PentagonDitherFieldPerSeasonStacker(BaseStacker): - """ - Offset along two pentagons, one inverted and inside the other. - Sequential offset for each field on a visit in new season. - - Parameters - ----------- - ra_col: str - name of the RA column in the data. Default: 'fieldRA'. - dec_col : str - name of the Dec column in the data. Default: 'fieldDec'. - field_id_col : str - name of the fieldID column in the data. Default: 'fieldID'. - exp_mjd_col : str - name of the date/time stamp column in the data. Default: 'expMJD'. - max_dither: float - radius of the maximum dither offset, in degrees. Default: 1.75 - wrap_last_season: `bool` - set to False to all consider 11 seasons independently. - set to True to wrap 0th and 10th season, leading to a total of 10 seasons. - Default: True - """ - - def __init__( - self, - ra_col="fieldRA", - dec_col="fieldDec", - field_id_col="fieldID", - exp_mjd_col="expMJD", - max_dither=1.75, - wrap_last_season=True, - ): - self.ra_col = ra_col - self.dec_col = dec_col - self.field_id_col = field_id_col - self.exp_mjd_col = exp_mjd_col - # Convert max_dither from degrees (internal units for ra/dec are radians) - self.max_dither = np.radians(max_dither) - self.wrap_last_season = wrap_last_season - # self.units used for plot labels - self.units = ["rad", "rad"] - # Values required for framework operation: this specifies the names of the new columns. - self.cols_added = [ - "pentagonDitherFieldPerSeasonRa", - "pentagonDitherFieldPerSeasonDec", - ] - # Values required for framework operation: this specifies the data columns required from the database. - self.cols_req = [self.ra_col, self.dec_col, self.field_id_col, self.exp_mjd_col] - - def _generate_pentagon_offsets(self): - # inner pentagon tuples - nside = 5 - inner = polygon_coords(nside, self.max_dither / 2.5, 0.0) - # outer pentagon tuples - outer_temp = polygon_coords(nside, self.max_dither / 1.3, np.pi) - # reorder outer tuples' order - outer = [] - outer[0:3] = outer_temp[2:5] - outer[4:6] = outer_temp[0:2] - # join inner and outer coordiantes' array - self.x_off = np.concatenate((zip(*inner)[0], zip(*outer)[0]), axis=0) - self.y_off = np.concatenate((zip(*inner)[1], zip(*outer)[1]), axis=0) - - def _run(self, sim_data, cols_present=False): - # find the seasons associated with each visit. 
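The pentagon helper above splits the polygon_coords output with zip(...)[0], which only works on Python 2. A hedged, Python 3 safe sketch of the same construction is below; regular_polygon is a stand-in for the package's polygon_coords helper (assumed to return a list of (x, y) vertex tuples), and the radii follow the deleted code.

import numpy as np

def regular_polygon(nside, radius, start_angle=0.0):
    # Stand-in for polygon_coords: vertices of a regular nside-gon
    ang = start_angle + 2.0 * np.pi * np.arange(nside) / nside
    return list(zip(radius * np.cos(ang), radius * np.sin(ang)))

max_dither = np.radians(1.75)
inner = regular_polygon(5, max_dither / 2.5, 0.0)      # inner pentagon
outer = regular_polygon(5, max_dither / 1.3, np.pi)    # outer, inverted pentagon
# np.array(vertices)[:, 0] replaces the Python 2 zip(*vertices)[0] pattern
x_off = np.concatenate((np.array(inner)[:, 0], np.array(outer)[:, 0]))
y_off = np.concatenate((np.array(inner)[:, 1], np.array(outer)[:, 1]))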
- seasons = calc_season(sim_data[self.ra_col], simdata[self.exp_mjd_col]) - # check how many entries in the >10 season - ind = np.where(seasons > 9)[0] - # should be only 1 extra seasons .. - if len(np.unique(seasons[ind])) > 1: - raise ValueError("Too many seasons (more than 11). Check SeasonStacker.") - - if self.wrap_last_season: - print("Seasons to wrap ", np.unique(seasons[ind])) - # wrap the season around: 10th == 0th - seasons[ind] = seasons[ind] % 10 - - # Generate the spiral offset vertices. - self._generate_pentagon_offsets() - - # Now apply to observations. - for fieldid in np.unique(sim_data[self.field_id_col]): - match = np.where(sim_data[self.field_id_col] == fieldid)[0] - seasons_visited = seasons[match] - # Apply sequential dithers, increasing with each season. - vertex_idxs = np.searchsorted(np.unique(seasons_visited), seasons_visited) - vertex_idxs = vertex_idxs % len(self.x_off) - sim_data["pentagonDitherFieldPerSeasonRa"][match] = sim_data[self.ra_col][match] + self.x_off[ - vertex_idxs - ] / np.cos(sim_data[self.dec_col][match]) - sim_data["pentagonDitherFieldPerSeasonDec"][match] = ( - sim_data[self.dec_col][match] + self.y_off[vertex_idxs] - ) - # Wrap into expected range. - ( - sim_data["pentagonDitherFieldPerSeasonRa"], - sim_data["pentagonDitherFieldPerSeasonDec"], - ) = wrap_ra_dec( - sim_data["pentagonDitherFieldPerSeasonRa"], - sim_data["pentagonDitherFieldPerSeasonDec"], - ) - return sim_data - - -class PentagonDiamondDitherFieldPerSeasonStacker(BaseStacker): - """ - Offset along a diamond circumscribed by a pentagon. - Sequential offset for each field on a visit in new season. - - Parameters - ------------------- - ra_col: str - name of the RA column in the data. Default: 'fieldRA'. - dec_col : str - name of the Dec column in the data. Default: 'fieldDec'. - field_id_col : str - name of the fieldID column in the data. Default: 'fieldID'. - exp_mjd_col : str - name of the date/time stamp column in the data. Default: 'expMJD'. - max_dither: float - radius of the maximum dither offset, in degrees. Default: 1.75 - wrap_last_season: `bool` - set to False to all consider 11 seasons independently. - set to True to wrap 0th and 10th season, leading to a total of 10 seasons. - Default: True - """ - - def __init__( - self, - ra_col="fieldRA", - dec_col="fieldDec", - field_id_col="fieldID", - exp_mjd_col="expMJD", - max_dither=1.75, - wrap_last_season=True, - ): - self.ra_col = ra_col - self.dec_col = dec_col - self.field_id_col = field_id_col - self.exp_mjd_col = exp_mjd_col - # Convert max_dither from degrees (internal units for ra/dec are radians) - self.max_dither = np.radians(max_dither) - self.wrap_last_season = wrap_last_season - # self.units used for plot labels - self.units = ["rad", "rad"] - # Values required for framework operation: this specifies the names of the new columns. - self.cols_added = [ - "pentagonDiamondDitherFieldPerSeasonRa", - "pentagonDiamondDitherFieldPerSeasonDec", - ] - # Values required for framework operation: this specifies the data columns required from the database. 
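The searchsorted pattern used just above (and in the per-night Fermat stackers) maps each visit's season or night onto a sequential offset index. A tiny worked example with made-up labels:

import numpy as np

seasons_visited = np.array([3, 3, 7, 12, 7, 3])    # seasons of one field's visits
vertex_idxs = np.searchsorted(np.unique(seasons_visited), seasons_visited)
print(vertex_idxs)   # [0 0 1 2 1 0]: first season seen -> offset 0, next new season -> 1, ...
vertex_idxs = vertex_idxs % 10                     # wrap around a 10-vertex offset set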
- self.cols_req = [self.ra_col, self.dec_col, self.field_id_col, self.exp_mjd_col] - - def _generate_offsets(self): - # outer pentagon tuples - pent_coord = polygon_coords(5, self.max_dither / 1.3, 0) - # inner diamond tuples - diamond_coord = polygon_coords(4, self.max_dither / 2.5, np.pi / 2) - - # join inner and outer coordiantes' array + a point in the middle (origin) - self.x_off = np.concatenate(([0], zip(*diamond_coord)[0], zip(*pent_coord)[0]), axis=0) - self.y_off = np.concatenate(([0], zip(*diamond_coord)[1], zip(*pent_coord)[1]), axis=0) - - def _run(self, sim_data, cols_present=False): - # find the seasons associated with each visit. - seasons = calc_season(sim_data[self.ra_col], sim_data[self.exp_mjd_col]) - - # check how many entries in the >10 season - ind = np.where(seasons > 9)[0] - # should be only 1 extra seasons .. - if len(np.unique(seasons[ind])) > 1: - raise ValueError("Too many seasons (more than 11). Check SeasonStacker.") - - if self.wrap_last_season: - print("Seasons to wrap ", np.unique(seasons[ind])) - # wrap the season around: 10th == 0th - seasons[ind] = seasons[ind] % 10 - - # Generate the spiral offset vertices. - self._generate_offsets() - - # Now apply to observations. - for fieldid in np.unique(sim_data[self.field_id_col]): - match = np.where(sim_data[self.field_id_col] == fieldid)[0] - seasons_visited = seasons[match] - # Apply sequential dithers, increasing with each season. - vertex_idxs = np.searchsorted(np.unique(seasons_visited), seasons_visited) - vertex_idxs = vertex_idxs % len(self.x_off) - sim_data["pentagonDiamondDitherFieldPerSeasonRa"][match] = sim_data[self.ra_col][ - match - ] + self.x_off[vertex_idxs] / np.cos(sim_data[self.dec_col][match]) - sim_data["pentagonDiamondDitherFieldPerSeasonDec"][match] = ( - sim_data[self.dec_col][match] + self.y_off[vertex_idxs] - ) - # Wrap into expected range. - ( - sim_data["pentagonDiamondDitherFieldPerSeasonRa"], - sim_data["pentagonDiamondDitherFieldPerSeasonDec"], - ) = wrap_ra_dec( - sim_data["pentagonDiamondDitherFieldPerSeasonRa"], - sim_data["pentagonDiamondDitherFieldPerSeasonDec"], - ) - return sim_data - - -class PentagonDitherPerSeasonStacker(PentagonDitherFieldPerSeasonStacker): - """ - Offset along two pentagons, one inverted and inside the other. - Sequential offset for all fields every season. - - Parameters - ------------------- - ra_col: str - name of the RA column in the data. Default: 'fieldRA'. - dec_col : str - name of the Dec column in the data. Default: 'fieldDec'. - field_id_col : str - name of the fieldID column in the data. Default: 'fieldID'. - exp_mjd_col : str - name of the date/time stamp column in the data. Default: 'expMJD'. - max_dither: float - radius of the maximum dither offset, in degrees. Default: 1.75 - wrap_last_season: `bool` - set to False to all consider 11 seasons independently. - set to True to wrap 0th and 10th season, leading to a total of 10 seasons. - Default: True - """ - - def __init__( - self, - ra_col="fieldRA", - dec_col="fieldDec", - field_id_col="fieldID", - exp_mjd_col="expMJD", - night_col="night", - max_dither=1.75, - wrap_last_season=True, - ): - super(PentagonDitherPerSeasonStacker, self).__init__( - ra_col=ra_col, - dec_col=dec_col, - field_id_col=field_id_col, - exp_mjd_col=exp_mjd_col, - max_dither=max_dither, - wrap_last_season=wrap_last_season, - ) - # Values required for framework operation: this specifies the names of the new columns. 
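Every _run above applies its (x, y) offset the same way: the RA shift is stretched by 1/cos(dec) so the on-sky displacement stays roughly constant, and the result is wrapped back into range. A minimal sketch, using a deliberately simplified stand-in for the rubin_sim wrap_ra_dec helper (it only wraps RA and clamps dec, and ignores offsets that push a pointing over a pole):

import numpy as np

def wrap_ra_dec_simple(ra, dec):
    # Simplified stand-in: the real helper also handles dec pushed past a pole
    return ra % (2.0 * np.pi), np.clip(dec, -np.pi / 2.0, np.pi / 2.0)

ra = np.radians(np.array([0.3, 359.9]))
dec = np.radians(np.array([-30.0, -60.0]))
x_off, y_off = np.radians(0.5), np.radians(0.2)
new_ra, new_dec = wrap_ra_dec_simple(ra + x_off / np.cos(dec), dec + y_off)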
- self.cols_added = ["pentagonDitherPerSeasonRa", "pentagonDitherPerSeasonDec"] - - def _run(self, sim_data, cols_present=False): - # find the seasons associated with each visit. - seasons = calc_season(sim_data[self.ra_col], sim_data[self.exp_mjd_col]) - years = sim_data[self.nightCol] % 365.25 - - # check how many entries in the >10 season - ind = np.where(seasons > 9)[0] - # should be only 1 extra seasons .. - if len(np.unique(seasons[ind])) > 1: - raise ValueError("Too many seasons (more than 11). Check SeasonStacker.") - - if self.wrap_last_season: - # check how many entries in the >10 season - print( - "Seasons to wrap ", - np.unique(seasons[ind]), - "with total entries: ", - len(seasons[ind]), - ) - seasons[ind] = seasons[ind] % 10 - - # Generate the spiral offset vertices. - self._generate_pentagon_offsets() - # print details - print("Total visits for all fields:", len(seasons)) - print("") - - # Add to RA and dec values. - vertex_id = 0 - for s in np.unique(seasons): - match = np.where(seasons == s)[0] - # print details - print("season", s) - print( - "numEntries ", - len(match), - "; ", - float(len(match)) / len(seasons) * 100, - "% of total", - ) - match_years = np.unique(years[match]) - print("Corresponding years: ", match_years) - for i in match_years: - print(" Entries in year", i, ": ", len(np.where(i == years[match])[0])) - print("") - vertex_id = vertex_id % len(self.x_off) - sim_data["pentagonDitherPerSeasonRa"][match] = sim_data[self.ra_col][match] + self.x_off[ - vertex_id - ] / np.cos(sim_data[self.dec_col][match]) - sim_data["pentagonDitherPerSeasonDec"][match] = ( - sim_data[self.dec_col][match] + self.y_off[vertex_id] - ) - vertex_id += 1 - - # Wrap into expected range. - ( - sim_data["pentagonDitherPerSeasonRa"], - sim_data["pentagonDitherPerSeasonDec"], - ) = wrap_ra_dec( - sim_data["pentagonDitherPerSeasonRa"], - sim_data["pentagonDitherPerSeasonDec"], - ) - return sim_data - - -class PentagonDiamondDitherPerSeasonStacker(PentagonDiamondDitherFieldPerSeasonStacker): - """ - Offset along a diamond circumscribed by a pentagon. - Sequential offset for all fields every season. - - Parameters - ------------------- - ra_col: str - name of the RA column in the data. Default: 'fieldRA'. - dec_col : str - name of the Dec column in the data. Default: 'fieldDec'. - field_id_col : str - name of the fieldID column in the data. Default: 'fieldID'. - exp_mjd_col : str - name of the date/time stamp column in the data. Default: 'expMJD'. - max_dither: float - radius of the maximum dither offset, in degrees. Default: 1.75 - wrap_last_season: `bool` - set to False to all consider 11 seasons independently. - set to True to wrap 0th and 10th season, leading to a total of 10 seasons. - Default: True - """ - - def __init__( - self, - ra_col="fieldRA", - dec_col="fieldDec", - field_id_col="fieldID", - exp_mjd_col="expMJD", - max_dither=1.75, - wrap_last_season=True, - ): - super(PentagonDiamondDitherPerSeasonStacker, self).__init__( - ra_col=ra_col, - dec_col=dec_col, - field_id_col=field_id_col, - exp_mjd_col=exp_mjd_col, - max_dither=max_dither, - wrap_last_season=wrap_last_season, - ) - # Values required for framework operation: this specifies the names of the new columns. - self.cols_added = [ - "pentagonDiamondDitherPerSeasonRa", - "pentagonDiamondDitherPerSeasonDec", - ] - - def _run(self, sim_data, cols_present=False): - # find the seasons associated with each visit. 
- seasons = calc_season(sim_data[self.ra_col], sim_data[self.exp_mjd_col]) - - # check how many entries in the >10 season - ind = np.where(seasons > 9)[0] - # should be only 1 extra seasons .. - if len(np.unique(seasons[ind])) > 1: - raise ValueError("Too many seasons (more than 11). Check SeasonStacker.") - - if self.wrap_last_season: - print("Seasons to wrap ", np.unique(seasons[ind])) - # wrap the season around: 10th == 0th - seasons[ind] = seasons[ind] % 10 - - # Generate the spiral offset vertices. - self._generate_offsets() - - uniq_seasons = np.unique(seasons) - # Add to RA and dec values. - vertex_id = 0 - for s in uniq_seasons: - match = np.where(seasons == s)[0] - vertex_id = vertex_id % len(self.x_off) - sim_data["pentagonDiamondDitherPerSeasonRa"][match] = sim_data[self.ra_col][match] + self.x_off[ - vertex_id - ] / np.cos(sim_data[self.dec_col][match]) - sim_data["pentagonDiamondDitherPerSeasonDec"][match] = ( - sim_data[self.dec_col][match] + self.y_off[vertex_id] - ) - vertex_id += 1 - - # Wrap into expected range. - ( - sim_data["pentagonDiamondDitherPerSeasonRa"], - sim_data["pentagonDiamondDitherPerSeasonDec"], - ) = wrap_ra_dec( - sim_data["pentagonDiamondDitherPerSeasonRa"], - sim_data["pentagonDiamondDitherPerSeasonDec"], - ) - return sim_data - - -class SpiralDitherPerSeasonStacker(SpiralDitherFieldPerVisitStacker): - """ - Offsets along a 10pt spiral. Sequential offset for all fields every seaso along a 10pt spiral. - - Parameters - ------------------- - raCol: str - name of the RA column in the data. Default: 'fieldRA'. - decCol : str - name of the Dec column in the data. Default: 'fieldDec'. - fieldIdCol : str - name of the fieldID column in the data. Default: 'fieldID'. - exp_mjd_col : str - name of the date/time stamp column in the data. Default: 'expMJD'. - maxDither: float - radius of the maximum dither offset, in degrees. Default: 1.75 - wrap_last_season: `bool` - set to False to all consider 11 seasons independently. - set to True to wrap 0th and 10th season, leading to a total of 10 seasons. - Default: True - numPoints: int: number of points in the spiral. Default: 10 - nCoils: int: number of coils the spiral. Default: 3 - """ - - def __init__( - self, - ra_col="fieldRA", - dec_col="fieldDec", - field_id_col="fieldID", - exp_mjd_col="expMJD", - max_dither=1.75, - wrap_last_season=True, - num_points=10, - n_coils=3, - ): - super(SpiralDitherPerSeasonStacker, self).__init__( - ra_col=ra_col, - dec_col=dec_col, - field_id_col=field_id_col, - n_coils=n_coils, - num_points=num_points, - max_dither=max_dither, - ) - self.exp_mjd_col = exp_mjd_col - self.wrap_last_season = wrap_last_season - # Values required for framework operation: this specifies the names of the new columns. - self.cols_added = ["spiralDitherPerSeasonRa", "spiralDitherPerSeasonDec"] - self.cols_req.append(self.exp_mjd_col) - - def _run(self, sim_data, cols_present=False): - # find the seasons associated with each visit. - seasons = calc_season(sim_data[self.raCol], sim_data[self.exp_mjd_col]) - - # check how many entries in the >10 season - ind = np.where(seasons > 9)[0] - # should be only 1 extra seasons .. - if len(np.unique(seasons[ind])) > 1: - raise ValueError("Too many seasons (more than 11). Check SeasonStacker.") - - if self.wrap_last_season: - print("Seasons to wrap ", np.unique(seasons[ind])) - # wrap the season around: 10th == 0th - seasons[ind] = seasons[ind] % 10 - - # Generate the spiral offset vertices. - self._generateSpiralOffsets() - - # Add to RA and dec values. 
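The per-season stackers above then hand out one offset per season for all fields, cycling through the vertex list. A compact sketch of that assignment loop, with illustrative labels and offsets:

import numpy as np

seasons = np.array([0, 0, 1, 1, 2, 2, 2, 3])   # season label per visit
x_off = np.radians([0.0, 0.5, -0.5])           # available RA offsets
ra_offset = np.zeros(seasons.size)
vertex_id = 0
for s in np.unique(seasons):
    match = np.where(seasons == s)[0]
    ra_offset[match] = x_off[vertex_id % len(x_off)]
    vertex_id += 1
# Season 3 wraps back to the first vertex because only three offsets exist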
- vertex_id = 0 - for s in np.unique(seasons): - match = np.where(seasons == s)[0] - vertex_id = vertex_id % self.numPoints - sim_data["spiralDitherPerSeasonRa"][match] = sim_data[self.raCol][match] + self.xOff[ - vertex_id - ] / np.cos(sim_data[self.decCol][match]) - sim_data["spiralDitherPerSeasonDec"][match] = sim_data[self.decCol][match] + self.yOff[vertex_id] - vertex_id += 1 - - # Wrap into expected range. - ( - sim_data["spiralDitherPerSeasonRa"], - sim_data["spiralDitherPerSeasonDec"], - ) = wrap_ra_dec(sim_data["spiralDitherPerSeasonRa"], sim_data["spiralDitherPerSeasonDec"]) - return sim_data diff --git a/rubin_sim/maf/maf_contrib/lss_obs_strategy/num_obs_metric.py b/rubin_sim/maf/maf_contrib/lss_obs_strategy/num_obs_metric.py deleted file mode 100644 index 101ac0048..000000000 --- a/rubin_sim/maf/maf_contrib/lss_obs_strategy/num_obs_metric.py +++ /dev/null @@ -1,30 +0,0 @@ -##################################################################################################### -# Purpose: Calculate the number of observations in a given data_slice. - -# Humna Awan: humna.awan@rutgers.edu -# Last updated: 06/10/16 -##################################################################################################### -__all__ = ("NumObsMetric",) - -from rubin_sim.maf.metrics import BaseMetric - - -class NumObsMetric(BaseMetric): - """Calculate the number of observations per data slice. - e.g. HealPix pixel when using HealPix slicer. - - Parameters - ----------- - night_col : `str` - Name of the night column in the data; basically just need it to - acccess the data for each visit. Default: 'night'. - nside : `int` - HEALpix resolution parameter. Default: 128 - """ - - def __init__(self, night_col="night", nside=128, metric_name="NumObsMetric", **kwargs): - self.night_col = night_col - super(NumObsMetric, self).__init__(col=self.night_col, metric_name=metric_name, **kwargs) - - def run(self, data_slice, slice_point=None): - return len(data_slice) diff --git a/rubin_sim/maf/maf_contrib/lss_obs_strategy/readme.md b/rubin_sim/maf/maf_contrib/lss_obs_strategy/readme.md index 8e8d260a2..f524aca27 100644 --- a/rubin_sim/maf/maf_contrib/lss_obs_strategy/readme.md +++ b/rubin_sim/maf/maf_contrib/lss_obs_strategy/readme.md @@ -15,7 +15,7 @@ Options 1-4 were implemented for both [Awan+2016](https://arxiv.org/abs/1605.005 ---- Helper modules include -- `newDitherStackers.py` which contains the dither stackers (only those that are not already incorporated in [sims_maf](https://github.com/lsst/sims_maf/blob/master/python/lsst/sims/maf/stackers/ditherStackers.py). +- `newDitherStackers.py` which contains the dither stackers (only those that are not already incorporated in [sims_maf](https://github.com/lsst/sims_maf/blob/master/python/lsst/sims/maf/stackers/ditherStackers.py). DEPRECATED. - `plotBundleMaps.py` which contains the function for plotting skymaps, power spectra, and cartview plots. -- `saveBundleData_npzFormat.py` which contains the function for saving data as npz files. +- `saveBundleData_npzFormat.py` which contains the function for saving data as npz files. DEPRECATED. - `constantsForPipeline.py` which contains variables used in the code, including plot colors and power law constants for the galaxy luminosity functions for different redshift bins (based on mock catalogs from Padilla+). 
\ No newline at end of file diff --git a/rubin_sim/maf/maf_contrib/lss_obs_strategy/save_bundle_data_npz_format.py b/rubin_sim/maf/maf_contrib/lss_obs_strategy/save_bundle_data_npz_format.py deleted file mode 100644 index 73af0bd9a..000000000 --- a/rubin_sim/maf/maf_contrib/lss_obs_strategy/save_bundle_data_npz_format.py +++ /dev/null @@ -1,40 +0,0 @@ -##################################################################################################### -# Purpose: save data from a metricBundle object as .npz files. - -# Humna Awan: humna.awan@rutgers.edu -##################################################################################################### - -__all__ = ("save_bundle_data_npz_format",) - - -def save_bundle_data_npz_format(path, bundle, base_filename, filter_band): - """ - Save data in the metricBundle. For each key, a new .npz file is created to - save the content of the metricBundle object. - - Parameters - ---------- - path : str - path to the directory where the files should be saved - bundle : metricBundle - object whose contents are to be saved. - base_filename : str - basic filename wanted. Final filename would be - __.npz - filter_band : str - filter of the data in the bundle, e.g. 'r' - - """ - # run over keys in the bundle and save the data - for dither in bundle: - outfile = "%s_%s_%s.npz" % (base_filename, filter_band, dither) - bundle[dither].slicer.write_data( - "%s/%s" % (path, outfile), - bundle[dither].metricValues, - metric_name=bundle[dither].metric.name, - simDataName=bundle[dither].run_name, - constraint=bundle[dither].constraint, - metadata=bundle[dither].metadata, - display_dict=bundle[dither].displayDict, - plot_dict=bundle[dither].plotDict, - ) diff --git a/rubin_sim/maf/maf_contrib/microlensing_metric.py b/rubin_sim/maf/maf_contrib/microlensing_metric.py index 1755eb771..4bda276c5 100644 --- a/rubin_sim/maf/maf_contrib/microlensing_metric.py +++ b/rubin_sim/maf/maf_contrib/microlensing_metric.py @@ -27,15 +27,15 @@ def microlensing_amplification(t, impact_parameter=1, crossing_time=1825.0, peak Parameters ---------- - t : float + t : `float` The time of observation (days) - impact_parameter : float (1) + impact_parameter : `float` The impact paramter (0 means big amplification) - crossing_time : float (1825) + crossing_time : `float` Einstein crossing time (days) - peak_time : float (100) + peak_time : `float` The peak time (days) - blending_factor: float (1) + blending_factor: `float` The blending factor where 1 is unblended """ @@ -48,18 +48,17 @@ def microlensing_amplification(t, impact_parameter=1, crossing_time=1825.0, peak def microlensing_amplification_fsfb(t, impact_parameter=1, crossing_time=1825.0, peak_time=100, fs=20, fb=20): - """The microlensing amplification - in terms of source flux and blend flux + """The microlensing amplification in terms of source flux and blend flux Parameters ---------- - t : float + t : `float` The time of observation (days) - impact_parameter : float (1) + impact_parameter : `float` The impact paramter (0 means big amplification) - crossing_time : float (1825) + crossing_time : `float` Einstein crossing time (days) - peak_time : float (100) + peak_time : `float` The peak time (days) """ @@ -80,9 +79,9 @@ def info_peak_before_t0(impact_parameter=1, crossing_time=100.0): Parameters ---------- - impact_parameter : float (1) + impact_parameter : `float` The impact paramter (0 means big amplification) - crossing_time : float (1825) + crossing_time : `float` Einstein crossing time (days) """ @@ -103,18 +102,18 @@ 
def coefficients_pspl(t, t0, te, u0, fs, fb): Parameters ---------- - t : float + t : `float` The time of observation (days usually as JD or HJD) - u0 : float + u0 : `float` The impact parameter (0 means high magnification) - te : float + te : `float` Einstein crossing time (days) - t0 : float + t0 : `float` Time of peak (days usually as JD or HJD) - fs : float + fs : `float` Source flux (counts/s but here fs = 10.**(-0.4*mag_source) in the respective passband) - fb : float + fb : `float` Blend flux (counts/s but here fs = 10.**(-0.4*mag_blend) in the respective passband) """ @@ -150,18 +149,18 @@ def coefficients_fsfb(t, t0, te, u0, fs, fb): Parameters ---------- - t : float + t : `float` The time of observation (days usually as JD or HJD) - u0 : float + u0 : `float` The impact parameter (0 means high magnification) - te : float + te : `float` Einstein crossing time (days) - t0 : float + t0 : `float` Time of peak (days usually as JD or HJD) - fs : float + fs : `float` Source flux (counts/s but here fs = 10.**(-0.4*mag_source) in the respective passband) - fb : float + fb : `float` Blend flux (counts/s but here fs = 10.**(-0.4*mag_blend) in the respective passband) """ @@ -172,34 +171,35 @@ def coefficients_fsfb(t, t0, te, u0, fs, fb): def fisher_matrix(t, t0, te, u0, fs, fb, snr, filters="ugriz", filter_name="i"): - """The Fisher (information) matrix relying on first derivatives wrt t0,te,u0,fs,fb - relying on with cse optimized coefficents. This implementation for - the non-linear magnification is subject to the respective underlying - assumptions, i.e. relatively small uncertainties. NB the Fisher matrix - is using first derivatives, alternatively one could consider second derivatives - the resulting covariance matrix as inverse of the Fisher matrix is a - Cramer Rao lower bound of the respective uncertainty. The function returns - the full information matrix for all t. + """The Fisher (information) matrix relying on first derivatives + wrt t0,te,u0,fs,fb, relying on with cse optimized coefficents. + This implementation for the non-linear magnification is subject to the + respective underlying assumptions, i.e. relatively small uncertainties. + NB the Fisher matrix is using first derivatives, alternatively one + could consider second derivatives the resulting covariance matrix as + inverse of the Fisher matrix is a Cramer Rao lower bound of the + respective uncertainty. + The function returns the full information matrix for all t. 
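For reference, the standard point-source point-lens amplification that microlensing_amplification documents above can be written in a few lines. This is only a sketch with illustrative parameter values, not the metric's own implementation (which also supports blending).

import numpy as np

def pspl_amplification(t, peak_time=100.0, crossing_time=1825.0, impact_parameter=1.0):
    # u^2 = u0^2 + ((t - t0) / tE)^2 ;  A = (u^2 + 2) / (u * sqrt(u^2 + 4))
    usqr = impact_parameter**2 + ((t - peak_time) / crossing_time) ** 2
    return (usqr + 2.0) / np.sqrt(usqr * (usqr + 4.0))

t = np.linspace(0.0, 3652.0, 1000)
amp = pspl_amplification(t)
# Amplification peaks at t = peak_time and falls back toward 1 far from the peak
assert np.argmax(amp) == np.argmin(np.abs(t - 100.0))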
via Markus Hundertmark markus.hundertmark@uni-heidelberg.de Parameters ---------- - t : float + t : `float` The time of observation (days usually as JD or HJD) - u0 : float + u0 : `float` The impact parameter (0 means high magnification) - te : float + te : `float` Einstein crossing time (days) - t0 : float + t0 : `float` Time of peak (days usually as JD or HJD) - fs : float + fs : `float` Source flux (counts/s but here fs = 10.**(-0.4*mag_source) in the respective passband) - fb : float + fb : `float` Blend flux (counts/s but here fs = 10.**(-0.4*mag_blend) in the respective passband) - snr : float + snr : `float` Signal to noise ratio in order to infer the flux uncertainty """ # indices are 0:te, 1:u0, 2:t0 @@ -228,7 +228,8 @@ def fisher_matrix(t, t0, te, u0, fs, fb, snr, filters="ugriz", filter_name="i"): # microlenisng_amplification function to replace usqr = (t_value - t0) ** 2 / te**2 + u0**2 mu = (usqr + 2.0) / (usqr * (usqr + 4)) ** 0.5 - flux_err = (fs * mu + fb) / snr[i] # may need to check shape of snr and t + # may need to check shape of snr and t + flux_err = (fs * mu + fb) / snr[i] matrix = matrix / (flux_err**2) fis_mat = fis_mat + matrix @@ -238,30 +239,28 @@ def fisher_matrix(t, t0, te, u0, fs, fb, snr, filters="ugriz", filter_name="i"): class MicrolensingMetric(metrics.BaseMetric): """ Quantifies detectability of Microlensing events. - Can also return the number of datapoints within two crossing times of the peak of event. + Can also return the number of datapoints within two crossing times + of the peak of event. Parameters ---------- - metric_calc: str - Type of metric. If metric_calc == 'detect', returns the number of microlensing events - detected within certain parameters. If metric_calc == 'Npts', returns the number of points + metric_calc : `str` + Type of metric. + If metric_calc == 'detect', returns the number of microlensing events + detected within certain parameters. + If metric_calc == 'Npts', returns the number of points within two crossing times of the peak of the vent. - Default is 'detect' - - pts_needed : int - Number of an object's lightcurve points required to be above the 5-sigma limiting depth - before it is considered detected. - - time_before_peak: int or str + pts_needed : `int` + Number of an object's lightcurve points required to be above the + 5-sigma limiting depth before it is considered detected. + time_before_peak: `int` or `str` Number of days before lightcurve peak to qualify event as triggered. - If time_before_peak == 'optimal', the number of days before the lightcurve peak - is the time of maximal information. - Default is 0. - - detect: bool - By default we trigger which only looks at points before the peak of the lightcurve. - When detect = True, observations on either side of the lightcurve are considered. - Default is False. + If time_before_peak == 'optimal', the number of days before the + lightcurve peak is the time of maximal information. + detect: `bool` + By default we trigger which only looks at points before the peak + of the lightcurve. When detect = True, observations on either side + of the lightcurve are considered. 
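The Fisher branch of the metric ultimately just inverts the accumulated matrix and reads off a Cramer-Rao bound, as in the code below. A minimal sketch of that final step, using the 0: tE, 1: u0, 2: t0 ordering noted in fisher_matrix; the matrix values here are invented purely for illustration.

import numpy as np

fis_mat = np.array([[4.0e2, 1.0e1, 0.0],
                    [1.0e1, 9.0e2, 2.0e1],
                    [0.0,   2.0e1, 1.6e3]])
crossing_time = 30.0  # days
try:
    cov = np.linalg.inv(fis_mat)
    # Fractional (Cramer-Rao) uncertainty on the Einstein crossing time
    sigma_te_over_te = np.sqrt(cov[0, 0]) / crossing_time
except np.linalg.LinAlgError:
    sigma_te_over_te = np.inf   # singular matrix: the event is effectively unconstrained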
Notes ----- @@ -270,7 +269,6 @@ class MicrolensingMetric(metrics.BaseMetric): crossing_time : float (days) impact_parameter : float (positive) blending_factors (optional): float (between 0 and 1) - """ def __init__( @@ -323,7 +321,7 @@ def __init__( ) def run(self, data_slice, slice_point=None): - if self.detect == True and self.time_before_peak > 0: + if self.detect is True and self.time_before_peak > 0: raise Exception("When detect = True, time_before_peak must be zero") # Generate the lightcurve for this object @@ -331,13 +329,13 @@ def run(self, data_slice, slice_point=None): t = data_slice[self.mjd_col] - np.min(data_slice[self.night_col]) t = t - t.min() - # Try for if a blending factor slice was added if not default to no blending factor - try: - try: - test = slice_point["apparent_m_no_blend_u"] - amplitudes = np.zeros(len(t)) - individual_mags = True - except KeyError: + # + # Try for if a blending factor slice was added + if "apparent_m_no_blend_u" in slice_point: + amplitudes = np.zeros(len(t)) + individual_mags = True + else: + if "blending_factor" in slice_point: amplitudes = microlensing_amplification( t, impact_parameter=slice_point["impact_parameter"], @@ -346,15 +344,14 @@ def run(self, data_slice, slice_point=None): blending_factor=slice_point["blending_factor"], ) individual_mags = False - - except KeyError: - amplitudes = microlensing_amplification( - t, - impact_parameter=slice_point["impact_parameter"], - crossing_time=slice_point["crossing_time"], - peak_time=slice_point["peak_time"], - ) - individual_mags = False + else: + amplitudes = microlensing_amplification( + t, + impact_parameter=slice_point["impact_parameter"], + crossing_time=slice_point["crossing_time"], + peak_time=slice_point["peak_time"], + ) + individual_mags = False filters = np.unique(data_slice[self.filter_col]) amplified_mags = amplitudes * 0 @@ -366,7 +363,7 @@ def run(self, data_slice, slice_point=None): for filtername in filters: infilt = np.where(data_slice[self.filter_col] == filtername)[0] - if individual_mags == True: + if individual_mags is True: fs = slice_point["apparent_m_no_blend_{}".format(filtername)] fb = slice_point["apparent_m_{}".format(filtername)] - fs amplitudes = microlensing_amplification_fsfb( @@ -452,12 +449,14 @@ def run(self, data_slice, slice_point=None): n_post.append(len(outfilt)) elif self.metric_calc == "Fisher": - if individual_mags == True: + if individual_mags is True: fs = slice_point["apparent_m_no_blend_{}".format(filtername)] fb = slice_point["apparent_m_{}".format(filtername)] - fs else: fs = self.mags[filtername] - fb = fs # assuming that in populated areas of the galaxy the blend fraction is 0.5 + # assuming that in populated areas of the galaxy + # the blend fraction is 0.5 + fb = fs fis_mat = fis_mat_wzeros + fisher_matrix( t[infilt], @@ -481,19 +480,20 @@ def run(self, data_slice, slice_point=None): try: cov = np.linalg.inv(fis_mat) sigmat_e_t_e = cov[0, 0] ** 0.5 / slice_point["crossing_time"] - sigmau0_u0 = cov[1, 1] ** 0.5 / slice_point["impact_parameter"] - corr_btwn_t_eu0 = cov[0, 1] / ( - slice_point["crossing_time"] * slice_point["impact_parameter"] - ) - except: + # sigmau0_u0 = cov[1, 1] ** 0.5 + # / slice_point["impact_parameter"] + # corr_btwn_t_eu0 = cov[0, 1] / + # (slice_point["crossing_time"] + # * slice_point["impact_parameter"]) + except np.linalg.LinAlgError: sigmat_e_t_e = np.inf - sigmau0_u0 = np.inf - corr_btwn_t_eu0 = np.inf + # sigmau0_u0 = np.inf + # corr_btwn_t_eu0 = np.inf npts = np.sum(n_pre) npts_post = np.sum(n_post) if 
self.metric_calc == "detect": - if self.detect == True: + if self.detect is True: if npts >= self.pts_needed and npts_post >= self.pts_needed: return 1 else: @@ -522,34 +522,39 @@ def generate_microlensing_slicer( nside=128, filtername="r", ): - """ - Generate a UserPointSlicer with a population of microlensing events. To be used with - MicrolensingMetric + """Generate a UserPointSlicer with a population of microlensing events. + To be used with MicrolensingMetric Parameters ---------- - min_crossing_time : float (1) + min_crossing_time : `float` The minimum crossing time for the events generated (days) - max_crossing_time : float (10) + max_crossing_time : `float` The max crossing time for the events generated (days) - t_start : float (1) + t_start : `float` The night to start generating peaks (days) - t_end : float (3652) + t_end : `float` The night to end generating peaks (days) - n_events : int (10000) + n_events : `int` Number of microlensing events to generate - seed : float (42) + seed : `float` Random number seed - nside : int (128) + nside : `int` HEALpix nside, used to pick which stellar density map to load - filtername : str ('r') + filtername : `str` The filter to use for the stellar density map - """ - np.random.seed(seed) - crossing_times = np.random.uniform(low=min_crossing_time, high=max_crossing_time, size=n_events) - peak_times = np.random.uniform(low=t_start, high=t_end, size=n_events) - impact_paramters = np.random.uniform(low=0, high=1, size=n_events) + Returns + ------- + slicer : `maf.UserPointsSlicer` + A slicer populated by microlensing events + (each slice_point is a different event) + """ + # Seed the random number generator and generate random parameters + rng = np.random.default_rng(seed) + crossing_times = rng.uniform(low=min_crossing_time, high=max_crossing_time, size=n_events) + peak_times = rng.uniform(low=t_start, high=t_end, size=n_events) + impact_paramters = rng.uniform(low=0, high=1, size=n_events) map_dir = os.path.join(get_data_dir(), "maps", "TriMaps") data = np.load(os.path.join(map_dir, "TRIstarDensity_%s_nside_%i.npz" % (filtername, nside))) @@ -562,14 +567,15 @@ def generate_microlensing_slicer( bin_indx = np.where(bins[1:] >= star_mag)[0].min() density_used = star_density[:, bin_indx].ravel() order = np.argsort(density_used) - # I think the model might have a few outliers at the extreme, let's truncate it a bit + # I think the model might have a few outliers at the extreme, + # let's truncate it a bit density_used[order[-10:]] = density_used[order[-11]] # now, let's draw N from that distribution squared dist = density_used[order] ** 2 cumm_dist = np.cumsum(dist) cumm_dist = cumm_dist / np.max(cumm_dist) - uniform_draw = np.random.uniform(size=n_events) + uniform_draw = rng.uniform(size=n_events) indexes = np.floor(np.interp(uniform_draw, cumm_dist, np.arange(cumm_dist.size))) hp_ids = order[indexes.astype(int)] gal_l, gal_b = hpid2_ra_dec(nside, hp_ids, nest=True) diff --git a/rubin_sim/maf/maf_contrib/periodic_star_modulation_metric.py b/rubin_sim/maf/maf_contrib/periodic_star_modulation_metric.py index 3a994bc49..34a88fa6c 100644 --- a/rubin_sim/maf/maf_contrib/periodic_star_modulation_metric.py +++ b/rubin_sim/maf/maf_contrib/periodic_star_modulation_metric.py @@ -11,25 +11,25 @@ from .periodic_star_metric import PeriodicStar -""" This metric is based on the PeriodicStar metric +""" This metric is based on the PeriodicStar metric It was modified in a way to reproduce attempts to identify period/ phase modulation (Blazhko effect) - in RR 
Lyrae stars. - We are not implementing a period/ phase modulation in the light curve, but rather use short baselines - (e.g.: 20 days) of observations to test how well we can recover the period, phase and amplitude. We - do this as such an attempt is also useful for other purposes, i.e. if we want to test whether we - can just recover period, phase and amplitude from short baselines at all, without necessarily having + in RR Lyrae stars. + We are not implementing a period/ phase modulation in the light curve, but rather use short baselines + (e.g.: 20 days) of observations to test how well we can recover the period, phase and amplitude. We + do this as such an attempt is also useful for other purposes, i.e. if we want to test whether we + can just recover period, phase and amplitude from short baselines at all, without necessarily having in mind to look for period/ phase modulations. - Like in the PeriodicStar metric, the light curve of an RR Lyrae star, or a periodic star in general, - is approximated as a simple sin wave. Other solutions might make use of light curve templates to + Like in the PeriodicStar metric, the light curve of an RR Lyrae star, or a periodic star in general, + is approximated as a simple sin wave. Other solutions might make use of light curve templates to generate light curves. Two other modifications we introduced for the PeriodicStarModulationMetric are: - In contrast to the PeriodicStar metric, we allow for a random phase offset to mimic observation + In contrast to the PeriodicStar metric, we allow for a random phase offset to mimic observation starting at random phase. - Also, we vary the periods and amplitudes within +/- 10 % to allow for a more realistic + Also, we vary the periods and amplitudes within +/- 10 % to allow for a more realistic sample of variable stars. - + This metric is based on the cadence note: - N. Hernitschek, K. Stassun, LSST Cadence Note: Cadence impacts on reliable classification + N. Hernitschek, K. Stassun, LSST Cadence Note: Cadence impacts on reliable classification of standard-candle variable stars (2021) https://docushare.lsst.org/docushare/dsweb/Get/Document-37673 """ diff --git a/rubin_sim/maf/maf_contrib/phot_prec_metrics.py b/rubin_sim/maf/maf_contrib/phot_prec_metrics.py deleted file mode 100644 index ccb93e64f..000000000 --- a/rubin_sim/maf/maf_contrib/phot_prec_metrics.py +++ /dev/null @@ -1,162 +0,0 @@ -""" -Photometric precision metrics -Authors: Sergey Koposov, Thomas Collett -""" - -__all__ = ("SNMetric", "ThreshSEDSNMetric", "SEDSNMetric") - -import numpy as np - -from rubin_sim.maf.metrics import BaseMetric - -twopi = 2.0 * np.pi - - -class RelRmsMetric(BaseMetric): - """Relative scatter metric (RMS over median).""" - - def run(self, data_slice, slice_point=None): - return np.std(data_slice[self.colname]) / np.median(data_slice[self.colname]) - - -class SNMetric(BaseMetric): - """Calculate the signal to noise metric in a given filter for an object of a given magnitude. 
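The deleted SNMetric below boils down to inverse-variance stacking of per-visit flux errors. A hedged sketch of just that combination step, with illustrative numbers rather than the metric's zeropoint and sky model:

import numpy as np

source_flux = 10 ** (-0.4 * 22.0)                      # arbitrary faint source
err_fluxes = source_flux / np.array([3.0, 5.0, 8.0])   # per-visit flux errors (S/N of 3, 5, 8)
stack_flux_err = 1.0 / np.sqrt((1.0 / err_fluxes**2).sum())
stacked_snr = source_flux / stack_flux_err             # ~9.9, better than any single visit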
- We assume point source aperture photometry and assume that we do - the measurement over the stack - """ - - def __init__( - self, - m5_col="fiveSigmaDepth", - seeing_col="finSeeing", - sky_b_col="filtSkyBrightness", - exp_t_col="visitExpTime", - filter_col="filter", - metric_name="SNMetric", - filter=None, - mag=None, - **kwargs, - ): - super(SNMetric, self).__init__( - col=[m5_col, seeing_col, sky_b_col, exp_t_col, filter_col], metric_name=metric_name, **kwargs - ) - self.filter = filter - self.mag = mag - - def run(self, data_slice, slice_point=None): - # print 'x' - npoints = len(data_slice[self.seeingCol]) - seeing = data_slice[self.seeingCol] - depth5 = data_slice[self.m5Col] - # mag = depth5 - mag = self.mag - - zpt0 = 25.85 - curfilt = self.filter #'r' - zpts = {"u": zpt0, "g": zpt0, "r": zpt0, "i": zpt0, "z": zpt0, "y": zpt0} - - gain = 4.5 - - zpt_arr = np.zeros(npoints) - for filt in "ugrizy": - zpt_arr[data_slice[self.filterCol] == filt] = zpts[filt] - sky_mag_arcsec = data_slice[self.skyBCol] - exptime = data_slice[self.expTCol] - sky_adu = 10 ** (-(sky_mag_arcsec - zpt_arr) / 2.5) * exptime - sky_adu = sky_adu * np.pi * seeing**2 # adu per seeing circle - - source_fluxes = 10 ** (-mag / 2.5) - source_adu = 10 ** (-(mag - zpt_arr) / 2.5) * exptime - err_adu = np.sqrt(source_adu + sky_adu) / np.sqrt(gain) - err_fluxes = err_adu * (source_fluxes / source_adu) - - ind = data_slice[self.filterCol] == curfilt - flux0 = source_fluxes - stack_flux_err = 1.0 / np.sqrt((1 / err_fluxes[ind] ** 2).sum()) - err_mag = 2.5 / np.log(10) * stack_flux_err / flux0 - # return err_mag - return flux0 / stack_flux_err - # return (source_fluxes/err_fluxes).mean() - # 1/0 - # return errMag - # return 1.25 * np.log10(np.sum(10.**(.8*data_slice['fiveSigmaDepth']))) - - -class SEDSNMetric(BaseMetric): - """Computes the S/Ns for a given SED.""" - - def __init__( - self, - m5_col="fiveSigmaDepth", - seeing_col="finSeeing", - sky_b_col="filtSkyBrightness", - exp_t_col="visitExpTime", - filter_col="filter", - metric_name="SEDSNMetric", - # filter=None, - mags=None, - **kwargs, - ): - super(SEDSNMetric, self).__init__( - col=[m5_col, seeing_col, sky_b_col, exp_t_col, filter_col], metric_name=metric_name, **kwargs - ) - self.mags = mags - self.metrics = {} - for curfilt, curmag in mags.items(): - self.metrics[curfilt] = SNMetric(mag=curmag, filter=curfilt) - # self.filter = filter - # self.mag = mag - - def run(self, data_slice, slice_point=None): - res = {} - for curf, curm in self.metrics.items(): - curr = curm.run(data_slice, slice_point=slice_point) - res["sn_" + curf] = curr - return res - - def reduce_sn_g(self, metric_value): - # print 'x',metric_value['sn_g'] - return metric_value["sn_g"] - - def reduce_sn_r(self, metric_value): - # print 'x',metric_value['sn_r'] - return metric_value["sn_r"] - - def reduce_sn_i(self, metric_value): - return metric_value["sn_i"] - - -class ThreshSEDSNMetric(BaseMetric): - """Computes the metric whether the S/N is bigger than the threshold in all the bands for a given SED""" - - def __init__( - self, - m5_col="fiveSigmaDepth", - seeing_col="finSeeing", - sky_b_col="filtSkyBrightness", - exp_t_col="visitExpTime", - filter_col="filter", - metric_name="ThreshSEDSNMetric", - snlim=20, - # filter=None, - mags=None, - **kwargs, - ): - """Instantiate metric.""" - super(ThreshSEDSNMetric, self).__init__( - col=[m5_col, seeing_col, sky_b_col, exp_t_col, filter_col], metric_name=metric_name, **kwargs - ) - self.xmet = SEDSNMetric(mags=mags) - self.snlim = snlim - # self.filter 
= filter - # self.mag = mag - - def run(self, data_slice, slice_point=None): - res = self.xmet.run(data_slice, slice_point=slice_point) - cnt = 0 - for k, v in res.items(): - if v > self.snlim: - cnt += 1 - if cnt > 0: - cnt = 1 - return cnt diff --git a/rubin_sim/maf/maf_contrib/presto_color_kne_pop_metric.py b/rubin_sim/maf/maf_contrib/presto_color_kne_pop_metric.py index 759c55be7..80d258713 100644 --- a/rubin_sim/maf/maf_contrib/presto_color_kne_pop_metric.py +++ b/rubin_sim/maf/maf_contrib/presto_color_kne_pop_metric.py @@ -5,7 +5,6 @@ import warnings from itertools import combinations -import healpy as hp import numpy as np import pandas as pd @@ -35,19 +34,21 @@ def _load_hash( file_extragalactic="TotalCubeNorm_1000Obj.pkl", skyregion="extragalactic", ): - """Helper function to load large hash table without being a metric attribute. - Note, bad things could happen if you try to run different sky regions at the same time - (like, it might thrash loading one then the other. So, keep that in mind if/when this - gets extended) + """Helper function to load large hash table. + + Because this is kept outside the metric attributes, it allows easy resuse. + Note that this does mean running different sky regions at the same + time may end up thrashing the data in the hash/hash table. Parameters ---------- - skyregion : string - The skyregion of interst. Only two options: 'galactic' and 'extragalaxtic' - filePathGalactic : string - The path to the file contains galactic Prest-Color phase space information - filePathExtragalactic : string - The path to the file contains galactic Prest-Color phase space information + skyregion : `str` + The skyregion of interst. + Only two options: 'galactic' and 'extragalactic' + filePathGalactic : `str` + File containing galactic Presto-Color phase space information + filePathExtragalactic : `str` + File containing galactic Presto-Color phase space information """ if hasattr(_load_hash, "InfoDict"): @@ -82,37 +83,38 @@ def generate_presto_pop_slicer( into a UserPointSlicer object Parameters ---------- - skyregion : string - The skyregion of interst. Only two options: 'galactic' and 'extragalaxtic' - t_start : float (1) + skyregion : `str` + The skyregion of interest. 
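_load_hash above relies on a small trick worth calling out: the expensive pickle is read once and then cached as an attribute on the function object itself, so repeated metric evaluations reuse it. A generic sketch of the pattern; the file name and its contents are hypothetical.

import pickle

def load_table(path="presto_cube.pkl"):   # hypothetical file
    if not hasattr(load_table, "cache"):
        with open(path, "rb") as f:
            load_table.cache = pickle.load(f)
    return load_table.cache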
+ Only two options: 'galactic' and 'extragalactic' + t_start : `float` The night to start kilonova events on (days) - t_end : float (3652) + t_end : `float` The final night of kilonova events - n_events : int (10000) + n_events : `int` The number of kilonova events to generate - seed : float + seed : `float` The seed passed to np.random - n_files : int (7) + n_files : `int` The number of different kilonova lightcurves to use - d_min : float or int (10) + d_min : `float` or `int` Minimum luminosity distance (Mpc) - d_max : float or int (300) + d_max : `float` or `int` Maximum luminosity distance (Mpc) """ def rndm(a, b, g, size=1): - """Power-law gen for pdf(x)\propto x^{g-1} for a<=x<=b""" + """Power-law gen for pdf(x) proportional to x^{g-1} for a<=x<=b""" r = np.random.random(size=size) ag, bg = a**g, b**g return (ag + (bg - ag) * r) ** (1.0 / g) ra, dec = uniform_sphere(n_events, seed=seed) - ###Convert ra, dec to gl, gb + # Convert ra, dec to gl, gb gl, gb = radec2gal(ra, dec) - ###Determine if the object is in the Galaxy plane - if skyregion == "galactic": # keep the glactic events + # Determine if the object is in the Galaxy plane + if skyregion == "galactic": # keep the galactic events ra = ra[np.abs(gb) < gb_cut] dec = dec[np.abs(gb) < gb_cut] elif skyregion == "extragalactic": # keep the extragalactic events. @@ -157,15 +159,23 @@ def __init__( """ Parameters ---------- - skyregion : string - The skyregion of interst. Only two options: 'galactic' and 'extragalaxtic' + file_list : `str` or None, optional + File containing input lightcurves + mjd0 : `float`, optional + MJD of the start of the survey. + output_lc : `bool`, optional + Flag to whether or not to output lightcurve for each object. + skyregion : `str`, optional + The skyregion of interest. 
+ Only two options: 'galactic' and 'extragalactic' + thr : `float`, optional + Threshold for "classification" of events via the Score_S """ maps = ["DustMap"] self.mjd_col = mjd_col self.m5_col = m5_col self.filter_col = filter_col self.night_col = night_col - self.pts_needed = pts_needed # detection points threshold # Boolean variable, if True the light curve will be exported self.output_lc = output_lc self.thr = thr @@ -178,30 +188,17 @@ def __init__( self.ax1 = dust_properties.ax1 cols = [self.mjd_col, self.m5_col, self.filter_col, self.night_col] - super(PrestoColorKNePopMetric, self).__init__( - col=cols, units="Detected, 0 or 1", metric_name=metric_name, maps=maps, **kwargs - ) - - def _multi_detect(self, around_peak): - """ - Simple detection criteria: detect at least a certain number of times - """ - result = 1 - # Detected data points - if np.size(around_peak) < self.pts_needed: - return 0 - - return result + super().__init__(col=cols, units="Detected, 0 or 1", metric_name=metric_name, maps=maps, **kwargs) def _presto_color_detect(self, around_peak, filters): - """ - detection criteria of presto cadence: at least three detections at two filters; + """Detection criteria of presto cadence: + at least three detections at two filters; Parameters ---------- - around_peak : array + around_peak : `np.ndarray`, (N,) indexes corresponding to 5sigma detections - filters : array + filters : `np.ndarray`, (N,) filters in which detections happened """ result = 1 @@ -228,26 +225,23 @@ def _enquiry(self, hash_table, info_dict, band1, band2, d_t1, d_t2, d_mag, color Parameters ---------- - hash_table : array + hash_table : `np.ndarray`, (N,) Contains the values of the 6-D Presto-color phase space - info_dict : dictionary + info_dict : `dict` Contains the essential information of the hash_table abobe. - - hash_table and info_dict have to be loaded from premade data Presto-color data file. - - band1, band2 : string - The two filters that comprise the Presto-color observation triplet. The filters are - the 6 bands of LSST: u, g, r, i, z, y. Band1 and band2 should be different. - - d_t1, d_t2 : float + band1, band2 : `str`, `str` + The two filters that comprise the Presto-color observation triplet. + The filters are the 6 bands of LSST: u, g, r, i, z, y. + Band1 and band2 should be different. + d_t1, d_t2 : `float`, `float` The time gaps of the Presto-color observation triplet. - - d_mag : float - The magnitude change calculated from the observations of the same band - - color : float + d_mag : `float` + The magnitude change between from the observations of the same band + color : `float` The difference in magnitude of observations in different bands. + hash_table and info_dict have to be loaded from premade data + Presto-color data file. """ # if abs(d_t1) > abs(d_t1-d_t2): @@ -261,8 +255,6 @@ def _enquiry(self, hash_table, info_dict, band1, band2, d_t1, d_t2, d_mag, color ind1 = info_dict["BandPairs"].index(band1 + band2) - d_t1grid = info_dict["dT1s"][abs(d_t1 - info_dict["dT1s"]).argmin()] - d_t2grid = info_dict["dT2s"][abs(d_t2 - info_dict["dT2s"]).argmin()] time_pair_grid = [ info_dict["dT1s"][abs(d_t1 - info_dict["dT1s"]).argmin()], info_dict["dT2s"][abs(d_t2 - info_dict["dT2s"]).argmin()], @@ -275,33 +267,31 @@ def _enquiry(self, hash_table, info_dict, band1, band2, d_t1, d_t2, d_mag, color return hash_table[ind1, ind2, ind3, ind4] def _get_score(self, result, hash_table, info_dict, thr): - """ - Get the score of a strategy from the Presto-color perspective. 
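_enquiry above works by snapping each measured quantity onto the nearest value of the premade Presto-Color grid and then indexing the hash table. The snapping step in isolation, with an invented grid:

import numpy as np

d_t1s = np.array([0.1, 0.2, 0.4, 0.8])              # grid of first time gaps (days), invented
d_t1 = 0.33                                         # measured gap from a visit triplet
nearest = d_t1s[np.abs(d_t1 - d_t1s).argmin()]      # -> 0.4
axis_index = int(np.where(d_t1s == nearest)[0][0])  # index used along that hash-table axis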
+ """Get the score of a strategy from the Presto-color perspective. Parameters ---------- - result : dataframe - Dataframe that contains the results of the observations. The comlums include + result : `pd.DataFrame` + Dataframe that contains the results of the observations. + The columns include t: the time of the observation mag: the detected magnitude - maglim: the limit of magnitude that can be detected by LSST, fiveSigmaDepth + maglim: the limit fiveSigmaDepth that can be detected filter: the filter used for the observation - - hash_table : array + hash_table : `np.ndarray`, (N,) Contains the values of the 6-D Presto-color phase space - info_dict : dictionary + info_dict : `dict` Contains the essential information of the hash_table abobe. - - hash_table and info_dict have to be loaded from premade data Presto-color data file. - - scoreType : string + scoreType : `str` Two types of scores were designed: 'S' type involves a threshold, 'P' type work without a threshold. + thr : `float` + The threashold need for type 'S' score. + The default value is 0.003 (3-sigma) - thr : float - The threashold need for type 'S' score. The default value is 0.003 (3-sigma) - + hash_table and info_dict have to be loaded from the premade + Presto-color data file. """ time_lim1 = 8.125 / 24 # 8 h 7.5 min @@ -312,12 +302,13 @@ def _get_score(self, result, hash_table, info_dict, thr): # reset index detects = detects.reset_index(drop=True) - ts = detects.t.values # Times for valid detections - d_ts = ts.reshape(1, len(ts)) - ts.reshape(len(ts), 1) # Find out the differences between each pair + # Times for valid detections + ts = detects.t.values + # Find out the differences between each pair + d_ts = ts.reshape(1, len(ts)) - ts.reshape(len(ts), 1) - d_tindex0, d_tindex1 = np.where( - abs(d_ts) < time_lim2 - ) # The time differences should be within 32 hours (2 nights) + # The time differences should be within 32 hours (2 nights) + d_tindex0, d_tindex1 = np.where(abs(d_ts) < time_lim2) phase_space_coords = [] @@ -339,10 +330,12 @@ def _get_score(self, result, hash_table, info_dict, thr): # The band appears once will be band2 occurence = np.array([np.count_nonzero(ii == bands) for ii in bands]) - - index2 = indices[occurence == 1][0] # The index of observation in band2 - index11 = indices[occurence == 2][0] # The index of the first observation in band1 - index12 = indices[occurence == 2][1] # The index of the second observation in band1 + # The index of observation in band2 + index2 = indices[occurence == 1][0] + # The index of the first observation in band1 + index11 = indices[occurence == 2][0] + # The index of the second observation in band1 + index12 = indices[occurence == 2][1] if ( abs(d_ts[index12, index2]) < abs(d_ts[index11, index2]) @@ -379,141 +372,6 @@ def _get_score(self, result, hash_table, info_dict, thr): return score_s, max(score_p) - def _ztfrest_simple( - self, - around_peak, - mags, - t, - filters, - min_dt=0.125, - min_fade=0.3, - max_rise=-1.0, - select_red=False, - ): - """ - Selection criteria based on rise or decay rate; simplified version of - the methods employed by the ZTFReST project - (Andreoni & Coughlin et al., 2021) - Parameters - ---------- - around_peak : array - indexes corresponding to 5sigma detections - mags : array - magnitudes obtained interpolating models on the data_slice - t : array - relative times - filters : array - filters in which detections happened - min_dt : float - minimum time gap between first and last detection in a given band - min_fade : float - fade rate 
threshold (positive, mag/day) - max_rise : float - rise rate threshold (negative, mag/day) - select_red : bool - if True, only red 'izy' filters will be considered - Examples - ---------- - A transient: - rising by 0.74 mag/day will pass a threshold max_rise=-0.5 - rising by 0.74 mag/day will not pass a threshold max_rise=-1.0 - fading by 0.6 mag/day will pass a threshold min_fade=0.3 - fading by 0.2 mag/day will not pass a threshold min_fade=0.3 - """ - result = 1 - - # Quick check on the number of detected points - if np.size(around_peak) < self.pts_needed: - return 0 - # Quick check on the time gap between first and last detection - elif np.max(t[around_peak]) - np.min(t[around_peak]) < min_dt: - return 0 - else: - evol_rate = [] - fil = [] - # Check time gaps and rise or fade rate for each band - for f in set(filters): - if select_red is True and not (f in "izy"): - continue - times_f = t[around_peak][np.where(filters == f)[0]] - mags_f = mags[around_peak][np.where(filters == f)[0]] - dt_f = np.max(times_f) - np.min(times_f) - # Calculate the evolution rate, if the time gap condition is met - if dt_f > min_dt: - evol_rate_f = (np.max(mags_f) - np.min(mags_f)) / ( - times_f[np.where(mags_f == np.max(mags_f))[0]][0] - - times_f[np.where(mags_f == np.min(mags_f))[0]][0] - ) - evol_rate.append(evol_rate_f) - else: - evol_rate.append(0) - fil.append(f) - if len(evol_rate) == 0: - return 0 - # Check if the conditions on the evolution rate are met - if np.max(evol_rate) < min_fade and np.min(evol_rate) > max_rise: - return 0 - - return result - - def _multi_color_detect(self, filters): - """ - Color-based simple detection criteria: detect at least twice, - with at least two filters - """ - result = 1 - # detected in at least two filters - if np.size(np.unique(filters)) < 2: - return 0 - - return result - - def _red_color_detect(self, filters, min_det=4): - """ - Detected at least min_det times in either izy colors - Parameters - ---------- - filters : array - filters in which detections happened - min_det : float or int - minimum number of detections required in izy bands - """ - result = 1 - # Number of detected points in izy bands - n_red_det = ( - np.size(np.where(filters == "i")[0]) - + np.size(np.where(filters == "z")[0]) - + np.size(np.where(filters == "y")[0]) - ) - # Condition - if n_red_det < min_det: - return 0 - - return result - - def _blue_color_detect(self, filters, min_det=4): - """ - Detected at least min_det times in either ugr colors - Parameters - ---------- - filters : array - filters in which detections happened - min_det : float or int - minimum number of detections required in ugr bands - """ - result = 1 - # Number of detected points in ugr bands - n_blue_det = ( - np.size(np.where(filters == "u")[0]) - + np.size(np.where(filters == "g")[0]) - + np.size(np.where(filters == "r")[0]) - ) - # Condition - if n_blue_det < min_det: - return 0 - - return result - def run(self, data_slice, slice_point=None): data_slice.sort(order=self.mjd_col) result = {} diff --git a/rubin_sim/maf/maf_contrib/transient_ascii_sed_metric.py b/rubin_sim/maf/maf_contrib/transient_ascii_sed_metric.py index 1e6503500..814dd02b6 100644 --- a/rubin_sim/maf/maf_contrib/transient_ascii_sed_metric.py +++ b/rubin_sim/maf/maf_contrib/transient_ascii_sed_metric.py @@ -11,14 +11,13 @@ __all__ = ("TransientAsciiSEDMetric",) import os -import warnings from copy import deepcopy import numpy as np try: from sncosmo import Model, TimeSeriesSource, read_griddata_ascii -except ImportError as error: +except 
ImportError: pass from astropy.cosmology import Planck15 as cosmo diff --git a/rubin_sim/maf/maf_contrib/triplet_metric.py b/rubin_sim/maf/maf_contrib/triplet_metric.py index 39ba8e4d2..5707122ee 100644 --- a/rubin_sim/maf/maf_contrib/triplet_metric.py +++ b/rubin_sim/maf/maf_contrib/triplet_metric.py @@ -31,8 +31,6 @@ def run(self, data_slice, slice_point=None): times = times - 49378 # change times to smaller numbers delmax = self.delmax delmin = self.delmin - ratiomax = self.ratiomax - ratiomin = self.ratiomin total = 0 # iterate over every exposure time for counter, time in enumerate(times): diff --git a/rubin_sim/maf/maps/base_map.py b/rubin_sim/maf/maps/base_map.py index 49e52d76d..408fea651 100644 --- a/rubin_sim/maf/maps/base_map.py +++ b/rubin_sim/maf/maps/base_map.py @@ -2,8 +2,6 @@ import inspect -from six import with_metaclass - class MapsRegistry(type): """ @@ -42,7 +40,7 @@ def help(cls, doc=False): print(" added to slice_point: ", ",".join(maps.keynames)) -class BaseMap(with_metaclass(MapsRegistry, object)): +class BaseMap(metaclass=MapsRegistry): """ """ def __init__(self, **kwargs): diff --git a/rubin_sim/maf/maps/create_gaia_density_map.py b/rubin_sim/maf/maps/create_gaia_density_map.py index b1696712b..03245e833 100755 --- a/rubin_sim/maf/maps/create_gaia_density_map.py +++ b/rubin_sim/maf/maps/create_gaia_density_map.py @@ -15,7 +15,7 @@ if __name__ == "__main__": # Hide imports here so documentation builds from rubin_sim.catalogs.db import DBObject - from rubin_sim.utils import angular_separation, halfSpaceFromRaDec, levelFromHtmid, raDec2Hpid + from rubin_sim.utils import angular_separation, halfSpaceFromRaDec # from rubin_sim.catalogs.generation.db import CatalogDBObject # Import the bits needed to get the catalog to work diff --git a/rubin_sim/maf/metadata_dir.py b/rubin_sim/maf/metadata_dir.py index 2ed22167a..4e54792aa 100755 --- a/rubin_sim/maf/metadata_dir.py +++ b/rubin_sim/maf/metadata_dir.py @@ -11,11 +11,10 @@ matplotlib.use("Agg") from . import batches as batches -from .db import ResultsDb, TrackingDb +from .db import ResultsDb from .metric_bundles import MetricBundle, MetricBundleGroup from .metrics import CountExplimMetric from .slicers import HealpixSlicer, HealpixSubsetSlicer -from .utils import get_date_version def metadata_dir(): @@ -45,7 +44,8 @@ def metadata_dir(): ) args = parser.parse_args() - # If runNames not given, scan for sim_name databases in current directory and use those + # If runNames not given, scan for sim_name databases in current + # directory and use those # Note that 'runNames' can be full path to directories if args.db is None: @@ -89,7 +89,7 @@ def metadata_dir(): wfd_hpix = np.where(wfd_footprint == 1)[0] wfd_slicer = HealpixSubsetSlicer(nside=args.nside, hpid=wfd_hpix) - bdict = batches.metadata_bundle_dicts(allsky_slicer, wfd_slicer, sim_name, colmap) + bdict = batches.info_bundle_dicts(allsky_slicer, wfd_slicer, sim_name, colmap) # Set up the resultsDB results_db = ResultsDb(out_dir=out_dir) diff --git a/rubin_sim/maf/metrics/agnstructure.py b/rubin_sim/maf/metrics/agnstructure.py index 20fa12ebe..6f54d5db2 100644 --- a/rubin_sim/maf/metrics/agnstructure.py +++ b/rubin_sim/maf/metrics/agnstructure.py @@ -86,7 +86,7 @@ def run(self, data_slice, slice_point=None): df = np.unique(data_slice[self.filter_col]) if np.size(df) > 1: - msg = """Running structure function on multiple filters simultaneously. + msg = """Running structure function on multiple filters simultaneously. 
Should probably change your SQL query to limit to a single filter.""" warnings.warn(msg) if self.dust: diff --git a/rubin_sim/maf/metrics/base_metric.py b/rubin_sim/maf/metrics/base_metric.py index ed98d8f1c..bb6fb2b63 100644 --- a/rubin_sim/maf/metrics/base_metric.py +++ b/rubin_sim/maf/metrics/base_metric.py @@ -12,7 +12,6 @@ import warnings import numpy as np -from six import with_metaclass from rubin_sim.maf.stackers.get_col_info import ColInfo @@ -103,7 +102,7 @@ def add_cols(self, col_array): self.stacker_dict[col] = source -class BaseMetric(with_metaclass(MetricRegistry, object)): +class BaseMetric(metaclass=MetricRegistry): """ Base class for the metrics. Sets up some basic functionality for the MAF framework: diff --git a/rubin_sim/maf/metrics/mo_summary_metrics.py b/rubin_sim/maf/metrics/mo_summary_metrics.py index 990d40e99..451d8a1bb 100644 --- a/rubin_sim/maf/metrics/mo_summary_metrics.py +++ b/rubin_sim/maf/metrics/mo_summary_metrics.py @@ -25,21 +25,23 @@ def power_law_dndh(hvalues, hindex=0.33, no=None, ho=None, **kwargs): Parameters ---------- - hvalues : `numpy.ndarray` - The H values corresponding to each metricValue (must be the same length). + hvalues : `np.ndarray`, (N,) + The H values corresponding to each metric_value + (must be the same length). The hvalues are expected to be evenly spaced. hindex : `float`, optional The power-law index expected for the H value distribution. Default is 0.33 (dN/dH = 10^(hindex * H) ). no : `float`, optional ho: `float`, optional - If no and ho are specified, this provides an anchor for the power law distribution, - so that the expected number no of objects at ho is returned. - Do not need to be set if just doing comparative weighting. + If no and ho are specified, this provides an anchor + for the power law distribution,so that the expected number no + of objects at ho is returned. + Does not need to be set if just doing comparative weighting. Returns ------- - dndh : `numpy.ndarray` + dndh : `np.ndarray`, (N,) """ if no is None or ho is None: ho = hvalues.min() @@ -90,66 +92,72 @@ def integrate_over_h(metric_values, hvalues, dndh_func=power_law_dndh, **kwargs) metric_values : `numpy.ndarray` The metric values at each H value. hvalues : `numpy.ndarray` - The H values corresponding to each metricValue (must be the same length). + The H values corresponding to each metric_value + (must be the same length). dndh_func : function, optional - One of the dN/dH functions defined below. Default is a simple power law. + One of the dN/dH functions defined below. **kwargs : `dict`, optional Keyword arguments to pass to dndh_func Returns -------- - int_vals : `numpy.ndarray` + int_vals : `np.ndarray`, (N,) The integrated metric values. """ # Set expected H distribution. # dndh = differential size distribution (number in this bin) dndh = dndh_func(hvalues, **kwargs) - # calculate the metric values *weighted* by the number of objects in this bin and brighter + # calculate the metric values *weighted* by the number of objects + # in this bin and brighter int_vals = np.cumsum(metric_values * dndh) / np.cumsum(dndh) return int_vals def sum_over_h(metric_values, hvalues, dndh_func=power_law_dndh, **kwargs): - """Calculate the sum of the metric value multiplied by the number of objects at each H value. - This is equivalent to calculating the number of objects meeting X requirement in the - differential completeness or fraction of objects with lightcurves, etc. 
+    """Calculate the sum of the metric value multiplied by the number of
+    objects at each H value. This is equivalent to calculating the
+    number of objects meeting X requirement in the differential completeness
+    or fraction of objects with lightcurves, etc.
 
     Parameters
     ----------
-    metric_values : `numpy.ndarray`
+    metric_values : `np.ndarray`, (N,)
         The metric values at each H value.
-    hvalues : `numpy.ndarray`
-        The H values corresponding to each metricValue (must be the same length).
+    hvalues : `np.ndarray`, (N,)
+        The H values corresponding to each metric_value.
     dndh_func : function, optional
-        One of the dN/dH functions defined below. Default is a simple power law.
+        One of the dN/dH functions defined below.
     **kwargs : `dict`, optional
         Keyword arguments to pass to dndh_func
 
     Returns
     --------
-    sum_vals : `numpy.ndarray`
+    sum_vals : `np.ndarray`, (N,)
         The cumulative metric values.
     """
     # Set expected H distribution.
     # dndh = differential size distribution (number in this bin)
     dndh = dndh_func(hvalues, **kwargs)
-    # calculate the metric values *weighted* by the number of objects in this bin and brighter
+    # calculate the metric values *weighted* by the number of objects
+    # in this bin and brighter
     sum_vals = np.cumsum(metric_values * dndh)
     return sum_vals
 
 
 class TotalNumberSSO(BaseMoMetric):
-    """Calculate the total number of objects of a given population expected at a given H value or larger.
+    """Calculate the total number of objects of a given population
+    expected at a given H value or larger.
 
-    Operations on differential completeness values (or equivalent; fractions of the population is ok if
+    Operations on differential completeness values
+    (or equivalent; fractions of the population is ok if
     still a differential metric result, not cumulative).
 
     Parameters
     ----------
     h_mark : `float`, optional
-        The H value at which to calculate the expected total number of objects. Default = 22.
-    dndh_func : `function`, optional
-        The dN/dH distribution to use to calculate the expected population size.
+        The H value at which to calculate the expected total number of objects.
+    dndh_func : function, optional
+        The dN/dH distribution used to calculate the expected population size.
 
     Returns
     -------
@@ -173,12 +181,13 @@ def run(self, metric_vals, h_vals):
 
 
 class ValueAtHMetric(BaseMoMetric):
     """Return the metric value at a given H value.
 
-    Requires the metric values to be one-dimensional (typically, completeness values).
+    Requires the metric values to be one-dimensional
+    (typically, completeness values).
 
     Parameters
     ----------
     h_mark : `float`, optional
-        The H value at which to look up the metric value. Default = 22.
+        The H value at which to look up the metric value.
 
     Returns
     -------
@@ -187,7 +196,7 @@ class ValueAtHMetric(BaseMoMetric):
 
     def __init__(self, h_mark=22, **kwargs):
         metric_name = "Value At H=%.1f" % (h_mark)
-        units = "<= %.1f" % (h_mark)
+        self.units = "<= %.1f" % (h_mark)
         super().__init__(metric_name=metric_name, **kwargs)
         self.h_mark = h_mark
 
@@ -206,12 +215,13 @@ def run(self, metric_vals, h_vals):
 
 
 class MeanValueAtHMetric(BaseMoMetric):
     """Return the mean value of a metric at a given H.
 
-    Allows the metric values to be multi-dimensional (i.e. use a cloned H distribution).
+    Allows the metric values to be multi-dimensional
+    (i.e. use a cloned H distribution).
 
     Parameters
     ----------
     h_mark : `float`, optional
-        The H value at which to look up the metric value. Default = 22.
+        The H value at which to look up the metric value.
Returns ------- @@ -221,6 +231,7 @@ class MeanValueAtHMetric(BaseMoMetric): def __init__(self, h_mark=22, reduce_func=np.mean, metric_name=None, **kwargs): if metric_name is None: metric_name = "Mean Value At H=%.1f" % (h_mark) + self.units = "@ H= %.1f" % (h_mark) super().__init__(metric_name=metric_name, **kwargs) self.h_mark = h_mark self.reduce_func = reduce_func @@ -235,31 +246,37 @@ def run(self, metric_vals, h_vals): class MoCompletenessMetric(BaseMoMetric): - """Calculate the fraction of the population that meets ``threshold`` value or higher. - This is equivalent to calculating the completeness (relative to the entire population) given - the output of a Discovery_N_Chances metric, or the fraction of the population that meets a given cutoff - value for Color determination metrics. + """Calculate the fraction of the population that meets `threshold` value + or higher. This is equivalent to calculating the completeness + (relative to the entire population) given the output of a + Discovery_N_Chances metric, or the fraction of the population that meets + a given cutoff value for Color determination metrics. - Any moving object metric that outputs a float value can thus have the 'fraction of the population' - with greater than X value calculated here, as a summary statistic. + Any moving object metric that outputs a float value can thus have + the 'fraction of the population' with greater than X value calculated here, + as a summary statistic. Parameters ---------- threshold : `int`, optional - Count the fraction of the population that exceeds this value. Default = 1. + Count the fraction of the population that exceeds this value. nbins : `int`, optional - If the H values for the metric are not a cloned distribution, then split up H into this many bins. - Default 20. + If the H values for the metric are not a cloned distribution, + then split up H into this many bins. min_hrange : `float`, optional - If the H values for the metric are not a cloned distribution, then split up H into at least this - range (otherwise just use the min/max of the H values). Default 1.0 + If the H values for the metric are not a cloned distribution, + then split up H into at least this + range (otherwise just use the min/max of the H values). cumulative : `bool`, optional - If False, simply report the differential fractional value (or differential completeness). - If True, integrate over the H distribution (using IntegrateOverH) to report a cumulative fraction. - Default None which becomes True; - if metric_name is set and starts with 'Differential' this will then set to False. + If False, simply report the differential fractional value + (or differential completeness). + If True, integrate over the H distribution (using IntegrateOverH) + to report a cumulative fraction. + Default of None will use True, unless metric_name is set and starts + with "Differential" - then default will use False. hindex : `float`, optional - Use hindex as the power law to integrate over H, if cumulative is True. Default 0.3. + Use hindex as the power law to integrate over H, + if cumulative is True. 
""" def __init__( @@ -271,17 +288,21 @@ def __init__( hindex=0.33, **kwargs, ): - if cumulative is None: # if metric_name does not start with 'differential', then cumulative->True + if cumulative is None: + # if metric_name does not start with 'differential', + # then cumulative->True if "metric_name" not in kwargs: self.cumulative = True metric_name = "CumulativeCompleteness" - else: # 'metric_name' in kwargs: + else: + # 'metric_name' in kwargs: metric_name = kwargs.pop("metric_name") if metric_name.lower().startswith("differential"): self.cumulative = False else: self.cumulative = True - else: # cumulative was set + else: + # cumulative was set self.cumulative = cumulative if "metric_name" in kwargs: metric_name = kwargs.pop("metric_name") @@ -298,7 +319,8 @@ def __init__( units = "@H" super().__init__(metric_name=metric_name, units=units, **kwargs) self.threshold = threshold - # If H is not a cloned distribution, then we need to specify how to bin these values. + # If H is not a cloned distribution, + # then we need to specify how to bin these values. self.nbins = nbins self.min_hrange = min_hrange self.hindex = hindex @@ -314,7 +336,8 @@ def run(self, metric_values, h_vals): completeness[i] = np.where(metric_val_h[i].filled(0) >= self.threshold)[0].size completeness = completeness / float(n_ssos) else: - # The h_vals are spread more randomly among the objects (we probably used one per object). + # The h_vals are spread more randomly among the objects + # (we probably used one per object). hrange = h_vals.max() - h_vals.min() min_h = h_vals.min() if hrange < self.min_hrange: @@ -343,42 +366,50 @@ def run(self, metric_values, h_vals): class MoCompletenessAtTimeMetric(BaseMoMetric): - """Calculate the completeness (relative to the entire population) <= a given H as a function of time, - given the times of each discovery. + """Calculate the completeness (relative to the entire population) + <= a given H as a function of time, given the times of each discovery. - Input values of the discovery times can come from the Discovery_Time (child) metric or the - KnownObjects metric. + Input values of the discovery times can come from the Discovery_Time + (child) metric or the KnownObjects metric. Parameters ---------- - times : `numpy.ndarray` like - The bins to distribute the discovery times into. Same units as the discovery time (typically MJD). + times : `np.ndarray`, (N,) or `list` [`float`] + The bins to distribute the discovery times into. + Same units as the discovery time (typically MJD). hval : `float`, optional - The value of H to count completeness at (or cumulative completeness to). - Default None, in which case a value halfway through h_vals (the slicer H range) will be chosen. + The value of H to count completeness at, or cumulative completeness to. + Default None, in which case a value halfway through h_vals + (the slicer H range) will be chosen. cumulative : `bool`, optional If True, calculate the cumulative completeness (completeness <= H). If False, calculate the differential completeness (completeness @ H). - Default None which becomes 'True' unless metric_name starts with 'differential'. + Default None which becomes 'True', + unless metric_name starts with 'differential'. hindex : `float`, optional - Use hindex as the power law to integrate over H, if cumulative is True. Default 0.3. + Use hindex as the power law to integrate over H, + if cumulative is True. 
""" def __init__(self, times, hval=None, cumulative=None, hindex=0.33, **kwargs): self.hval = hval self.times = times self.hindex = hindex - if cumulative is None: # if metric_name does not start with 'differential', then cumulative->True + if cumulative is None: + # if metric_name does not start with 'differential', + # then cumulative->True if "metric_name" not in kwargs: self.cumulative = True metric_name = "CumulativeCompleteness@Time@H=%.2f" % self.hval - else: # 'metric_name' in kwargs: + else: + # 'metric_name' in kwargs: metric_name = kwargs.pop("metric_name") if metric_name.lower().startswith("differential"): self.cumulative = False else: self.cumulative = True - else: # cumulative was set + else: + # cumulative was set self.cumulative = cumulative if "metric_name" in kwargs: metric_name = kwargs.pop("metric_name") diff --git a/rubin_sim/maf/metrics/night_pointing_metric.py b/rubin_sim/maf/metrics/night_pointing_metric.py index 5634f24df..6079a1ded 100644 --- a/rubin_sim/maf/metrics/night_pointing_metric.py +++ b/rubin_sim/maf/metrics/night_pointing_metric.py @@ -2,7 +2,7 @@ import numpy as np from astropy import units as u -from astropy.coordinates import AltAz, EarthLocation, SkyCoord, get_body, get_sun +from astropy.coordinates import AltAz, EarthLocation, get_body, get_sun from astropy.time import Time from rubin_sim.utils import Site diff --git a/rubin_sim/maf/metrics/sn_n_sn_metric.py b/rubin_sim/maf/metrics/sn_n_sn_metric.py index 0c2eeaa22..c7103e456 100644 --- a/rubin_sim/maf/metrics/sn_n_sn_metric.py +++ b/rubin_sim/maf/metrics/sn_n_sn_metric.py @@ -1,6 +1,5 @@ __all__ = ("SNNSNMetric",) -import os import healpy as hp import numpy as np @@ -8,7 +7,6 @@ import pandas as pd from scipy.interpolate import interp1d -from rubin_sim.data import get_data_dir from rubin_sim.maf.metrics import BaseMetric from rubin_sim.maf.utils.sn_n_sn_utils import LcfastNew, SnRate, load_sne_cached from rubin_sim.phot_utils import DustValues diff --git a/rubin_sim/maf/metrics/sn_snr_metric.py b/rubin_sim/maf/metrics/sn_snr_metric.py index 8317bfc6e..3b71a6f8c 100644 --- a/rubin_sim/maf/metrics/sn_snr_metric.py +++ b/rubin_sim/maf/metrics/sn_snr_metric.py @@ -1,6 +1,5 @@ __all__ = ("SNSNRMetric",) -import time from collections.abc import Iterable import matplotlib.pylab as plt @@ -14,27 +13,21 @@ class SNSNRMetric(metrics.BaseMetric): """ - Metric to estimate the detection rate for faint supernovae (x1,color) = (-2.0,0.2) + Metric to estimate the detection rate for faint supernovae + (x1,color) = (-2.0,0.2) Parameters ---------- - list : str, optional - Name of the columns used to estimate the metric - Default : 'observationStartMJD', 'fieldRA', 'fieldDec','filter','fiveSigmaDepth', - 'visitExposureTime','night','observationId', 'numExposures','visitTime' - coadd : bool, optional + coadd : `bool`, optional to make "coaddition" per night (uses snStacker) - Default : True lim_sn : class, optional Reference data used to simulate LC points (interpolation) - names_ref : str, optional + names_ref : `str`, optional names of the simulator used to produce reference data - season : flota, optional + season : `float`, optional season num - Default : 1. 
- z : float, optional + z : `float`, optional redshift for this study - Default : 0.01 """ def __init__( @@ -107,15 +100,13 @@ def run(self, data_slice, slice_point=None): Parameters ---------- - data_slice : array + data_slice : `np.ndarray`, (N,) simulation data under study Returns ------- - detection rate : float - + detection rate : `float` """ - time_ref = time.time() good_filters = np.in1d(data_slice["filter"], self.filter_names) data_slice = data_slice[good_filters] if data_slice.size == 0: @@ -159,29 +150,27 @@ def process(self, sel): Parameters ----------- - sel : array + sel : `np.narray` array of observations - season : int + season : `int` season number Returns -------- record array with the following fields: - fieldRA (float) - fieldDec (float) - season (float) - band (str) - frac_obs_name_ref (float) - + fieldRA (float) + fieldDec (float) + season (float) + band (str) + frac_obs_name_ref (float) """ self.band = np.unique(sel[self.filter_col])[0] - time_ref = time.time() snr_obs = self.snr_slice(sel) # SNR for observations snr_fakes = self.snr_fakes(sel) # SNR for fakes detect_frac = self.detection_rate(snr_obs, snr_fakes) # Detection rate - snr_obs = np.asarray(snr_obs) - snr_fakes = np.asarray(snr_fakes) + # snr_obs = np.asarray(snr_obs) + # snr_fakes = np.asarray(snr_fakes) # self.plot(snr_obs, snr_fakes) # plt.show() detect_frac = np.asarray(detect_frac) @@ -189,19 +178,17 @@ def process(self, sel): return detect_frac def snr_slice(self, data_slice, j=-1, output_q=None): - """ - Estimate SNR for a given data_slice + """Estimate SNR for a given data_slice Parameters ----------- - data_slice : `np.recarray` + data_slice : `np.ndarray` j : `int`, optional output_q : `int`, optional Returns -------- - array with the following fields (all are of f8 type, except band which is of U1) - + snr : `np.ndarray` containing SNR_name_ref: Signal-To-Noise Ratio estimator season : season cadence: cadence of the season @@ -216,7 +203,6 @@ def snr_slice(self, data_slice, j=-1, output_q=None): m5: mean m5 (over the season) nvisits: median number of visits (per observation) (over the season) ExposureTime: median exposure time (per observation) (over the season) - """ # Get few infos: RA, Dec, nvisits, m5, exptime @@ -248,7 +234,8 @@ def snr_slice(self, data_slice, j=-1, output_q=None): # SN DayMax: dates-shift where shift is chosen in the input yaml file t0_lc = dates - self.shift - # for these DayMax, estimate the phases of LC points corresponding to the current data_slice MJDs + # for these DayMax, estimate the phases of LC points + # corresponding to the current data_slice MJDs time_for_lc = -t0_lc[:, None] + mjds @@ -262,7 +249,7 @@ def snr_slice(self, data_slice, j=-1, output_q=None): season_vals = np.tile(data_slice[self.season_col], (len(time_for_lc), 1)) # estimate fluxes and snr in SNR function - fluxes_tot, snr = self.snr(time_for_lc, m5_vals, flag, season_vals, t0_lc) + fluxes_tot, snr = self.snr(time_for_lc, m5_vals, flag, t0_lc) # now save the results in a record array _, idx = np.unique(snr["season"], return_inverse=True) @@ -284,17 +271,16 @@ def snr_slice(self, data_slice, j=-1, output_q=None): return snr def season_info(self, data_slice, season): - """ - Get info on seasons for each data_slice + """Get info on seasons for each data_slice Parameters ---------- - data_slice : array + data_slice : `np.ndarray`, (N,) array of observations Returns ------- - recordarray with the following fields: + info_season : `np.ndarray` season, cadence, season_length, MJDmin, MJDmax """ @@ 
-329,33 +315,29 @@ def season_info(self, data_slice, season): return info_season - def snr(self, time_lc, m5_vals, flag, season_vals, t0_lc): - """ - Estimate SNR vs time + def snr(self, time_lc, m5_vals, flag, t0_lc): + """Estimate SNR vs time Parameters ----------- - time_lc : - m5_vals : list(float) + time_lc : `np.ndarray`, (N,) + m5_vals : `list` [`float`] five-sigme depth values - flag : array(bool) + flag : `np.ndarray`, (N,) flag to be applied (example: selection from phase cut) - season_vals : array(float) + season_vals : `np.ndarray`, (N,) season values - t0_lc : array(float) + t0_lc : `np.ndarray`, (N,) array of T0 for supernovae Returns ------- - fluxes_tot : list(float) + fluxes_tot : `list` [`float`] list of (interpolated) fluxes - snr_tab : array with the following fields: + snr_tab : `np.ndarray`, (N,) snr_name_ref (float) : Signal-to-Noise values season (float) : season num. """ - - seasons = np.ma.array(season_vals, mask=~flag) - fluxes_tot = {} snr_tab = None @@ -374,7 +356,7 @@ def snr(self, time_lc, m5_vals, flag, season_vals, t0_lc): snr_tab = np.asarray(np.copy(snr_season), dtype=[("SNR_" + name, "f8")]) else: snr_tab = rf.append_fields(snr_tab, "SNR_" + name, np.copy(snr_season)) - """ + """ snr_tab = rf.append_fields( snr_tab, 'season', np.mean(seasons, axis=1)) """ @@ -383,7 +365,8 @@ def snr(self, time_lc, m5_vals, flag, season_vals, t0_lc): # check if any masked value remaining # this would correspond to case where no obs point has been selected # ie no points with phase in [phase_min,phase_max] - # this happens when internight gaps are large (typically larger than shift) + # this happens when internight gaps are large + # (typically larger than shift) idmask = np.where(snr_tab.mask) if len(idmask) > 0: tofill = np.copy(snr_tab["season"]) @@ -399,12 +382,13 @@ def get_season(self, t0): Parameters ---------- - t0 : list(float) + t0 : `list` [`float`] set of t0 values Returns -------- - list (float) of corresponding seasons + mean_seasons : `list` [`float`] + list (float) of corresponding seasons """ diff_min = t0[:, None] - self.info_season["MJD_min"] @@ -416,18 +400,17 @@ def get_season(self, t0): return np.mean(seasons, axis=1) def snr_fakes(self, data_slice): - """ - Estimate SNR for fake observations + """Estimate SNR for fake observations in the same way as for observations (using SNR_Season) Parameters ----------- - data_slice : array + data_slice : `np.ndarray`, (N,) array of observations Returns -------- - snr_tab : array with the following fields: + snr_tab : `np.ndarray` snr_name_ref (float) : Signal-to-Noise values season (float) : season num. 
@@ -447,20 +430,19 @@ def snr_fakes(self, data_slice): return snr_fakes def gen_fakes(self, slice_sel, band): - """ - Generate fake observations + """Generate fake observations according to observing values extracted from simulations Parameters ---------- - slice_sel : array + slice_sel : `np.ndarray`, (N,) array of observations - band : str + band : `str` band to consider Returns -------- - fake_obs_season : array + fake_obs_season : `np.ndarray` array of observations with the following fields observationStartMJD (float) field_ra (float) @@ -479,7 +461,6 @@ def gen_fakes(self, slice_sel, band): for val in self.info_season: cadence = val["cadence"] mjd_min = val["MJD_min"] - mjd_max = val["MJD_max"] season_length = val["season_length"] nvisits = val["nvisits"] m5 = val["m5"] @@ -511,9 +492,9 @@ def plot(self, snr_obs, snr_fakes): Parameters ---------- - snr_obs : array + snr_obs : `np.ndarray`, (N,) array estimated using snr_slice(observations) - snr_obs : array + snr_obs : `np.ndarray`, (N,) array estimated using snr_slice(fakes) """ @@ -534,13 +515,13 @@ def plot(self, snr_obs, snr_fakes): ) def plot_history(self, fluxes, mjd, flag, snr, t0_lc, dates): - """Plot history of Plot + """Plot history of lightcurve For each MJD, fluxes and snr are plotted Each plot may be saved as a png to make a video afterwards Parameters ---------- - fluxes : list(float) + fluxes : `list` [`float`] LC fluxes mjd : list(float) mjds of the fluxes @@ -554,8 +535,7 @@ def plot_history(self, fluxes, mjd, flag, snr, t0_lc, dates): date of the display (mjd) """ - dir_save = "/home/philippe/LSST/sn_metric_new/Plots" - import pylab as plt + import matplotlib.pyplot as plt plt.ion() fig, ax = plt.subplots(ncols=1, nrows=2) @@ -563,7 +543,6 @@ def plot_history(self, fluxes, mjd, flag, snr, t0_lc, dates): colors = ["b", "r"] myls = ["-", "--"] - mfc = ["b", "None"] tot_label = [] fontsize = 12 mjd_ma = np.ma.array(mjd, mask=~flag) @@ -711,8 +690,9 @@ def detection_rate(self, snr_obs, snr_fakes): return np.rec.fromrecords(rtot, names=names) def check_seasons(self, tab): - """Check wether seasons have no overlap - if it is the case: modify MJD_min and season length of the corresponding season + """Check whether seasons have no overlap + if it is the case: modify MJD_min and season length of + the corresponding season return only seasons with season_length > 30 days Parameters diff --git a/rubin_sim/maf/metrics/surfb_metric.py b/rubin_sim/maf/metrics/surfb_metric.py index 11b62f7ee..9c99d5d73 100644 --- a/rubin_sim/maf/metrics/surfb_metric.py +++ b/rubin_sim/maf/metrics/surfb_metric.py @@ -1,6 +1,5 @@ __all__ = ("SurfaceBrightLimitMetric",) -import os import warnings import numpy as np diff --git a/rubin_sim/maf/metrics/technical_metrics.py b/rubin_sim/maf/metrics/technical_metrics.py index 4f68a38ec..559a87a42 100644 --- a/rubin_sim/maf/metrics/technical_metrics.py +++ b/rubin_sim/maf/metrics/technical_metrics.py @@ -14,8 +14,7 @@ class NChangesMetric(BaseMetric): - """ - Compute the number of times a column value changes. + """Compute the number of times a column value changes. (useful for filter changes in particular). """ @@ -31,10 +30,16 @@ def run(self, data_slice, slice_point=None): class MinTimeBetweenStatesMetric(BaseMetric): - """ - Compute the minimum time between changes of state in a column value. + """Compute the minimum time between changes of state in a column value. (useful for calculating fastest time between filter changes in particular). Returns delta time in minutes! 
+ + Parameters + ---------- + change_col : `str` + Column that we are tracking changes in. + time_col : str + Column with the time of each visit """ def __init__( @@ -44,10 +49,6 @@ def __init__( metric_name=None, **kwargs, ): - """ - change_col = column that changes state - time_col = column tracking time of each visit - """ self.change_col = change_col self.time_col = time_col if metric_name is None: @@ -57,7 +58,7 @@ def __init__( ) def run(self, data_slice, slice_point=None): - # Sort on time, to be sure we've got filter (or other col) changes in the right order. + # Sort on time, to be sure we've got changes in the right order. idxs = np.argsort(data_slice[self.time_col]) changes = data_slice[self.change_col][idxs][1:] != data_slice[self.change_col][idxs][:-1] condition = np.where(changes == True)[0] @@ -79,7 +80,15 @@ class NStateChangesFasterThanMetric(BaseMetric): """ Compute the number of changes of state that happen faster than 'cutoff'. (useful for calculating time between filter changes in particular). - 'cutoff' should be in minutes. + + Parameters + ---------- + change_col : `str` + Column that we are tracking changes in. + time_col : str + Column with the time of each visit + cutoff : `float` + The cutoff value for the time between changes (in minutes). """ def __init__( @@ -90,11 +99,6 @@ def __init__( cutoff=20, **kwargs, ): - """ - col = column tracking changes in - time_col = column keeping the time of each visit - cutoff = the cutoff value for the reduce method 'NBelow' - """ if metric_name is None: metric_name = "Number of %s changes faster than <%.1f minutes" % ( change_col, @@ -102,13 +106,14 @@ def __init__( ) self.change_col = change_col self.time_col = time_col - self.cutoff = cutoff / 24.0 / 60.0 # Convert cutoff from minutes to days. + # Convert cutoff from minutes to days. + self.cutoff = cutoff / 24.0 / 60.0 super(NStateChangesFasterThanMetric, self).__init__( col=[change_col, time_col], metric_name=metric_name, units="#", **kwargs ) def run(self, data_slice, slice_point=None): - # Sort on time, to be sure we've got filter (or other col) changes in the right order. + # Sort on time, to be sure we've got changes in the right order. idxs = np.argsort(data_slice[self.time_col]) changes = data_slice[self.change_col][idxs][1:] != data_slice[self.change_col][idxs][:-1] condition = np.where(changes == True)[0] @@ -124,10 +129,18 @@ def run(self, data_slice, slice_point=None): class MaxStateChangesWithinMetric(BaseMetric): - """ - Compute the maximum number of changes of state that occur within a given timespan. + """Compute the maximum number of changes of state that occur + within a given timespan. (useful for calculating time between filter changes in particular). - 'timespan' should be in minutes. + + Parameters + ---------- + change_col : `str` + Column that we are tracking changes in. + time_col : str + Column with the time of each visit + timespan : `float` + The timespan to count the number of changes within (in minutes). 
""" def __init__( @@ -138,11 +151,6 @@ def __init__( timespan=20, **kwargs, ): - """ - col = column tracking changes in - time_col = column keeping the time of each visit - timespan = the timespan to count the number of changes within (in minutes) - """ if metric_name is None: metric_name = "Max number of %s changes within %.1f minutes" % ( change_col, @@ -156,12 +164,13 @@ def __init__( ) def run(self, data_slice, slice_point=None): - # This operates slightly differently from the metrics above; those calculate only successive times - # between changes, but here we must calculate the actual times of each change. + # This operates slightly differently from the metrics above; + # those calculate only successive times between changes, but here + # we must calculate the actual times of each change. # Check if there was only one observation (and return 0 if so). if data_slice[self.change_col].size == 1: return 0 - # Sort on time, to be sure we've got filter (or other col) changes in the right order. + # Sort on time, to be sure we've got changes in the right order. idxs = np.argsort(data_slice[self.time_col]) changes = data_slice[self.change_col][idxs][:-1] != data_slice[self.change_col][idxs][1:] condition = np.where(changes == True)[0] @@ -245,14 +254,15 @@ def run(self, data_slice, slice_point=None): teff += (10.0 ** (0.8 * (data_slice[self.m5_col][match] - self.depth[f]))).sum() teff *= self.teff_base if self.normed: - # Normalize by the t_eff if each observation was at the fiducial depth. + # Normalize by the t_eff equivalent if each observation + # was at the fiducial depth. teff = teff / (self.teff_base * data_slice[self.m5_col].size) return teff class OpenShutterFractionMetric(BaseMetric): - """ - Compute the fraction of time the shutter is open compared to the total time spent observing. + """Compute the fraction of time the shutter is open + compared to the total time spent observing. """ def __init__( @@ -287,9 +297,24 @@ def run(self, data_slice, slice_point=None): class BruteOSFMetric(BaseMetric): """Assume I can't trust the slewtime or visittime colums. - This computes the fraction of time the shutter is open, with no penalty for the first exposure - after a long gap (e.g., 1st exposure of the night). Presumably, the telescope will need to focus, - so there's not much a scheduler could do to optimize keeping the shutter open after a closure. + This computes the fraction of time the shutter is open, + with no penalty for the first exposure after a long gap + (e.g., 1st exposure of the night). + Presumably, the telescope will need to focus, so there's not much a + scheduler could do to optimize keeping the shutter open after a closure. + + Parameters + ---------- + maxgap : `float` + The maximum gap between observations, in minutes. + Assume anything longer the dome has closed. + fudge : `float` + Fudge factor if a constant has to be added to the exposure time values. + This time (in seconds) is added to the exposure time. + exp_time_col : `str` + The name of the exposure time column. Assumed to be in seconds. + mjd_col : `str` + The name of the start of the exposures. Assumed to be in units of days. """ def __init__( @@ -301,18 +326,6 @@ def __init__( fudge=0.0, **kwargs, ): - """ - Parameters - ---------- - maxgap : float (10.) - The maximum gap between observations. Assume anything longer the dome has closed. - fudge : float (0.) - Fudge factor if a constant has to be added to the exposure time values (like in OpSim 3.61). 
- exp_time_col : str ('expTime') - The name of the exposure time column. Assumed to be in seconds. - mjd_col : str ('observationStartMJD') - The name of the start of the exposures. Assumed to be in units of days. - """ self.exp_time_col = exp_time_col self.maxgap = maxgap / 60.0 / 24.0 # convert from min to days self.mjd_col = mjd_col diff --git a/rubin_sim/maf/metrics/use_metrics.py b/rubin_sim/maf/metrics/use_metrics.py index a8a2440aa..c3ad366a6 100644 --- a/rubin_sim/maf/metrics/use_metrics.py +++ b/rubin_sim/maf/metrics/use_metrics.py @@ -17,16 +17,14 @@ def run(self, data_slice, slice_point=None): # pylint: disable=invalid-name Parameters ---------- - data_slice : numpy.NDarray - Values passed to metric by the slicer, which the metric will use to calculate - metric values at each slice_point. - slice_point : Dict + data_slice : `np.ndarray`, (N,)` + slice_point : `dict` Dictionary of slice_point metadata passed to each metric. - E.g. the ra/dec of the healpix pixel or opsim fieldId. + E.g. the ra/dec of the healpix pixel. Returns ------- - str + use_name : `str` use at each slice_point. """ use_name = None diff --git a/rubin_sim/maf/metrics/visit_groups_metric.py b/rubin_sim/maf/metrics/visit_groups_metric.py index b55814f3f..47c685580 100644 --- a/rubin_sim/maf/metrics/visit_groups_metric.py +++ b/rubin_sim/maf/metrics/visit_groups_metric.py @@ -289,13 +289,11 @@ def reduce_max_seq_lunations(self, metricval): ) max_sequence = 0 cur_sequence = 0 - in_seq = False for l in lunations: # Find visits within lunation. vl, nl = self._in_lunation(metricval["visits"], metricval["nights"], l, lunation_length) # If no visits this lunation: if len(vl) == 0: - in_seq = False max_sequence = max(max_sequence, cur_sequence) cur_sequence = 0 # Else, look to see if groups can be made from the visits. @@ -305,11 +303,9 @@ def reduce_max_seq_lunations(self, metricval): # If there was a group within this lunation: if len(nw) >= self.min_n_nights: cur_sequence += 1 - in_seq = True break # Otherwise we're not in a sequence (anymore, or still). else: - in_seq = False max_sequence = max(max_sequence, cur_sequence) cur_sequence = 0 # Pick up last sequence if were in a sequence at last lunation. 
diff --git a/rubin_sim/maf/metrics/weak_lensing_systematics_metric.py b/rubin_sim/maf/metrics/weak_lensing_systematics_metric.py index 6fd57aa50..3accd323e 100644 --- a/rubin_sim/maf/metrics/weak_lensing_systematics_metric.py +++ b/rubin_sim/maf/metrics/weak_lensing_systematics_metric.py @@ -4,7 +4,6 @@ from .base_metric import BaseMetric from .exgal_m5 import ExgalM5 -from .vector_metrics import VectorMetric class ExgalM5WithCuts(BaseMetric): diff --git a/rubin_sim/maf/plots/nd_plotters.py b/rubin_sim/maf/plots/nd_plotters.py index 9f4c6ca40..0b19d057e 100644 --- a/rubin_sim/maf/plots/nd_plotters.py +++ b/rubin_sim/maf/plots/nd_plotters.py @@ -198,10 +198,10 @@ def plot_binned_data1_d(self, metric_values, slicer, user_plot_dict, fignum=None xlabel = plot_dict["xlabel"] if xlabel is None: xlabel = slicer.sliceColName[axis] - if plot_dict["units"] != None: + if plot_dict["units"] is not None: xlabel += " (" + plot_dict["units"] + ")" plt.xlabel(xlabel) - if plot_dict["histRange"] != None: + if plot_dict["histRange"] is not None: plt.xlim(plot_dict["histRange"]) plt.title(plot_dict["title"]) return fig.number diff --git a/rubin_sim/maf/plots/spatial_plotters.py b/rubin_sim/maf/plots/spatial_plotters.py index 254f4be9b..d69f44a39 100644 --- a/rubin_sim/maf/plots/spatial_plotters.py +++ b/rubin_sim/maf/plots/spatial_plotters.py @@ -316,7 +316,7 @@ def __call__(self, metric_value, slicer, user_plot_dict, fignum=None): fig = plt.figure(fignum, figsize=plot_dict["figsize"]) if plot_dict["subplot"] != "111": - ax = fig.add_subplot(plot_dict["subplot"]) + fig.add_subplot(plot_dict["subplot"]) # If the mask is True everywhere (no data), just plot zeros if False not in metric_value.mask: return None @@ -632,7 +632,7 @@ def __call__(self, metric_value_in, slicer, user_plot_dict, fignum=None): # Set up valid datapoints and color_min/max values. if plot_dict["plotMask"]: # Plot all data points. - mask = np.ones(len(metric_value), dtype="bool") + good = np.ones(len(metric_value), dtype="bool") else: # Only plot points which are not masked. Flip numpy ma mask where 'False' == 'good'. 
good = ~metric_value.mask diff --git a/rubin_sim/maf/run_comparison/microlensing_compare.py b/rubin_sim/maf/run_comparison/microlensing_compare.py index 3762d3a25..24ed66de1 100644 --- a/rubin_sim/maf/run_comparison/microlensing_compare.py +++ b/rubin_sim/maf/run_comparison/microlensing_compare.py @@ -1,6 +1,5 @@ import glob import os -import warnings import matplotlib.pyplot as plt import numpy as np @@ -196,8 +195,6 @@ def plot_fom(results, run_names, run_types, min_t_e, max_t_e, save_folder, figur fisher_runs_idx = np.where(run_types == "Fisher") run_type_list = [detect_runs_idx, npts_runs_idx, fisher_runs_idx] - type_run_names = ["Discovery", "Npts", "Fisher"] - for t_e_range in range(len(t_e_range_list)): for run_type in range(len(run_type_list)): # sorted alphabetically according to name of run @@ -226,7 +223,7 @@ def plot_fom(results, run_names, run_types, min_t_e, max_t_e, save_folder, figur ax2.set_xlabel("Avg Number of Points") ax2.set_xscale("log") - ax3.set_xlabel("Characaterization Efficiency \n ($\sigma_{t_E}/t_E$ < 0.1)") + ax3.set_xlabel("Characaterization Efficiency \n ($\\sigma_{t_E}/t_E$ < 0.1)") plt.savefig(save_folder + "/" + figure_name + ".png", bbox_inches="tight") return @@ -274,12 +271,9 @@ def plot_compare(results, run_names, run_types, min_t_e, max_t_e, save_folder, n detect_runs_idx = np.where(run_types == "detect") npts_runs_idx = np.where(run_types == "Npts") fisher_runs_idx = np.where(run_types == "Fisher") - run_type_list = [detect_runs_idx, npts_runs_idx, fisher_runs_idx] run_name_list = np.unique(run_names) - type_run_names = ["Discovery", "Npts", "Fisher"] - for t_e_range in range(len(t_e_range_list)): for run_name in run_name_list: run_name_idxs = np.where(run_names == run_name) diff --git a/rubin_sim/maf/slicers/base_slicer.py b/rubin_sim/maf/slicers/base_slicer.py index 51aa5329c..bdaa1757d 100644 --- a/rubin_sim/maf/slicers/base_slicer.py +++ b/rubin_sim/maf/slicers/base_slicer.py @@ -9,7 +9,6 @@ import numpy as np import numpy.ma as ma -from six import with_metaclass from rubin_sim.maf.utils import get_date_version @@ -44,7 +43,7 @@ def help(cls, doc=False): print(inspect.getdoc(cls.registry[slicername])) -class BaseSlicer(with_metaclass(SlicerRegistry, object)): +class BaseSlicer(metaclass=SlicerRegistry): """ Base class for all slicers: sets required methods and implements common functionality. @@ -331,7 +330,9 @@ def output_json( # If metric values is a masked array. if hasattr(metric_values, "mask"): if "ra" in self.slice_points: - # Spatial slicer. Translate ra/dec to lon/lat in degrees and output with metric value. + # Spatial slicer. + # Translate ra/dec to lon/lat in degrees and + # output with metric value. for ra, dec, value, mask in zip( self.slice_points["ra"], self.slice_points["dec"], @@ -343,7 +344,8 @@ def output_json( lat = dec * 180.0 / np.pi metric.append([lon, lat, value]) elif "bins" in self.slice_points: - # OneD slicer. Translate bins into bin/left and output with metric value. + # OneD slicer. Translate bins into bin/left and + # output with metric value. for i in range(len(metric_values)): bin_left = self.slice_points["bins"][i] value = metric_values.data[i] @@ -436,7 +438,8 @@ def read_data(self, infilename): def read_backwards_compatible(self, restored, infilename): """Read pre v1.0 metric files.""" - # Backwards compatibility for pre-v1.0 metric outputs. To be deprecated at a future release. + # Backwards compatibility for pre-v1.0 metric outputs. + # To be deprecated at a future release. 
         warnings.warn(
             "Reading pre-v1.0 metric data. To be deprecated in a future release.",
             FutureWarning,
@@ -475,7 +478,8 @@ def read_backwards_compatible(self, restored, infilename):
             if o in slicer_init:
                 slicer_init[n] = slicer_init[o]
                 del slicer_init[o]
-        # An earlier backwards compatibility issue - map 'spatialkey1/spatialkey2' to 'lon_col/lat_col'.
+        # An earlier backwards compatibility issue -
+        # map 'spatialkey1/spatialkey2' to 'lon_col/lat_col'.
         if "spatialkey1" in slicer_init:
             slicer_init["lon_col"] = slicer_init["spatialkey1"]
             del slicer_init["spatialkey1"]
diff --git a/rubin_sim/maf/slicers/base_spatial_slicer.py b/rubin_sim/maf/slicers/base_spatial_slicer.py
index 4b2ded31c..e8fd3b774 100644
--- a/rubin_sim/maf/slicers/base_spatial_slicer.py
+++ b/rubin_sim/maf/slicers/base_spatial_slicer.py
@@ -1,8 +1,9 @@
 # The base class for all spatial slicers.
 # Slicers are 'data slicers' at heart; spatial slicers slice data by RA/Dec and
-# return the relevant indices in the sim_data to the metric.
-# The primary things added here are the methods to slice the data (for any spatial slicer)
-# as this uses a KD-tree built on spatial (RA/Dec type) indexes.
+# return the relevant indices in the sim_data to the metric.
+# The primary things added here are the methods to slice the data
+# (for any spatial slicer) as this uses a KD-tree built on spatial
+# (RA/Dec type) indexes.
 
 __all__ = ("BaseSpatialSlicer",)
 
@@ -27,39 +28,37 @@ def rotate(x, y, rotation_angle_rad):
 
 
 class BaseSpatialSlicer(BaseSlicer):
-    """Base spatial slicer object, contains additional functionality for spatial slicing,
-    including setting up and traversing a kdtree containing the simulated data points.
+    """Base spatial slicer object, contains additional functionality
+    for spatial slicing, including setting up and traversing a kdtree
+    containing the simulated data points.
 
     Parameters
     ----------
     lon_col : `str`, optional
-        Name of the longitude (RA equivalent) column to use from the input data.
-        Default fieldRA
+        Name of the longitude (RA equivalent) column.
     lat_col : `str`, optional
-        Name of the latitude (Dec equivalent) column to use from the input data.
-        Default fieldDec
+        Name of the latitude (Dec equivalent) column.
     rot_sky_pos_col_name : `str`, optional
-        Name of the rotSkyPos column in the input data. Only used if use_camera is True.
-        Describes the orientation of the camera orientation compared to the sky.
-        Default rotSkyPos.
+        Name of the rotSkyPos column in the input data.
+        Only used if use_camera is True.
+        Describes the orientation of the camera on the sky.
     lat_lon_deg : `bool`, optional
-        Flag indicating whether lat and lon values from input data are in degrees (True) or radians (False).
-        Default True.
+        Flag indicating whether lat and lon values from input data are
+        in degrees (True) or radians (False).
     verbose : `bool`, optional
-        Flag to indicate whether or not to write additional information to stdout during runtime.
-        Default True.
+        Verbosity flag - noisy or quiet.
     badval : `float`, optional
-        Bad value flag, relevant for plotting. Default -666.
+        Bad value flag, relevant for plotting.
     leafsize : `int`, optional
-        Leafsize value for kdtree. Default 100.
+        Leafsize value for kdtree.
     radius : `float`, optional
-        Radius for matching in the kdtree. Equivalent to the radius of the FOV. Degrees.
-        Default 1.75.
+        Radius for matching in the kdtree.
+        Equivalent to the radius of the FOV, in degrees.
     use_camera : `bool`, optional
         Flag to indicate whether to use the LSST camera footprint or not.
- Default True. camera_footprint_file : `str`, optional - Name of the camera footprint map to use. Can be None, which will use the default. + Name of the camera footprint map to use. + Can be None, which will use the default file. """ def __init__( @@ -92,7 +91,8 @@ def __init__( "badval": self.badval, "use_camera": self.use_camera, } - # RA and Dec are required slice_point info for any spatial slicer. slice_point RA/Dec are in radians. + # RA and Dec are required slice_point info for any spatial slicer. + # slice_point RA/Dec are in radians. self.slice_points["sid"] = None self.slice_points["ra"] = None self.slice_points["dec"] = None @@ -101,16 +101,17 @@ def __init__( self.plot_funcs = [BaseHistogram, BaseSkyMap] def setup_slicer(self, sim_data, maps=None): - """Use sim_data[self.lon_col] and sim_data[self.lat_col] (in radians) to set up KDTree. + """Use sim_data[self.lon_col] and sim_data[self.lat_col] + (in radians) to set up KDTree. Parameters ----------- sim_data : `numpy.ndarray` The simulated data, including the location of each pointing. maps : `list` of `rubin_sim.maf.maps` objects, optional - List of maps (such as dust extinction) that will run to build up additional metadata at each - slice_point. This additional metadata is available to metrics via the slice_point dictionary. - Default None. + List of maps (such as dust extinction) that will run to build up + additional data at each slice_point. This additional data + is available to metrics via the slice_point dictionary. """ if maps is not None: if self.cache_size != 0 and len(maps) > 0: @@ -136,7 +137,8 @@ def setup_slicer(self, sim_data, maps=None): @wraps(self._slice_sim_data) def _slice_sim_data(islice): """Return indexes for relevant opsim data at slice_point - (slice_point=lon_col/lat_col value .. usually ra/dec).""" + (slice_point=lon_col/lat_col value .. usually ra/dec). + """ # Build dict for slice_point info slice_point = {"sid": islice} @@ -147,7 +149,8 @@ def _slice_sim_data(islice): indices = self.opsimtree.query_ball_point((sx, sy, sz), self.rad) if (self.use_camera) & (len(indices) > 0): - # Find the indices *of those indices* which fall in the camera footprint + # Find the indices *of those indices* + # which fall in the camera footprint camera_idx = self.camera( self.slice_points["ra"][islice], self.slice_points["dec"][islice], @@ -157,10 +160,12 @@ def _slice_sim_data(islice): ) indices = np.array(indices)[camera_idx] - # Loop through all the slice_point keys. If the first dimension of slice_point[key] has - # the same shape as the slicer, assume it is information per slice_point. - # Otherwise, pass the whole slice_point[key] information. Useful for stellar LF maps - # where we want to pass only the relevant LF and the bins that go with it. + # Loop through all the slice_point keys. + # If the first dimension of slice_point[key] has the same shape + # as the slicer, assume it is information per slice_point. + # Otherwise, pass the whole slice_point[key] information. + # Useful for stellar LF maps where we want to pass only the + # relevant LF and the bins that go with it. 
             for key in self.slice_points:
                 if len(np.shape(self.slice_points[key])) == 0:
                     keyShape = 0
@@ -175,18 +180,19 @@ def _slice_sim_data(islice):
         setattr(self, "_slice_sim_data", _slice_sim_data)

     def _setupLSSTCamera(self):
-        """If we want to include the camera chip gaps, etc"""
+        """If we want to include the camera chip gaps, etc."""
         self.camera = simsUtils.LsstCameraFootprint(
             units="radians", footprint_file=self.camera_footprint_file
         )

     def _build_tree(self, sim_dataRa, sim_dataDec, leafsize=100):
-        """Build KD tree on sim_dataRA/Dec using utility function from mafUtils.
+        """Build KD tree on sim_dataRA/Dec.

         sim_dataRA, sim_dataDec = RA and Dec values (in radians).
-        leafsize = the number of Ra/Dec pointings in each leaf node."""
+        leafsize = the number of Ra/Dec pointings in each leaf node.
+        """
         self.opsimtree = simsUtils._build_tree(sim_dataRa, sim_dataDec, leafsize)

     def _set_rad(self, radius=1.75):
-        """Set radius (in degrees) for kdtree search using utility function from mafUtils."""
+        """Set radius (in degrees) for kdtree search."""
         self.rad = simsUtils.xyz_angular_radius(radius)
diff --git a/rubin_sim/maf/slicers/healpix_comcam_slicer.py b/rubin_sim/maf/slicers/healpix_comcam_slicer.py
index 4a3ae445e..bfceabed8 100644
--- a/rubin_sim/maf/slicers/healpix_comcam_slicer.py
+++ b/rubin_sim/maf/slicers/healpix_comcam_slicer.py
@@ -3,7 +3,6 @@
 import warnings
 from functools import wraps

-import healpy as hp
 import matplotlib.path as mplPath
 import numpy as np

diff --git a/rubin_sim/maf/slicers/healpix_subset_slicer.py b/rubin_sim/maf/slicers/healpix_subset_slicer.py
index a3857117b..8ed24505c 100644
--- a/rubin_sim/maf/slicers/healpix_subset_slicer.py
+++ b/rubin_sim/maf/slicers/healpix_subset_slicer.py
@@ -2,14 +2,12 @@

 __all__ = ("HealpixSubsetSlicer",)

-import warnings
 from functools import wraps

 import healpy as hp
 import numpy as np

 import rubin_sim.utils as simsUtils
-from rubin_sim.maf.plots.spatial_plotters import HealpixHistogram, HealpixPowerSpectrum, HealpixSkyMap

 from .healpix_slicer import HealpixSlicer

diff --git a/rubin_sim/maf/slicers/hourglass_slicer.py b/rubin_sim/maf/slicers/hourglass_slicer.py
index c75c6a9bd..862f6d96e 100644
--- a/rubin_sim/maf/slicers/hourglass_slicer.py
+++ b/rubin_sim/maf/slicers/hourglass_slicer.py
@@ -1,9 +1,5 @@
 __all__ = ("HourglassSlicer",)

-import warnings
-
-import matplotlib.pyplot as plt
-import numpy as np

 from rubin_sim.maf.plots import HourglassPlot

diff --git a/rubin_sim/maf/slicers/movie_slicer.py b/rubin_sim/maf/slicers/movie_slicer.py
index b1ec6de91..cdee75158 100644
--- a/rubin_sim/maf/slicers/movie_slicer.py
+++ b/rubin_sim/maf/slicers/movie_slicer.py
@@ -251,7 +251,7 @@ def make_movie(
             ]
             print("Attempting to call ffmpeg with:")
             print(" ".join(callList))
-            p = subprocess.check_call(callList)
+            subprocess.check_call(callList)
             # make thumbnail gif
             callList = [
                 "ffmpeg",
@@ -273,4 +273,4 @@
             ]
             print("converting to animated gif with:")
             print(" ".join(callList))
-            p2 = subprocess.check_call(callList)
+            subprocess.check_call(callList)
diff --git a/rubin_sim/maf/slicers/time_interval_slicers.py b/rubin_sim/maf/slicers/time_interval_slicers.py
index 269fbdd13..f063d9770 100644
--- a/rubin_sim/maf/slicers/time_interval_slicers.py
+++ b/rubin_sim/maf/slicers/time_interval_slicers.py
@@ -8,7 +8,7 @@
     "TimeIntervalSlicer",
     "BlockIntervalSlicer",
     "VisitIntervalSlicer",
-    "SlicerNotSetup",
+    "SlicerNotSetupError",
 )

 from collections import defaultdict
@@ -22,7 +22,7 @@
 from .base_slicer import BaseSlicer


-class SlicerNotSetup(Exception):
+class SlicerNotSetupError(Exception):
     """Thrown when a slicer is not setup for the method called."""


@@ -120,7 +120,7 @@ def __eq__(self, other_slicer):
         return True

     def _slice_sim_data(self, *args, **kwargs):
-        raise SlicerNotSetup()
+        raise SlicerNotSetupError()


 class BlockIntervalSlicer(TimeIntervalSlicer):
diff --git a/rubin_sim/maf/stackers/base_stacker.py b/rubin_sim/maf/stackers/base_stacker.py
index 8caf7e8a9..24f0c8535 100644
--- a/rubin_sim/maf/stackers/base_stacker.py
+++ b/rubin_sim/maf/stackers/base_stacker.py
@@ -4,7 +4,6 @@
 import warnings

 import numpy as np
-from six import with_metaclass


 class StackerRegistry(type):
@@ -52,7 +51,7 @@ def help(cls, doc=False):
         print("  Default columns required: ", ",".join(stacker.cols_req))


-class BaseStacker(with_metaclass(StackerRegistry, object)):
+class BaseStacker(metaclass=StackerRegistry):
     """Base MAF Stacker: add columns generated at run-time to the simdata array."""

     # List of the names of the columns generated by the Stacker.
diff --git a/rubin_sim/moving_objects/ooephemerides.py b/rubin_sim/moving_objects/ooephemerides.py
index b01fa773f..0f375e4a1 100644
--- a/rubin_sim/moving_objects/ooephemerides.py
+++ b/rubin_sim/moving_objects/ooephemerides.py
@@ -24,6 +24,7 @@ def get_oorb_data_dir():
             data_path = os.path.join(conda_dir, "share/openorb")
             if not os.path.isdir(data_path):
                 data_path = None
+    os.environ["OORB_DATA"] = data_path
     if data_path is None:
         warnings.warn(
             "Failed to find path for oorb data files. "
@@ -61,7 +62,6 @@ def __init__(self, ephfile=None):

         # Set up oorb. Call this once.
         if ephfile is None:
-            # just making a copy on our own so we don't have to chase down oorb install dir
             ephfile = os.path.join(get_oorb_data_dir(), "de430.dat")
         self.ephfile = ephfile
         self._init_oorb()
diff --git a/rubin_sim/phot_utils/bandpass.py b/rubin_sim/phot_utils/bandpass.py
index ab52b855f..9cea87e98 100644
--- a/rubin_sim/phot_utils/bandpass.py
+++ b/rubin_sim/phot_utils/bandpass.py
@@ -44,8 +44,9 @@ class Bandpass:
     sb : `np.ndarray`, (N,)
         Throughput array (fraction, 0-1).
     sampling_warning : `float`
-        If wavelength sampling lower than this, throw a warning because it might not
-        work well with Sed (nm).
+        If wavelength sampling is lower than this,
+        throw a warning because it might not provide accurate magnitudes
+        due to how magnitudes are calculated in Sed (nm).
     """

     def __init__(self, wavelen=None, sb=None, sampling_warning=0.2):
@@ -103,9 +104,9 @@ def set_bandpass(self, wavelen, sb):

     def imsim_bandpass(self, imsimwavelen=500.0, wavelen_min=300, wavelen_max=1150, wavelen_step=0.1):
         """
-        Populate bandpass data with sb=0 everywhere except sb=1 at imsimwavelen.
+        Populate bandpass data with sb=0 everywhere except at imsimwavelen.

-        Sets wavelen/sb, with grid min/max/step as Parameters. Does NOT set phi.
+        Sets wavelen/sb, with grid min/max/step as Parameters.
         """
         # Set up arrays.
         self.wavelen = np.arange(
@@ -132,7 +133,8 @@ def read_throughput(self, filename):
         self.phi = None
         self.sb = None
         # Check for filename error.
-        # If given list of filenames, pass to (and return from) read_throughputList.
+        # If given list of filenames,
+        # pass to (and return from) read_throughputList.
         if isinstance(filename, list):
             warnings.warn(
                 "Was given list of files, instead of a single file. Using read_throughputList instead"
@@ -152,7 +154,7 @@ def read_throughput(self, filename):
                 f = gzip.open(filename + ".gz", "rt")
             except IOError:
                 raise IOError("The throughput file %s does not exist" % (filename))
-        # The throughput file should have wavelength(A), throughput(Sb) as first two columns.
+        # The throughput file should have [wavelength(A), throughput(Sb)]
         wavelen = []
         sb = []
         for line in f:
@@ -170,7 +172,8 @@ def read_throughput(self, filename):
         # Set up wavelen/sb.
         self.wavelen = np.array(wavelen, dtype="float")
         self.sb = np.array(sb, dtype="float")
-        # Check that wavelength is monotonic increasing and non-repeating in wavelength. (Sort on wavelength).
+        # Check that wavelength is monotonic increasing and
+        # non-repeating in wavelength. (Sort on wavelength).
         if len(self.wavelen) != len(np.unique(self.wavelen)):
             raise ValueError("The wavelength values in file %s are non-unique." % (filename))
         # Sort values.
@@ -197,15 +200,19 @@ def read_throughput_list(
         wavelen_step=0.1,
     ):
         """
-        Populate bandpass data by reading from a series of files with wavelen/Sb data.
+        Populate bandpass data by reading from a series of files
+        containing wavelen/Sb data.

-        Multiplies throughputs (sb) from each file to give a final bandpass throughput.
-        Sets wavelen/sb, with grid min/max/step as Parameters. Does NOT set phi.
+        Multiplies throughputs (sb) from each file to give a final
+        bandpass throughput.
+        Sets wavelen/sb, with grid min/max/step as Parameters.
+        Does NOT set phi.
         """
         # ComponentList = names of files in that directory.
-        # A typical component list of all files to build final component list, including filter, might be:
-        # component_list=['detector.dat', 'lens1.dat', 'lens2.dat', 'lens3.dat',
-        #   'm1.dat', 'm2.dat', 'm3.dat', 'atmos_std.dat', 'ideal_g.dat']
+        # A typical component list of all files to build final component list,
+        # including filter, might be:
+        # ['detector.dat', 'lens1.dat', 'lens2.dat', 'lens3.dat',
+        #  'm1.dat', 'm2.dat', 'm3.dat', 'atmos_std.dat', 'ideal_g.dat']
         #
         # Set up wavelen/sb on grid.
         self.wavelen = np.arange(
@@ -242,7 +249,8 @@ def get_bandpass(self):

     def check_use_self(self, wavelen, sb):
         """
-        Simple utility to check if should be using self.wavelen/sb or passed arrays.
+        Simple utility to check if should be using self.wavelen/sb
+        or passed arrays.

        Useful for other methods in this class.
        Also does data integrity check on wavelen/sb if not self.
@@ -272,16 +280,17 @@ def resample_bandpass(
         wavelen_step=0.1,
     ):
         """
-        Resamples wavelen/sb (or self.wavelen/sb) onto grid defined by min/max/step.
+        Resamples wavelen/sb (or self.wavelen/sb) onto grid
+        defined by min/max/step.

-        Either returns wavelen/sb (if given those arrays) or updates wavelen / Sb in self.
-        If updating self, resets phi to None.
+        Either returns wavelen/sb (if given those arrays) or
+        updates wavelen / Sb in self. If updating self, resets phi to None.
         """
         # Check wavelength limits.
         wavelen_min = wavelen_min
         wavelen_max = wavelen_max
         wavelen_step = wavelen_step
-        # Is method acting on self.wavelen/sb or passed in wavelen/sb? Sort it out.
+        # Is method acting on self.wavelen/sb or passed in wavelen/sb?
         update_self = (wavelen is None) & (sb is None)
         if update_self:
             wavelen = self.wavelen
@@ -291,7 +300,8 @@ def resample_bandpass(
             raise Exception("No overlap between known wavelength range and desired wavelength range.")
         # Set up gridded wavelength.
         wavelen_grid = np.arange(wavelen_min, wavelen_max + wavelen_step / 2.0, wavelen_step, dtype="float")
-        # Do the interpolation of wavelen/sb onto the grid. (note wavelen/sb type failures will die here).
+        # Do the interpolation of wavelen/sb onto the grid.
+        # (note wavelen/sb type failures will die here).
         f = interpolate.interp1d(wavelen, sb, fill_value=0, bounds_error=False)
         sb_grid = f(wavelen_grid)
         # Update self values if necessary.
@@ -350,16 +360,18 @@ def calc_zp_t(self, photometric_parameters):
             Details about the photometric response of the telescope.
             Defaults to LSST values.
         """
-        # ZP_t is the magnitude of a (F_nu flat) source which produced 1 count per second.
+        # ZP_t is the magnitude of a (F_nu flat) source which produced
+        # 1 count per second.
         # This is often also known as the 'instrumental zeropoint'.
         # Set gain to 1 if want to explore photo-electrons rather than adu.
-        # The typical LSST exposure time is 15s and this is default here, but typical zp_t definition is for 1s.
+        # The typical LSST exposure time is 15s and this is default here,
+        # but typical zp_t definition is for 1s.
         # SED class uses flambda in ergs/cm^2/s/nm, so need effarea in cm^2.
         #
         # Check dlambda value for integral.
         self.wavelen[1] - self.wavelen[0]
         # Set up flat source of arbitrary brightness,
-        # but where the units of fnu are Jansky (for AB mag zeropoint = -8.9).
+        # but where the units of fnu are Jansky (for AB mag zeropoint = -8.9).
         flatsource = Sed()
         flatsource.set_flat_sed()
         adu = flatsource.calc_adu(self, phot_params=photometric_parameters)
@@ -387,7 +399,8 @@ def write_throughput(self, filename, print_header=None, write_phi=False):
         """
         Write throughput to a file.
         """
-        # Useful if you build a throughput up from components and need to record the combined value.
+        # Useful if you build a throughput up from components
+        # and need to record the combined value.
         f = open(filename, "w")
         # Print header.
         if print_header is not None:
diff --git a/setup.cfg b/setup.cfg
index fe667c22f..8b1378917 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,8 +1 @@
-[flake8]
-max-line-length = 110
-max-doc-length = 79
-ignore = E133, E203, E226, E228, N802, N803, N806, N812, N813, N815, N816, W503
-exclude =
-  doc,
-  __init__.py,
-  tests/.tests
+
diff --git a/test-requirements.txt b/test-requirements.txt
index 611612aa8..fd4670329 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,6 +1,5 @@
 pytest
-pytest-flake8
 pytest-cov
 pytest-black
-black>=22.3.0
-flake8
+black
+ruff
diff --git a/tests/maf/test_batchcommon.py b/tests/maf/test_batchcommon.py
index 3523ef358..5d63c3d01 100644
--- a/tests/maf/test_batchcommon.py
+++ b/tests/maf/test_batchcommon.py
@@ -8,9 +8,9 @@ def test_col_map(self):
         colmap = batches.col_map_dict("opsimv4")
         self.assertEqual(colmap["raDecDeg"], True)
         self.assertEqual(colmap["ra"], "fieldRA")
-        colmap = batches.get_col_map("_temp")
+        colmap = batches.col_map_dict("fbs")
         self.assertEqual(colmap["raDecDeg"], True)
-        self.assertEqual(colmap["ra"], "fieldRA")
+        self.assertEqual(colmap["skyBrightness"], "skyBrightness")

     def test_filter_list(self):
         filterlist, colors, orders, sqls, info_label = batches.common.filter_list(all=False, extra_sql=None)
diff --git a/tests/moving_objects/test_ephemerides.py b/tests/moving_objects/test_ephemerides.py
index 7d738caed..a7c38f728 100644
--- a/tests/moving_objects/test_ephemerides.py
+++ b/tests/moving_objects/test_ephemerides.py
@@ -26,6 +26,11 @@ def tear_down(self):
         del self.orbits_kep
         del self.ephems

+    def test_which_pyoorb(self):
+        import pyoorb
+
+        print(pyoorb.__file__)
+
     def test_set_orbits(self):
         # Test that we can set orbits.
         self.ephems.set_orbits(self.orbits)
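
A note on the ooephemerides change above: get_oorb_data_dir() now exports the resolved directory through the OORB_DATA environment variable, so pyoorb can locate its data files without each caller chasing down the install directory (the new test_which_pyoorb test above simply reports which pyoorb build is being picked up). A minimal usage sketch, assuming get_oorb_data_dir is imported directly from the module; the None-guard is illustrative and not part of the diff:

import os

from rubin_sim.moving_objects.ooephemerides import get_oorb_data_dir

# Resolve the openorb data directory; the helper now also exports it
# via the OORB_DATA environment variable for pyoorb to pick up.
data_dir = get_oorb_data_dir()

if data_dir is None:
    # Illustrative guard (not part of the diff): fail loudly here rather
    # than letting ephemeris generation fail later with a less obvious error.
    raise RuntimeError("Could not locate the oorb data files; set OORB_DATA manually.")

# de430.dat is the planetary ephemeris file referenced in ooephemerides.py.
ephfile = os.path.join(data_dir, "de430.dat")
print("OORB_DATA =", os.environ.get("OORB_DATA"))
print("ephemeris file:", ephfile)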
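
The trimmed _build_tree and _set_rad docstrings in the spatial slicer diff still describe the geometry the slicers rely on: pointings are converted to unit vectors on the sphere and stored in a kdtree, and the angular search radius (1.75 degrees by default) is converted to the equivalent 3-d chord length before query_ball_point is called. A self-contained sketch of that idea in plain numpy/scipy, independent of the simsUtils helpers (the function names here are illustrative only):

import numpy as np
from scipy.spatial import cKDTree


def radec_to_xyz(ra, dec):
    """Convert RA/Dec in radians to unit vectors on the sphere."""
    return np.array([np.cos(dec) * np.cos(ra),
                     np.cos(dec) * np.sin(ra),
                     np.sin(dec)]).T


def angular_radius_to_chord(radius_deg):
    """Chord length subtending an angular radius, for a kdtree built on xyz."""
    theta = np.radians(radius_deg)
    return 2.0 * np.sin(theta / 2.0)


# Fake "opsim" pointings (radians) and a single slice_point to query.
rng = np.random.default_rng(42)
pointing_ra = rng.uniform(0, 2 * np.pi, size=1000)
pointing_dec = rng.uniform(-np.pi / 2, 0, size=1000)
tree = cKDTree(radec_to_xyz(pointing_ra, pointing_dec), leafsize=100)

slice_ra, slice_dec = np.radians(30.0), np.radians(-30.0)
rad = angular_radius_to_chord(1.75)  # 1.75 deg, the default radius above
slice_xyz = radec_to_xyz(np.array([slice_ra]), np.array([slice_dec]))[0]
indices = tree.query_ball_point(slice_xyz, rad)
print(f"{len(indices)} pointings within 1.75 deg of the slice_point")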
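
The Bandpass docstring cleanups above also outline the intended workflow: read a throughput file, resample it onto a regular wavelength grid, and compute the instrumental zeropoint zp_t for a flat-F_nu source. A rough usage sketch, assuming Bandpass and PhotometricParameters are both importable from rubin_sim.phot_utils and using a hypothetical throughput file name:

from rubin_sim.phot_utils import Bandpass, PhotometricParameters

# Hypothetical throughput file with wavelength(A) and Sb as the first two columns.
bp = Bandpass(sampling_warning=0.2)
bp.read_throughput("total_r.dat")

# Resample onto a regular grid; when updating self, phi is reset to None.
bp.resample_bandpass(wavelen_min=300, wavelen_max=1150, wavelen_step=0.1)

# zp_t: magnitude of a flat-F_nu source producing 1 count per second,
# using the default (LSST) photometric parameters.
phot_params = PhotometricParameters()
print("zp_t =", bp.calc_zp_t(phot_params))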