From 597f246f540151852c653dda5ebcae8537825415 Mon Sep 17 00:00:00 2001
From: Joachim Moeyens
Date: Wed, 27 Nov 2024 13:18:40 -0800
Subject: [PATCH] Swap to pdm (#166)

* Swap to pdm
* Use quivr's drop_duplicates
* Upgrade adam_core >= 0.2.5
---
 .github/workflows/conda-build-lint-test.yml | 28 ++--
 .github/workflows/conda-publish.yml | 34 -----
 .github/workflows/docker-build-lint-test.yml | 9 +-
 .../workflows/pip-build-integration-test.yml | 31 +++--
 .../pip-build-lint-test-coverage.yml | 42 +++---
 .gitignore | 8 +-
 .pre-commit-config.yaml | 30 -----
 Dockerfile | 56 ++------
 MANIFEST.in | 2 +-
 pyproject.toml | 122 +++++++++++++++++-
 recipe/conda_build_config.yaml | 4 -
 recipe/meta.yaml | 49 -------
 requirements.txt | 24 ----
 setup.cfg | 93 -------------
 {thor => src/thor}/__init__.py | 19 ++-
 {thor => src/thor}/checkpointing.py | 46 ++-----
 {thor => src/thor}/clusters.py | 73 +++--------
 {thor => src/thor}/config.py | 4 +-
 {thor => src/thor}/main.py | 52 ++------
 {thor => src/thor}/observations/__init__.py | 2 +-
 {thor => src/thor}/observations/filters.py | 53 ++------
 .../thor}/observations/observations.py | 30 +----
 {thor => src/thor}/observations/photometry.py | 0
 {thor => src/thor}/observations/states.py | 17 +--
 .../thor}/observations/tests/__init__.py | 0
 .../thor/observations/tests/conftest.py | 57 +-------
 src/thor/observations/tests/test_filters.py | 52 ++++++++
 .../observations/tests/test_observations.py | 17 +--
 .../thor}/observations/tests/test_states.py | 3 +-
 {thor => src/thor}/observations/utils.py | 0
 {thor => src/thor}/orbit.py | 50 +++----
 src/thor/orbit_determination/__init__.py | 3 +
 .../orbit_determination/fitted_orbits.py | 13 +-
 .../thor}/orbit_determination/outliers.py | 4 +-
 .../orbit_determination/tests/__init__.py | 0
 .../tests/test_fitted_orbits.py | 24 +---
 .../tests/test_outliers.py | 8 +-
 {thor => src/thor}/orbit_selection.py | 39 ++----
 {thor => src/thor}/orbits/__init__.py | 7 +-
 {thor => src/thor}/orbits/attribution.py | 94 ++++----------
 {thor => src/thor}/orbits/gauss.py | 32 ++---
 {thor => src/thor}/orbits/gibbs.py | 4 +-
 {thor => src/thor}/orbits/herrick_gibbs.py | 0
 {thor => src/thor}/orbits/iod.py | 91 ++++---------
 {thor => src/thor}/orbits/iterators.py | 6 +-
 {thor => src/thor}/orbits/od.py | 122 +++++-------------
 {thor => src/thor}/orbits/state_transition.py | 20 ++-
 {thor => src/thor}/orbits/tests/__init__.py | 0
 .../thor}/orbits/tests/test_attribution.py | 4 +-
 src/thor/projections/__init__.py | 4 +
 {thor => src/thor}/projections/covariances.py | 2 -
 {thor => src/thor}/projections/gnomonic.py | 20 +--
 {thor => src/thor}/projections/transforms.py | 16 +--
 {thor => src/thor}/range_and_transform.py | 25 ++--
 {thor => src/thor}/tests/__init__.py | 0
 {thor => src/thor}/tests/memory/__init__.py | 0
 .../fixtures/896831/cluster_members.parquet | Bin
 .../memory/fixtures/896831/clusters.parquet | Bin
 .../896831/filtered_observations.parquet | Bin
 .../fixtures/896831/iod_orbit_members.parquet | Bin
 .../memory/fixtures/896831/iod_orbits.parquet | Bin
 .../fixtures/896831/od_orbit_members.parquet | Bin
 .../memory/fixtures/896831/od_orbits.parquet | Bin
 .../896831/recovered_orbit_members.parquet | Bin
 .../fixtures/896831/recovered_orbits.parquet | Bin
 .../896831/transformed_detections.parquet | Bin
 .../fixtures/input_observations.parquet | Bin
 .../fixtures/inputs/896831/test_orbit.parquet | Bin
 .../tests/memory/fixtures/inputs/config.json | 0
 .../fixtures/inputs/observations.feather | Bin
 .../fixtures/inputs/test_orbits.parquet | Bin
.../thor}/tests/memory/test_memory.py | 67 +++------- {thor => src/thor}/tests/test_clusters.py | 11 +- {thor => src/thor}/tests/test_config.py | 0 {thor => src/thor}/tests/test_main.py | 70 ++++------ {thor => src/thor}/tests/test_orbit.py | 1 - .../thor}/tests/test_orbit_selection.py | 0 src/thor/utils/__init__.py | 4 + {thor => src/thor}/utils/ades.py | 9 +- {thor => src/thor}/utils/linkages.py | 13 +- {thor => src/thor}/utils/logging.py | 6 +- {thor => src/thor}/utils/tests/__init__.py | 0 .../thor}/utils/tests/test_linkages.py | 0 thor/orbit_determination/__init__.py | 7 - thor/projections/__init__.py | 4 - thor/utils/__init__.py | 3 - thor/utils/quivr.py | 61 --------- 87 files changed, 552 insertions(+), 1249 deletions(-) delete mode 100644 .github/workflows/conda-publish.yml delete mode 100644 .pre-commit-config.yaml delete mode 100644 recipe/conda_build_config.yaml delete mode 100644 recipe/meta.yaml delete mode 100644 requirements.txt delete mode 100644 setup.cfg rename {thor => src/thor}/__init__.py (66%) rename {thor => src/thor}/checkpointing.py (88%) rename {thor => src/thor}/clusters.py (93%) rename {thor => src/thor}/config.py (96%) rename {thor => src/thor}/main.py (91%) rename {thor => src/thor}/observations/__init__.py (68%) rename {thor => src/thor}/observations/filters.py (86%) rename {thor => src/thor}/observations/observations.py (95%) rename {thor => src/thor}/observations/photometry.py (100%) rename {thor => src/thor}/observations/states.py (87%) rename {thor => src/thor}/observations/tests/__init__.py (100%) rename thor/observations/tests/test_filters.py => src/thor/observations/tests/conftest.py (60%) create mode 100644 src/thor/observations/tests/test_filters.py rename {thor => src/thor}/observations/tests/test_observations.py (86%) rename {thor => src/thor}/observations/tests/test_states.py (90%) rename {thor => src/thor}/observations/utils.py (100%) rename {thor => src/thor}/orbit.py (92%) create mode 100644 src/thor/orbit_determination/__init__.py rename {thor => src/thor}/orbit_determination/fitted_orbits.py (94%) rename {thor => src/thor}/orbit_determination/outliers.py (92%) rename {thor => src/thor}/orbit_determination/tests/__init__.py (100%) rename {thor => src/thor}/orbit_determination/tests/test_fitted_orbits.py (88%) rename {thor => src/thor}/orbit_determination/tests/test_outliers.py (83%) rename {thor => src/thor}/orbit_selection.py (92%) rename {thor => src/thor}/orbits/__init__.py (88%) rename {thor => src/thor}/orbits/attribution.py (90%) rename {thor => src/thor}/orbits/gauss.py (92%) rename {thor => src/thor}/orbits/gibbs.py (94%) rename {thor => src/thor}/orbits/herrick_gibbs.py (100%) rename {thor => src/thor}/orbits/iod.py (91%) rename {thor => src/thor}/orbits/iterators.py (96%) rename {thor => src/thor}/orbits/od.py (87%) rename {thor => src/thor}/orbits/state_transition.py (93%) rename {thor => src/thor}/orbits/tests/__init__.py (100%) rename {thor => src/thor}/orbits/tests/test_attribution.py (96%) create mode 100644 src/thor/projections/__init__.py rename {thor => src/thor}/projections/covariances.py (99%) rename {thor => src/thor}/projections/gnomonic.py (92%) rename {thor => src/thor}/projections/transforms.py (93%) rename {thor => src/thor}/range_and_transform.py (91%) rename {thor => src/thor}/tests/__init__.py (100%) rename {thor => src/thor}/tests/memory/__init__.py (100%) rename {thor => src/thor}/tests/memory/fixtures/896831/cluster_members.parquet (100%) rename {thor => src/thor}/tests/memory/fixtures/896831/clusters.parquet 
(100%) rename {thor => src/thor}/tests/memory/fixtures/896831/filtered_observations.parquet (100%) rename {thor => src/thor}/tests/memory/fixtures/896831/iod_orbit_members.parquet (100%) rename {thor => src/thor}/tests/memory/fixtures/896831/iod_orbits.parquet (100%) rename {thor => src/thor}/tests/memory/fixtures/896831/od_orbit_members.parquet (100%) rename {thor => src/thor}/tests/memory/fixtures/896831/od_orbits.parquet (100%) rename {thor => src/thor}/tests/memory/fixtures/896831/recovered_orbit_members.parquet (100%) rename {thor => src/thor}/tests/memory/fixtures/896831/recovered_orbits.parquet (100%) rename {thor => src/thor}/tests/memory/fixtures/896831/transformed_detections.parquet (100%) rename {thor => src/thor}/tests/memory/fixtures/input_observations.parquet (100%) rename {thor => src/thor}/tests/memory/fixtures/inputs/896831/test_orbit.parquet (100%) rename {thor => src/thor}/tests/memory/fixtures/inputs/config.json (100%) rename {thor => src/thor}/tests/memory/fixtures/inputs/observations.feather (100%) rename {thor => src/thor}/tests/memory/fixtures/inputs/test_orbits.parquet (100%) rename {thor => src/thor}/tests/memory/test_memory.py (83%) rename {thor => src/thor}/tests/test_clusters.py (97%) rename {thor => src/thor}/tests/test_config.py (100%) rename {thor => src/thor}/tests/test_main.py (86%) rename {thor => src/thor}/tests/test_orbit.py (99%) rename {thor => src/thor}/tests/test_orbit_selection.py (100%) create mode 100644 src/thor/utils/__init__.py rename {thor => src/thor}/utils/ades.py (97%) rename {thor => src/thor}/utils/linkages.py (86%) rename {thor => src/thor}/utils/logging.py (90%) rename {thor => src/thor}/utils/tests/__init__.py (100%) rename {thor => src/thor}/utils/tests/test_linkages.py (100%) delete mode 100644 thor/orbit_determination/__init__.py delete mode 100644 thor/projections/__init__.py delete mode 100644 thor/utils/__init__.py delete mode 100644 thor/utils/quivr.py diff --git a/.github/workflows/conda-build-lint-test.yml b/.github/workflows/conda-build-lint-test.yml index 616af9a3..8a32099c 100644 --- a/.github/workflows/conda-build-lint-test.yml +++ b/.github/workflows/conda-build-lint-test.yml @@ -16,6 +16,8 @@ jobs: defaults: run: shell: bash -l {0} + env: + OORB_DATA: /tmp/oorb/data steps: - name: Checkout git repo @@ -28,24 +30,16 @@ jobs: activate-environment: "thor" auto-update-conda: true python-version: ${{ matrix.python-version }} - - name: Install dependencies (excluding adam_core and quivr since they are not available via conda) + - name: Install openorb dependencies run: | - tail +3 requirements.txt > requirements_conda.txt - conda install -c defaults -c conda-forge -c astropy -c moeyensj --file requirements_conda.txt --yes - - name: Update OBSCODE.dat - run: | - cd $CONDA_PREFIX/share/oorb - curl https://www.minorplanetcenter.net/iau/lists/ObsCodes.html -o ObsCodes.html - sed -e '2d' ObsCodes.html | grep -v "<" > OBSCODE.dat - rm -f ObsCodes.html - cp OBSCODE.dat $CONDA_PREFIX/share/openorb/OBSCODE.dat - - name: Install adam_core and quivr - run: | - pip install adam-core@git+https://github.com/B612-Asteroid-Institute/adam_core@main - pip install quivr@git+https://github.com/moeyensj/quivr@concatenate-empty-attributes + sudo apt-get update -y + sudo apt-get install -y gfortran liblapack-dev + wget -P /tmp/ https://storage.googleapis.com/oorb-data/oorb_data.tar.gz + mkdir -p /tmp/oorb/ + tar -xvf /tmp/oorb_data.tar.gz -C /tmp/oorb/ - name: Build and install - run: pip install . 
--no-deps + run: pip install .[dev] - name: Lint - run: pre-commit run --all-files + run: pdm lint - name: Test - run: pytest . --cov + run: pdm test diff --git a/.github/workflows/conda-publish.yml b/.github/workflows/conda-publish.yml deleted file mode 100644 index 453e379c..00000000 --- a/.github/workflows/conda-publish.yml +++ /dev/null @@ -1,34 +0,0 @@ -name: conda - Publish - -on: - push: - tags: - - 'v*' - -jobs: - deploy: - - runs-on: ubuntu-latest - container: - image: continuumio/miniconda3 - - steps: - - uses: actions/checkout@v4 - - name: Install dependencies - run: | - conda install -y anaconda-client conda-build - conda install -y setuptools setuptools_scm - - name: Update recipe with version variable - run: | - VERSION=$(python setup.py --version) - LINE_STR='{% set version = "'${VERSION}'" %}' - RECIPE_PATH='recipe/meta.yaml' - echo $LINE_STR | cat - $RECIPE_PATH > temp && mv temp $RECIPE_PATH - - - name: Build and publish - env: - CONDA_USERNAME: ${{ secrets.CONDA_USERNAME }} - CONDA_TOKEN: ${{ secrets.CONDA_TOKEN }} - run: | - conda config --set anaconda_upload yes - conda build -c defaults -c conda-forge -c astropy -c moeyensj --token $CONDA_TOKEN --user $CONDA_USERNAME . diff --git a/.github/workflows/docker-build-lint-test.yml b/.github/workflows/docker-build-lint-test.yml index 7b0fb8a6..16e0b170 100644 --- a/.github/workflows/docker-build-lint-test.yml +++ b/.github/workflows/docker-build-lint-test.yml @@ -1,4 +1,3 @@ - name: docker - Build Lint and Test on: @@ -12,16 +11,18 @@ jobs: runs-on: ubuntu-latest env: IMAGE_TAG: ${{ github.sha }} + OORB_DATA: /tmp/oorb/data + steps: - name: Checkout git repo uses: actions/checkout@v4 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: install: true - name: Build run: docker build --load -t thor:$IMAGE_TAG . - name: Lint - run: docker run -i thor:$IMAGE_TAG pre-commit run --all-files + run: docker run -i thor:$IMAGE_TAG pdm lint - name: Test - run: docker run -i thor:$IMAGE_TAG pytest . 
--cov + run: docker run -i thor:$IMAGE_TAG pdm test diff --git a/.github/workflows/pip-build-integration-test.yml b/.github/workflows/pip-build-integration-test.yml index 800d88bd..4ca9d001 100644 --- a/.github/workflows/pip-build-integration-test.yml +++ b/.github/workflows/pip-build-integration-test.yml @@ -15,29 +15,28 @@ jobs: defaults: run: shell: bash -l {0} + env: + OORB_DATA: /tmp/oorb/data steps: - name: Checkout git repo uses: actions/checkout@v4 - name: Get git tags run: git fetch --prune --unshallow --tags - - name: Set up miniconda - uses: conda-incubator/setup-miniconda@v3 - with: - activate-environment: "thor" - auto-update-conda: true - python-version: ${{ matrix.python-version }} - - name: Install openorb using conda - run: conda install -c defaults -c conda-forge openorb --yes - - name: Update OBSCODE.dat - run: | - cd $CONDA_PREFIX/share/oorb && ./updateOBSCODE - cp OBSCODE.dat $CONDA_PREFIX/share/openorb/OBSCODE.dat - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} cache: 'pip' # caching pip dependencies - - name: Build and install - run: pip install .[tests] + - name: Install openorb dependencies + run: | + sudo apt-get update -y + sudo apt-get install -y gfortran liblapack-dev + wget -P /tmp/ https://storage.googleapis.com/oorb-data/oorb_data.tar.gz + mkdir -p /tmp/oorb/ + tar -xvf /tmp/oorb_data.tar.gz -C /tmp/oorb/ + - name: Install Dependencies + run: | + pip install pip --upgrade + pip install ".[dev]" - name: Integration Tests - run: pytest . -m "integration" + run: pdm test -k integration diff --git a/.github/workflows/pip-build-lint-test-coverage.yml b/.github/workflows/pip-build-lint-test-coverage.yml index 0765369b..e5d969d7 100644 --- a/.github/workflows/pip-build-lint-test-coverage.yml +++ b/.github/workflows/pip-build-lint-test-coverage.yml @@ -16,38 +16,36 @@ jobs: defaults: run: shell: bash -l {0} + env: + OORB_DATA: /tmp/oorb/data steps: - name: Checkout git repo uses: actions/checkout@v4 - name: Get git tags run: git fetch --prune --unshallow --tags - - name: Set up miniconda - uses: conda-incubator/setup-miniconda@v3 - with: - activate-environment: "thor" - auto-update-conda: true - python-version: ${{ matrix.python-version }} - - name: Install openorb using conda - run: conda install -c defaults -c conda-forge openorb --yes - - name: Update OBSCODE.dat - run: | - cd $CONDA_PREFIX/share/oorb - curl https://www.minorplanetcenter.net/iau/lists/ObsCodes.html -o ObsCodes.html - sed -e '2d' ObsCodes.html | grep -v "<" > OBSCODE.dat - rm -f ObsCodes.html - cp OBSCODE.dat $CONDA_PREFIX/share/openorb/OBSCODE.dat - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} cache: 'pip' # caching pip dependencies - - name: Build and install - run: pip install .[tests] + - name: Install openorb dependencies + run: | + sudo apt-get update -y + sudo apt-get install -y gfortran liblapack-dev + wget -P /tmp/ https://storage.googleapis.com/oorb-data/oorb_data.tar.gz + mkdir -p /tmp/oorb/ + tar -xvf /tmp/oorb_data.tar.gz -C /tmp/oorb/ + - name: Install Dependencies + run: | + pip install pip --upgrade + pip install ".[dev]" - name: Lint - run: pre-commit run --all-files - - name: Test - run: pytest . 
--cov --cov-report xml - - name: Coverage + run: pdm run lint + # - name: Type check + # run: pdm run typecheck + - name: Test with coverage + run: pdm run coverage + - name: Coverage Report uses: coverallsapp/github-action@v2.0.0 with: github-token: ${{ secrets.COVERALLS_TOKEN }} diff --git a/.gitignore b/.gitignore index 743a45dc..20865673 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,5 @@ -# Data files -thor/data -thor/data/log.yaml -thor/version.py +# Version file: set by pdm-scm +src/thor/_version.py # Byte-compiled / optimized / DLL files __pycache__/ @@ -108,7 +106,7 @@ ipython_config.py # pdm # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. -#pdm.lock +pdm.lock # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it # in version control. # https://pdm.fming.dev/#use-with-ide diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index dcaaaf7a..00000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,30 +0,0 @@ -# See https://pre-commit.com for more information -# See https://pre-commit.com/hooks.html for more hooks -repos: - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v3.2.0 - hooks: - - id: trailing-whitespace - args: [--markdown-linebreak-ext=md] - - id: end-of-file-fixer - - id: check-yaml - - id: check-added-large-files - - repo: https://github.com/PyCQA/isort - rev: 5.12.0 - hooks: - - id: isort - additional_dependencies: - - toml - - repo: https://github.com/psf/black - rev: 22.10.0 - hooks: - - id: black - - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.1.1 - hooks: - - id: mypy - exclude: bench/ - additional_dependencies: - - "types-pyyaml" - - "types-requests" - - "types-python-dateutil" diff --git a/Dockerfile b/Dockerfile index bff93de4..7fbf7568 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,56 +1,22 @@ -FROM ubuntu:latest +FROM python:3.11 # Set shell to bash SHELL ["/bin/bash", "-c"] +CMD ["/bin/bash"] # Update system dependencies RUN apt-get update \ && apt-get upgrade -y \ - && apt-get install -y curl gfortran git liblapack-dev make pip python3.11 python-is-python3 unzip wget + && apt-get install -y pip git wget tar gfortran liblapack-dev -# Upgrade pip to the latest version and install pre-commit -RUN pip install --upgrade pip pre-commit -RUN pip install --upgrade cython==0.29.36 setuptools setuptools_scm -RUN chmod 777 /opt - -# Install numpy -RUN git clone https://github.com/numpy/numpy.git /opt/numpy -RUN cd /opt/numpy && git checkout v1.24.4 && git submodule update --init -RUN cd /opt/numpy && python3 setup.py build --cpu-baseline=native install - - -# Install openorb -# TODO: We need a more robust way to be appropriately natively compiled pyoorb installed -# including data file generation -RUN git clone https://github.com/B612-Asteroid-Institute/oorb.git /opt/oorb -RUN cd /opt/oorb && git checkout fork -RUN cd /opt/oorb && ./configure gfortran opt --with-pyoorb --with-f2py=/usr/local/bin/f2py --with-python=python3 -# Add '-march=native' to compiler options by running a sed -# script directly on the Makefile.includse file. This is a -# hack to get around the fact that the configure script -# doesn't support this option. 
-RUN sed -i 's/FCOPTIONS = .*/FCOPTIONS = $(FCOPTIONS_OPT_GFORTRAN) -march=native/g' /opt/oorb/Makefile.include -# --no-build-isolation is needed because we need to ensure we use -# the same version of numpy as the one we compiled previously so -# that it matches the version of f2py we passed in to ./configure. -RUN pip install --no-build-isolation -v /opt/oorb - -# Generate the data files -RUN cd /opt/oorb && make ephem -RUN cd /opt/oorb/data && ./getBC430 -RUN cd /opt/oorb/data && ./updateOBSCODE -ENV OORBROOT=/opt/oorb -ENV OORB_DATA=/opt/oorb/data - -# Install pre-commit hooks (before THOR is installed to cache this step) -RUN mkdir /code/ -COPY .pre-commit-config.yaml /code/ -WORKDIR /code/ -RUN git init . \ - && git add .pre-commit-config.yaml \ - && pre-commit install-hooks \ - && rm -rf .git +# Install openorb dependencies +RUN wget -P /tmp/ https://storage.googleapis.com/oorb-data/oorb_data.tar.gz \ + && mkdir -p /tmp/oorb/ \ + && tar -xvf /tmp/oorb_data.tar.gz -C /tmp/oorb/ +ENV OORB_DATA=/tmp/oorb/data/ # Install THOR +RUN mkdir -p /code/ +WORKDIR /code/ ADD . /code/ -RUN SETUPTOOLS_SCM_PRETEND_VERSION=1 pip install -e .[tests,dev] +RUN pip install -e .[dev] diff --git a/MANIFEST.in b/MANIFEST.in index 6e34889c..9c436576 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,4 @@ -include thor/*.py +include src/*.py include *.md include *.toml include *.yaml diff --git a/pyproject.toml b/pyproject.toml index 90ba84a4..e4da2c79 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,119 @@ +[project] +name = "thor" +dynamic = ["version"] +authors = [ + { name = "Joachim Moeyens", email = "moeyensj@uw.edu" }, + { name = "Mario Juric", email = "mjuric@astro.washington.edu" }, + { name = "Spencer Nelson", email = "spencer@b612foundation.org" }, + { name = "Alec Koumjian", email = "alec@b612foundation.org" }, +] +description = "Tracklet-less Heliocentric Orbit Recovery" +readme = "README.md" +license = { file = "LICENSE.md" } +requires-python = ">=3.10" +classifiers = [ + "Development Status :: 3 - Alpha", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: BSD License", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Topic :: Scientific/Engineering :: Astronomy", + "Topic :: Scientific/Engineering :: Physics", +] +keywords = ["astronomy", "astrophysics", "space", "science", "asteroids", "comets", "solar system"] + +dependencies = [ + "adam-core>=0.2.5", + "adam-pyoorb@git+https://github.com/B612-Asteroid-Institute/adam-pyoorb.git@main#egg=adam-pyoorb", + "astropy>=5.3.1", + "astroquery", + "difi", + "healpy", + "jax", + "numpy", + "numba", + "pandas", + "psutil", + "pyarrow>=14.0.0", + "pydantic<2.0.0", + "pyyaml>=5.1", + "quivr", + "ray[default]", + "scikit-learn", + "scipy", + "spiceypy", +] + [build-system] -requires = ["setuptools>=66", "wheel", "setuptools_scm[toml]>=6.0"] -build-backend = "setuptools.build_meta" +requires = ["pdm-backend"] +build-backend = "pdm.backend" +[tool.pdm.build] +includes = ["src/thor/"] + +[tool.pdm.version] +source = "scm" +write_to = "thor/_version.py" +write_template = "__version__ = '{}'" + +[tool.pdm.scripts] +check = { composite = ["lint", "typecheck", "test"] } +format = { composite = ["black ./src/thor", "isort ./src/thor"] } +lint = { composite = [ + "ruff check ./src/thor", + "black --check ./src/thor", + "isort --check-only ./src/thor", +] } +fix = "ruff ./src/thor --fix" +typecheck = "mypy 
--strict ./src/thor" + +test = "pytest --benchmark-disable {args}" +doctest = "pytest --doctest-plus --doctest-only" +benchmark = "pytest --benchmark-only" +coverage = "pytest --cov=thor --cov-report=xml" + +[project.urls] +"Documentation" = "https://github.com/moeyensj/thor#README.md" +"Issues" = "https://github.com/moeyensj/thor/issues" +"Source" = "https://github.com/moeyensj/thor" + +[project.optional-dependencies] +dev = [ + "black", + "ipython", + "matplotlib", + "isort", + "mypy", + "pdm", + "pytest-benchmark", + "pytest-cov", + "pytest-doctestplus", + "pytest-mock", + "pytest-memray", + "pytest", + "ruff", +] + +[tool.black] +line-length = 110 + +[tool.isort] +profile = "black" + +[tool.ruff] +line-length = 110 +target-version = "py311" +lint.ignore = [] +exclude = ["build"] + +[tool.mypy] +ignore_missing_imports = true -[tool.setuptools_scm] -write_to = "thor/version.py" -write_to_template = "__version__ = '{version}'" +[tool.pytest.ini_options] +python_functions = "test_*" +addopts = "-m 'not (integration or memory)'" +markers = [ + "integration: Mark a test as an integration test.", + "memory: Mark a test as a memory test." +] \ No newline at end of file diff --git a/recipe/conda_build_config.yaml b/recipe/conda_build_config.yaml deleted file mode 100644 index 6fc26542..00000000 --- a/recipe/conda_build_config.yaml +++ /dev/null @@ -1,4 +0,0 @@ -python: - - 3.9 - - 3.10 - - 3.11 diff --git a/recipe/meta.yaml b/recipe/meta.yaml deleted file mode 100644 index 1df9bbbd..00000000 --- a/recipe/meta.yaml +++ /dev/null @@ -1,49 +0,0 @@ - -package: - name: thor - version: "{{ version }}" - -source: - git_url: "https://github.com/moeyensj/thor.git" - git_tag: "v{{ version }}" - -requirements: - host: - - python {{ python }} - - pip - - setuptools >= 45 - - setuptools_scm >= 6.0 - - wheel - run: - - python {{ python }} - - astropy >= 5.3.1 - - astroquery - - difi - - healpy - - jax - - numpy - - numba - - pandas - - pyarrow >= 13.0.0 - - pyyaml >= 5.1 - - quivr >= 0.7.1 - - ray-default - - scikit-learn - - scipy - - spiceypy - - pytest - - pytest-benchmark - - pytest-cov - - pre-commit - -build: - script: python setup.py install --single-version-externally-managed --record=record.txt - -test: - imports: - - thor - -about: - home: https://github.com/moeyensj/thor - license: BSD-3 Clause - license_file: LICENSE.md diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index dba91178..00000000 --- a/requirements.txt +++ /dev/null @@ -1,24 +0,0 @@ -adam-core @ git+https://github.com/B612-Asteroid-Institute/adam_core@ef8ee48976dbf9c70580c166de4cc4fd6195fa36 -quivr >= 0.7.1 -astropy >= 5.3.1 -astroquery -difi -healpy -jax -numpy -numba -openorb -pandas -pyarrow >= 13.0.0 -pyyaml >= 5.1 -ray-default -scikit-learn -scipy -spiceypy -pytest -pytest-benchmark -pytest-cov -pre-commit -setuptools >= 45 -wheel -setuptools_scm >= 6.0 diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 64dab1d6..00000000 --- a/setup.cfg +++ /dev/null @@ -1,93 +0,0 @@ -[metadata] -name = thor -version = file: thor/version.py -author = Joachim Moeyens, Mario Juric, Spencer Nelson -author_email = moeyensj@uw.edu -home_page = https://github.com/moeyensj/thor -description = Tracklet-less Heliocentric Orbit Recovery -long_description = file: README.md -long_description_content_type = text/markdown -license = BSD 3-Clause License -license_files = LICENSE.md -keywords = astronomy, astrophysics, space, science, asteroids, comets, solar system -classifiers = - Development Status :: 3 - Alpha - 
Intended Audience :: Science/Research - License :: OSI Approved :: BSD License - Operating System :: OS Independent - Programming Language :: Python - Programming Language :: Python :: 3.9 - Programming Language :: Python :: 3.10 - Topic :: Scientific/Engineering :: Astronomy - Topic :: Scientific/Engineering :: Physics - -[options] -python_requires = >= 3.9 -packages = - thor -include_package_data = True -setup_requires = - setuptools >= 66 - wheel - setuptools_scm >= 6.0 -install_requires = - adam-core @ git+https://github.com/B612-Asteroid-Institute/adam_core@c8f5aae352308ccd97c512c8d8e0a2e17c86ee18#egg=adam_core - astropy >= 5.3.1 - astroquery - difi - healpy - jax - numpy - numba - pandas - psutil - pyarrow >= 14.0.0 - pydantic < 2.0.0 - pyyaml >= 5.1 - quivr @ git+https://github.com/moeyensj/quivr@concatenate-empty-attributes - ray[default] - scikit-learn - scipy - spiceypy - -[options.extras_require] -tests = - pytest - pytest-benchmark - pytest-memray - pytest-cov - pre-commit - -dev = - ipython - memray - - -[options.package_data] -thor = - data/*.yaml - testing/data/*.csv - -[aliases] -test=pytest - -[tool:pytest] -python_functions = test_* -addopts = -m "not (integration or memory)" -markers = - integration: Mark a test as an integration test. - memory: Mark a test as a memory test. - -[isort] -profile = black -skip = - __init__.py - -[black] -line-length = 110 - -[flake8] -max_line_length = 110 -ignore = - W503 - E203 diff --git a/thor/__init__.py b/src/thor/__init__.py similarity index 66% rename from thor/__init__.py rename to src/thor/__init__.py index 11cc9a9b..6a20e321 100644 --- a/thor/__init__.py +++ b/src/thor/__init__.py @@ -1,13 +1,18 @@ -from .version import __version__ -from .config import * -from .orbits import * -from .utils import * -from .range_and_transform import * +# ruff: noqa: F401, F403 +try: + from ._version import __version__ +except ImportError: + __version__ = "unknown" from .clusters import * -from .projections import * +from .config import * +from .main import * from .orbit import * from .orbit_determination import * from .orbit_selection import * -from .main import * +from .orbits import * +from .projections import * +from .range_and_transform import * +from .utils import * +from .utils import setupLogger logger = setupLogger(__name__) diff --git a/thor/checkpointing.py b/src/thor/checkpointing.py similarity index 88% rename from thor/checkpointing.py rename to src/thor/checkpointing.py index 56d376cd..981ad3c8 100644 --- a/thor/checkpointing.py +++ b/src/thor/checkpointing.py @@ -128,13 +128,11 @@ def detect_checkpoint_stage(test_orbit_directory: pathlib.Path) -> VALID_STAGES: raise ValueError(f"{test_orbit_directory} is not a directory") if not test_orbit_directory.exists(): - logger.info(f"Working directory does not exist, starting at beginning.") + logger.info("Working directory does not exist, starting at beginning.") return "filter_observations" if not (test_orbit_directory / "filtered_observations.parquet").exists(): - logger.info( - "No filtered observations found, starting stage filter_observations" - ) + logger.info("No filtered observations found, starting stage filter_observations") return "filter_observations" if (test_orbit_directory / "recovered_orbits.parquet").exists() and ( @@ -194,16 +192,10 @@ def load_initial_checkpoint_values( # If we've already completed the pipeline, we can load the recovered orbits if stage == "complete": - recovered_orbits_path = pathlib.Path( - test_orbit_directory, "recovered_orbits.parquet" - ) - 
recovered_orbit_members_path = pathlib.Path( - test_orbit_directory, "recovered_orbit_members.parquet" - ) + recovered_orbits_path = pathlib.Path(test_orbit_directory, "recovered_orbits.parquet") + recovered_orbit_members_path = pathlib.Path(test_orbit_directory, "recovered_orbit_members.parquet") recovered_orbits = FittedOrbits.from_parquet(recovered_orbits_path) - recovered_orbit_members = FittedOrbitMembers.from_parquet( - recovered_orbit_members_path - ) + recovered_orbit_members = FittedOrbitMembers.from_parquet(recovered_orbit_members_path) if recovered_orbits.fragmented(): recovered_orbits = qv.defragment(recovered_orbits) @@ -217,9 +209,7 @@ def load_initial_checkpoint_values( ) # Filtered observations are required for all other stages - filtered_observations_path = pathlib.Path( - test_orbit_directory, "filtered_observations.parquet" - ) + filtered_observations_path = pathlib.Path(test_orbit_directory, "filtered_observations.parquet") filtered_observations = Observations.from_parquet(filtered_observations_path) if filtered_observations.fragmented(): @@ -227,9 +217,7 @@ def load_initial_checkpoint_values( if stage == "recover_orbits": od_orbits_path = pathlib.Path(test_orbit_directory, "od_orbits.parquet") - od_orbit_members_path = pathlib.Path( - test_orbit_directory, "od_orbit_members.parquet" - ) + od_orbit_members_path = pathlib.Path(test_orbit_directory, "od_orbit_members.parquet") if od_orbits_path.exists() and od_orbit_members_path.exists(): logger.info("Found OD orbits in checkpoint") od_orbits = FittedOrbits.from_parquet(od_orbits_path) @@ -249,9 +237,7 @@ def load_initial_checkpoint_values( if stage == "differential_correction": iod_orbits_path = pathlib.Path(test_orbit_directory, "iod_orbits.parquet") - iod_orbit_members_path = pathlib.Path( - test_orbit_directory, "iod_orbit_members.parquet" - ) + iod_orbit_members_path = pathlib.Path(test_orbit_directory, "iod_orbit_members.parquet") if iod_orbits_path.exists() and iod_orbit_members_path.exists(): logger.info("Found IOD orbits") iod_orbits = FittedOrbits.from_parquet(iod_orbits_path) @@ -271,9 +257,7 @@ def load_initial_checkpoint_values( if stage == "initial_orbit_determination": clusters_path = pathlib.Path(test_orbit_directory, "clusters.parquet") - cluster_members_path = pathlib.Path( - test_orbit_directory, "cluster_members.parquet" - ) + cluster_members_path = pathlib.Path(test_orbit_directory, "cluster_members.parquet") if clusters_path.exists() and cluster_members_path.exists(): logger.info("Found clusters") clusters = Clusters.from_parquet(clusters_path) @@ -292,14 +276,10 @@ def load_initial_checkpoint_values( ) if stage == "cluster_and_link": - transformed_detections_path = pathlib.Path( - test_orbit_directory, "transformed_detections.parquet" - ) + transformed_detections_path = pathlib.Path(test_orbit_directory, "transformed_detections.parquet") if transformed_detections_path.exists(): logger.info("Found transformed detections") - transformed_detections = TransformedDetections.from_parquet( - transformed_detections_path - ) + transformed_detections = TransformedDetections.from_parquet(transformed_detections_path) return create_checkpoint_data( "cluster_and_link", @@ -307,6 +287,4 @@ def load_initial_checkpoint_values( transformed_detections=transformed_detections, ) - return create_checkpoint_data( - "range_and_transform", filtered_observations=filtered_observations - ) + return create_checkpoint_data("range_and_transform", filtered_observations=filtered_observations) diff --git a/thor/clusters.py 
b/src/thor/clusters.py similarity index 93% rename from thor/clusters.py rename to src/thor/clusters.py index 4464aa53..ab0bdea8 100644 --- a/thor/clusters.py +++ b/src/thor/clusters.py @@ -1,3 +1,4 @@ +import hashlib import logging import multiprocessing as mp import time @@ -22,16 +23,10 @@ USE_GPU = False if USE_GPU: - import cudf from cuml.cluster import DBSCAN else: from sklearn.cluster import DBSCAN -import hashlib -from typing import List, Literal, Tuple - -import pyarrow as pa -import pyarrow.compute as pc __all__ = [ "cluster_and_link", @@ -54,7 +49,6 @@ def hash_obs_ids(obs_ids: List[str]) -> str: def drop_duplicate_clusters( clusters: "Clusters", cluster_members: "ClusterMembers", - num_cpus: int = 1, ) -> Tuple["Clusters", "ClusterMembers"]: """ Drop clusters that have identical sets of observation IDs. @@ -92,7 +86,7 @@ def drop_duplicate_clusters( # but found the memory accumulationw as too high. # A simple loop that accumulates the distinct obs ids # for each cluster is more memory efficient. - logger.info(f"Accumulating cluster observation IDs into single strings.") + logger.info("Accumulating cluster observation IDs into single strings.") obs_ids_per_cluster: Union[List[str], pa.Array] = [] current_obs_ids: List[str] = [] current_cluster_id = None @@ -107,7 +101,7 @@ def drop_duplicate_clusters( current_obs_ids.append(obs_id) obs_ids_per_cluster.append(hash_obs_ids(current_obs_ids)) - logger.info(f"Grouping by unique observation sets.") + logger.info("Grouping by unique observation sets.") obs_ids_per_cluster = pa.table( { "index": pa.array(np.arange(0, len(obs_ids_per_cluster))), @@ -118,18 +112,16 @@ def drop_duplicate_clusters( obs_ids_per_cluster = obs_ids_per_cluster.combine_chunks() obs_ids_per_cluster = obs_ids_per_cluster.group_by(["obs_ids"], use_threads=False) - logger.info(f"Taking first index of each unique observation set.") + logger.info("Taking first index of each unique observation set.") indices = obs_ids_per_cluster.aggregate([("index", "first")])["index_first"] del obs_ids_per_cluster indices = indices.combine_chunks() - logger.info(f"Taking clusters that belong to unique observation sets.") + logger.info("Taking clusters that belong to unique observation sets.") clusters = clusters.take(indices) - logger.info(f"Taking cluster members that belong to unique clusters.") - cluster_members = cluster_members.apply_mask( - pc.is_in(cluster_members.cluster_id, clusters.cluster_id) - ) + logger.info("Taking cluster members that belong to unique clusters.") + cluster_members = cluster_members.apply_mask(pc.is_in(cluster_members.cluster_id, clusters.cluster_id)) return clusters, cluster_members @@ -463,10 +455,7 @@ def _label_clusters(runs, points_quantized): labels = np.full(points_quantized.shape[1], -1, np.int64) for i in range(points_quantized.shape[1]): for j in range(runs.shape[1]): - if ( - runs[0, j] == points_quantized[0, i] - and runs[1, j] == points_quantized[1, i] - ): + if runs[0, j] == points_quantized[0, i] and runs[1, j] == points_quantized[1, i]: labels[i] = j break return labels @@ -739,19 +728,13 @@ def cluster_and_link( # Check that there are enough unique times to cluster num_unique_times = len(unique_times) if num_unique_times < min_obs: - logger.info( - "Number of unique times is less than the minimum number of observations required." 
- ) + logger.info("Number of unique times is less than the minimum number of observations required.") exit_early = True # Calculate the time range and make sure it is greater than the minimum arc length - time_range = ( - unique_times.max().mjd()[0].as_py() - unique_times.min().mjd()[0].as_py() - ) + time_range = unique_times.max().mjd()[0].as_py() - unique_times.min().mjd()[0].as_py() if time_range < min_arc_length: - logger.info( - "Time range of transformed detections is less than the minimum arc length." - ) + logger.info("Time range of transformed detections is less than the minimum arc length.") exit_early = True else: @@ -762,10 +745,8 @@ def cluster_and_link( # If any of the above conditions are met then we exit early if exit_early: time_end_cluster = time.perf_counter() - logger.info(f"Found 0 clusters. Minimum requirements for clustering not met.") - logger.info( - f"Clustering completed in {time_end_cluster - time_start_cluster:.3f} seconds." - ) + logger.info("Found 0 clusters. Minimum requirements for clustering not met.") + logger.info(f"Clustering completed in {time_end_cluster - time_start_cluster:.3f} seconds.") return Clusters.empty(), ClusterMembers.empty() clusters = Clusters.empty() @@ -799,9 +780,7 @@ def cluster_and_link( # want to instead pass references to those rather than extract arrays # from them and put them in the object store again. futures = [] - for vxi_chunk, vyi_chunk in zip( - _iterate_chunks(vxx, chunk_size), _iterate_chunks(vyy, chunk_size) - ): + for vxi_chunk, vyi_chunk in zip(_iterate_chunks(vxx, chunk_size), _iterate_chunks(vyy, chunk_size)): futures.append( cluster_velocity_remote.remote( @@ -825,9 +804,7 @@ def cluster_and_link( if clusters.fragmented(): clusters = qv.defragment(clusters) - cluster_members = qv.concatenate( - [cluster_members, cluster_members_chunk] - ) + cluster_members = qv.concatenate([cluster_members, cluster_members_chunk]) if cluster_members.fragmented(): cluster_members = qv.defragment(cluster_members) @@ -846,9 +823,7 @@ def cluster_and_link( logger.info(f"Removed {len(refs_to_free)} references from the object store.") else: - for vxi_chunk, vyi_chunk in zip( - _iterate_chunks(vxx, chunk_size), _iterate_chunks(vyy, chunk_size) - ): + for vxi_chunk, vyi_chunk in zip(_iterate_chunks(vxx, chunk_size), _iterate_chunks(vyy, chunk_size)): clusters_i, cluster_members_i = cluster_velocity_worker( vxi_chunk, vyi_chunk, @@ -874,9 +849,7 @@ def cluster_and_link( if num_clusters == 0: time_end_cluster = time.perf_counter() logger.info(f"Found {len(clusters)} clusters, exiting early.") - logger.info( - f"Clustering completed in {time_end_cluster - time_start_cluster:.3f} seconds." - ) + logger.info(f"Clustering completed in {time_end_cluster - time_start_cluster:.3f} seconds.") return clusters, cluster_members # Ensure clusters, cluster_members are defragmented and sorted @@ -895,19 +868,13 @@ def cluster_and_link( clusters, cluster_members = drop_duplicate_clusters(clusters, cluster_members) logger.info(f"Removed {num_clusters - len(clusters)} duplicate clusters.") time_end_drop = time.perf_counter() - logger.info( - f"Cluster deduplication completed in {time_end_drop - time_start_drop:.3f} seconds." 
- ) + logger.info(f"Cluster deduplication completed in {time_end_drop - time_start_drop:.3f} seconds.") # Sort clusters by cluster ID and observation time - clusters, cluster_members = sort_by_id_and_time( - clusters, cluster_members, observations, "cluster_id" - ) + clusters, cluster_members = sort_by_id_and_time(clusters, cluster_members, observations, "cluster_id") time_end_cluster = time.perf_counter() logger.info(f"Found {len(clusters)} clusters.") - logger.info( - f"Clustering completed in {time_end_cluster - time_start_cluster:.3f} seconds." - ) + logger.info(f"Clustering completed in {time_end_cluster - time_start_cluster:.3f} seconds.") return clusters, cluster_members diff --git a/thor/config.py b/src/thor/config.py similarity index 96% rename from thor/config.py rename to src/thor/config.py index f6e53fdc..ddc2093d 100644 --- a/thor/config.py +++ b/src/thor/config.py @@ -27,9 +27,7 @@ class Config(BaseModel): iod_min_arc_length: float = 1.0 iod_contamination_percentage: float = 20.0 iod_rchi2_threshold: float = 100000 - iod_observation_selection_method: Literal[ - "combinations", "first+middle+last" - ] = "combinations" + iod_observation_selection_method: Literal["combinations", "first+middle+last"] = "combinations" iod_chunk_size: int = 10 od_min_obs: int = 6 od_min_arc_length: float = 1.0 diff --git a/thor/main.py b/src/thor/main.py similarity index 91% rename from thor/main.py rename to src/thor/main.py index f8b13953..3098dd87 100644 --- a/thor/main.py +++ b/src/thor/main.py @@ -7,7 +7,7 @@ import quivr as qv import ray -from adam_core.propagator import PYOORB +from adam_core.propagator.adam_pyoorb import PYOORBPropagator from adam_core.ray_cluster import initialize_use_ray from .checkpointing import create_checkpoint_data, load_initial_checkpoint_values @@ -34,9 +34,7 @@ def initialize_test_orbit( Initialize the test orbit by saving it to disk if a working directory is provided. """ if working_dir is not None: - test_orbit_directory = pathlib.Path( - working_dir, "inputs", test_orbit.orbit_id[0].as_py() - ) + test_orbit_directory = pathlib.Path(working_dir, "inputs", test_orbit.orbit_id[0].as_py()) test_orbit_directory.mkdir(parents=True, exist_ok=True) test_orbit_path = os.path.join(test_orbit_directory, "test_orbit.parquet") test_orbit.to_parquet(test_orbit_path) @@ -97,9 +95,7 @@ def link_test_orbit( logger.info("Running test orbit...") if len(test_orbit) != 1: - raise ValueError( - f"link_test_orbit received {len(test_orbit)} orbits but expected 1." 
- ) + raise ValueError(f"link_test_orbit received {len(test_orbit)} orbits but expected 1.") test_orbit_directory = None if working_dir is not None: @@ -118,7 +114,7 @@ def link_test_orbit( initialize_config(config, working_dir) if config.propagator == "PYOORB": - propagator = PYOORB + propagator = PYOORBPropagator else: raise ValueError(f"Unknown propagator: {config.propagator}") @@ -147,15 +143,11 @@ def link_test_orbit( ) if checkpoint.stage == "filter_observations": - filtered_observations = filter_observations( - observations, test_orbit, config, filters - ) + filtered_observations = filter_observations(observations, test_orbit, config, filters) filtered_observations_path = None if test_orbit_directory is not None: - filtered_observations_path = os.path.join( - test_orbit_directory, "filtered_observations.parquet" - ) + filtered_observations_path = os.path.join(test_orbit_directory, "filtered_observations.parquet") filtered_observations.to_parquet(filtered_observations_path) yield LinkTestOrbitStageResult( @@ -197,9 +189,7 @@ def link_test_orbit( transformed_detections_path = None if test_orbit_directory is not None: logger.info(f"Saving transformed detections to {test_orbit_directory}...") - transformed_detections_path = os.path.join( - test_orbit_directory, "transformed_detections.parquet" - ) + transformed_detections_path = os.path.join(test_orbit_directory, "transformed_detections.parquet") transformed_detections.to_parquet(transformed_detections_path) yield LinkTestOrbitStageResult( @@ -248,9 +238,7 @@ def link_test_orbit( if test_orbit_directory is not None: logger.info(f"Saving clusters to {test_orbit_directory}...") clusters_path = os.path.join(test_orbit_directory, "clusters.parquet") - cluster_members_path = os.path.join( - test_orbit_directory, "cluster_members.parquet" - ) + cluster_members_path = os.path.join(test_orbit_directory, "cluster_members.parquet") clusters.to_parquet(clusters_path) cluster_members.to_parquet(cluster_members_path) @@ -310,9 +298,7 @@ def link_test_orbit( if test_orbit_directory is not None: logger.info(f"Saving IOD orbits to {test_orbit_directory}...") iod_orbits_path = os.path.join(test_orbit_directory, "iod_orbits.parquet") - iod_orbit_members_path = os.path.join( - test_orbit_directory, "iod_orbit_members.parquet" - ) + iod_orbit_members_path = os.path.join(test_orbit_directory, "iod_orbit_members.parquet") iod_orbits.to_parquet(iod_orbits_path) iod_orbit_members.to_parquet(iod_orbit_members_path) @@ -372,9 +358,7 @@ def link_test_orbit( if test_orbit_directory is not None: logger.info(f"Saving OD orbits to {test_orbit_directory}...") od_orbits_path = os.path.join(test_orbit_directory, "od_orbits.parquet") - od_orbit_members_path = os.path.join( - test_orbit_directory, "od_orbit_members.parquet" - ) + od_orbit_members_path = os.path.join(test_orbit_directory, "od_orbit_members.parquet") od_orbits.to_parquet(od_orbits_path) od_orbit_members.to_parquet(od_orbit_members_path) @@ -392,15 +376,11 @@ def link_test_orbit( if not isinstance(od_orbits, ray.ObjectRef): od_orbits = ray.put(od_orbits) refs_to_free.append(od_orbits) - logger.info( - "Placed differentially corrected orbits in the object store." - ) + logger.info("Placed differentially corrected orbits in the object store.") if not isinstance(od_orbit_members, ray.ObjectRef): od_orbit_members = ray.put(od_orbit_members) refs_to_free.append(od_orbit_members) - logger.info( - "Placed differentially corrected orbit members in the object store." 
- ) + logger.info("Placed differentially corrected orbit members in the object store.") checkpoint = create_checkpoint_data( "recover_orbits", @@ -437,17 +417,13 @@ def link_test_orbit( if use_ray and len(refs_to_free) > 0: ray.internal.free(refs_to_free) - logger.info( - f"Removed {len(refs_to_free)} references from the object store." - ) + logger.info(f"Removed {len(refs_to_free)} references from the object store.") recovered_orbits_path = None recovered_orbit_members_path = None if test_orbit_directory is not None: logger.info(f"Saving recovered orbits to {test_orbit_directory}...") - recovered_orbits_path = os.path.join( - test_orbit_directory, "recovered_orbits.parquet" - ) + recovered_orbits_path = os.path.join(test_orbit_directory, "recovered_orbits.parquet") recovered_orbit_members_path = os.path.join( test_orbit_directory, "recovered_orbit_members.parquet" ) diff --git a/thor/observations/__init__.py b/src/thor/observations/__init__.py similarity index 68% rename from thor/observations/__init__.py rename to src/thor/observations/__init__.py index fe7c30f1..60b69ce1 100644 --- a/thor/observations/__init__.py +++ b/src/thor/observations/__init__.py @@ -1,3 +1,3 @@ -# flake8: noqa: F401 +# ruff: noqa: F401, F403 from .observations import * from .photometry import * diff --git a/thor/observations/filters.py b/src/thor/observations/filters.py similarity index 86% rename from thor/observations/filters.py rename to src/thor/observations/filters.py index 9f204f3b..93fff1ba 100644 --- a/thor/observations/filters.py +++ b/src/thor/observations/filters.py @@ -9,7 +9,6 @@ import quivr as qv import ray from adam_core.coordinates import SphericalCoordinates -from adam_core.propagator.utils import _iterate_chunks from adam_core.ray_cluster import initialize_use_ray from thor.config import Config @@ -115,9 +114,7 @@ def apply( observations_state = observations.select("state_id", state_id) coordinates_state = observations_state.coordinates - assert ( - len(ephemeris_state) == 1 - ), "there should be exactly one ephemeris per exposure" + assert len(ephemeris_state) == 1, "there should be exactly one ephemeris per exposure" ephem_ra = ephemeris_state.ephemeris.coordinates.lon[0].as_py() ephem_dec = ephemeris_state.ephemeris.coordinates.lat[0].as_py() @@ -127,9 +124,7 @@ def apply( _within_radius(coordinates_state, ephem_ra, ephem_dec, self.radius) ) - filtered_observations = qv.concatenate( - [filtered_observations, filtered_observations_chunk] - ) + filtered_observations = qv.concatenate([filtered_observations, filtered_observations_chunk]) if filtered_observations.fragmented(): filtered_observations = qv.defragment(filtered_observations) @@ -145,9 +140,7 @@ def apply( logger.info( f"Filtered {len(observations)} observations to {len(filtered_observations)} observations." ) - logger.info( - f"TestOrbitRadiusObservationFilter completed in {time_end - time_start:.3f} seconds." 
- ) + logger.info(f"TestOrbitRadiusObservationFilter completed in {time_end - time_start:.3f} seconds.") return filtered_observations @@ -195,9 +188,7 @@ def _within_radius( num1 = cos_det_lat * sin_dist_lon num2 = cos_center_lat * sin_det_lat - sin_center_lat * cos_det_lat * cos_dist_lon - denominator = ( - sin_center_lat * sin_det_lat + cos_center_lat * cos_det_lat * cos_dist_lon - ) + denominator = sin_center_lat * sin_det_lat + cos_center_lat * cos_det_lat * cos_dist_lon distances = np.arctan2(np.hypot(num1, num2), denominator) return distances <= np.deg2rad(radius) @@ -281,9 +272,7 @@ def filter_observations( logger.info("Running observation filters...") if len(test_orbit) != 1: - raise ValueError( - f"filter_observations received {len(test_orbit)} orbits but expected 1." - ) + raise ValueError(f"filter_observations received {len(test_orbit)} orbits but expected 1.") if isinstance(observations, str): num_obs = pq.read_metadata(observations).num_rows @@ -294,9 +283,7 @@ def filter_observations( logger.info(f"Reading {num_obs} observations in memory.") else: - raise ValueError( - "observations should be a parquet file or an Observations object." - ) + raise ValueError("observations should be a parquet file or an Observations object.") if filters is None: # By default we always filter by radius from the predicted position of the test orbit @@ -313,9 +300,7 @@ def filter_observations( if use_ray: futures: List[ray.ObjectRef] = [] - for observations_chunk in observations_iterator( - observations, chunk_size=chunk_size - ): + for observations_chunk in observations_iterator(observations, chunk_size=chunk_size): futures.append( filter_observations_worker_remote.remote( observations_chunk, @@ -325,17 +310,13 @@ def filter_observations( ) if len(futures) > max_processes * 1.5: finished, futures = ray.wait(futures, num_returns=1) - filtered_observations = qv.concatenate( - [filtered_observations, ray.get(finished[0])] - ) + filtered_observations = qv.concatenate([filtered_observations, ray.get(finished[0])]) if filtered_observations.fragmented(): filtered_observations = qv.defragment(filtered_observations) while futures: finished, futures = ray.wait(futures, num_returns=1) - filtered_observations = qv.concatenate( - [filtered_observations, ray.get(finished[0])] - ) + filtered_observations = qv.concatenate([filtered_observations, ray.get(finished[0])]) if filtered_observations.fragmented(): filtered_observations = qv.defragment(filtered_observations) @@ -344,17 +325,13 @@ def filter_observations( logger.info("Removed observations from the object store.") else: - for observations_chunk in observations_iterator( - observations, chunk_size=chunk_size - ): + for observations_chunk in observations_iterator(observations, chunk_size=chunk_size): filtered_observations_chunk = filter_observations_worker( observations_chunk, test_orbit, filters, ) - filtered_observations = qv.concatenate( - [filtered_observations, filtered_observations_chunk] - ) + filtered_observations = qv.concatenate([filtered_observations, filtered_observations_chunk]) if filtered_observations.fragmented(): filtered_observations = qv.defragment(filtered_observations) @@ -367,10 +344,6 @@ def filter_observations( ) time_end = time.perf_counter() - logger.info( - f"Filtered {num_obs} observations to {len(filtered_observations)} observations." - ) - logger.info( - f"Observations filters completed in {time_end - time_start:.3f} seconds." 
- ) + logger.info(f"Filtered {num_obs} observations to {len(filtered_observations)} observations.") + logger.info(f"Observations filters completed in {time_end - time_start:.3f} seconds.") return filtered_observations diff --git a/thor/observations/observations.py b/src/thor/observations/observations.py similarity index 95% rename from thor/observations/observations.py rename to src/thor/observations/observations.py index 57fbbd0d..46758e99 100644 --- a/thor/observations/observations.py +++ b/src/thor/observations/observations.py @@ -141,9 +141,7 @@ def _input_observations_iterator( for batch in parquet_file.iter_batches(batch_size=chunk_size): table = InputObservations.from_pyarrow(pa.Table.from_batches([batch])) ## TODO: This should be happening some other way - time = Timestamp.from_kwargs( - days=table.time.days, nanos=table.time.nanos, scale="utc" - ) + time = Timestamp.from_kwargs(days=table.time.days, nanos=table.time.nanos, scale="utc") table = table.set_column("time", time) yield table @@ -173,9 +171,7 @@ def input_observations_to_observations_worker( return Observations.from_input_observations(input_observations_chunk) -input_observations_to_observations_worker_remote = ray.remote( - input_observations_to_observations_worker -) +input_observations_to_observations_worker_remote = ray.remote(input_observations_to_observations_worker) def _process_next_future_result( @@ -226,11 +222,7 @@ def convert_input_observations_to_observations( futures, output_observations, output_writer ) - futures.append( - input_observations_to_observations_worker_remote.remote( - input_observation_chunk - ) - ) + futures.append(input_observations_to_observations_worker_remote.remote(input_observation_chunk)) while futures: futures, output_observations = _process_next_future_result( @@ -238,15 +230,11 @@ def convert_input_observations_to_observations( ) else: for input_observation_chunk in input_iterator: - observations_chunk = input_observations_to_observations_worker( - input_observation_chunk - ) + observations_chunk = input_observations_to_observations_worker(input_observation_chunk) if output_writer is not None: output_writer.write_table(observations_chunk.table) else: - output_observations = qv.concatenate( - [output_observations, observations_chunk] - ) + output_observations = qv.concatenate([output_observations, observations_chunk]) if output_observations.fragmented(): output_observations = qv.defragment(output_observations) @@ -450,9 +438,7 @@ def get_observers(self) -> ObserversWithStates: observers=observers_i, ) - observers_with_states = qv.concatenate( - [observers_with_states, observers_with_states_i] - ) + observers_with_states = qv.concatenate([observers_with_states, observers_with_states_i]) if observers_with_states.fragmented(): observers_with_states = qv.defragment(observers_with_states) @@ -515,6 +501,4 @@ def group_by_observatory_code(self) -> Iterator[Tuple[str, "Observations"]]: """ observatory_codes = self.coordinates.origin.code for observatory_code in observatory_codes.unique().sort(): - yield observatory_code.as_py(), self.select_observatory_code( - observatory_code - ) + yield observatory_code.as_py(), self.select_observatory_code(observatory_code) diff --git a/thor/observations/photometry.py b/src/thor/observations/photometry.py similarity index 100% rename from thor/observations/photometry.py rename to src/thor/observations/photometry.py diff --git a/thor/observations/states.py b/src/thor/observations/states.py similarity index 87% rename from thor/observations/states.py 
rename to src/thor/observations/states.py index 25088ca8..e9b7fa42 100644 --- a/thor/observations/states.py +++ b/src/thor/observations/states.py @@ -3,13 +3,10 @@ import numpy as np import pyarrow as pa -import pyarrow.compute as pc from adam_core.coordinates import CartesianCoordinates, SphericalCoordinates -def calculate_state_ids( - coordinates: Union[SphericalCoordinates, CartesianCoordinates] -) -> pa.Int64Array: +def calculate_state_ids(coordinates: Union[SphericalCoordinates, CartesianCoordinates]) -> pa.Int64Array: """ Calculate the state IDs for a set of coordinates. States are defined as unique time and origin combinations. This function will return a state ID for each coordinate in the input coordinates @@ -29,17 +26,13 @@ def calculate_state_ids( table = coordinates.flattened_table() # Append index column so we can maintain the original order - table = table.append_column( - pa.field("index", pa.int64()), pa.array(np.arange(0, len(table))) - ) + table = table.append_column(pa.field("index", pa.int64()), pa.array(np.arange(0, len(table)))) # Select only the relevant columns time_origins = table.select(["time.days", "time.nanos", "origin.code"]) # Group the detections by the observatory code and the detection times and then grab the unique ones - unique_time_origins = time_origins.group_by( - ["time.days", "time.nanos", "origin.code"] - ).aggregate([]) + unique_time_origins = time_origins.group_by(["time.days", "time.nanos", "origin.code"]).aggregate([]) # Now sort the unique states by the time and origin code unique_time_origins = unique_time_origins.sort_by( @@ -95,9 +88,7 @@ def calculate_state_id_hashes( state_id_hashes The state ID hashes for each coordinate in the input coordinates array. """ - hash_inputs = coordinates.flattened_table().select( - ["time.days", "time.nanos", "origin.code"] - ) + hash_inputs = coordinates.flattened_table().select(["time.days", "time.nanos", "origin.code"]) hash_inputs = [ hash_inputs["time.days"].to_pylist(), hash_inputs["time.nanos"].to_pylist(), diff --git a/thor/observations/tests/__init__.py b/src/thor/observations/tests/__init__.py similarity index 100% rename from thor/observations/tests/__init__.py rename to src/thor/observations/tests/__init__.py diff --git a/thor/observations/tests/test_filters.py b/src/thor/observations/tests/conftest.py similarity index 60% rename from thor/observations/tests/test_filters.py rename to src/thor/observations/tests/conftest.py index aeaf087b..ce2f82be 100644 --- a/thor/observations/tests/test_filters.py +++ b/src/thor/observations/tests/conftest.py @@ -1,8 +1,5 @@ -from unittest import mock - import numpy as np import pyarrow as pa -import pyarrow.compute as pc import pytest import quivr as qv from adam_core.coordinates import CartesianCoordinates, Origin @@ -11,9 +8,7 @@ from adam_core.orbits import Ephemeris from adam_core.time import Timestamp -from ...config import Config from ...orbit import TestOrbits -from ..filters import TestOrbitRadiusObservationFilter, filter_observations from ..observations import Observations @@ -106,55 +101,5 @@ def fixed_detections(fixed_ephems, fixed_exposures): @pytest.fixture -def fixed_observations( - fixed_detections: PointSourceDetections, fixed_exposures: Exposures -) -> Observations: +def fixed_observations(fixed_detections: PointSourceDetections, fixed_exposures: Exposures) -> Observations: return Observations.from_detections_and_exposures(fixed_detections, fixed_exposures) - - -def test_observation_fixtures(fixed_test_orbit, fixed_observations): - assert 
len(fixed_test_orbit) == 1 - assert len(pc.unique(fixed_observations.exposure_id)) == 5 - assert len(fixed_observations.coordinates) == 100 * 100 * 5 - - -def test_orbit_radius_observation_filter(fixed_test_orbit, fixed_observations): - fos = TestOrbitRadiusObservationFilter( - radius=0.5, - ) - have = fos.apply(fixed_observations, fixed_test_orbit) - assert len(pc.unique(have.exposure_id)) == 5 - assert pc.all( - pc.equal( - pc.unique(have.exposure_id), - pc.unique(fixed_observations.exposure_id), - ) - ) - # Should be about pi/4 fraction of the detections (0.785 - assert len(have.coordinates) < 0.80 * len(fixed_observations.coordinates) - assert len(have.coordinates) > 0.76 * len(fixed_observations.coordinates) - - -def test_filter_observations(fixed_observations, fixed_test_orbit): - # Test that if not filters are passed, we use - # TestOrbitRadiusObservationFilter by defualt - config = Config(cell_radius=0.5, max_processes=1) - - have = filter_observations(fixed_observations, fixed_test_orbit, config) - assert len(pc.unique(have.exposure_id)) == 5 - assert len(have.coordinates) < 0.80 * len(fixed_observations.coordinates) - assert len(have.coordinates) > 0.76 * len(fixed_observations.coordinates) - - # Make sure if we pass a custom list of filters, they are used - # instead - noop_filter = mock.Mock() - noop_filter.apply.return_value = fixed_observations - - filters = [noop_filter] - have = filter_observations( - fixed_observations, fixed_test_orbit, config, filters=filters - ) - assert len(have.coordinates) == len(fixed_observations.coordinates) - - # Ensure NOOPFilter.apply was run - filters[0].apply.assert_called_once() diff --git a/src/thor/observations/tests/test_filters.py b/src/thor/observations/tests/test_filters.py new file mode 100644 index 00000000..72b3809f --- /dev/null +++ b/src/thor/observations/tests/test_filters.py @@ -0,0 +1,52 @@ +from unittest import mock + +import pyarrow.compute as pc + +from ...config import Config +from ..filters import TestOrbitRadiusObservationFilter, filter_observations + + +def test_observation_fixtures(fixed_test_orbit, fixed_observations): + assert len(fixed_test_orbit) == 1 + assert len(pc.unique(fixed_observations.exposure_id)) == 5 + assert len(fixed_observations.coordinates) == 100 * 100 * 5 + + +def test_orbit_radius_observation_filter(fixed_test_orbit, fixed_observations): + fos = TestOrbitRadiusObservationFilter( + radius=0.5, + ) + have = fos.apply(fixed_observations, fixed_test_orbit) + assert len(pc.unique(have.exposure_id)) == 5 + assert pc.all( + pc.equal( + pc.unique(have.exposure_id), + pc.unique(fixed_observations.exposure_id), + ) + ) + # Should be about pi/4 fraction of the detections (0.785 + assert len(have.coordinates) < 0.80 * len(fixed_observations.coordinates) + assert len(have.coordinates) > 0.76 * len(fixed_observations.coordinates) + + +def test_filter_observations(fixed_observations, fixed_test_orbit): + # Test that if not filters are passed, we use + # TestOrbitRadiusObservationFilter by defualt + config = Config(cell_radius=0.5, max_processes=1) + + have = filter_observations(fixed_observations, fixed_test_orbit, config) + assert len(pc.unique(have.exposure_id)) == 5 + assert len(have.coordinates) < 0.80 * len(fixed_observations.coordinates) + assert len(have.coordinates) > 0.76 * len(fixed_observations.coordinates) + + # Make sure if we pass a custom list of filters, they are used + # instead + noop_filter = mock.Mock() + noop_filter.apply.return_value = fixed_observations + + filters = [noop_filter] + have = 
filter_observations(fixed_observations, fixed_test_orbit, config, filters=filters) + assert len(have.coordinates) == len(fixed_observations.coordinates) + + # Ensure NOOPFilter.apply was run + filters[0].apply.assert_called_once() diff --git a/thor/observations/tests/test_observations.py b/src/thor/observations/tests/test_observations.py similarity index 86% rename from thor/observations/tests/test_observations.py rename to src/thor/observations/tests/test_observations.py index c9f510cc..b5c0f1bd 100644 --- a/thor/observations/tests/test_observations.py +++ b/src/thor/observations/tests/test_observations.py @@ -1,4 +1,3 @@ -import numpy as np import pyarrow as pa import pytest @@ -11,14 +10,6 @@ input_observations_to_observations_worker, ) from ..states import calculate_state_id_hashes -from .test_filters import ( - fixed_detections, - fixed_ephems, - fixed_exposures, - fixed_observations, - fixed_observers, - fixed_test_orbit, -) @pytest.fixture @@ -66,16 +57,12 @@ def test_input_observations_to_observations(input_observations_fixture): def test_convert_observations_table(input_observations_fixture, observations_config): - observations = convert_input_observations_to_observations( - input_observations_fixture, observations_config - ) + observations = convert_input_observations_to_observations(input_observations_fixture, observations_config) assert isinstance(observations, Observations) assert len(observations) == len(input_observations_fixture) -def test_convert_observations_file( - tmp_path, observations_config, input_observations_file -): +def test_convert_observations_file(tmp_path, observations_config, input_observations_file): output = str(tmp_path / "output.parquet") observations = convert_input_observations_to_observations( input_observations_file, observations_config, output_path=output diff --git a/thor/observations/tests/test_states.py b/src/thor/observations/tests/test_states.py similarity index 90% rename from thor/observations/tests/test_states.py rename to src/thor/observations/tests/test_states.py index f7bada4b..6bd5efcd 100644 --- a/thor/observations/tests/test_states.py +++ b/src/thor/observations/tests/test_states.py @@ -1,8 +1,7 @@ import numpy as np import pyarrow as pa import pyarrow.compute as pc -import pytest -from adam_core.coordinates import CartesianCoordinates, Origin, SphericalCoordinates +from adam_core.coordinates import Origin, SphericalCoordinates from adam_core.time import Timestamp from ..states import calculate_state_ids diff --git a/thor/observations/utils.py b/src/thor/observations/utils.py similarity index 100% rename from thor/observations/utils.py rename to src/thor/observations/utils.py diff --git a/thor/orbit.py b/src/thor/orbit.py similarity index 92% rename from thor/orbit.py rename to src/thor/orbit.py index a9a48705..c9dbbf49 100644 --- a/thor/orbit.py +++ b/src/thor/orbit.py @@ -18,10 +18,13 @@ ) from adam_core.observers import Observers from adam_core.orbits import Ephemeris, Orbits -from adam_core.propagator import PYOORB, Propagator +from adam_core.propagator import Propagator +from adam_core.propagator.adam_pyoorb import PYOORBPropagator from adam_core.ray_cluster import initialize_use_ray from adam_core.time import Timestamp +from .observations import Observations + CoordinateType = TypeVar( "CoordinateType", bound=Union[ @@ -32,7 +35,6 @@ ], ) -from .observations import Observations logger = logging.getLogger(__name__) @@ -84,9 +86,7 @@ def range_observations_worker( return RangedPointSourceDetections.from_kwargs( 
id=observations_state.id, exposure_id=observations_state.exposure_id, - coordinates=assume_heliocentric_distance( - r, observations_state.coordinates, observer_i.coordinates - ), + coordinates=assume_heliocentric_distance(r, observations_state.coordinates, observer_i.coordinates), state_id=observations_state.state_id, ) @@ -152,9 +152,7 @@ def _is_cache_fresh(self, observations: Observations) -> bool: else: return False - def _cache_ephemeris( - self, ephemeris: TestOrbitEphemeris, observations: Observations - ): + def _cache_ephemeris(self, ephemeris: TestOrbitEphemeris, observations: Observations): """ Cache the ephemeris and observation IDs. @@ -175,7 +173,7 @@ def _cache_ephemeris( def propagate( self, times: Timestamp, - propagator: Propagator = PYOORB(), + propagator: Propagator = PYOORBPropagator(), max_processes: Optional[int] = 1, ) -> Orbits: """ @@ -200,13 +198,12 @@ def propagate( times, max_processes=max_processes, chunk_size=1, - parallel_backend="ray", ) def generate_ephemeris( self, observers: Observers, - propagator: Propagator = PYOORB(), + propagator: Propagator = PYOORBPropagator(), max_processes: Optional[int] = 1, ) -> Ephemeris: """ @@ -231,13 +228,12 @@ def generate_ephemeris( observers, max_processes=max_processes, chunk_size=1, - parallel_backend="ray", ) def generate_ephemeris_from_observations( self, observations: Union[Observations, ray.ObjectRef], - propagator: Propagator = PYOORB(), + propagator: Propagator = PYOORBPropagator(), max_processes: Optional[int] = 1, ): """ @@ -276,9 +272,7 @@ def generate_ephemeris_from_observations( raise ValueError("Observations must not be empty.") if self._is_cache_fresh(observations): - logger.debug( - "Test orbit ephemeris cache is fresh. Returning cached states." - ) + logger.debug("Test orbit ephemeris cache is fresh. Returning cached states.") return self._cached_ephemeris logger.debug("Test orbit ephemeris cache is stale. 
Regenerating.") @@ -318,7 +312,7 @@ def generate_ephemeris_from_observations( def range_observations( self, observations: Union[Observations, ray.ObjectRef], - propagator: Propagator = PYOORB(), + propagator: Propagator = PYOORBPropagator(), max_processes: Optional[int] = 1, ) -> RangedPointSourceDetections: """ @@ -366,27 +360,19 @@ def range_observations( state_ids = observations.state_id.unique() futures = [] for state_id in state_ids: - futures.append( - range_observations_remote.remote( - observations_ref, ephemeris_ref, state_id - ) - ) + futures.append(range_observations_remote.remote(observations_ref, ephemeris_ref, state_id)) if len(futures) >= max_processes * 1.5: finished, futures = ray.wait(futures, num_returns=1) ranged_detections_chunk = ray.get(finished[0]) - ranged_detections = qv.concatenate( - [ranged_detections, ranged_detections_chunk] - ) + ranged_detections = qv.concatenate([ranged_detections, ranged_detections_chunk]) if ranged_detections.fragmented(): ranged_detections = qv.defragment(ranged_detections) while futures: finished, futures = ray.wait(futures, num_returns=1) ranged_detections_chunk = ray.get(finished[0]) - ranged_detections = qv.concatenate( - [ranged_detections, ranged_detections_chunk] - ) + ranged_detections = qv.concatenate([ranged_detections, ranged_detections_chunk]) if ranged_detections.fragmented(): ranged_detections = qv.defragment(ranged_detections) @@ -401,9 +387,7 @@ def range_observations( state_id, ) - ranged_detections = qv.concatenate( - [ranged_detections, ranged_detections_chunk] - ) + ranged_detections = qv.concatenate([ranged_detections, ranged_detections_chunk]) if ranged_detections.fragmented(): ranged_detections = qv.defragment(ranged_detections) @@ -446,9 +430,7 @@ def assume_heliocentric_distance( # Transform the coordinates to the ecliptic frame by assuming they lie on a unit sphere # (this assumption will only be applied to coordinates with missing rho values) coords_eq_unit = coords.to_unit_sphere(only_missing=True) - coords_ec = transform_coordinates( - coords_eq_unit, SphericalCoordinates, frame_out="ecliptic" - ) + coords_ec = transform_coordinates(coords_eq_unit, SphericalCoordinates, frame_out="ecliptic") # Transform the coordinates to cartesian and calculate the unit vectors pointing # from the origin to the coordinates diff --git a/src/thor/orbit_determination/__init__.py b/src/thor/orbit_determination/__init__.py new file mode 100644 index 00000000..4d632195 --- /dev/null +++ b/src/thor/orbit_determination/__init__.py @@ -0,0 +1,3 @@ +# ruff: noqa: F401, F403 +from .fitted_orbits import * +from .outliers import * diff --git a/thor/orbit_determination/fitted_orbits.py b/src/thor/orbit_determination/fitted_orbits.py similarity index 94% rename from thor/orbit_determination/fitted_orbits.py rename to src/thor/orbit_determination/fitted_orbits.py index 888af7bb..b0050b9d 100644 --- a/thor/orbit_determination/fitted_orbits.py +++ b/src/thor/orbit_determination/fitted_orbits.py @@ -1,7 +1,6 @@ import uuid from typing import List, Literal, Optional, Tuple -import numpy as np import pyarrow as pa import pyarrow.compute as pc import quivr as qv @@ -9,8 +8,6 @@ from adam_core.coordinates.residuals import Residuals from adam_core.orbits import Orbits -from ..utils.quivr import drop_duplicates - __all__ = [ "FittedOrbits", "FittedOrbitMembers", @@ -78,9 +75,7 @@ def assign_duplicate_observations( orbit_members = orbit_members.apply_mask(pc.invert(mask_to_remove)) # Filtering self based on the filtered orbit_members - orbits_mask = 
pc.is_in( - orbits.column("orbit_id"), value_set=orbit_members.column("orbit_id") - ) + orbits_mask = pc.is_in(orbits.column("orbit_id"), value_set=orbit_members.column("orbit_id")) filtered_orbits = orbits.apply_mask(orbits_mask) return filtered_orbits, orbit_members @@ -127,10 +122,8 @@ def drop_duplicate_orbits( "coordinates.vz", ] - filtered = drop_duplicates(orbits, subset=subset, keep=keep) - filtered_orbit_members = orbit_members.apply_mask( - pc.is_in(orbit_members.orbit_id, filtered.orbit_id) - ) + filtered = orbits.drop_duplicates(subset=subset, keep=keep) + filtered_orbit_members = orbit_members.apply_mask(pc.is_in(orbit_members.orbit_id, filtered.orbit_id)) return filtered, filtered_orbit_members diff --git a/thor/orbit_determination/outliers.py b/src/thor/orbit_determination/outliers.py similarity index 92% rename from thor/orbit_determination/outliers.py rename to src/thor/orbit_determination/outliers.py index dd8b7564..c297c53a 100644 --- a/thor/orbit_determination/outliers.py +++ b/src/thor/orbit_determination/outliers.py @@ -1,9 +1,7 @@ import numpy as np -def calculate_max_outliers( - num_obs: int, min_obs: int, contamination_percentage: float -) -> int: +def calculate_max_outliers(num_obs: int, min_obs: int, contamination_percentage: float) -> int: """ Calculate the maximum number of allowable outliers. Linkages may contain erroneous observations that need to be removed. This function calculates the maximum number of diff --git a/thor/orbit_determination/tests/__init__.py b/src/thor/orbit_determination/tests/__init__.py similarity index 100% rename from thor/orbit_determination/tests/__init__.py rename to src/thor/orbit_determination/tests/__init__.py diff --git a/thor/orbit_determination/tests/test_fitted_orbits.py b/src/thor/orbit_determination/tests/test_fitted_orbits.py similarity index 88% rename from thor/orbit_determination/tests/test_fitted_orbits.py rename to src/thor/orbit_determination/tests/test_fitted_orbits.py index 31c2313e..1b3c4872 100644 --- a/thor/orbit_determination/tests/test_fitted_orbits.py +++ b/src/thor/orbit_determination/tests/test_fitted_orbits.py @@ -85,9 +85,7 @@ def test_all_duplicates(simple_orbits, all_duplicates_orbit_members): simple_orbits, all_duplicates_orbit_members ) unique_obs_ids = set(filtered_members.obs_id) - assert len(unique_obs_ids) == len( - filtered_members - ) # Ensure no duplicates in 'obs_id' + assert len(unique_obs_ids) == len(filtered_members) # Ensure no duplicates in 'obs_id' def test_mixed_duplicates(simple_orbits, mixed_duplicates_orbit_members): @@ -96,9 +94,7 @@ def test_mixed_duplicates(simple_orbits, mixed_duplicates_orbit_members): simple_orbits, mixed_duplicates_orbit_members ) unique_obs_ids = set(filtered_members.obs_id) - assert len(unique_obs_ids) == len( - filtered_members - ) # Ensure no duplicates in 'obs_id' + assert len(unique_obs_ids) == len(filtered_members) # Ensure no duplicates in 'obs_id' # 1 -> 1 # 2 -> 2 @@ -111,19 +107,13 @@ def test_mixed_duplicates(simple_orbits, mixed_duplicates_orbit_members): # Now we assert that the correct orbits were selected based on sort order # Assert that filtered_members where obs_id is 2 that the orbit_id is 2 mask = pc.equal(filtered_members.obs_id, pa.scalar("2")) - assert pc.all( - pc.equal(filtered_members.apply_mask(mask).orbit_id, pa.scalar("2")) - ).as_py() + assert pc.all(pc.equal(filtered_members.apply_mask(mask).orbit_id, pa.scalar("2"))).as_py() # Assert that members where obs_id is 3 that the orbit_id is also 2 mask =
pc.equal(filtered_members.obs_id, pa.scalar("3")) - assert pc.all( - pc.equal(filtered_members.apply_mask(mask).orbit_id, pa.scalar("2")) - ).as_py() + assert pc.all(pc.equal(filtered_members.apply_mask(mask).orbit_id, pa.scalar("2"))).as_py() # Assert that members where obs_id is 4 that the orbit_id is 4 mask = pc.equal(filtered_members.obs_id, pa.scalar("4")) - assert pc.all( - pc.equal(filtered_members.apply_mask(mask).orbit_id, pa.scalar("4")) - ).as_py() + assert pc.all(pc.equal(filtered_members.apply_mask(mask).orbit_id, pa.scalar("4"))).as_py() def test_with_no_duplicates(simple_orbits, no_duplicate_orbit_members): @@ -139,8 +129,6 @@ def test_empty_data(): # Test with empty FittedOrbits and FittedOrbitMembers empty_orbits = FittedOrbits.empty() empty_members = FittedOrbitMembers.empty() - filtered_orbits, filtered_members = assign_duplicate_observations( - empty_orbits, empty_members - ) + filtered_orbits, filtered_members = assign_duplicate_observations(empty_orbits, empty_members) assert len(filtered_orbits) == 0 assert len(filtered_members) == 0 diff --git a/thor/orbit_determination/tests/test_outliers.py b/src/thor/orbit_determination/tests/test_outliers.py similarity index 83% rename from thor/orbit_determination/tests/test_outliers.py rename to src/thor/orbit_determination/tests/test_outliers.py index e10f8232..7c3ef97e 100644 --- a/thor/orbit_determination/tests/test_outliers.py +++ b/src/thor/orbit_determination/tests/test_outliers.py @@ -36,14 +36,12 @@ def test_calculate_max_outliers_assertions(): AssertionError, match=r"Number of observations must be greater than or equal to the minimum number of observations.", ): - outliers = calculate_max_outliers(num_obs, min_obs, contamination_percentage) + calculate_max_outliers(num_obs, min_obs, contamination_percentage) # Test that the function raises an assertion error when the contamination percentage # is less than 0. min_obs = 10 num_obs = 10 contamination_percentage = -50 - with pytest.raises( - AssertionError, match=r"Contamination percentage must be between 0 and 1." - ): - outliers = calculate_max_outliers(num_obs, min_obs, contamination_percentage) + with pytest.raises(AssertionError, match=r"Contamination percentage must be between 0 and 1."): + calculate_max_outliers(num_obs, min_obs, contamination_percentage) diff --git a/thor/orbit_selection.py b/src/thor/orbit_selection.py similarity index 92% rename from thor/orbit_selection.py rename to src/thor/orbit_selection.py index 7752144a..adf3e4db 100644 --- a/thor/orbit_selection.py +++ b/src/thor/orbit_selection.py @@ -13,7 +13,8 @@ from adam_core.coordinates import KeplerianCoordinates from adam_core.observers import Observers from adam_core.orbits import Ephemeris, Orbits -from adam_core.propagator import PYOORB, Propagator +from adam_core.propagator import Propagator +from adam_core.propagator.adam_pyoorb import PYOORBPropagator from adam_core.propagator.utils import _iterate_chunks from adam_core.ray_cluster import initialize_use_ray from adam_core.time import Timestamp @@ -223,9 +224,7 @@ def generate_test_orbits_worker( ephemeris_mask = pc.is_in(ephemeris_healpixels, healpixel_chunk) ephemeris_filtered = ephemeris.apply_mask(ephemeris_mask) ephemeris_healpixels = pc.filter(ephemeris_healpixels, ephemeris_mask) - logger.info( - f"{len(ephemeris_filtered)} orbit ephemerides overlap with the observations." 
- ) + logger.info(f"{len(ephemeris_filtered)} orbit ephemerides overlap with the observations.") # Filter the orbits to only those in the ephemeris orbits_filtered = propagated_orbits.apply_mask( @@ -271,7 +270,7 @@ def generate_test_orbits( observations: Union[str, Observations], catalog: Orbits, nside: int = 32, - propagator: Propagator = PYOORB(), + propagator: Propagator = PYOORBPropagator(), max_processes: Optional[int] = None, chunk_size: int = 100, ) -> TestOrbits: @@ -317,9 +316,7 @@ def generate_test_orbits( # If the input file is a string, read in the days column to # extract the minimum time if isinstance(observations, str): - table = pq.read_table( - observations, columns=["coordinates.time.days"], memory_map=True - ) + table = pq.read_table(observations, columns=["coordinates.time.days"], memory_map=True) min_day = pc.min(table["days"]).as_py() # Set the start time to the midnight of the first night of observations @@ -330,9 +327,7 @@ def generate_test_orbits( earliest_time = observations.coordinates.time.min() # Set the start time to the midnight of the first night of observations - start_time = Timestamp.from_kwargs( - days=earliest_time.days, nanos=[0], scale="utc" - ) + start_time = Timestamp.from_kwargs(days=earliest_time.days, nanos=[0], scale="utc") else: raise ValueError( f"observations must be a path to a parquet file or an Observations object. Got {type(observations)}." @@ -345,13 +340,10 @@ def generate_test_orbits( catalog, start_time, max_processes=max_processes, - parallel_backend="ray", chunk_size=500, ) propagation_end_time = time.perf_counter() - logger.info( - f"Propagation completed in {propagation_end_time - propagation_start_time:.3f} seconds." - ) + logger.info(f"Propagation completed in {propagation_end_time - propagation_start_time:.3f} seconds.") # Create a geocentric observer for the observations logger.info("Generating ephemerides for the propagated orbits...") @@ -364,13 +356,10 @@ def generate_test_orbits( observers, start_time, max_processes=max_processes, - parallel_backend="ray", chunk_size=1000, ) ephemeris_end_time = time.perf_counter() - logger.info( - f"Ephemeris generation completed in {ephemeris_end_time - ephemeris_start_time:.3f} seconds." - ) + logger.info(f"Ephemeris generation completed in {ephemeris_end_time - ephemeris_start_time:.3f} seconds.") if isinstance(observations, str): table = pq.read_table( @@ -395,9 +384,7 @@ def generate_test_orbits( nside=nside, ) observations_healpixels = pc.unique(pa.array(observations_healpixels)) - logger.info( - f"Observations occur in {len(observations_healpixels)} unique healpixels." - ) + logger.info(f"Observations occur in {len(observations_healpixels)} unique healpixels.") # Calculate the healpixels for each ephemeris # We do not want unique healpixels here because we want to @@ -414,9 +401,7 @@ def generate_test_orbits( if max_processes is None: max_processes = mp.cpu_count() - chunk_size = np.minimum( - np.ceil(len(observations_healpixels) / max_processes).astype(int), chunk_size - ) + chunk_size = np.minimum(np.ceil(len(observations_healpixels) / max_processes).astype(int), chunk_size) logger.info(f"Generating test orbits with a chunk size of {chunk_size} healpixels.") test_orbits = TestOrbits.empty() @@ -459,7 +444,5 @@ def generate_test_orbits( time_end = time.perf_counter() logger.info(f"Selected {len(test_orbits)} test orbits.") - logger.info( - f"Test orbit generation completed in {time_end - time_start:.3f} seconds." 
- ) + logger.info(f"Test orbit generation completed in {time_end - time_start:.3f} seconds.") return test_orbits diff --git a/thor/orbits/__init__.py b/src/thor/orbits/__init__.py similarity index 88% rename from thor/orbits/__init__.py rename to src/thor/orbits/__init__.py index a97a21e9..9f7db6ed 100644 --- a/thor/orbits/__init__.py +++ b/src/thor/orbits/__init__.py @@ -1,8 +1,9 @@ -from .state_transition import * -from .iterators import * +# ruff: noqa: F401, F403 +from .attribution import * from .gauss import * from .gibbs import * from .herrick_gibbs import * -from .attribution import * from .iod import * +from .iterators import * from .od import * +from .state_transition import * diff --git a/thor/orbits/attribution.py b/src/thor/orbits/attribution.py similarity index 90% rename from thor/orbits/attribution.py rename to src/thor/orbits/attribution.py index 87b5a937..321cb56c 100644 --- a/thor/orbits/attribution.py +++ b/src/thor/orbits/attribution.py @@ -11,7 +11,8 @@ import ray from adam_core.coordinates.residuals import Residuals from adam_core.orbits import Orbits -from adam_core.propagator import PYOORB, Propagator +from adam_core.propagator import Propagator +from adam_core.propagator.adam_pyoorb import PYOORBPropagator from adam_core.propagator.utils import _iterate_chunk_indices, _iterate_chunks from adam_core.ray_cluster import initialize_use_ray from sklearn.neighbors import BallTree @@ -39,9 +40,7 @@ class Attributions(qv.Table): residuals = Residuals.as_column(nullable=True) distance = qv.Float64Column(nullable=True) - def drop_coincident_attributions( - self, observations: Observations - ) -> "Attributions": + def drop_coincident_attributions(self, observations: Observations) -> "Attributions": """ Drop attributions that are coincident in time: two or more observations attributed to the same orbit that occur at the same time. 
The observation with the lowest @@ -61,9 +60,7 @@ def drop_coincident_attributions( flattened_table = self.flattened_table() # Add index to flattened table - flattened_table = flattened_table.add_column( - 0, "index", pa.array(np.arange(len(flattened_table))) - ) + flattened_table = flattened_table.add_column(0, "index", pa.array(np.arange(len(flattened_table)))) # Drop the residual values (a list column) due to: https://github.com/apache/arrow/issues/32504 flattened_table = flattened_table.drop(["residuals.values"]) @@ -83,9 +80,7 @@ def drop_coincident_attributions( ) # Join the time column back to the flattened attributions table - flattened_table = flattened_table.join( - flattened_observations, ["obs_id"], right_keys=["id"] - ) + flattened_table = flattened_table.join(flattened_observations, ["obs_id"], right_keys=["id"]) # Sort the table flattened_table = flattened_table.sort_by( @@ -126,9 +121,7 @@ def drop_multiple_attributions(self) -> "Attributions": flattened_table = self.flattened_table() # Add index to flattened table - flattened_table = flattened_table.add_column( - 0, "index", pa.array(np.arange(len(flattened_table))) - ) + flattened_table = flattened_table.add_column(0, "index", pa.array(np.arange(len(flattened_table)))) # Sort the table by observation ID and then distance flattened_table = flattened_table.sort_by( @@ -161,7 +154,7 @@ def attribution_worker( orbits: Union[Orbits, FittedOrbits], observations: Observations, radius: float = 1 / 3600, - propagator: Type[Propagator] = PYOORB, + propagator: Type[Propagator] = PYOORBPropagator, propagator_kwargs: dict = {}, ) -> Attributions: # Initialize the propagator @@ -179,14 +172,10 @@ def attribution_worker( observers = observers_with_states.observers # Generate ephemerides for each orbit at the observation times - ephemeris = prop.generate_ephemeris( - orbits, observers, chunk_size=len(orbits), max_processes=1 - ) + ephemeris = prop.generate_ephemeris(orbits, observers, chunk_size=len(orbits), max_processes=1) # Round the ephemeris and observations to the nearest millisecond - ephemeris = ephemeris.set_column( - "coordinates.time", ephemeris.coordinates.time.rounded(precision="ms") - ) + ephemeris = ephemeris.set_column("coordinates.time", ephemeris.coordinates.time.rounded(precision="ms")) observations_rounded = observations.set_column( "coordinates.time", observations.coordinates.time.rounded(precision="ms") @@ -256,9 +245,7 @@ def attribution_worker( obs_times_associated.append(obs_times[mask[0]]) distances.append(d[mask]) - residuals_i = Residuals.calculate( - coords.take(mask[0]), coords_predicted.take(i[mask]) - ) + residuals_i = Residuals.calculate(coords.take(mask[0]), coords_predicted.take(i[mask])) residuals = qv.concatenate([residuals, residuals_i]) if len(distances) > 0: @@ -293,7 +280,7 @@ def attribute_observations( orbits: Union[Orbits, FittedOrbits, ray.ObjectRef], observations: Union[Observations, ray.ObjectRef], radius: float = 5 / 3600, - propagator: Type[Propagator] = PYOORB, + propagator: Type[Propagator] = PYOORBPropagator, propagator_kwargs: dict = {}, orbits_chunk_size: int = 10, observations_chunk_size: int = 100000, @@ -346,9 +333,7 @@ def attribute_observations( # We wait for each chunk of orbits to finish before starting the next # chunk of observations to reduce the memory pressure. 
If not, the number # of expected futures will be large (num_orbits / orbit_chunk_size * num_observation_chunks) - for observations_indices_chunk in _iterate_chunk_indices( - observations, observations_chunk_size - ): + for observations_indices_chunk in _iterate_chunk_indices(observations, observations_chunk_size): futures = [] for orbit_id_chunk in _iterate_chunks(orbit_ids, orbits_chunk_size): futures.append( @@ -377,15 +362,11 @@ def attribute_observations( if len(refs_to_free) > 0: ray.internal.free(refs_to_free) - logger.info( - f"Removed {len(refs_to_free)} references from the object store." - ) + logger.info(f"Removed {len(refs_to_free)} references from the object store.") else: for orbit_id_chunk in _iterate_chunks(orbit_ids, orbits_chunk_size): - for observations_indices_chunk in _iterate_chunk_indices( - observations, observations_chunk_size - ): + for observations_indices_chunk in _iterate_chunk_indices(observations, observations_chunk_size): attribution_df_i = attribution_worker( orbit_id_chunk, observations_indices_chunk, @@ -407,9 +388,7 @@ def attribute_observations( logger.info( f"Attributed {len(attributions.obs_id.unique())} observations to {len(attributions.orbit_id.unique())} orbits." ) - logger.info( - "Attribution completed in {:.3f} seconds.".format(time_end - time_start) - ) + logger.info("Attribution completed in {:.3f} seconds.".format(time_end - time_start)) return attributions @@ -425,7 +404,7 @@ def merge_and_extend_orbits( delta: float = 1e-8, max_iter: int = 20, method: Literal["central", "finite"] = "central", - propagator: Type[Propagator] = PYOORB, + propagator: Type[Propagator] = PYOORBPropagator, propagator_kwargs: dict = {}, orbits_chunk_size: int = 10, observations_chunk_size: int = 100000, @@ -446,9 +425,6 @@ def merge_and_extend_orbits( Number of observations to process per batch. num_jobs : int, optional Number of jobs to launch. - parallel_backend : str, optional - Which parallelization backend to use {'ray', 'mp', cf}. Defaults to using Python's concurrent.futures - module ('cf'). """ time_start = time.perf_counter() logger.info("Running orbit extension and merging...") @@ -524,9 +500,7 @@ def merge_and_extend_orbits( obs_id=attributions.obs_id, residuals=attributions.residuals, ) - orbits = orbits.apply_mask( - pc.is_in(orbits.orbit_id, orbit_members.orbit_id.unique()) - ) + orbits = orbits.apply_mask(pc.is_in(orbits.orbit_id, orbit_members.orbit_id.unique())) if use_ray: ray.internal.free(refs_to_free) @@ -579,18 +553,14 @@ def merge_and_extend_orbits( # are unlikely to recover more observations in subsequent iterations and so can be saved for output. 
not_improved_mask = pc.equal(orbits.success, False) orbits_out = orbits.apply_mask(not_improved_mask) - orbit_members_out = orbit_members.apply_mask( - pc.is_in(orbit_members.orbit_id, orbits_out.orbit_id) - ) + orbit_members_out = orbit_members.apply_mask(pc.is_in(orbit_members.orbit_id, orbits_out.orbit_id)) # If some of the orbits that haven't improved in their orbit fit still share observations # then assign those observations to the orbit with the most observations, longest arc length, # and lowest reduced chi2 (and remove the other instances of that observation) # We will one final iteration of OD on the output orbits later - orbits_out, orbit_members_out = assign_duplicate_observations( - orbits_out, orbit_members_out - ) + orbits_out, orbit_members_out = assign_duplicate_observations(orbits_out, orbit_members_out) odp_orbits = qv.concatenate([odp_orbits, orbits_out]) if odp_orbits.fragmented(): @@ -603,9 +573,7 @@ def merge_and_extend_orbits( # Identify the orbit that could still be improved more improved_mask = pc.invert(not_improved_mask) orbits = orbits.apply_mask(improved_mask) - orbit_members = orbit_members.apply_mask( - pc.is_in(orbit_members.orbit_id, orbits.orbit_id) - ) + orbit_members = orbit_members.apply_mask(pc.is_in(orbit_members.orbit_id, orbits.orbit_id)) # Remove observations that have been added to the output list of orbits # from the orbits that we will continue iterating over @@ -617,9 +585,7 @@ def merge_and_extend_orbits( ) ) ) - orbits = orbits.apply_mask( - pc.is_in(orbits.orbit_id, orbit_members.orbit_id.unique()) - ) + orbits = orbits.apply_mask(pc.is_in(orbits.orbit_id, orbit_members.orbit_id.unique())) if orbits.fragmented(): orbits = qv.defragment(orbits) @@ -656,9 +622,7 @@ def merge_and_extend_orbits( if len(odp_orbits) > 0: # Assign any remaining duplicate observations to the orbit with # the most observations, longest arc length, and lowest reduced chi2 - odp_orbits, odp_orbit_members = assign_duplicate_observations( - odp_orbits, odp_orbit_members - ) + odp_orbits, odp_orbit_members = assign_duplicate_observations(odp_orbits, odp_orbit_members) # Do one final iteration of OD on the output orbits. This # will update any fits of orbits that might have had observations @@ -683,16 +647,8 @@ def merge_and_extend_orbits( odp_orbit_members = odp_orbit_members.drop_outliers() time_end = time.perf_counter() - logger.info( - f"Number of attribution / differential correction iterations: {iterations}" - ) - logger.info( - f"Extended and/or merged {len(orbits)} orbits into {len(odp_orbits)} orbits." - ) - logger.info( - "Orbit extension and merging completed in {:.3f} seconds.".format( - time_end - time_start - ) - ) + logger.info(f"Number of attribution / differential correction iterations: {iterations}") + logger.info(f"Extended and/or merged {len(orbits)} orbits into {len(odp_orbits)} orbits.") + logger.info("Orbit extension and merging completed in {:.3f} seconds.".format(time_end - time_start)) return odp_orbits, odp_orbit_members diff --git a/thor/orbits/gauss.py b/src/thor/orbits/gauss.py similarity index 92% rename from thor/orbits/gauss.py rename to src/thor/orbits/gauss.py index fbd6fef9..7a972d2a 100644 --- a/thor/orbits/gauss.py +++ b/src/thor/orbits/gauss.py @@ -42,21 +42,13 @@ def _calcV(rho1_hat, rho2_hat, rho3_hat): @jit("f8(f8[:], f8[:], f8[:], f8[:], f8[:], f8, f8, f8)", nopython=True, cache=True) def _calcA(q1, q2, q3, rho1_hat, rho3_hat, t31, t32, t21): # Equation 21 from Milani et al. 
2008 - return np.linalg.norm(q2) ** 3 * np.dot( - np.cross(rho1_hat, rho3_hat), (t32 * q1 - t31 * q2 + t21 * q3) - ) + return np.linalg.norm(q2) ** 3 * np.dot(np.cross(rho1_hat, rho3_hat), (t32 * q1 - t31 * q2 + t21 * q3)) @jit("f8(f8[:], f8[:], f8[:], f8[:], f8, f8, f8, f8)", nopython=True, cache=True) def _calcB(q1, q3, rho1_hat, rho3_hat, t31, t32, t21, mu=MU): # Equation 19 from Milani et al. 2008 - return ( - mu - / 6 - * t32 - * t21 - * np.dot(np.cross(rho1_hat, rho3_hat), ((t31 + t32) * q1 + (t31 + t21) * q3)) - ) + return mu / 6 * t32 * t21 * np.dot(np.cross(rho1_hat, rho3_hat), ((t31 + t32) * q1 + (t31 + t21) * q3)) @jit("UniTuple(f8, 2)(f8, f8, f8, f8, f8)", nopython=True, cache=True) @@ -203,9 +195,7 @@ def gaussIOD( SphericalCoordinates.from_kwargs( lon=coords[:, 0], lat=coords[:, 1], - origin=Origin.from_kwargs( - code=np.full(len(coords), "Unknown", dtype="object") - ), + origin=Origin.from_kwargs(code=np.full(len(coords), "Unknown", dtype="object")), frame="equatorial", ).to_unit_sphere(), representation_out=CartesianCoordinates, @@ -268,9 +258,7 @@ def gaussIOD( epochs = [] for r2_mag in r2_mags: lambda1, lambda3 = _calcLambdas(r2_mag, t31, t32, t21, mu=mu) - rho1, rho2, rho3 = _calcRhos( - lambda1, lambda3, q1, q2, q3, rho1_hat, rho2_hat, rho3_hat, V - ) + rho1, rho2, rho3 = _calcRhos(lambda1, lambda3, q1, q2, q3, rho1_hat, rho2_hat, rho3_hat, V) if np.dot(rho2, rho2_hat) < 0: continue @@ -290,14 +278,12 @@ def gaussIOD( elif velocity_method == "herrick+gibbs": v2 = calcHerrickGibbs(r1, r2, r3, t1, t2, t3) else: - raise ValueError( - "velocity_method should be one of {'gauss', 'gibbs', 'herrick+gibbs'}" - ) + raise ValueError("velocity_method should be one of {'gauss', 'gibbs', 'herrick+gibbs'}") epoch = t2 orbit = np.concatenate([r2, v2]) - if iterate == True: + if iterate is True: if iterator == "state transition": orbit = iterateStateTransition( orbit, @@ -315,7 +301,7 @@ def gaussIOD( tol=tol, ) - if light_time == True: + if light_time is True: rho2_mag = np.linalg.norm(orbit[:3] - q2) lt = rho2_mag / C epoch -= lt @@ -346,9 +332,7 @@ def gaussIOD( vy=orbits[:, 4], vz=orbits[:, 5], time=Timestamp.from_mjd(epochs, scale="utc"), - origin=Origin.from_kwargs( - code=np.full(len(orbits), "SUN", dtype="object") - ), + origin=Origin.from_kwargs(code=np.full(len(orbits), "SUN", dtype="object")), frame="ecliptic", ) ) diff --git a/thor/orbits/gibbs.py b/src/thor/orbits/gibbs.py similarity index 94% rename from thor/orbits/gibbs.py rename to src/thor/orbits/gibbs.py index 5b7be573..7a0704d2 100644 --- a/thor/orbits/gibbs.py +++ b/src/thor/orbits/gibbs.py @@ -49,14 +49,14 @@ def calcGibbs(r1, r2, r3): Z23 = np.cross(r2, r3) Z31 = np.cross(r3, r1) - coplanarity = np.arcsin(np.dot(Z23, r1) / (np.linalg.norm(Z23) * r1_mag)) + # coplanarity = np.arcsin(np.dot(Z23, r1) / (np.linalg.norm(Z23) * r1_mag)) N = r1_mag * Z23 + r2_mag * Z31 + r3_mag * Z12 N_mag = np.linalg.norm(N) D = Z12 + Z23 + Z31 D_mag = np.linalg.norm(D) S = (r2_mag - r3_mag) * r1 + (r3_mag - r1_mag) * r2 + (r1_mag - r2_mag) * r3 - S_mag = np.linalg.norm(S) + # S_mag = np.linalg.norm(S) B = np.cross(D, r2) Lg = np.sqrt(MU / N_mag / D_mag) v2 = Lg / r2_mag * B + Lg * S diff --git a/thor/orbits/herrick_gibbs.py b/src/thor/orbits/herrick_gibbs.py similarity index 100% rename from thor/orbits/herrick_gibbs.py rename to src/thor/orbits/herrick_gibbs.py diff --git a/thor/orbits/iod.py b/src/thor/orbits/iod.py similarity index 91% rename from thor/orbits/iod.py rename to src/thor/orbits/iod.py index 9d19efef..0f4283d9 100644 
--- a/thor/orbits/iod.py +++ b/src/thor/orbits/iod.py @@ -12,7 +12,8 @@ import ray from adam_core.coordinates.residuals import Residuals from adam_core.orbit_determination import OrbitDeterminationObservations -from adam_core.propagator import PYOORB, Propagator +from adam_core.propagator import Propagator +from adam_core.propagator.adam_pyoorb import PYOORBPropagator from adam_core.propagator.utils import _iterate_chunk_indices, _iterate_chunks from adam_core.ray_cluster import initialize_use_ray @@ -73,25 +74,20 @@ def select_observations( selected_index = np.array([selected_index]) elif method == "thirds": - selected_times = np.percentile( - times, [1 / 6 * 100, 50, 5 / 6 * 100], interpolation="nearest" - ) + selected_times = np.percentile(times, [1 / 6 * 100, 50, 5 / 6 * 100], interpolation="nearest") selected_index = np.intersect1d(times, selected_times, return_indices=True)[1] selected_index = np.array([selected_index]) elif method == "combinations": # Make all possible combinations of 3 observations - selected_index = np.array( - [np.array(index) for index in combinations(indexes, 3)] - ) + selected_index = np.array([np.array(index) for index in combinations(indexes, 3)]) # Calculate arc length arc_length = times[selected_index][:, 2] - times[selected_index][:, 0] # Calculate distance of second observation from middle point (last + first) / 2 time_from_mid = np.abs( - (times[selected_index][:, 2] + times[selected_index][:, 0]) / 2 - - times[selected_index][:, 1] + (times[selected_index][:, 2] + times[selected_index][:, 0]) / 2 - times[selected_index][:, 1] ) # Sort by descending arc length and ascending time from midpoint @@ -126,13 +122,11 @@ def iod_worker( min_arc_length: float = 1.0, contamination_percentage: float = 0.0, rchi2_threshold: float = 200, - observation_selection_method: Literal[ - "combinations", "first+middle+last", "thirds" - ] = "combinations", + observation_selection_method: Literal["combinations", "first+middle+last", "thirds"] = "combinations", linkage_id_col: str = "cluster_id", iterate: bool = False, light_time: bool = True, - propagator: Type[Propagator] = PYOORB, + propagator: Type[Propagator] = PYOORBPropagator, propagator_kwargs: dict = {}, ) -> Tuple[FittedOrbits, FittedOrbitMembers]: @@ -145,9 +139,7 @@ def iod_worker( obs_ids = linkage_members.apply_mask( pc.equal(linkage_members.column(linkage_id_col), linkage_id) ).obs_id - observations_linkage = observations.apply_mask( - pc.is_in(observations.id, obs_ids) - ) + observations_linkage = observations.apply_mask(pc.is_in(observations.id, obs_ids)) # Sort observations by time observations_linkage = observations_linkage.sort_by( @@ -210,19 +202,15 @@ def iod_worker_remote( min_arc_length: float = 1.0, contamination_percentage: float = 0.0, rchi2_threshold: float = 200, - observation_selection_method: Literal[ - "combinations", "first+middle+last", "thirds" - ] = "combinations", + observation_selection_method: Literal["combinations", "first+middle+last", "thirds"] = "combinations", linkage_id_col: str = "cluster_id", iterate: bool = False, light_time: bool = True, - propagator: Type[Propagator] = PYOORB, + propagator: Type[Propagator] = PYOORBPropagator, propagator_kwargs: dict = {}, ) -> Tuple[FittedOrbits, FittedOrbitMembers]: # Select linkage ids from linkage_members_indices - linkage_id_chunk = linkage_ids[ - linkage_members_indices[0] : linkage_members_indices[1] - ] + linkage_id_chunk = linkage_ids[linkage_members_indices[0] : linkage_members_indices[1]] return iod_worker( linkage_id_chunk, 
observations, @@ -249,12 +237,10 @@ def iod( min_arc_length: float = 1.0, contamination_percentage: float = 0.0, rchi2_threshold: float = 200, - observation_selection_method: Literal[ - "combinations", "first+middle+last", "thirds" - ] = "combinations", + observation_selection_method: Literal["combinations", "first+middle+last", "thirds"] = "combinations", iterate: bool = False, light_time: bool = True, - propagator: Type[Propagator] = PYOORB, + propagator: Type[Propagator] = PYOORBPropagator, propagator_kwargs: dict = {}, ) -> Tuple[FittedOrbits, FittedOrbitMembers]: """ @@ -298,7 +284,7 @@ def iod( Correct preliminary orbit for light travel time. linkage_id_col : str, optional Name of linkage_id column in the linkage_members dataframe. - backend : {'MJOLNIR', 'PYOORB'}, optional + backend : {'MJOLNIR', 'PYOORBPropagator'}, optional Which backend to use for ephemeris generation. backend_kwargs : dict, optional Settings and additional parameters to pass to selected @@ -358,7 +344,6 @@ def iod( coords_obs_all = observers.coordinates.r times_all = coords_all.time.mjd().to_numpy(zero_copy_only=False) - chi2_sol = 1e10 orbit_sol: FittedOrbits = FittedOrbits.empty() obs_ids_sol = None arc_length = None @@ -409,9 +394,7 @@ def iod( continue # Propagate initial orbit to all observation times - ephemeris = prop.generate_ephemeris( - iod_orbits, observers, chunk_size=1, max_processes=1 - ) + ephemeris = prop.generate_ephemeris(iod_orbits, observers, chunk_size=1, max_processes=1) # For each unique initial orbit calculate residuals and chi-squared # Find the orbit which yields the lowest chi-squared @@ -447,7 +430,6 @@ def iod( orbit_sol = iod_orbits[i : i + 1] obs_ids_sol = ids chi2_total_sol = chi2_total - chi2_sol = chi2 rchi2_sol = rchi2 residuals_sol = residuals outliers = np.array([]) @@ -491,10 +473,7 @@ def iod( outliers = obs_id_outlier num_obs = num_obs_new ids_mask = np.isin(obs_ids_all, outliers, invert=True) - arc_length = ( - times_all[ids_mask].max() - times_all[ids_mask].min() - ) - chi2_sol = chi2 + arc_length = times_all[ids_mask].max() - times_all[ids_mask].min() converged = True break @@ -518,9 +497,7 @@ def iod( ) orbit_members = FittedOrbitMembers.from_kwargs( - orbit_id=np.full( - len(obs_ids_all), orbit_sol.orbit_id[0].as_py(), dtype="object" - ), + orbit_id=np.full(len(obs_ids_all), orbit_sol.orbit_id[0].as_py(), dtype="object"), obs_id=obs_ids_all, residuals=residuals_sol, solution=np.isin(obs_ids_all, obs_ids_sol), @@ -537,13 +514,11 @@ def initial_orbit_determination( min_arc_length: float = 1.0, contamination_percentage: float = 20.0, rchi2_threshold: float = 10**3, - observation_selection_method: Literal[ - "combinations", "first+middle+last", "thirds" - ] = "combinations", + observation_selection_method: Literal["combinations", "first+middle+last", "thirds"] = "combinations", iterate: bool = False, light_time: bool = True, linkage_id_col: str = "cluster_id", - propagator: Type[Propagator] = PYOORB, + propagator: Type[Propagator] = PYOORBPropagator, propagator_kwargs: dict = {}, chunk_size: int = 1, max_processes: Optional[int] = 1, @@ -596,9 +571,7 @@ def initial_orbit_determination( logger.info("Placed observations in the object store.") futures = [] - for linkage_id_chunk_indices in _iterate_chunk_indices( - linkage_ids, chunk_size - ): + for linkage_id_chunk_indices in _iterate_chunk_indices(linkage_ids, chunk_size): futures.append( iod_worker_remote.remote( linkage_ids_ref, @@ -623,9 +596,7 @@ def initial_orbit_determination( result = ray.get(finished[0]) 
iod_orbits_chunk, iod_orbit_members_chunk = result iod_orbits = qv.concatenate([iod_orbits, iod_orbits_chunk]) - iod_orbit_members = qv.concatenate( - [iod_orbit_members, iod_orbit_members_chunk] - ) + iod_orbit_members = qv.concatenate([iod_orbit_members, iod_orbit_members_chunk]) if iod_orbits.fragmented(): iod_orbits = qv.defragment(iod_orbits) if iod_orbit_members.fragmented(): @@ -636,9 +607,7 @@ def initial_orbit_determination( result = ray.get(finished[0]) iod_orbits_chunk, iod_orbit_members_chunk = result iod_orbits = qv.concatenate([iod_orbits, iod_orbits_chunk]) - iod_orbit_members = qv.concatenate( - [iod_orbit_members, iod_orbit_members_chunk] - ) + iod_orbit_members = qv.concatenate([iod_orbit_members, iod_orbit_members_chunk]) if iod_orbits.fragmented(): iod_orbits = qv.defragment(iod_orbits) if iod_orbit_members.fragmented(): @@ -646,9 +615,7 @@ def initial_orbit_determination( if len(refs_to_free) > 0: ray.internal.free(refs_to_free) - logger.info( - f"Removed {len(refs_to_free)} references from the object store." - ) + logger.info(f"Removed {len(refs_to_free)} references from the object store.") else: for linkage_id_chunk in _iterate_chunks(linkage_ids, chunk_size): @@ -668,9 +635,7 @@ def initial_orbit_determination( propagator_kwargs=propagator_kwargs, ) iod_orbits = qv.concatenate([iod_orbits, iod_orbits_chunk]) - iod_orbit_members = qv.concatenate( - [iod_orbit_members, iod_orbit_members_chunk] - ) + iod_orbit_members = qv.concatenate([iod_orbit_members, iod_orbit_members_chunk]) if iod_orbits.fragmented(): iod_orbits = qv.defragment(iod_orbits) if iod_orbit_members.fragmented(): @@ -697,9 +662,7 @@ def initial_orbit_determination( time_end_drop = time.time() logger.info(f"Removed {num_orbits - len(iod_orbits)} duplicate clusters.") time_end_drop = time.time() - logger.info( - f"Inital orbit deduplication completed in {time_end_drop - time_start_drop:.3f} seconds." - ) + logger.info(f"Initial orbit deduplication completed in {time_end_drop - time_start_drop:.3f} seconds.") # Sort initial orbits by orbit ID and observation time iod_orbits, iod_orbit_members = sort_by_id_and_time( @@ -712,8 +675,6 @@ def initial_orbit_determination( time_end = time.perf_counter() logger.info(f"Found {len(iod_orbits)} initial orbits.") - logger.info( - f"Initial orbit determination completed in {time_end - time_start:.3f} seconds."
- ) + logger.info(f"Initial orbit determination completed in {time_end - time_start:.3f} seconds.") return iod_orbits, iod_orbit_members diff --git a/thor/orbits/iterators.py b/src/thor/orbits/iterators.py similarity index 96% rename from thor/orbits/iterators.py rename to src/thor/orbits/iterators.py index ece94ee3..29b70b50 100644 --- a/thor/orbits/iterators.py +++ b/src/thor/orbits/iterators.py @@ -80,7 +80,6 @@ def iterateStateTransition( orbit_iter = orbit orbit_iter_prev = orbit i = 0 - phi_mag_prev = 1e10 for i in range(max_iter): # Grab orbit position and velocity vectors # These should belong to the state of the object at the time of the second @@ -109,9 +108,7 @@ def iterateStateTransition( r_new, v_new = apply_lagrange_coefficients(r, v, *lagrange_coeffs) # Calculate the state transition matrix - STM = calcStateTransitionMatrix( - orbit_iter, dt, mu=mu, max_iter=100, tol=1e-15 - ) + STM = calcStateTransitionMatrix(orbit_iter, dt, mu=mu, max_iter=100, tol=1e-15) if j == 0: STM1 = STM @@ -167,7 +164,6 @@ def iterateStateTransition( i += 1 orbit_iter_prev = orbit_iter - phi_mag_prev = phi_mag_iter if i >= max_iter: break diff --git a/thor/orbits/od.py b/src/thor/orbits/od.py similarity index 87% rename from thor/orbits/od.py rename to src/thor/orbits/od.py index 19991834..ed43fdcc 100644 --- a/thor/orbits/od.py +++ b/src/thor/orbits/od.py @@ -12,7 +12,8 @@ from adam_core.coordinates.residuals import Residuals from adam_core.orbit_determination import OrbitDeterminationObservations from adam_core.orbits import Orbits -from adam_core.propagator import PYOORB, Propagator, _iterate_chunks +from adam_core.propagator import Propagator, _iterate_chunks +from adam_core.propagator.adam_pyoorb import PYOORBPropagator from adam_core.propagator.utils import _iterate_chunk_indices from adam_core.ray_cluster import initialize_use_ray from scipy.linalg import solve @@ -38,7 +39,7 @@ def od_worker( delta: float = 1e-6, max_iter: int = 20, method: Literal["central", "finite"] = "central", - propagator: Type[Propagator] = PYOORB, + propagator: Type[Propagator] = PYOORBPropagator, propagator_kwargs: dict = {}, ) -> Tuple[FittedOrbits, FittedOrbitMembers]: @@ -49,9 +50,7 @@ def od_worker( logger.debug(f"Differentially correcting orbit {orbit_id}...") orbit = orbits.select("orbit_id", orbit_id) - obs_ids = orbit_members.apply_mask( - pc.equal(orbit_members.orbit_id, orbit_id) - ).obs_id + obs_ids = orbit_members.apply_mask(pc.equal(orbit_members.orbit_id, orbit_id)).obs_id orbit_observations = observations.apply_mask(pc.is_in(observations.id, obs_ids)) # Sort observations by time @@ -112,7 +111,7 @@ def od_worker_remote( delta: float = 1e-6, max_iter: int = 20, method: Literal["central", "finite"] = "central", - propagator: Type[Propagator] = PYOORB, + propagator: Type[Propagator] = PYOORBPropagator, propagator_kwargs: dict = {}, ) -> Tuple[FittedOrbits, FittedOrbitMembers]: orbit_ids_chunk = orbit_ids[orbit_ids_indices[0] : orbit_ids_indices[1]] @@ -146,7 +145,7 @@ def od( delta: float = 1e-6, max_iter: int = 20, method: Literal["central", "finite"] = "central", - propagator: Type[Propagator] = PYOORB, + propagator: Type[Propagator] = PYOORBPropagator, propagator_kwargs: dict = {}, ) -> Tuple[FittedOrbits, FittedOrbitMembers]: # Intialize the propagator @@ -180,9 +179,7 @@ def od( logger.debug("This orbit has fewer than {} observations.".format(min_obs)) processable = False else: - max_outliers = calculate_max_outliers( - num_obs, min_obs, contamination_percentage - ) + max_outliers = 
calculate_max_outliers(num_obs, min_obs, contamination_percentage) logger.debug(f"Maximum number of outliers allowed: {max_outliers}") outliers_tried = 0 @@ -191,18 +188,14 @@ def od( # such that the chi2 improves orbit_prev_ = orbit.to_orbits() - ephemeris_prev_ = prop.generate_ephemeris( - orbit_prev_, observers, chunk_size=1, max_processes=1 - ) + ephemeris_prev_ = prop.generate_ephemeris(orbit_prev_, observers, chunk_size=1, max_processes=1) # Calculate residuals and chi2 residuals_prev_ = Residuals.calculate( coords, ephemeris_prev_.coordinates, ) - residuals_prev_array = np.stack( - residuals_prev_.values.to_numpy(zero_copy_only=False) - )[:, 1:3] + residuals_prev_array = np.stack(residuals_prev_.values.to_numpy(zero_copy_only=False))[:, 1:3] num_obs_ = len(observations) chi2_prev_ = residuals_prev_.chi2.to_numpy() @@ -239,12 +232,10 @@ def od( # will exit the current iteration if something fails which makes accounting for # iterations at the start of an iteration easier. if iterations == max_iter_outliers + 1: - logger.debug(f"Maximum number of iterations completed.") + logger.debug("Maximum number of iterations completed.") break - if iterations == max_iter_i + 1 and ( - solution_found or (max_outliers == outliers_tried) - ): - logger.debug(f"Maximum number of iterations completed.") + if iterations == max_iter_i + 1 and (solution_found or (max_outliers == outliers_tried)): + logger.debug("Maximum number of iterations completed.") break logger.debug(f"Starting iteration number: {iterations}/{max_iter_outliers}") @@ -268,9 +259,7 @@ def od( ATWb = np.zeros((num_params, 1, num_obs)) # Generate ephemeris with current nominal orbit - ephemeris_nom = prop.generate_ephemeris( - orbit_prev, observers, chunk_size=1, max_processes=1 - ) + ephemeris_nom = prop.generate_ephemeris(orbit_prev, observers, chunk_size=1, max_processes=1) # Modify each component of the state by a small delta d = np.zeros((1, 6)) @@ -304,9 +293,7 @@ def od( ) # Calculate the modified ephemerides - ephemeris_mod_p = prop.generate_ephemeris( - orbit_iter_p, observers, chunk_size=1, max_processes=1 - ) + ephemeris_mod_p = prop.generate_ephemeris(orbit_iter_p, observers, chunk_size=1, max_processes=1) delta_denom = d[0, i] if method == "central": @@ -340,16 +327,12 @@ def od( ephemeris_mod_p.coordinates, ephemeris_mod_n.coordinates, ) - residuals_mod = np.stack( - residuals_mod.values.to_numpy(zero_copy_only=False) - ) + residuals_mod = np.stack(residuals_mod.values.to_numpy(zero_copy_only=False)) residuals_mod_array = residuals_mod[:, 1:3] for n in range(num_obs): try: - A[:, i : i + 1, n] = ( - residuals_mod_array[ids_mask][n : n + 1].T / delta_denom - ) + A[:, i : i + 1, n] = residuals_mod_array[ids_mask][n : n + 1].T / delta_denom except RuntimeError: print(orbit_prev.orbit_id) @@ -368,9 +351,7 @@ def od( except np.linalg.LinAlgError: ATWA_condition = np.nan ATWb_condition = np.nan - logger.debug( - f"Matrix condition calculation failed for {orbit.orbit_id[0].as_py()}" - ) + logger.debug(f"Matrix condition calculation failed for {orbit.orbit_id[0].as_py()}") if (ATWA_condition > 1e15) or (ATWb_condition > 1e15): delta_prev /= DELTA_DECREASE_FACTOR @@ -388,9 +369,7 @@ def od( variances = np.diag(covariance_matrix) if np.any(variances <= 0) or np.any(np.isnan(variances)): delta_prev /= DELTA_DECREASE_FACTOR - logger.debug( - "Variances are negative, 0.0, or NaN. Discarding solution." - ) + logger.debug("Variances are negative, 0.0, or NaN. 
Discarding solution.") continue r_variances = variances[0:3] @@ -398,16 +377,12 @@ def od( r = orbit_prev.coordinates.r_mag if (r_sigma / r) > 1: delta_prev /= DELTA_DECREASE_FACTOR - logger.debug( - "Covariance matrix is largely unconstrained. Discarding solution." - ) + logger.debug("Covariance matrix is largely unconstrained. Discarding solution.") continue if np.any(np.isnan(covariance_matrix)): delta_prev *= DELTA_INCREASE_FACTOR - logger.debug( - "Covariance matrix contains NaNs. Discarding solution." - ) + logger.debug("Covariance matrix contains NaNs. Discarding solution.") continue except np.linalg.LinAlgError: @@ -433,9 +408,7 @@ def od( vx=cartesian_elements[:, 3], vy=cartesian_elements[:, 4], vz=cartesian_elements[:, 5], - covariance=CoordinateCovariances.from_matrix( - covariance_matrix.reshape(1, 6, 6) - ), + covariance=CoordinateCovariances.from_matrix(covariance_matrix.reshape(1, 6, 6)), time=orbit_prev.coordinates.time, origin=orbit_prev.coordinates.origin, frame=orbit_prev.coordinates.frame, @@ -447,9 +420,7 @@ def od( continue # Generate ephemeris with current nominal orbit - ephemeris_iter = prop.generate_ephemeris( - orbit_iter, observers, chunk_size=1, max_processes=1 - ) + ephemeris_iter = prop.generate_ephemeris(orbit_iter, observers, chunk_size=1, max_processes=1) residuals = Residuals.calculate(coords, ephemeris_iter.coordinates) chi2_iter = residuals.chi2.to_numpy() @@ -461,9 +432,7 @@ def od( # accept the orbit and continue iterating until max iterations has been # reached. Once max iterations have been reached and the orbit still has not converged # to an acceptable solution, try removing an observation as an outlier and iterate again. - if ( - (rchi2_iter < rchi2_prev) or first_solution - ) and arc_length >= min_arc_length: + if ((rchi2_iter < rchi2_prev) or first_solution) and arc_length >= min_arc_length: if first_solution: logger.debug( "Storing first successful differential correction iteration for these observations." @@ -518,9 +487,7 @@ def od( if arc_length >= min_arc_length: max_iter_i = max_iter * (outliers_tried + 1) else: - logger.debug( - "Removing the outlier will cause the arc length to go below the minimum." - ) + logger.debug("Removing the outlier will cause the arc length to go below the minimum.") # If the new orbit does not have lower residuals, try changing # delta to see if we get an improvement @@ -561,9 +528,7 @@ def od( ) od_orbit_members = FittedOrbitMembers.from_kwargs( - orbit_id=np.full( - len(obs_ids_all), orbit_prev.orbit_id[0].as_py(), dtype="object" - ), + orbit_id=np.full(len(obs_ids_all), orbit_prev.orbit_id[0].as_py(), dtype="object"), obs_id=obs_ids_all, residuals=residuals_prev, solution=np.isin(obs_ids_all, obs_id_outlier, invert=True), @@ -584,7 +549,7 @@ def differential_correction( delta: float = 1e-8, max_iter: int = 20, method: Literal["central", "finite"] = "central", - propagator: Type[Propagator] = PYOORB, + propagator: Type[Propagator] = PYOORBPropagator, propagator_kwargs: dict = {}, chunk_size: int = 10, max_processes: Optional[int] = 1, @@ -600,9 +565,6 @@ def differential_correction( Number of orbits to send to each job. num_jobs : int, optional Number of jobs to launch. - parallel_backend : str, optional - Which parallelization backend to use {'ray', 'mp', 'cf'}. Defaults to using Python's concurrent.futures - module ('cf'). 
""" time_start = time.perf_counter() logger.info("Running differential correction...") @@ -623,14 +585,10 @@ def differential_correction( logger.info("Retrieved orbit members from the object store.") if obs_ids is not None: - orbit_members = orbit_members.apply_mask( - pc.is_in(orbit_members.obs_id, obs_ids) - ) + orbit_members = orbit_members.apply_mask(pc.is_in(orbit_members.obs_id, obs_ids)) logger.info("Applied mask to orbit members.") if orbit_ids is not None: - orbit_members = orbit_members.apply_mask( - pc.is_in(orbit_members.orbit_id, orbit_ids) - ) + orbit_members = orbit_members.apply_mask(pc.is_in(orbit_members.orbit_id, orbit_ids)) logger.info("Applied mask to orbit members.") else: orbit_members_ref = None @@ -652,9 +610,7 @@ def differential_correction( od_orbit_members = FittedOrbitMembers.empty() time_end = time.perf_counter() logger.info(f"Differentially corrected {len(od_orbits)} orbits.") - logger.info( - f"Differential correction completed in {time_end - time_start:.3f} seconds." - ) + logger.info(f"Differential correction completed in {time_end - time_start:.3f} seconds.") return od_orbits, od_orbit_members orbit_ids = orbits.orbit_id.to_numpy(zero_copy_only=False) @@ -719,9 +675,7 @@ def differential_correction( od_orbits = qv.concatenate([od_orbits, od_orbits_chunk]) if od_orbits.fragmented(): od_orbits = qv.defragment(od_orbits) - od_orbit_members = qv.concatenate( - [od_orbit_members, od_orbit_members_chunk] - ) + od_orbit_members = qv.concatenate([od_orbit_members, od_orbit_members_chunk]) if od_orbit_members.fragmented(): od_orbit_members = qv.defragment(od_orbit_members) @@ -731,17 +685,13 @@ def differential_correction( od_orbits = qv.concatenate([od_orbits, od_orbits_chunk]) if od_orbits.fragmented(): od_orbits = qv.defragment(od_orbits) - od_orbit_members = qv.concatenate( - [od_orbit_members, od_orbit_members_chunk] - ) + od_orbit_members = qv.concatenate([od_orbit_members, od_orbit_members_chunk]) if od_orbit_members.fragmented(): od_orbit_members = qv.defragment(od_orbit_members) if len(refs_to_free) > 0: ray.internal.free(refs_to_free) - logger.info( - f"Removed {len(refs_to_free)} references from the object store." - ) + logger.info(f"Removed {len(refs_to_free)} references from the object store.") else: for orbit_ids_chunk in _iterate_chunks(orbit_ids, chunk_size): @@ -763,16 +713,12 @@ def differential_correction( od_orbits = qv.concatenate([od_orbits, od_orbits_chunk]) if od_orbits.fragmented(): od_orbits = qv.defragment(od_orbits) - od_orbit_members = qv.concatenate( - [od_orbit_members, od_orbit_members_chunk] - ) + od_orbit_members = qv.concatenate([od_orbit_members, od_orbit_members_chunk]) if od_orbit_members.fragmented(): od_orbit_members = qv.defragment(od_orbit_members) time_end = time.perf_counter() logger.info(f"Differentially corrected {len(od_orbits)} orbits.") - logger.info( - f"Differential correction completed in {time_end - time_start:.3f} seconds." - ) + logger.info(f"Differential correction completed in {time_end - time_start:.3f} seconds.") return od_orbits, od_orbit_members diff --git a/thor/orbits/state_transition.py b/src/thor/orbits/state_transition.py similarity index 93% rename from thor/orbits/state_transition.py rename to src/thor/orbits/state_transition.py index d4df5e20..456dc08c 100644 --- a/thor/orbits/state_transition.py +++ b/src/thor/orbits/state_transition.py @@ -68,7 +68,7 @@ def calcMMatrix(r0, r1, lagrange_coeffs, stumpff_coeffs, chi, alpha, mu=MU): # Everhart & Pitkin 1983 [3]. 
# TODO : These conversions were not robustly tested and needed further investigation w = chi / sqrt_mu - alpha_alt = -mu * alpha + # alpha_alt = -mu * alpha U0 = (1 - alpha * chi**2) * c2 U1 = (chi - alpha * chi**3) * c3 / sqrt_mu U2 = chi**2 * c2 / mu @@ -85,9 +85,7 @@ def calcMMatrix(r0, r1, lagrange_coeffs, stumpff_coeffs, chi, alpha, mu=MU): # Calculate elements of the M matrix # See equation A.41 in Shepperd 1985 [4] - m11 = (U0 / (r1_mag * r0_mag) + 1 / r0_mag**2 + 1 / r1_mag**2) * F - ( - mu**2 * W - ) / (r1_mag * r0_mag) ** 3 + m11 = (U0 / (r1_mag * r0_mag) + 1 / r0_mag**2 + 1 / r1_mag**2) * F - (mu**2 * W) / (r1_mag * r0_mag) ** 3 m12 = F * U1 / r1_mag + (G - 1) / r1_mag**2 m13 = (G - 1) * U1 / r1_mag - (mu * W) / r1_mag**3 m21 = -F * U1 / r0_mag - (f - 1) / r0_mag**2 @@ -108,9 +106,7 @@ def calcMMatrix(r0, r1, lagrange_coeffs, stumpff_coeffs, chi, alpha, mu=MU): return M -def calcStateTransitionMatrix( - orbit, dt, mu=0.0002959122082855911, max_iter=100, tol=1e-15 -): +def calcStateTransitionMatrix(orbit, dt, mu=0.0002959122082855911, max_iter=100, tol=1e-15): """ Calculate the state transition matrix for a given change in epoch. The state transition matrix maps deviations from a state at an epoch t0 to a different epoch t1 (dt = t1 - t0). @@ -172,12 +168,12 @@ def calcStateTransitionMatrix( # Construct the 4 3 x 3 submatrices that can form the # the 6 x 6 state transition matrix. # See equations A.42 - A.46 in Shepperd 1985 [1] - I = np.identity(3) + I3 = np.identity(3) phi = np.zeros((6, 6)) - phi11 = f * I + state_1 @ (M[1:3, 0:2] @ state_0.T) - phi12 = g * I + state_1 @ (M[1:3, 1:3] @ state_0.T) - phi21 = f_dot * I - state_1 @ (M[0:2, 0:2] @ state_0.T) - phi22 = g_dot * I - state_1 @ (M[0:2, 1:3] @ state_0.T) + phi11 = f * I3 + state_1 @ (M[1:3, 0:2] @ state_0.T) + phi12 = g * I3 + state_1 @ (M[1:3, 1:3] @ state_0.T) + phi21 = f_dot * I3 - state_1 @ (M[0:2, 0:2] @ state_0.T) + phi22 = g_dot * I3 - state_1 @ (M[0:2, 1:3] @ state_0.T) phi[0:3, 0:3] = phi11 phi[0:3, 3:6] = phi12 diff --git a/thor/orbits/tests/__init__.py b/src/thor/orbits/tests/__init__.py similarity index 100% rename from thor/orbits/tests/__init__.py rename to src/thor/orbits/tests/__init__.py diff --git a/thor/orbits/tests/test_attribution.py b/src/thor/orbits/tests/test_attribution.py similarity index 96% rename from thor/orbits/tests/test_attribution.py rename to src/thor/orbits/tests/test_attribution.py index 0b3dfbf1..4d7d7512 100644 --- a/thor/orbits/tests/test_attribution.py +++ b/src/thor/orbits/tests/test_attribution.py @@ -15,9 +15,7 @@ def observations(): id=["01", "02", "03", "04", "05"], exposure_id=["e01", "e01", "e02", "e02", "e02"], coordinates=SphericalCoordinates.from_kwargs( - time=Timestamp.from_mjd( - [59001.1, 59001.1, 59002.1, 59002.1, 59002.1], scale="utc" - ), + time=Timestamp.from_mjd([59001.1, 59001.1, 59002.1, 59002.1, 59002.1], scale="utc"), lon=[1, 2, 3, 4, 5], lat=[5, 6, 7, 8, 9], origin=Origin.from_kwargs(code=["500", "500", "500", "500", "500"]), diff --git a/src/thor/projections/__init__.py b/src/thor/projections/__init__.py new file mode 100644 index 00000000..35d1b0e9 --- /dev/null +++ b/src/thor/projections/__init__.py @@ -0,0 +1,4 @@ +# ruff: noqa: F401, F403 +from .covariances import * +from .gnomonic import * +from .transforms import * diff --git a/thor/projections/covariances.py b/src/thor/projections/covariances.py similarity index 99% rename from thor/projections/covariances.py rename to src/thor/projections/covariances.py index 50edc7b9..cd3625d1 100644 --- 
a/thor/projections/covariances.py +++ b/src/thor/projections/covariances.py @@ -1,5 +1,3 @@ -from typing import List - import numpy as np import pyarrow as pa import quivr as qv diff --git a/thor/projections/gnomonic.py b/src/thor/projections/gnomonic.py similarity index 92% rename from thor/projections/gnomonic.py rename to src/thor/projections/gnomonic.py index 971bff0e..5e8c79f7 100644 --- a/thor/projections/gnomonic.py +++ b/src/thor/projections/gnomonic.py @@ -1,9 +1,7 @@ -from typing import Literal, Optional +from typing import Optional import numpy as np -import pandas as pd import pyarrow as pa -import pyarrow.compute as pc import quivr as qv from adam_core.coordinates import CartesianCoordinates, Origin from adam_core.coordinates.covariances import transform_covariances_jacobian @@ -27,9 +25,7 @@ class GnomonicCoordinates(qv.Table): @property def values(self) -> np.ndarray: - return np.array( - self.table.select(["theta_x", "theta_y", "vtheta_x", "vtheta_y"]) - ) + return np.array(self.table.select(["theta_x", "theta_y", "vtheta_x", "vtheta_y"])) @property def sigma_theta_x(self) -> np.ndarray: @@ -127,9 +123,7 @@ def from_cartesian( vy=np.zeros(num_unique_times, dtype=np.float64), vz=np.zeros(num_unique_times, dtype=np.float64), frame=cartesian.frame, - origin=qv.concatenate( - [cartesian[0].origin for _ in range(num_unique_times)] - ), + origin=qv.concatenate([cartesian[0].origin for _ in range(num_unique_times)]), ) link = cartesian.time.link(center_cartesian.time, precision="ms") @@ -156,9 +150,7 @@ def from_cartesian( rounded_cartesian_times.equals_scalar(key[0], key[1], precision="ms") ) center_cartesian_i = center_cartesian.apply_mask( - rounded_center_cartesian_times.equals_scalar( - key[0], key[1], precision="ms" - ) + rounded_center_cartesian_times.equals_scalar(key[0], key[1], precision="ms") ) if len(center_cartesian_i) == 0: @@ -180,9 +172,7 @@ def from_cartesian( center_cartesian=center_cartesian_i.values[0], ) else: - covariances_gnomonic = np.empty( - (len(coords_gnomonic), 4, 4), dtype=np.float64 - ) + covariances_gnomonic = np.empty((len(coords_gnomonic), 4, 4), dtype=np.float64) covariances_gnomonic.fill(np.nan) gnomonic_coords_chunk = cls.from_kwargs( diff --git a/thor/projections/transforms.py b/src/thor/projections/transforms.py similarity index 93% rename from thor/projections/transforms.py rename to src/thor/projections/transforms.py index 4637904c..18897f01 100644 --- a/thor/projections/transforms.py +++ b/src/thor/projections/transforms.py @@ -1,7 +1,6 @@ from typing import Tuple import jax.numpy as jnp -import numpy as np from jax import config, jit, lax, vmap config.update("jax_enable_x64", True) @@ -46,8 +45,7 @@ def _calc_gnomonic_rotation_matrix(coords_cartesian: jnp.ndarray) -> jnp.ndarray r_dot_v = jnp.dot(r, v) v_mag = jnp.linalg.norm(v) v, vmag_set = lax.cond( - ((r_dot_v > (1 - FLOAT_TOLERANCE)) & (r_dot_v < (1 + FLOAT_TOLERANCE))) - | (v_mag < FLOAT_TOLERANCE), + ((r_dot_v > (1 - FLOAT_TOLERANCE)) & (r_dot_v < (1 + FLOAT_TOLERANCE))) | (v_mag < FLOAT_TOLERANCE), lambda v_i: (jnp.array([jnp.sqrt(2) / 2, jnp.sqrt(2) / 2, 0.0]), True), lambda v_i: (v_i, False), v, @@ -69,9 +67,7 @@ def _calc_gnomonic_rotation_matrix(coords_cartesian: jnp.ndarray) -> jnp.ndarray R1 = lax.cond( jnp.linalg.norm(nu) < FLOAT_TOLERANCE, lambda vp, c: jnp.identity(3), - lambda vp, c: jnp.identity(3) - + vp - + jnp.linalg.matrix_power(vp, 2) * (1 / (1 + c)), + lambda vp, c: jnp.identity(3) + vp + jnp.linalg.matrix_power(vp, 2) * (1 / (1 + c)), vp, c, ) @@ -143,9 +139,7 @@ 
def _cartesian_to_gnomonic( # Vectorization Map: _cartesian_to_gnomonic -_cartesian_to_gnomonic_vmap = jit( - vmap(_cartesian_to_gnomonic, in_axes=(0, None), out_axes=(0, None)) -) +_cartesian_to_gnomonic_vmap = jit(vmap(_cartesian_to_gnomonic, in_axes=(0, None), out_axes=(0, None))) @jit @@ -173,7 +167,5 @@ def cartesian_to_gnomonic( M : `~jax.numpy.ndarray` (6, 6) Gnomonic rotation matrix. """ - coords_gnomonic, M_matrix = _cartesian_to_gnomonic_vmap( - coords_cartesian, center_cartesian - ) + coords_gnomonic, M_matrix = _cartesian_to_gnomonic_vmap(coords_cartesian, center_cartesian) return coords_gnomonic, M_matrix diff --git a/thor/range_and_transform.py b/src/thor/range_and_transform.py similarity index 91% rename from thor/range_and_transform.py rename to src/thor/range_and_transform.py index 903fdc31..b645bc5e 100644 --- a/thor/range_and_transform.py +++ b/src/thor/range_and_transform.py @@ -10,7 +10,8 @@ OriginCodes, transform_coordinates, ) -from adam_core.propagator import PYOORB, Propagator +from adam_core.propagator import Propagator +from adam_core.propagator.adam_pyoorb import PYOORBPropagator from adam_core.ray_cluster import initialize_use_ray from .observations.observations import Observations @@ -93,7 +94,7 @@ def range_and_transform_worker( def range_and_transform( test_orbit: TestOrbits, observations: Union[Observations, ray.ObjectRef], - propagator: Type[Propagator] = PYOORB, + propagator: Type[Propagator] = PYOORBPropagator, propagator_kwargs: dict = {}, max_processes: Optional[int] = 1, ) -> TransformedDetections: @@ -126,9 +127,7 @@ def range_and_transform( logger.info("Running range and transform...") if len(test_orbit) != 1: - raise ValueError( - f"range_and_transform received {len(test_orbit)} orbits but expected 1." - ) + raise ValueError(f"range_and_transform received {len(test_orbit)} orbits but expected 1.") logger.info(f"Assuming r = {test_orbit.coordinates.r[0]} au") logger.info(f"Assuming v = {test_orbit.coordinates.v[0]} au/d") @@ -195,25 +194,19 @@ def range_and_transform( if len(futures) >= max_processes * 1.5: finished, futures = ray.wait(futures, num_returns=1) - transformed_detections = qv.concatenate( - [transformed_detections, ray.get(finished[0])] - ) + transformed_detections = qv.concatenate([transformed_detections, ray.get(finished[0])]) if transformed_detections.fragmented(): transformed_detections = qv.defragment(transformed_detections) while futures: finished, futures = ray.wait(futures, num_returns=1) - transformed_detections = qv.concatenate( - [transformed_detections, ray.get(finished[0])] - ) + transformed_detections = qv.concatenate([transformed_detections, ray.get(finished[0])]) if transformed_detections.fragmented(): transformed_detections = qv.defragment(transformed_detections) if len(refs_to_free) > 0: ray.internal.free(refs_to_free) - logger.info( - f"Removed {len(refs_to_free)} references from the object store." - ) + logger.info(f"Removed {len(refs_to_free)} references from the object store.") else: # Get state IDs @@ -238,7 +231,5 @@ def range_and_transform( time_end = time.perf_counter() logger.info(f"Transformed {len(transformed_detections)} observations.") - logger.info( - f"Range and transform completed in {time_end - time_start:.3f} seconds." 
- ) + logger.info(f"Range and transform completed in {time_end - time_start:.3f} seconds.") return transformed_detections diff --git a/thor/tests/__init__.py b/src/thor/tests/__init__.py similarity index 100% rename from thor/tests/__init__.py rename to src/thor/tests/__init__.py diff --git a/thor/tests/memory/__init__.py b/src/thor/tests/memory/__init__.py similarity index 100% rename from thor/tests/memory/__init__.py rename to src/thor/tests/memory/__init__.py diff --git a/thor/tests/memory/fixtures/896831/cluster_members.parquet b/src/thor/tests/memory/fixtures/896831/cluster_members.parquet similarity index 100% rename from thor/tests/memory/fixtures/896831/cluster_members.parquet rename to src/thor/tests/memory/fixtures/896831/cluster_members.parquet diff --git a/thor/tests/memory/fixtures/896831/clusters.parquet b/src/thor/tests/memory/fixtures/896831/clusters.parquet similarity index 100% rename from thor/tests/memory/fixtures/896831/clusters.parquet rename to src/thor/tests/memory/fixtures/896831/clusters.parquet diff --git a/thor/tests/memory/fixtures/896831/filtered_observations.parquet b/src/thor/tests/memory/fixtures/896831/filtered_observations.parquet similarity index 100% rename from thor/tests/memory/fixtures/896831/filtered_observations.parquet rename to src/thor/tests/memory/fixtures/896831/filtered_observations.parquet diff --git a/thor/tests/memory/fixtures/896831/iod_orbit_members.parquet b/src/thor/tests/memory/fixtures/896831/iod_orbit_members.parquet similarity index 100% rename from thor/tests/memory/fixtures/896831/iod_orbit_members.parquet rename to src/thor/tests/memory/fixtures/896831/iod_orbit_members.parquet diff --git a/thor/tests/memory/fixtures/896831/iod_orbits.parquet b/src/thor/tests/memory/fixtures/896831/iod_orbits.parquet similarity index 100% rename from thor/tests/memory/fixtures/896831/iod_orbits.parquet rename to src/thor/tests/memory/fixtures/896831/iod_orbits.parquet diff --git a/thor/tests/memory/fixtures/896831/od_orbit_members.parquet b/src/thor/tests/memory/fixtures/896831/od_orbit_members.parquet similarity index 100% rename from thor/tests/memory/fixtures/896831/od_orbit_members.parquet rename to src/thor/tests/memory/fixtures/896831/od_orbit_members.parquet diff --git a/thor/tests/memory/fixtures/896831/od_orbits.parquet b/src/thor/tests/memory/fixtures/896831/od_orbits.parquet similarity index 100% rename from thor/tests/memory/fixtures/896831/od_orbits.parquet rename to src/thor/tests/memory/fixtures/896831/od_orbits.parquet diff --git a/thor/tests/memory/fixtures/896831/recovered_orbit_members.parquet b/src/thor/tests/memory/fixtures/896831/recovered_orbit_members.parquet similarity index 100% rename from thor/tests/memory/fixtures/896831/recovered_orbit_members.parquet rename to src/thor/tests/memory/fixtures/896831/recovered_orbit_members.parquet diff --git a/thor/tests/memory/fixtures/896831/recovered_orbits.parquet b/src/thor/tests/memory/fixtures/896831/recovered_orbits.parquet similarity index 100% rename from thor/tests/memory/fixtures/896831/recovered_orbits.parquet rename to src/thor/tests/memory/fixtures/896831/recovered_orbits.parquet diff --git a/thor/tests/memory/fixtures/896831/transformed_detections.parquet b/src/thor/tests/memory/fixtures/896831/transformed_detections.parquet similarity index 100% rename from thor/tests/memory/fixtures/896831/transformed_detections.parquet rename to src/thor/tests/memory/fixtures/896831/transformed_detections.parquet diff --git a/thor/tests/memory/fixtures/input_observations.parquet 
b/src/thor/tests/memory/fixtures/input_observations.parquet similarity index 100% rename from thor/tests/memory/fixtures/input_observations.parquet rename to src/thor/tests/memory/fixtures/input_observations.parquet diff --git a/thor/tests/memory/fixtures/inputs/896831/test_orbit.parquet b/src/thor/tests/memory/fixtures/inputs/896831/test_orbit.parquet similarity index 100% rename from thor/tests/memory/fixtures/inputs/896831/test_orbit.parquet rename to src/thor/tests/memory/fixtures/inputs/896831/test_orbit.parquet diff --git a/thor/tests/memory/fixtures/inputs/config.json b/src/thor/tests/memory/fixtures/inputs/config.json similarity index 100% rename from thor/tests/memory/fixtures/inputs/config.json rename to src/thor/tests/memory/fixtures/inputs/config.json diff --git a/thor/tests/memory/fixtures/inputs/observations.feather b/src/thor/tests/memory/fixtures/inputs/observations.feather similarity index 100% rename from thor/tests/memory/fixtures/inputs/observations.feather rename to src/thor/tests/memory/fixtures/inputs/observations.feather diff --git a/thor/tests/memory/fixtures/inputs/test_orbits.parquet b/src/thor/tests/memory/fixtures/inputs/test_orbits.parquet similarity index 100% rename from thor/tests/memory/fixtures/inputs/test_orbits.parquet rename to src/thor/tests/memory/fixtures/inputs/test_orbits.parquet diff --git a/thor/tests/memory/test_memory.py b/src/thor/tests/memory/test_memory.py similarity index 83% rename from thor/tests/memory/test_memory.py rename to src/thor/tests/memory/test_memory.py index c9b58d49..6c9a6198 100644 --- a/thor/tests/memory/test_memory.py +++ b/src/thor/tests/memory/test_memory.py @@ -15,6 +15,7 @@ open .cache.bench/memory/[session_name]/[test_name].png """ + import os import subprocess import threading @@ -35,18 +36,12 @@ def get_git_branch_or_revision(): Get the current Git branch name or revision hash """ try: - branch = ( - subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"]) - .strip() - .decode() - ) + branch = subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"]).strip().decode() if branch != "HEAD": return branch else: # If HEAD is detached, get the revision hash - return ( - subprocess.check_output(["git", "rev-parse", "HEAD"]).strip().decode() - ) + return subprocess.check_output(["git", "rev-parse", "HEAD"]).strip().decode() except subprocess.CalledProcessError: return "unknown" @@ -81,9 +76,7 @@ def snapshot(): plt.style.use("seaborn-v0_8-notebook") plt.figure(figsize=(12, 6)) - plt.plot( - timestamps, mem_usage, color="magenta", linestyle="-", linewidth=2 - ) # Removed marker + plt.plot(timestamps, mem_usage, color="magenta", linestyle="-", linewidth=2) # Removed marker plt.title( f"Memory Usage [{git_branch_or_revision},{TEST_ORBIT_ID} {request.node.name}", fontsize=12, @@ -94,15 +87,11 @@ def snapshot(): plt.xlim(left=0) max_mem_usage = max(mem_usage) min_mem_usage = min(mem_usage) - plt.ylim( - bottom=min_mem_usage * 0.8, top=max_mem_usage + (max_mem_usage * 0.1) - ) # Add 10% breathing room + plt.ylim(bottom=min_mem_usage * 0.8, top=max_mem_usage + (max_mem_usage * 0.1)) # Add 10% breathing room # Save the plot to a folder based on Git branch/revision in a file based on the test name os.makedirs(f"{root_path}/{git_branch_or_revision}", exist_ok=True) - plt.savefig( - f"{root_path}/{git_branch_or_revision}/{TEST_ORBIT_ID}-{request.node.name}.png" - ) + plt.savefig(f"{root_path}/{git_branch_or_revision}/{TEST_ORBIT_ID}-{request.node.name}.png") @pytest.fixture @@ -142,9 +131,7 @@ def 
memory_config(request): def memory_filtered_observations(): from thor.observations import Observations - return Observations.from_parquet( - FIXTURES_DIR / f"{TEST_ORBIT_ID}/filtered_observations.parquet" - ) + return Observations.from_parquet(FIXTURES_DIR / f"{TEST_ORBIT_ID}/filtered_observations.parquet") @pytest.fixture @@ -161,9 +148,7 @@ def memory_clusters(): from thor.clusters import ClusterMembers, Clusters clusters = Clusters.from_parquet(FIXTURES_DIR / f"{TEST_ORBIT_ID}/clusters.parquet") - cluster_members = ClusterMembers.from_parquet( - FIXTURES_DIR / f"{TEST_ORBIT_ID}/cluster_members.parquet" - ) + cluster_members = ClusterMembers.from_parquet(FIXTURES_DIR / f"{TEST_ORBIT_ID}/cluster_members.parquet") return clusters, cluster_members @@ -171,9 +156,7 @@ def memory_clusters(): def memory_iod_orbits(): from thor.orbit_determination import FittedOrbitMembers, FittedOrbits - iod_orbits = FittedOrbits.from_parquet( - FIXTURES_DIR / f"{TEST_ORBIT_ID}/iod_orbits.parquet" - ) + iod_orbits = FittedOrbits.from_parquet(FIXTURES_DIR / f"{TEST_ORBIT_ID}/iod_orbits.parquet") iod_orbit_members = FittedOrbitMembers.from_parquet( FIXTURES_DIR / f"{TEST_ORBIT_ID}/iod_orbit_members.parquet" ) @@ -184,9 +167,7 @@ def memory_iod_orbits(): def memory_od_orbits(): from thor.orbit_determination import FittedOrbitMembers, FittedOrbits - od_orbits = FittedOrbits.from_parquet( - FIXTURES_DIR / f"{TEST_ORBIT_ID}/od_orbits.parquet" - ) + od_orbits = FittedOrbits.from_parquet(FIXTURES_DIR / f"{TEST_ORBIT_ID}/od_orbits.parquet") od_orbit_members = FittedOrbitMembers.from_parquet( FIXTURES_DIR / f"{TEST_ORBIT_ID}/od_orbit_members.parquet" ) @@ -199,9 +180,7 @@ def ray_cluster(memory_config): from adam_core.ray_cluster import initialize_use_ray if memory_config.max_processes > 1: - initialize_use_ray( - num_cpus=memory_config.max_processes, object_store_bytes=4000000000 - ) + initialize_use_ray(num_cpus=memory_config.max_processes, object_store_bytes=4000000000) yield if memory_config.max_processes > 1: @@ -210,9 +189,7 @@ def ray_cluster(memory_config): @pytest.mark.memory @pytest.mark.parametrize("memory_config", CONFIG_PROCESSES, indirect=True) -def test_load_input_observations( - memory_snapshot, memory_config, ray_cluster, memory_input_observations -): +def test_load_input_observations(memory_snapshot, memory_config, ray_cluster, memory_input_observations): from thor.observations import Observations # It's always necessary to sort ahead of time, so we include it in our test @@ -222,7 +199,7 @@ def test_load_input_observations( memory_input_observations = memory_input_observations.set_column( "time", memory_input_observations.time.rescale("utc") ) - observations = Observations.from_input_observations(memory_input_observations) + Observations.from_input_observations(memory_input_observations) @pytest.mark.memory @@ -232,9 +209,7 @@ def test_filter_observations( ): from thor.main import filter_observations - observations = filter_observations( - memory_observations, memory_test_orbit, memory_config - ) + filter_observations(memory_observations, memory_test_orbit, memory_config) @pytest.mark.memory @@ -257,12 +232,10 @@ def test_range_and_transform( @pytest.mark.memory @pytest.mark.parametrize("memory_config", CONFIG_PROCESSES, indirect=True) -def test_cluster_and_link( - memory_transformed_detections, memory_config, ray_cluster, memory_snapshot -): +def test_cluster_and_link(memory_transformed_detections, memory_config, ray_cluster, memory_snapshot): from thor.main import cluster_and_link - clusters, 
cluster_members = cluster_and_link( + cluster_and_link( memory_transformed_detections, vx_range=[memory_config.vx_min, memory_config.vx_max], vy_range=[memory_config.vy_min, memory_config.vy_max], @@ -288,8 +261,8 @@ def test_initial_orbit_determination( ): from thor.orbits.iod import initial_orbit_determination - clusters, cluster_members = memory_clusters - orbits, orbit_members = initial_orbit_determination( + _, cluster_members = memory_clusters + initial_orbit_determination( memory_filtered_observations, cluster_members, min_obs=memory_config.iod_min_obs, @@ -360,9 +333,7 @@ def test_merge_and_extend( # Disabling until smaller dataset is available @pytest.mark.memory @pytest.mark.parametrize("memory_config", CONFIG_PROCESSES, indirect=True) -def test_link_test_orbit( - memory_snapshot, memory_config, memory_observations, memory_test_orbit, ray_cluster -): +def test_link_test_orbit(memory_snapshot, memory_config, memory_observations, memory_test_orbit, ray_cluster): from thor.main import link_test_orbit for result in link_test_orbit( diff --git a/thor/tests/test_clusters.py b/src/thor/tests/test_clusters.py similarity index 97% rename from thor/tests/test_clusters.py rename to src/thor/tests/test_clusters.py index f6ad8106..0b1d02a3 100644 --- a/thor/tests/test_clusters.py +++ b/src/thor/tests/test_clusters.py @@ -1,8 +1,5 @@ import numpy as np -import pyarrow as pa -import pyarrow.compute as pc import pytest -import quivr as qv from ..clusters import ( ClusterMembers, @@ -382,14 +379,10 @@ def test_Clusters_drop_duplicates(): ) cluster_members = ClusterMembers.from_kwargs( cluster_id=list(np.repeat(cluster_ids, 5)), - obs_id=[ - obs for cluster_members_i in obs_ids_duplicated for obs in cluster_members_i - ], + obs_id=[obs for cluster_members_i in obs_ids_duplicated for obs in cluster_members_i], ) - clusters_filtered, cluster_members_filtered = drop_duplicate_clusters( - clusters, cluster_members - ) + clusters_filtered, cluster_members_filtered = drop_duplicate_clusters(clusters, cluster_members) assert len(clusters_filtered) == 5 assert clusters_filtered.cluster_id.to_pylist() == [ "c00000", diff --git a/thor/tests/test_config.py b/src/thor/tests/test_config.py similarity index 100% rename from thor/tests/test_config.py rename to src/thor/tests/test_config.py diff --git a/thor/tests/test_main.py b/src/thor/tests/test_main.py similarity index 86% rename from thor/tests/test_main.py rename to src/thor/tests/test_main.py index 33c5ade4..634cf827 100644 --- a/thor/tests/test_main.py +++ b/src/thor/tests/test_main.py @@ -102,9 +102,7 @@ def ray_cluster(integration_config): ray.shutdown() -def setup_test_data( - object_id, orbits, observations, integration_config, max_arc_length=None -): +def setup_test_data(object_id, orbits, observations, integration_config, max_arc_length=None): """ Selects the observations and orbit for a given object ID and returns the test orbit, observations, expected observation IDs and the configuration @@ -115,9 +113,7 @@ def setup_test_data( # Select the associations that match this object ID associations_i = associations.select("object_id", object_id) - detections_i = detections.apply_mask( - pc.is_in(detections.id, associations_i.detection_id) - ) + detections_i = detections.apply_mask(pc.is_in(detections.id, associations_i.detection_id)) exposures_i = exposures.apply_mask(pc.is_in(exposures.id, detections_i.exposure_id)) assert len(associations_i) == 90 @@ -131,12 +127,8 @@ def setup_test_data( ), ) detections_i = detections_i.apply_mask(time_mask) - 
exposures_i = exposures_i.apply_mask( - pc.is_in(exposures_i.id, detections_i.exposure_id) - ) - associations_i = associations_i.apply_mask( - pc.is_in(associations_i.detection_id, detections_i.id) - ) + exposures_i = exposures_i.apply_mask(pc.is_in(exposures_i.id, detections_i.exposure_id)) + associations_i = associations_i.apply_mask(pc.is_in(associations_i.detection_id, detections_i.id)) # Extract the observations that match this object ID obs_ids_expected = associations_i.detection_id.unique().sort() @@ -237,9 +229,12 @@ def run_link_test_orbit(test_orbit, observations, config): @pytest.mark.parametrize("integration_config", [1, 4], indirect=True) @pytest.mark.integration def test_link_test_orbit(object_id, orbits, observations, integration_config): - (test_orbit, observations, obs_ids_expected, integration_config,) = setup_test_data( - object_id, orbits, observations, integration_config, max_arc_length=14 - ) + ( + test_orbit, + observations, + obs_ids_expected, + integration_config, + ) = setup_test_data(object_id, orbits, observations, integration_config, max_arc_length=14) # Run link_test_orbit and make sure we get the correct observations back recovered_orbits, recovered_orbit_members = run_link_test_orbit( @@ -255,14 +250,15 @@ def test_link_test_orbit(object_id, orbits, observations, integration_config): @pytest.mark.parametrize("integration_config", [1, 4], indirect=True) @pytest.mark.benchmark(group="link_test_orbit", min_rounds=5, warmup=True) -def test_benchmark_link_test_orbit( - orbits, observations, integration_config, benchmark, ray_cluster -): +def test_benchmark_link_test_orbit(orbits, observations, integration_config, benchmark, ray_cluster): object_id = "202930 Ivezic (1998 SG172)" - (test_orbit, observations, obs_ids_expected, integration_config,) = setup_test_data( - object_id, orbits, observations, integration_config, max_arc_length=14 - ) + ( + test_orbit, + observations, + obs_ids_expected, + integration_config, + ) = setup_test_data(object_id, orbits, observations, integration_config, max_arc_length=14) recovered_orbits, recovered_orbit_members = benchmark( run_link_test_orbit, test_orbit, observations, integration_config @@ -290,12 +286,8 @@ def test_load_initial_checkpoint_values(working_dir): # Create filtered_observations file to simulate first checkpoint exposures, detections, associations = make_observations() - filtered_observations = Observations.from_detections_and_exposures( - detections, exposures - ) - filtered_observations_path = os.path.join( - working_dir, "filtered_observations.parquet" - ) + filtered_observations = Observations.from_detections_and_exposures(detections, exposures) + filtered_observations_path = os.path.join(working_dir, "filtered_observations.parquet") filtered_observations.to_parquet(filtered_observations_path) checkpoint = load_initial_checkpoint_values(working_dir) @@ -305,9 +297,7 @@ def test_load_initial_checkpoint_values(working_dir): assert checkpoint.filtered_observations.coordinates.time.scale == "utc" # Create transformed_detections file to simulate second checkpoint - TransformedDetections.empty().to_parquet( - os.path.join(working_dir, "transformed_detections.parquet") - ) + TransformedDetections.empty().to_parquet(os.path.join(working_dir, "transformed_detections.parquet")) checkpoint = load_initial_checkpoint_values(working_dir) assert checkpoint.stage == "cluster_and_link" @@ -316,9 +306,7 @@ def test_load_initial_checkpoint_values(working_dir): # Create clusters file to simulate third checkpoint 
Clusters.empty().to_parquet(os.path.join(working_dir, "clusters.parquet")) - ClusterMembers.empty().to_parquet( - os.path.join(working_dir, "cluster_members.parquet") - ) + ClusterMembers.empty().to_parquet(os.path.join(working_dir, "cluster_members.parquet")) checkpoint = load_initial_checkpoint_values(working_dir) assert checkpoint.stage == "initial_orbit_determination" @@ -328,9 +316,7 @@ def test_load_initial_checkpoint_values(working_dir): # Create iod_orbits file to simulate fourth checkpoint FittedOrbits.empty().to_parquet(os.path.join(working_dir, "iod_orbits.parquet")) - FittedOrbitMembers.empty().to_parquet( - os.path.join(working_dir, "iod_orbit_members.parquet") - ) + FittedOrbitMembers.empty().to_parquet(os.path.join(working_dir, "iod_orbit_members.parquet")) checkpoint = load_initial_checkpoint_values(working_dir) assert checkpoint.stage == "differential_correction" @@ -340,9 +326,7 @@ def test_load_initial_checkpoint_values(working_dir): # Create od_orbits files to simulate fifth checkpoint FittedOrbits.empty().to_parquet(os.path.join(working_dir, "od_orbits.parquet")) - FittedOrbitMembers.empty().to_parquet( - os.path.join(working_dir, "od_orbit_members.parquet") - ) + FittedOrbitMembers.empty().to_parquet(os.path.join(working_dir, "od_orbit_members.parquet")) checkpoint = load_initial_checkpoint_values(working_dir) assert checkpoint.stage == "recover_orbits" @@ -351,12 +335,8 @@ def test_load_initial_checkpoint_values(working_dir): assert checkpoint.od_orbit_members is not None # Create recovered_orbits files to simulate completed run folder - FittedOrbits.empty().to_parquet( - os.path.join(working_dir, "recovered_orbits.parquet") - ) - FittedOrbitMembers.empty().to_parquet( - os.path.join(working_dir, "recovered_orbit_members.parquet") - ) + FittedOrbits.empty().to_parquet(os.path.join(working_dir, "recovered_orbits.parquet")) + FittedOrbitMembers.empty().to_parquet(os.path.join(working_dir, "recovered_orbit_members.parquet")) checkpoint = load_initial_checkpoint_values(working_dir) assert checkpoint.stage == "complete" diff --git a/thor/tests/test_orbit.py b/src/thor/tests/test_orbit.py similarity index 99% rename from thor/tests/test_orbit.py rename to src/thor/tests/test_orbit.py index d78cfe0f..9c551a74 100644 --- a/thor/tests/test_orbit.py +++ b/src/thor/tests/test_orbit.py @@ -41,7 +41,6 @@ def test_assume_heliocentric_distance_missing_rho(): # Since the origin is the Sun the heliocentric distance is also the # topocentric distance r = np.array([3.0, 0.0, 0.0]) - r_mag = np.linalg.norm(r) coords_assumed = assume_heliocentric_distance(r, coords, origin_coords) np.testing.assert_equal(coords_assumed.rho, np.array([r[0], rho[1]])) diff --git a/thor/tests/test_orbit_selection.py b/src/thor/tests/test_orbit_selection.py similarity index 100% rename from thor/tests/test_orbit_selection.py rename to src/thor/tests/test_orbit_selection.py diff --git a/src/thor/utils/__init__.py b/src/thor/utils/__init__.py new file mode 100644 index 00000000..d1d288c0 --- /dev/null +++ b/src/thor/utils/__init__.py @@ -0,0 +1,4 @@ +# ruff: noqa: F401, F403 +from .ades import * +from .linkages import * +from .logging import * diff --git a/thor/utils/ades.py b/src/thor/utils/ades.py similarity index 97% rename from thor/utils/ades.py rename to src/thor/utils/ades.py index 3f92a2e6..6a1ead5a 100644 --- a/thor/utils/ades.py +++ b/src/thor/utils/ades.py @@ -207,10 +207,7 @@ def writeToADES( id_present = True if not id_present: - err = ( - "At least one of permID, provID, or trkSub should\n" - "be 
present in observations." - ) + err = "At least one of permID, provID, or trkSub should\n" "be present in observations." raise ValueError(err) observation_times = Time( @@ -264,7 +261,5 @@ def writeToADES( if col in ades.columns: ades[col] = ades[col].map(lambda x: "{0:.3f}".format(x)) - ades.to_csv( - file_out, sep="|", header=False, index=False, mode="a", float_format="%.16f" - ) + ades.to_csv(file_out, sep="|", header=False, index=False, mode="a", float_format="%.16f") return diff --git a/thor/utils/linkages.py b/src/thor/utils/linkages.py similarity index 86% rename from thor/utils/linkages.py rename to src/thor/utils/linkages.py index 34b8264e..f2384243 100644 --- a/thor/utils/linkages.py +++ b/src/thor/utils/linkages.py @@ -1,4 +1,3 @@ -import gc from typing import Tuple import numpy as np @@ -43,23 +42,17 @@ def sort_by_id_and_time( """ # Grab the linkage ID column from the linkages table and add an index column linkage_table = linkages.table.select([linkage_column]) - linkage_table = linkage_table.add_column( - 0, "index", pa.array(np.arange(0, len(linkage_table))) - ) + linkage_table = linkage_table.add_column(0, "index", pa.array(np.arange(0, len(linkage_table)))) # Grab the linkage ID and observation ID columns from the linkage members table and add an index column members_table = members.table.select([linkage_column, "obs_id"]) - members_table = members_table.add_column( - 0, "index", pa.array(np.arange(0, len(members_table))) - ) + members_table = members_table.add_column(0, "index", pa.array(np.arange(0, len(members_table)))) # Grab the observation ID, observation time columns and join with the linkage members table on the observation ID observation_times = observations.flattened_table().select( ["id", "coordinates.time.days", "coordinates.time.nanos"] ) - member_times = members_table.join( - observation_times, keys=["obs_id"], right_keys=["id"] - ) + member_times = members_table.join(observation_times, keys=["obs_id"], right_keys=["id"]) # Sort the reduced linkages table by linkage ID and the linkage member times table by linkage ID and observation time linkage_table = linkage_table.sort_by([(linkage_column, "ascending")]) diff --git a/thor/utils/logging.py b/src/thor/utils/logging.py similarity index 90% rename from thor/utils/logging.py rename to src/thor/utils/logging.py index c78a11af..bdf25c22 100644 --- a/thor/utils/logging.py +++ b/src/thor/utils/logging.py @@ -2,8 +2,6 @@ import os import sys -__all__ = ["setupLogger"] - logger = logging.getLogger(__name__) @@ -44,9 +42,7 @@ def setupLogger(name, out_dir=None): logger.addHandler(stream_handler) if add_file_handler: - file_handler = logging.FileHandler( - os.path.join(out_dir, "thor.log"), encoding="utf-8", delay=False - ) + file_handler = logging.FileHandler(os.path.join(out_dir, "thor.log"), encoding="utf-8", delay=False) file_handler.setLevel(logging.DEBUG) file_format = logging.Formatter( "%(asctime)s.%(msecs)03d [%(levelname)s] %(message)s (%(filename)s, %(funcName)s, %(lineno)d)", diff --git a/thor/utils/tests/__init__.py b/src/thor/utils/tests/__init__.py similarity index 100% rename from thor/utils/tests/__init__.py rename to src/thor/utils/tests/__init__.py diff --git a/thor/utils/tests/test_linkages.py b/src/thor/utils/tests/test_linkages.py similarity index 100% rename from thor/utils/tests/test_linkages.py rename to src/thor/utils/tests/test_linkages.py diff --git a/thor/orbit_determination/__init__.py b/thor/orbit_determination/__init__.py deleted file mode 100644 index bfb3c192..00000000 --- 
a/thor/orbit_determination/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# noqa: F401 -from .fitted_orbits import ( - FittedOrbits, - FittedOrbitMembers, - assign_duplicate_observations, -) -from .outliers import calculate_max_outliers diff --git a/thor/projections/__init__.py b/thor/projections/__init__.py deleted file mode 100644 index 840e4eba..00000000 --- a/thor/projections/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# noqa: F401 -from .covariances import ProjectionCovariances -from .gnomonic import GnomonicCoordinates -from .transforms import _cartesian_to_gnomonic, cartesian_to_gnomonic diff --git a/thor/utils/__init__.py b/thor/utils/__init__.py deleted file mode 100644 index 747ff773..00000000 --- a/thor/utils/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .ades import * -from .logging import * -from .quivr import * diff --git a/thor/utils/quivr.py b/thor/utils/quivr.py deleted file mode 100644 index 54531a47..00000000 --- a/thor/utils/quivr.py +++ /dev/null @@ -1,61 +0,0 @@ -from typing import List, Literal, Optional - -import numpy as np -import pyarrow as pa -import quivr as qv -from typing_extensions import Final - -__all__ = [ - "drop_duplicates", -] - - -def drop_duplicates( - table: qv.AnyTable, - subset: Optional[List[str]] = None, - keep: Literal["first", "last"] = "first", -) -> qv.AnyTable: - """ - Drop duplicate rows from a `~quivr.Table`. This function is similar to - `~pandas.DataFrame.drop_duplicates` but it supports nested columns (representing - nested tables). - - Parameters - ---------- - table : `~quivr.Table` - Table to drop duplicate rows from. - subset : list of str, optional - Subset of columns to consider when dropping duplicates. If not specified then - all columns are used. - keep : {'first', 'last'}, default 'first' - If there are duplicate rows then keep the first or last row. - - Returns - ------- - table : `~quivr.Table` - Table with duplicate rows removed. - """ - # Flatten the table so nested columns are dot-delimited at the top level - flattened_table = table.flattened_table() - - # If subset is not specified then use all the columns - if subset is None: - subset = [c for c in flattened_table.column_names] - - # Add an index column to the flattened table - flattened_table = flattened_table.add_column( - 0, "index", pa.array(np.arange(len(flattened_table))) - ) - - if keep not in ["first", "last"]: - raise ValueError(f"keep must be 'first' or 'last', got {keep}.") - - agg_func = keep - indices = ( - flattened_table.group_by(subset, use_threads=False) - .aggregate([("index", agg_func)]) - .column(f"index_{agg_func}") - ) - - # Take the indices from the flattened table and use them to index into the original table - return table.take(indices)
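
Note on the deleted thor/utils/quivr.py: the local drop_duplicates helper removed above worked by flattening the table, tagging each row with its position, grouping by the subset columns, and keeping the first (or last) index per group; the patch drops it in favor of the equivalent helper that quivr itself now ships (exact import path not shown here). For reference, a minimal sketch of that same index-aggregation pattern using pyarrow directly; the table and column names are toy values for illustration, not from the repository, and the "first" hash aggregation requires a reasonably recent pyarrow.

    import numpy as np
    import pyarrow as pa

    # Toy table with duplicate (orbit_id, obs_id) pairs; names are illustrative only
    table = pa.table(
        {
            "orbit_id": ["o1", "o1", "o2", "o2"],
            "obs_id": ["a", "a", "b", "c"],
            "residual": [0.1, 0.2, 0.3, 0.4],
        }
    )

    # Same trick as the removed helper: tag each row with its position,
    # group by the subset columns, and keep the first index seen per group
    indexed = table.add_column(0, "index", pa.array(np.arange(len(table))))
    keep = (
        indexed.group_by(["orbit_id", "obs_id"], use_threads=False)
        .aggregate([("index", "first")])
        .column("index_first")
    )
    deduplicated = table.take(keep)  # keeps rows 0, 2, and 3 (first occurrence of each pair)

The deleted implementation additionally called flattened_table() first so that nested (dot-delimited) columns could be used in the subset; the sketch skips that step because the toy table is already flat.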