diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 000000000..682203bf3 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,61 @@ +// For format details, see https://aka.ms/devcontainer.json. For config options, see the +// README at: https://github.com/devcontainers/templates/tree/main/src/miniconda +{ + "name": "py4dstem-dev", + "image": "mcr.microsoft.com/vscode/devcontainers/miniconda:0-3", + // "build": { + // "context": "..", + // "dockerfile": "Dockerfile" + // }, + + // Features to add to the dev container. More info: https://containers.dev/features. + // "features": {}, + + // Use 'forwardPorts' to make a list of ports inside the container available locally. + // "forwardPorts": [] + + // Use 'postCreateCommand' to run commands after the container is created. + "postCreateCommand": "/opt/conda/bin/conda init && /opt/conda/bin/pip install -e /workspaces/py4DSTEM/ && /opt/conda/bin/pip install ipython ipykernel jupyter", + + // Configure tool-specific properties. + "customizations": { + "vscode": { + "settings": { + "python.defaultInterpreterPath": "/opt/conda/bin/python", + "python.analysis.autoFormatStrings": true, + "python.analysis.completeFunctionParens": true, + "ruff.showNotifications": "onWarning", + "workbench.colorTheme": "Monokai", + // "editor.defaultFormatter": "ms-python.black-formatter", + "editor.fontFamily": "Menlo, Monaco, 'Courier New', monospace", + "editor.bracketPairColorization.enabled": true, + "editor.guides.bracketPairs": "active", + "editor.minimap.renderCharacters": false, + "editor.minimap.autohide": true, + "editor.minimap.scale": 2, + "[python]": { + "editor.defaultFormatter": "ms-python.black-formatter", + "editor.codeActionsOnSave": { + "source.organizeImports": false + } + } + }, + "extensions": [ + "ms-python.python", + "donjayamanne.python-extension-pack", + "ms-python.vscode-pylance", + "ms-toolsai.jupyter", + "GitHub.codespaces", + "ms-azuretools.vscode-docker", + "DavidAnson.vscode-markdownlint", + "ms-vsliveshare.vsliveshare", + "charliermarsh.ruff", + "eamodio.gitlens", + "ms-python.black-formatter" + ] + } + } + + // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. + // "remoteUser": "root" +} \ No newline at end of file diff --git a/.github/scripts/update_version.py b/.github/scripts/update_version.py index 2a07e5697..2aaaa07af 100644 --- a/.github/scripts/update_version.py +++ b/.github/scripts/update_version.py @@ -1,6 +1,5 @@ """ Script to update the patch version number of the py4DSTEM package. -Author: Tara Mishra (Quantumstud) """ version_file_path = "py4DSTEM/version.py" @@ -8,15 +7,14 @@ with open(version_file_path, "r") as f: lines = f.readlines() -line_split = lines[0].split('.') -patch_number = line_split[2].split('\'')[0] +line_split = lines[0].split(".") +patch_number = line_split[2].split("'")[0] # Increment patch number -patch_number = str(int(patch_number) + 1)+'\'' +patch_number = str(int(patch_number) + 1) + "'" -new_line = line_split[0]+'.'+line_split[1]+'.'+patch_number +new_line = line_split[0] + "." + line_split[1] + "." 
+ patch_number -with open(version_file_path,"w") as f: +with open(version_file_path, "w") as f: f.write(new_line) - \ No newline at end of file diff --git a/.github/workflows/black.yml b/.github/workflows/black.yml new file mode 100644 index 000000000..09b2a0fba --- /dev/null +++ b/.github/workflows/black.yml @@ -0,0 +1,14 @@ +name: Check code style + +on: + push: + branches: [ "dev" ] + pull_request: + branches: [ "dev" ] + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: psf/black@stable \ No newline at end of file diff --git a/.github/workflows/build-flake.yml b/.github/workflows/build-flake.yml index c17a0becc..3393b7908 100644 --- a/.github/workflows/build-flake.yml +++ b/.github/workflows/build-flake.yml @@ -1,7 +1,7 @@ # This workflow will install Python dependencies, run tests and lint with a single version of Python # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions -name: Python application +name: Check module can be imported on: push: diff --git a/.github/workflows/check_install_dev.yml b/.github/workflows/check_install_dev.yml new file mode 100644 index 000000000..82701d50d --- /dev/null +++ b/.github/workflows/check_install_dev.yml @@ -0,0 +1,45 @@ +name: Install Checker Dev +on: + push: + branches: [ "dev" ] + pull_request: + branches: [ "dev" ] +jobs: + + test-python-os-versions: + name: Check Python ${{ matrix.python-version }} on ${{ matrix.runs-on }} on ${{ matrix.architecture }} + continue-on-error: ${{ matrix.allow_failure }} + runs-on: ${{ matrix.runs-on }} + strategy: + fail-fast: false + matrix: + allow_failure: [false] + runs-on: [ubuntu-latest] + architecture: [x86_64] + python-version: ["3.9", "3.10", "3.11",] + # include: + # - python-version: "3.12.0-beta.4" + # runs-on: ubuntu-latest + # allow_failure: true + # Currently no public runners available for this but this or arm64 should work next time + # include: + # - python-version: "3.10" + # architecture: [aarch64] + # runs-on: macos-latest + # allow_failure: true + steps: + - uses: actions/checkout@v3 + + - name: Setup Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Install repo + run: | + python -m pip install . 
+ - name: Check installation + run: | + python -c "import py4DSTEM; print(py4DSTEM.__version__)" + # - name: Check machine arch + # run: | + # python -c "import platform; print(platform.machine())" diff --git a/.github/workflows/check_install_main.yml b/.github/workflows/check_install_main.yml new file mode 100644 index 000000000..2d1c8ed2a --- /dev/null +++ b/.github/workflows/check_install_main.yml @@ -0,0 +1,45 @@ +name: Install Checker Main +on: + push: + branches: [ "main" ] + pull_request: + branches: [ "main" ] +jobs: + + test-python-os-versions: + name: Check Python ${{ matrix.python-version }} on ${{ matrix.runs-on }} on ${{ matrix.architecture }} + continue-on-error: ${{ matrix.allow_failure }} + runs-on: ${{ matrix.runs-on }} + strategy: + fail-fast: false + matrix: + allow_failure: [false] + runs-on: [ubuntu-latest, windows-latest, macos-latest] + architecture: [x86_64] + python-version: ["3.9", "3.10", "3.11",] + #include: + # - python-version: "3.12.0-beta.4" + # runs-on: ubuntu-latest + # allow_failure: true + # Currently no public runners available for this but this or arm64 should work next time + # include: + # - python-version: "3.10" + # architecture: [aarch64] + # runs-on: macos-latest + # allow_failure: true + steps: + - uses: actions/checkout@v3 + + - name: Setup Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Install repo + run: | + python -m pip install . + - name: Check installation + run: | + python -c "import py4DSTEM; print(py4DSTEM.__version__)" + - name: Check machine arch + run: | + python -c "import platform; print(platform.machine())" diff --git a/.github/workflows/check_install_quick.yml b/.github/workflows/check_install_quick.yml new file mode 100644 index 000000000..a36db34da --- /dev/null +++ b/.github/workflows/check_install_quick.yml @@ -0,0 +1,45 @@ +name: Install Checker Quick +on: + push: + branches-ignore: + - main + - dev + pull_request: + branches-ignore: + - main + - dev +jobs: + + test-python-os-versions: + name: Check Python ${{ matrix.python-version }} on ${{ matrix.runs-on }} on ${{ matrix.architecture }} + continue-on-error: ${{ matrix.allow_failure }} + runs-on: ${{ matrix.runs-on }} + strategy: + fail-fast: false + matrix: + allow_failure: [false] + runs-on: [ubuntu-latest] + architecture: [x86_64] + python-version: ["3.10"] + # Currently no public runners available for this but this or arm64 should work next time + # include: + # - python-version: "3.10" + # architecture: [aarch64] + # runs-on: macos-latest + # allow_failure: true + steps: + - uses: actions/checkout@v3 + + - name: Setup Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Install repo + run: | + python -m pip install . 
+ - name: Check installation + run: | + python -c "import py4DSTEM; print(py4DSTEM.__version__)" + # - name: Check machine arch + # run: | + # python -c "import platform; print(platform.machine())" \ No newline at end of file diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index ceda4fd58..3e8071f6f 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -1,4 +1,4 @@ -name: flake8 Lint +name: Check for errors with flake8 on: push: diff --git a/.github/workflows/pypi_upload.yml b/.github/workflows/pypi_upload.yml index 07a95273d..264c69030 100644 --- a/.github/workflows/pypi_upload.yml +++ b/.github/workflows/pypi_upload.yml @@ -1,6 +1,5 @@ # Action to check the version of the package and upload it to PyPI # if the version is higher than the one on PyPI -# Author: @quantumstud name: PyPI Upload on: @@ -22,10 +21,15 @@ jobs: token: ${{ secrets.GH_ACTION_VERSION_UPDATE }} - name: Get changed files id: changed-files-specific - uses: tj-actions/changed-files@v32 + uses: tj-actions/changed-files@v39 with: files: | py4DSTEM/version.py + - name: Debug version file change checker + run: | + echo "Checking variable..." + echo ${{ steps.changed-files-specific.outputs.any_changed }} + echo "Done" - name: Running if py4DSTEM/version.py file is not changed if: steps.changed-files-specific.outputs.any_changed == 'false' run: | diff --git a/README.md b/README.md index 0561f098a..aa102542a 100644 --- a/README.md +++ b/README.md @@ -46,42 +46,50 @@ First, download and install Anaconda: www.anaconda.com/download. If you prefer a more lightweight conda client, you can instead install Miniconda: https://docs.conda.io/en/latest/miniconda.html. Then open a conda terminal and run one of the following sets of commands to ensure everything is up-to-date and create a new environment for your py4DSTEM installation: - ``` conda update conda conda create -n py4dstem conda activate py4dstem +conda install -c conda-forge py4dstem pymatgen jupyterlab ``` -Next, install py4DSTEM. To simultaneously install py4DSTEM with `pymatgen` (used in some crystal structure workflows) and `jupyterlab` (providing an interface for running Python notebooks like those provided in the [py4DSTEM tutorials repository](https://github.com/py4dstem/py4DSTEM_tutorials)) run: +In order, these commands +- ensure your installation of Anaconda is up-to-date +- make a virtual environment (see below) +- enter the environment +- install py4DSTEM, as well as pymatgen (used for crystal structure calculations) and JupyterLab (an interface for running Python notebooks like those in the [py4DSTEM tutorials repository](https://github.com/py4dstem/py4DSTEM_tutorials)) + + +We've had some recent reports of `conda` getting stuck trying to solve the environment during the installation above. If you run into this problem, you can install py4DSTEM using `pip` instead of `conda` by running: ``` -conda install -c conda-forge py4dstem pymatgen jupyterlab +conda update conda +conda create -n py4dstem python=3.10 +conda activate py4dstem +pip install py4dstem pymatgen ``` -Or if you would prefer to install only the base modules of **py4DSTEM**, you can instead run: +Both `conda` and `pip` are programs which manage package installations, i.e. they make sure the different packages you're installing, which may depend on one another, use mutually compatible versions. Each has advantages and disadvantages; `pip` is a little more bare-bones, and we've seen this install work when `conda` doesn't.
If you also want to use JupyterLab you can then use either `pip install jupyterlab` or `conda install jupyterlab`. + +If you would prefer to install only the base modules of **py4DSTEM**, and skip pymatgen and JupyterLab, you can instead run: ``` conda install -c conda-forge py4dstem ``` -In Windows you should then also run: +Finally, regardless of which of the above approaches you used, on Windows you should then also run: ``` conda install pywin32 ``` -In order, these commands -- ensure your installation of anaconda is up-to-date -- make a virtual environment (see below) -- enter the environment -- install py4DSTEM, and optionally also pymatgen and JupyterLab -- on Windows, enable python to talk to the windows API +which enables Python to talk to the Windows API. Please note that virtual environments are used in the instructions above in order to make sure packages that have different dependencies don't conflict with one another. Because these directions install py4DSTEM to its own virtual environment, each time you want to use py4DSTEM you'll need to activate this environment. You can do this in the command line by running `conda activate py4dstem`, or, if you're using the Anaconda Navigator, by clicking on the Environments tab and then clicking on `py4dstem`. +Lastly, as of the version 0.14.4 update, we've had a few reports of problems upgrading to the newest version. We're not sure what's causing the issue yet, but have found the new version can be installed successfully in these cases using a fresh Anaconda installation. diff --git a/docs/requirements.txt b/docs/requirements.txt index 43dbc0817..03ecc7e26 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,2 +1,3 @@ emdfile -# py4dstem \ No newline at end of file +sphinx_rtd_theme +# py4dstem diff --git a/docs/source/conf.py b/docs/source/conf.py index b14578631..6da66611e 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -15,15 +15,16 @@ import os import sys -sys.path.insert(0,os.path.dirname(os.getcwd())) + +sys.path.insert(0, os.path.dirname(os.getcwd())) from py4DSTEM import __version__ from datetime import datetime # -- Project information ----------------------------------------------------- -project = 'py4dstem' -copyright = f'{datetime.today().year}, py4DSTEM Development Team' -author = 'Ben Savitsky & Alex Rakowski' +project = "py4dstem" +copyright = f"{datetime.today().year}, py4DSTEM Development Team" +author = "Ben Savitsky & Alex Rakowski" # The full version, including alpha/beta/rc tags # release = '0.14.0' @@ -35,9 +36,12 @@ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. -extensions = ['sphinx.ext.autodoc', - 'sphinx.ext.napoleon', - 'sphinx.ext.intersphinx'] +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.napoleon", + "sphinx.ext.intersphinx", + "sphinx_rtd_theme", +] # Other useful extensions # sphinx_copybutton @@ -49,7 +53,7 @@ # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files.
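The docs changes above add `sphinx_rtd_theme` to `docs/requirements.txt` and register it as a Sphinx extension; the hunks below go on to select it as the HTML theme. As a quick check that the theme and extensions resolve, the docs can be built programmatically; a minimal sketch (the paths assume the repo layout shown in this diff, run from the repository root after `pip install -r docs/requirements.txt`):

```python
# build the HTML docs; a nonzero exit code means the Sphinx build failed
from sphinx.cmd.build import build_main

exit_code = build_main(["-b", "html", "docs/source", "docs/_build/html"])
print("docs built" if exit_code == 0 else "build failed")
```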
@@ -58,18 +62,18 @@ # Set autodoc defaults autodoc_default_options = { - 'members': True, - 'member-order': 'bysource', - 'special-members': '__init__' + "members": True, + "member-order": "bysource", + "special-members": "__init__", } # Include todo items/lists todo_include_todos = True -#autodoc_member_order = 'bysource' +# autodoc_member_order = 'bysource' -# intersphinx options +# intersphinx options # intersphinx_mapping = { # 'emdfile': ('https://pypi.org/project/emdfile/0.0.4/', None) @@ -80,21 +84,19 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'sphinx_rtd_theme' +html_theme = "sphinx_rtd_theme" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['../_static'] +html_static_path = ["../_static"] # The name of an image file (relative to this directory) to place at the top # of the sidebar. -html_logo = '../_static/py4DSTEM_logo.png' +html_logo = "../_static/py4DSTEM_logo.png" # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -html_favicon = '../_static/py4DSTEM_logo_vsmall.ico' - - +html_favicon = "../_static/py4DSTEM_logo_vsmall.ico" diff --git a/py4DSTEM/__init__.py b/py4DSTEM/__init__.py index 0d85948b1..dcb6a861d 100644 --- a/py4DSTEM/__init__.py +++ b/py4DSTEM/__init__.py @@ -15,12 +15,12 @@ Custom, print_h5_tree, ) + _emd_hook = True # structure from py4DSTEM import io -from py4DSTEM.io import import_file,read,save - +from py4DSTEM.io import import_file, read, save ### basic data classes @@ -35,12 +35,7 @@ ) # datacube -from py4DSTEM.datacube import ( - DataCube, - VirtualImage, - VirtualDiffraction -) - +from py4DSTEM.datacube import DataCube, VirtualImage, VirtualDiffraction ### visualization @@ -70,8 +65,6 @@ ) - - ### more submodules # TODO @@ -79,16 +72,14 @@ from py4DSTEM import process - ### utilities # config from py4DSTEM.utils.configuration_checker import check_config + # TODO - config .toml # testing -from os.path import dirname,join -_TESTPATH = join(dirname(__file__), "../test/unit_test_data") - - +from os.path import dirname, join +_TESTPATH = join(dirname(__file__), "../test/unit_test_data") diff --git a/py4DSTEM/braggvectors/__init__.py b/py4DSTEM/braggvectors/__init__.py index 030fe6358..482b1f31e 100644 --- a/py4DSTEM/braggvectors/__init__.py +++ b/py4DSTEM/braggvectors/__init__.py @@ -3,6 +3,6 @@ from py4DSTEM.braggvectors.braggvector_methods import BraggVectorMap from py4DSTEM.braggvectors.diskdetection import * from py4DSTEM.braggvectors.probe import * -#from .diskdetection_aiml import * -#from .diskdetection_parallel_new import * +# from .diskdetection_aiml import * +# from .diskdetection_parallel_new import * diff --git a/py4DSTEM/braggvectors/braggvector_methods.py b/py4DSTEM/braggvectors/braggvector_methods.py index be766ad49..267f81e5f 100644 --- a/py4DSTEM/braggvectors/braggvector_methods.py +++ b/py4DSTEM/braggvectors/braggvector_methods.py @@ -18,11 +18,11 @@ class BraggVectorMethods: def histogram( self, - mode = 'cal', - sampling = 1, - weights = None, - weights_thresh = 0.005, - ): + mode="cal", + sampling=1, + weights=None, + weights_thresh=0.005, + ): """ Returns a 2D histogram of Bragg vector intensities in diffraction space, aka a Bragg vector map. 
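The `histogram` hunks below reformat the routine's core numerical trick: each Bragg vector, which generally lands at a sub-pixel position, deposits its intensity onto its four neighboring grid pixels with bilinear weights, accumulated over flattened indices with `np.bincount`. A minimal standalone sketch of that accumulation pattern, using toy arrays rather than py4DSTEM's API:

```python
import numpy as np

# toy data: two peaks at sub-pixel positions (qx, qy) with intensities I
Q_Nx, Q_Ny = 8, 8
qx = np.array([2.3, 5.7])
qy = np.array([4.1, 1.5])
I = np.array([1.0, 2.0])

floorx, floory = np.floor(qx).astype(int), np.floor(qy).astype(int)
ceilx, ceily = np.ceil(qx).astype(int), np.ceil(qy).astype(int)
dx, dy = qx - floorx, qy - floory  # fractional offsets -> bilinear weights

# flatten each point's four neighbor coordinates to 1D indices...
inds00 = np.ravel_multi_index([floorx, floory], (Q_Nx, Q_Ny))
inds01 = np.ravel_multi_index([floorx, ceily], (Q_Nx, Q_Ny))
inds10 = np.ravel_multi_index([ceilx, floory], (Q_Nx, Q_Ny))
inds11 = np.ravel_multi_index([ceilx, ceily], (Q_Nx, Q_Ny))

# ...and accumulate the bilinearly weighted intensities with bincount
hist = (
    np.bincount(inds00, I * (1 - dx) * (1 - dy), minlength=Q_Nx * Q_Ny)
    + np.bincount(inds01, I * (1 - dx) * dy, minlength=Q_Nx * Q_Ny)
    + np.bincount(inds10, I * dx * (1 - dy), minlength=Q_Nx * Q_Ny)
    + np.bincount(inds11, I * dx * dy, minlength=Q_Nx * Q_Ny)
).reshape(Q_Nx, Q_Ny)

assert np.isclose(hist.sum(), I.sum())  # total intensity is conserved
```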
@@ -52,8 +52,8 @@ def histogram( representing the sampling grid. """ # get vectors - assert(mode in ('cal','raw')), f"Invalid mode {mode}!" - if mode == 'cal': + assert mode in ("cal", "raw"), f"Invalid mode {mode}!" + if mode == "cal": v = self.cal else: v = self.raw @@ -62,30 +62,34 @@ def histogram( # handling any weight factors if weights is None: vects = np.concatenate( - [v[i,j].data for i in range(self.Rshape[0]) for j in range(self.Rshape[1])]) + [ + v[i, j].data + for i in range(self.Rshape[0]) + for j in range(self.Rshape[1]) + ] + ) elif weights.dtype == bool: - x,y = np.nonzero(weights) - vects = np.concatenate( - [v[i,j].data for i,j in zip(x,y)]) + x, y = np.nonzero(weights) + vects = np.concatenate([v[i, j].data for i, j in zip(x, y)]) else: l = [] - x,y = np.nonzero(weights>weights_thresh) - for i,j in zip(x,y): - d = v[i,j].data - d['intensity'] *= weights[i,j] + x, y = np.nonzero(weights > weights_thresh) + for i, j in zip(x, y): + d = v[i, j].data + d["intensity"] *= weights[i, j] l.append(d) vects = np.concatenate(l) # get the vectors - qx = vects['qx'] - qy = vects['qy'] - I = vects['intensity'] + qx = vects["qx"] + qy = vects["qy"] + I = vects["intensity"] # Set up bin grid - Q_Nx = np.round(self.Qshape[0]*sampling).astype(int) - Q_Ny = np.round(self.Qshape[1]*sampling).astype(int) + Q_Nx = np.round(self.Qshape[0] * sampling).astype(int) + Q_Ny = np.round(self.Qshape[1] * sampling).astype(int) # transform vects onto bin grid - if mode == 'raw': + if mode == "raw": qx *= sampling qy *= sampling # calibrated vects @@ -93,12 +97,12 @@ def histogram( # then scale by the sampling factor else: # get pixel calibration - if self.calstate['pixel']==True: + if self.calstate["pixel"] == True: qpix = self.calibration.get_Q_pixel_size() qx /= qpix qy /= qpix # origin calibration - if self.calstate['center']==True: + if self.calstate["center"] == True: origin = self.calibration.get_origin_mean() qx += origin[0] qy += origin[1] @@ -114,7 +118,8 @@ # Remove any points outside the bin grid mask = np.logical_and.reduce( - ((floorx>=0),(floory>=0),(ceilx<Q_Nx),(ceily<Q_Ny))) + ((floorx >= 0), (floory >= 0), (ceilx < Q_Nx), (ceily < Q_Ny)) + ) qx = qx[mask] qy = qy[mask] I = I[mask] @@ -128,47 +133,47 @@ dy = qy - floory # Compute indices of the 4 neighbors to (qx,qy) # floor x, floor y - inds00 = np.ravel_multi_index([floorx,floory],(Q_Nx,Q_Ny)) + inds00 = np.ravel_multi_index([floorx, floory], (Q_Nx, Q_Ny)) # floor x, ceil y - inds01 = np.ravel_multi_index([floorx,ceily],(Q_Nx,Q_Ny)) + inds01 = np.ravel_multi_index([floorx, ceily], (Q_Nx, Q_Ny)) # ceil x, floor y - inds10 = np.ravel_multi_index([ceilx,floory],(Q_Nx,Q_Ny)) + inds10 = np.ravel_multi_index([ceilx, floory], (Q_Nx, Q_Ny)) # ceil x, ceil y - inds11 = np.ravel_multi_index([ceilx,ceily],(Q_Nx,Q_Ny)) + inds11 = np.ravel_multi_index([ceilx, ceily], (Q_Nx, Q_Ny)) # Compute the histogram by accumulating intensity in each # neighbor weighted by linear interpolation hist = ( - np.bincount(inds00, I * (1.-dx) * (1.-dy), minlength=Q_Nx*Q_Ny) \ - + np.bincount(inds01, I * (1.-dx) * dy, minlength=Q_Nx*Q_Ny) \ - + np.bincount(inds10, I * dx * (1.-dy), minlength=Q_Nx*Q_Ny) \ - + np.bincount(inds11, I * dx * dy, minlength=Q_Nx*Q_Ny) - ).reshape(Q_Nx,Q_Ny) + np.bincount(inds00, I * (1.0 - dx) * (1.0 - dy), minlength=Q_Nx * Q_Ny) + + np.bincount(inds01, I * (1.0 - dx) * dy, minlength=Q_Nx * Q_Ny) + + np.bincount(inds10, I * dx * (1.0 - dy), minlength=Q_Nx * Q_Ny) + + np.bincount(inds11, I * dx * dy, minlength=Q_Nx * Q_Ny) + ).reshape(Q_Nx, Q_Ny)
# determine the resampled grid center and pixel size - if mode == 'cal' and self.calstate['center']==True: - x0 = sampling*origin[0] - y0 = sampling*origin[1] + if mode == "cal" and self.calstate["center"] == True: + x0 = sampling * origin[0] + y0 = sampling * origin[1] else: - x0,y0 = 0,0 - if mode == 'cal' and self.calstate['pixel']==True: - pixelsize = qpix/sampling + x0, y0 = 0, 0 + if mode == "cal" and self.calstate["pixel"] == True: + pixelsize = qpix / sampling else: - pixelsize = 1/sampling + pixelsize = 1 / sampling # find the dim vectors - dimx = (np.arange(Q_Nx)-x0)*pixelsize - dimy = (np.arange(Q_Ny)-y0)*pixelsize + dimx = (np.arange(Q_Nx) - x0) * pixelsize + dimy = (np.arange(Q_Ny) - y0) * pixelsize dim_units = self.calibration.get_Q_pixel_units() # wrap in a class ans = BraggVectorMap( - name = f'2Dhist_{self.name}_{mode}_s={sampling}', - data = hist, - weights = weights, - dims = [dimx,dimy], - dim_units = dim_units, - origin = (x0,y0), - pixelsize = pixelsize + name=f"2Dhist_{self.name}_{mode}_s={sampling}", + data=hist, + weights=weights, + dims=[dimx, dimy], + dim_units=dim_units, + origin=(x0, y0), + pixelsize=pixelsize, ) # return @@ -177,22 +182,20 @@ def histogram( # aliases get_bvm = get_bragg_vector_map = histogram - - # bragg virtual imaging def get_virtual_image( self, - mode = None, - geometry = None, - name = 'bragg_virtual_image', - returncalc = True, - center = True, - ellipse = True, - pixel = True, - rotate = True, - ): - ''' + mode=None, + geometry=None, + name="bragg_virtual_image", + returncalc=True, + center=True, + ellipse=True, + pixel=True, + rotate=True, + ): + """ Calculates a virtual image based on the values of the Braggvectors integrated over some detector function determined by `mode` and `geometry`. @@ -208,7 +211,7 @@ def get_virtual_image( - 'circle', 'circular': nested 2-tuple, ((qx,qy),radius) - 'annular' or 'annulus': nested 2-tuple, ((qx,qy),(radius_i,radius_o)) - Values can be in pixels or calibrated units. Note that (qx,qy) + Values can be in pixels or calibrated units. Note that (qx,qy) can be skipped, which assumes peaks centered at (0,0). center: bool Apply calibration - center coordinate. 
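Per the docstring above, `geometry` nests a center and one or two radii, and an empty center tuple means the peaks are treated as centered at (0,0); the hunks below reformat the parsing of those cases. A hedged usage sketch, where `braggvectors` and the radii are hypothetical:

```python
# virtual image from Bragg peaks falling inside an annulus (calibrated units)
im = braggvectors.get_virtual_image(
    mode="annular",
    geometry=((), (0.5, 2.0)),  # empty center tuple -> peaks centered at (0,0)
    name="bragg_annular_image",
)
```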
@@ -222,40 +225,40 @@ Returns ------- virtual_im : VirtualImage - ''' + """ # parse inputs - circle_modes = ['circular','circle'] - annulus_modes = ['annular','annulus'] + circle_modes = ["circular", "circle"] + annulus_modes = ["annular", "annulus"] modes = circle_modes + annulus_modes + [None] - assert(mode in modes), f"Unrecognized mode {mode}" + assert mode in modes, f"Unrecognized mode {mode}" # set geometry if mode is None: if geometry is None: qxy_center = None - radial_range = np.array((0,np.inf)) + radial_range = np.array((0, np.inf)) else: if len(geometry[0]) == 0: qxy_center = None else: qxy_center = np.array(geometry[0]) if isinstance(geometry[1], int) or isinstance(geometry[1], float): - radial_range = np.array((0,geometry[1])) + radial_range = np.array((0, geometry[1])) elif len(geometry[1]) == 0: radial_range = None else: radial_range = np.array(geometry[1]) - elif mode == 'circular' or mode == 'circle': - radial_range = np.array((0,geometry[1])) + elif mode == "circular" or mode == "circle": + radial_range = np.array((0, geometry[1])) if len(geometry[0]) == 0: - qxy_center = None + qxy_center = None else: qxy_center = np.array(geometry[0]) - elif mode == 'annular' or mode == 'annulus': + elif mode == "annular" or mode == "annulus": radial_range = np.array(geometry[1]) if len(geometry[0]) == 0: - qxy_center = None + qxy_center = None else: qxy_center = np.array(geometry[0]) @@ -263,52 +266,45 @@ im_virtual = np.zeros(self.shape) # generate image - for rx,ry in tqdmnd( + for rx, ry in tqdmnd( self.shape[0], self.shape[1], - ): + ): # Get user-specified Bragg vectors p = self.get_vectors( rx, ry, - center = center, - ellipse = ellipse, - pixel = pixel, - rotate = rotate, - ) + center=center, + ellipse=ellipse, + pixel=pixel, + rotate=rotate, + ) if p.data.shape[0] > 0: if radial_range is None: - im_virtual[rx,ry] = np.sum(p.I) + im_virtual[rx, ry] = np.sum(p.I) else: if qxy_center is None: - qr = np.hypot(p.qx,p.qy) + qr = np.hypot(p.qx, p.qy) else: - qr = np.hypot( - p.qx - qxy_center[0], - p.qy - qxy_center[1]) - sub = np.logical_and( - qr >= radial_range[0], - qr < radial_range[1]) + qr = np.hypot(p.qx - qxy_center[0], p.qy - qxy_center[1]) + sub = np.logical_and(qr >= radial_range[0], qr < radial_range[1]) if np.sum(sub) > 0: - im_virtual[rx,ry] = np.sum(p.I[sub]) + im_virtual[rx, ry] = np.sum(p.I[sub]) # wrap in Virtual Image class - ans = VirtualImage( - data = im_virtual, - name = name - ) + ans = VirtualImage(data=im_virtual, name=name) # add generating params as metadata ans.metadata = Metadata( - name = 'gen_params', - data = { - '_calling_method' : inspect.stack()[0][3], - '_calling_class' : __class__.__name__, - 'mode' : mode, - 'geometry' : geometry, - 'name' : name, - 'returncalc' : returncalc - } + name="gen_params", + data={ + "_calling_method": inspect.stack()[0][3], + "_calling_class": __class__.__name__, + "mode": mode, + "geometry": geometry, + "name": name, + "returncalc": returncalc, + }, ) # attach to the tree self.attach(ans) @@ -317,15 +313,12 @@ if returncalc: return ans - - - # calibration measurements def measure_origin( self, - center_guess = None, - score_method = None, + center_guess=None, + score_method=None, findcenter="max", ): """ @@ -363,11 +356,16 @@ the Bragg peaks identified with the unscattered beam. Useful for diagnostic purposes.
""" - assert(findcenter in ["CoM", "max"]), "center must be either 'CoM' or 'max'" - assert score_method in ["distance", "intensity", "intensity weighted distance", None], "center must be either 'distance' or 'intensity weighted distance'" + assert findcenter in ["CoM", "max"], "center must be either 'CoM' or 'max'" + assert score_method in [ + "distance", + "intensity", + "intensity weighted distance", + None, + ], "center must be either 'distance' or 'intensity weighted distance'" - R_Nx,R_Ny = self.Rshape - Q_Nx,Q_Ny = self.Qshape + R_Nx, R_Ny = self.Rshape + Q_Nx, Q_Ny = self.Qshape # Default scoring method if score_method is None: @@ -378,13 +376,14 @@ def measure_origin( # Get guess at position of unscattered beam (x0,y0) if center_guess is None: - bvm = self.histogram( mode='raw' ) + bvm = self.histogram(mode="raw") if findcenter == "max": x0, y0 = np.unravel_index( np.argmax(gaussian_filter(bvm, 10)), (Q_Nx, Q_Ny) ) else: from py4DSTEM.process.utils import get_CoM + x0, y0 = get_CoM(bvm) else: x0, y0 = center_guess @@ -395,7 +394,7 @@ def measure_origin( mask = np.ones(self.Rshape, dtype=bool) for Rx in range(R_Nx): for Ry in range(R_Ny): - vects = self.raw[Rx,Ry].data + vects = self.raw[Rx, Ry].data if len(vects) > 0: if score_method == "distance": r2 = (vects["qx"] - x0) ** 2 + (vects["qy"] - y0) ** 2 @@ -403,7 +402,9 @@ def measure_origin( elif score_method == "intensity": index = np.argmax(vects["intensity"]) elif score_method == "intensity weighted distance": - r2 = vects["intensity"]/(1+((vects["qx"] - x0) ** 2 + (vects["qy"] - y0) ** 2)) + r2 = vects["intensity"] / ( + 1 + ((vects["qx"] - x0) ** 2 + (vects["qy"] - y0) ** 2) + ) index = np.argmax(r2) qx0[Rx, Ry] = vects["qx"][index] qy0[Rx, Ry] = vects["qy"][index] @@ -413,21 +414,15 @@ def measure_origin( qy0[Rx, Ry] = y0 # set calibration metadata - self.calibration.set_origin_meas((qx0,qy0)) + self.calibration.set_origin_meas((qx0, qy0)) self.calibration.set_origin_meas_mask(mask) # return return qx0, qy0, mask - def measure_origin_beamstop( - self, - center_guess, - radii, - max_dist=2, - max_iter=1, - **kwargs - ): + self, center_guess, radii, max_dist=None, max_iter=1, **kwargs + ): """ Find the origin from a set of braggpeaks assuming there is a beamstop, by identifying pairs of conjugate peaks inside an annular region and finding their centers of mass. 
@@ -442,74 +437,76 @@ Returns: (2d masked array): the origins """ - R_Nx,R_Ny = self.Rshape + R_Nx, R_Ny = self.Rshape braggpeaks = self._v_uncal + if max_dist is None: + max_dist = radii[1] + # remove peaks outside the annulus braggpeaks_masked = braggpeaks.copy() for rx in range(R_Nx): for ry in range(R_Ny): - pl = braggpeaks_masked[rx,ry] - qr = np.hypot(pl.data['qx']-center_guess[0], - pl.data['qy']-center_guess[1]) - rm = np.logical_not(np.logical_and(qr>=radii[0],qr<=radii[1])) + pl = braggpeaks_masked[rx, ry] + qr = np.hypot( + pl.data["qx"] - center_guess[0], pl.data["qy"] - center_guess[1] + ) + rm = np.logical_not(np.logical_and(qr >= radii[0], qr <= radii[1])) pl.remove(rm) # Find all matching conjugate pairs of peaks center_curr = center_guess for ii in range(max_iter): - centers = np.zeros((R_Nx,R_Ny,2)) - found_center = np.zeros((R_Nx,R_Ny),dtype=bool) + centers = np.zeros((R_Nx, R_Ny, 2)) + found_center = np.zeros((R_Nx, R_Ny), dtype=bool) for rx in range(R_Nx): for ry in range(R_Ny): - # Get data - pl = braggpeaks_masked[rx,ry] - is_paired = np.zeros(len(pl.data),dtype=bool) + pl = braggpeaks_masked[rx, ry] + is_paired = np.zeros(len(pl.data), dtype=bool) matches = [] # Find matching pairs for i in range(len(pl.data)): if not is_paired[i]: - x,y = pl.data['qx'][i],pl.data['qy'][i] - x_r = -x+2*center_curr[0] - y_r = -y+2*center_curr[1] - dists = np.hypot(x_r-pl.data['qx'],y_r-pl.data['qy']) - dists[is_paired] = 2*max_dist - matched = dists<=max_dist - if(any(matched)): + x, y = pl.data["qx"][i], pl.data["qy"][i] + x_r = -x + 2 * center_curr[0] + y_r = -y + 2 * center_curr[1] + dists = np.hypot(x_r - pl.data["qx"], y_r - pl.data["qy"]) + dists[is_paired] = 2 * max_dist + matched = dists <= max_dist + if any(matched): match = np.argmin(dists) - matches.append((i,match)) - is_paired[i],is_paired[match] = True,True + matches.append((i, match)) + is_paired[i], is_paired[match] = True, True # Find the center - if len(matches)>0: - x0,y0 = [],[] + if len(matches) > 0: + x0, y0 = [], [] for i in range(len(matches)): - x0.append(np.mean(pl.data['qx'][list(matches[i])])) - y0.append(np.mean(pl.data['qy'][list(matches[i])])) - x0,y0 = np.mean(x0),np.mean(y0) - centers[rx,ry,:] = x0,y0 - found_center[rx,ry] = True + x0.append(np.mean(pl.data["qx"][list(matches[i])])) + y0.append(np.mean(pl.data["qy"][list(matches[i])])) + x0, y0 = np.mean(x0), np.mean(y0) + centers[rx, ry, :] = x0, y0 + found_center[rx, ry] = True else: - found_center[rx,ry] = False + found_center[rx, ry] = False # Update current center guess - x0_curr = np.mean(centers[found_center,0]) - y0_curr = np.mean(centers[found_center,1]) - center_curr = x0_curr,y0_curr + x0_curr = np.mean(centers[found_center, 0]) + y0_curr = np.mean(centers[found_center, 1]) + center_curr = x0_curr, y0_curr # collect answers mask = found_center - qx0,qy0 = centers[:,:,0],centers[:,:,1] + qx0, qy0 = centers[:, :, 0], centers[:, :, 1] # set calibration metadata - self.calibration.set_origin_meas((qx0,qy0)) + self.calibration.set_origin_meas((qx0, qy0)) self.calibration.set_origin_meas_mask(mask) # return - return qx0,qy0,mask - + return qx0, qy0, mask def fit_origin( self, mask=None, fit_function="plane", robust=False, robust_steps=3, robust_thresh=2, - mask_check_data = True, - plot = True, - plot_range = None, - returncalc = True, - **kwargs - ): + mask_check_data=True, + plot=True, + plot_range=None, + returncalc=True, + **kwargs, + ): """ Fit origin of bragg vectors.
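The `fit_origin` hunks below are a reformat of the fitting and plotting logic; in use, it follows a measurement step and reads the measured origins from the calibration metadata. A hedged sketch of the typical call sequence, with a hypothetical `braggvectors` object:

```python
# measure a per-scan-position origin, then fit a smooth surface to it
qx0_meas, qy0_meas, mask = braggvectors.measure_origin(findcenter="max")
qx0_fit, qy0_fit, qx0_res, qy0_res = braggvectors.fit_origin(plot=True)
```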
@@ -553,79 +550,89 @@ q_meas = self.calibration.get_origin_meas() from py4DSTEM.process.calibration import fit_origin + if mask_check_data is True: # TODO - replace this bad hack for the mask for the origin fit - mask = np.logical_not(q_meas[0]==0) - qx0_fit,qy0_fit,qx0_residuals,qy0_residuals = fit_origin( + mask = np.logical_not(q_meas[0] == 0) + qx0_fit, qy0_fit, qx0_residuals, qy0_residuals = fit_origin( tuple(q_meas), - mask = mask, - ) + mask=mask, + ) else: - qx0_fit,qy0_fit,qx0_residuals,qy0_residuals = fit_origin( - tuple(q_meas)) + qx0_fit, qy0_fit, qx0_residuals, qy0_residuals = fit_origin(tuple(q_meas)) # try to add to calibration try: - self.calibration.set_origin([qx0_fit,qy0_fit]) + self.calibration.set_origin([qx0_fit, qy0_fit]) except AttributeError: - warn("No calibration found on this datacube - fit values are not being stored") + warn( + "No calibration found on this datacube - fit values are not being stored" + ) pass if plot: from py4DSTEM.visualize import show_image_grid + if mask is None: - qx0_meas,qy0_meas = q_meas + qx0_meas, qy0_meas = q_meas qx0_res_plot = qx0_residuals qy0_res_plot = qy0_residuals else: - qx0_meas = np.ma.masked_array(q_meas[0], mask = np.logical_not(mask)) - qy0_meas = np.ma.masked_array(q_meas[1], mask = np.logical_not(mask)) - qx0_res_plot = np.ma.masked_array(qx0_residuals, mask = np.logical_not(mask)) - qy0_res_plot = np.ma.masked_array(qy0_residuals, mask = np.logical_not(mask)) + qx0_meas = np.ma.masked_array(q_meas[0], mask=np.logical_not(mask)) + qy0_meas = np.ma.masked_array(q_meas[1], mask=np.logical_not(mask)) + qx0_res_plot = np.ma.masked_array( + qx0_residuals, mask=np.logical_not(mask) + ) + qy0_res_plot = np.ma.masked_array( + qy0_residuals, mask=np.logical_not(mask) + ) qx0_mean = np.mean(qx0_fit) qy0_mean = np.mean(qy0_fit) - if plot_range is None: - plot_range = 2*np.max(qx0_fit - qx0_mean) + if plot_range is None: + plot_range = 2 * np.max(qx0_fit - qx0_mean) cmap = kwargs.get("cmap", "RdBu_r") kwargs.pop("cmap", None) - axsize = kwargs.get("axsize", (6,2)) + axsize = kwargs.get("axsize", (6, 2)) kwargs.pop("axsize", None) show_image_grid( - lambda i:[qx0_meas-qx0_mean,qx0_fit-qx0_mean,qx0_res_plot, - qy0_meas-qy0_mean,qy0_fit-qy0_mean,qy0_res_plot][i], - H = 2, - W = 3, - cmap = cmap, - axsize = axsize, - title = [ - 'measured origin, x', 'fitorigin, x', 'residuals, x', - 'measured origin, y', 'fitorigin, y', 'residuals, y' + lambda i: [ + qx0_meas - qx0_mean, + qx0_fit - qx0_mean, + qx0_res_plot, + qy0_meas - qy0_mean, + qy0_fit - qy0_mean, + qy0_res_plot, + ][i], + H=2, + W=3, + cmap=cmap, + axsize=axsize, + title=[ + "measured origin, x", + "fit origin, x", + "residuals, x", + "measured origin, y", + "fit origin, y", + "residuals, y", ], - vmin = -1*plot_range, - vmax = 1*plot_range, - intensity_range = "absolute", + vmin=-1 * plot_range, + vmax=1 * plot_range, + intensity_range="absolute", **kwargs, ) # update calibration metadata - self.calibration.set_origin((qx0_fit,qy0_fit)) + self.calibration.set_origin((qx0_fit, qy0_fit)) self.setcal() if returncalc: - return qx0_fit,qy0_fit,qx0_residuals,qy0_residuals + return qx0_fit, qy0_fit, qx0_residuals, qy0_residuals def fit_p_ellipse( - self, - bvm, - center, - fitradii, - mask=None, - returncalc = False, - **kwargs - ): + self, bvm, center, fitradii, mask=None, returncalc=False, **kwargs + ): """ Args: bvm (BraggVectorMap): a 2D array used for ellipse fitting @@ -637,23 +644,14 @@ p_ellipse if returncalc is True """ from py4DSTEM.process.calibration import
fit_ellipse_1D - p_ellipse = fit_ellipse_1D( - bvm, - center, - fitradii, - mask - ) + + p_ellipse = fit_ellipse_1D(bvm, center, fitradii, mask) scaling = kwargs.get("scaling", "log") kwargs.pop("scaling", None) from py4DSTEM.visualize import show_elliptical_fit - show_elliptical_fit( - bvm, - fitradii, - p_ellipse, - scaling = scaling, - **kwargs - ) + + show_elliptical_fit(bvm, fitradii, p_ellipse, scaling=scaling, **kwargs) self.calibration.set_p_ellipse(p_ellipse) self.setcal() @@ -661,12 +659,7 @@ def fit_p_ellipse( if returncalc: return p_ellipse - def mask_in_Q( - self, - mask, - update_inplace = False, - returncalc = True - ): + def mask_in_Q(self, mask, update_inplace=False, returncalc=True): """ Remove peaks which fall inside the diffraction shaped boolean array `mask`, in raw (uncalibrated) peak positions. @@ -690,23 +683,23 @@ def mask_in_Q( if update_inplace: v = self._v_uncal else: - v = self._v_uncal.copy( name='_v_uncal' ) + v = self._v_uncal.copy(name="_v_uncal") # Loop and remove masked peaks for rx in range(v.shape[0]): for ry in range(v.shape[1]): - p = v[rx,ry] + p = v[rx, ry] xs = np.round(p.data["qx"]).astype(int) ys = np.round(p.data["qy"]).astype(int) - sub = mask[xs,ys] + sub = mask[xs, ys] p.remove(sub) # assign the return value if update_inplace: ans = self else: - ans = self.copy( name=self.name+'_masked' ) - ans.set_raw_vectors( v ) + ans = self.copy(name=self.name + "_masked") + ans.set_raw_vectors(v) # return if returncalc: @@ -715,27 +708,18 @@ def mask_in_Q( return # alias - def get_masked_peaks( - self, - mask, - update_inplace = False, - returncalc = True): + def get_masked_peaks(self, mask, update_inplace=False, returncalc=True): """ Alias for `mask_in_Q`. """ - warn("`.get_masked_peaks` is deprecated and will be removed in a future version. Use `.mask_in_Q`") + warn( + "`.get_masked_peaks` is deprecated and will be removed in a future version. Use `.mask_in_Q`" + ) return self.mask_in_Q( - mask = mask, - update_inplace = update_inplace, - returncalc = returncalc + mask=mask, update_inplace=update_inplace, returncalc=returncalc ) - def mask_in_R( - self, - mask, - update_inplace = False, - returncalc = True - ): + def mask_in_R(self, mask, update_inplace=False, returncalc=True): """ Remove peaks which fall inside the real space shaped boolean array `mask`. 
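The masking methods above (`mask_in_Q`) and below (`mask_in_R`) drop peaks by diffraction-space and real-space boolean masks respectively, either in place or on a copy. A hedged usage sketch with hypothetical mask shapes:

```python
import numpy as np

# remove peaks under a beamstop region of diffraction space
beamstop = np.zeros(braggvectors.Qshape, dtype=bool)
beamstop[:, :16] = True
bv = braggvectors.mask_in_Q(beamstop, update_inplace=False)

# then drop all peaks from unwanted scan positions
bad_rows = np.zeros(braggvectors.Rshape, dtype=bool)
bad_rows[0, :] = True
bv = bv.mask_in_R(bad_rows)
```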
@@ -759,21 +743,21 @@ def mask_in_R( if update_inplace: v = self._v_uncal else: - v = self._v_uncal.copy( name='_v_uncal' ) + v = self._v_uncal.copy(name="_v_uncal") # Loop and remove masked peaks for rx in range(v.shape[0]): for ry in range(v.shape[1]): - if mask[rx,ry]: - p = v[rx,ry] - p.remove(np.ones(len(p),dtype=bool)) + if mask[rx, ry]: + p = v[rx, ry] + p.remove(np.ones(len(p), dtype=bool)) # assign the return value if update_inplace: ans = self else: - ans = self.copy( name=self.name+'_masked' ) - ans.set_raw_vectors( v ) + ans = self.copy(name=self.name + "_masked") + ans.set_raw_vectors(v) # return if returncalc: @@ -782,73 +766,54 @@ def mask_in_R( return - - - ######### END BraggVectorMethods CLASS ######## - class BraggVectorMap(Array): - - def __init__( - self, - name, - data, - weights, - dims, - dim_units, - origin, - pixelsize - ): + def __init__(self, name, data, weights, dims, dim_units, origin, pixelsize): Array.__init__( self, - name = name, - data = data, - dims = dims, - dim_units = [dim_units,dim_units], + name=name, + data=data, + dims=dims, + dim_units=[dim_units, dim_units], ) self.metadata = Metadata( - name = 'grid', - data = { - 'origin' : origin, - 'pixelsize' : pixelsize, - 'weights' : weights - } + name="grid", + data={"origin": origin, "pixelsize": pixelsize, "weights": weights}, ) @property def origin(self): - return self.metadata['grid']['origin'] + return self.metadata["grid"]["origin"] + @property def pixelsize(self): - return self.metadata['grid']['pixelsize'] + return self.metadata["grid"]["pixelsize"] + @property def pixelunits(self): return self.dim_units[0] + @property def weights(self): - return self.metadata['grid']['weights'] - + return self.metadata["grid"]["weights"] # read @classmethod - def _get_constructor_args(cls,group): + def _get_constructor_args(cls, group): """ Returns a dictionary of args/values to pass to the class constructor """ constr_args = Array._get_constructor_args(group) - metadata = _read_metadata(group,'grid') + metadata = _read_metadata(group, "grid") args = { - 'name' : constr_args['name'], - 'data' : constr_args['data'], - 'weights' : metadata['weights'], - 'dims' : constr_args['dims'], - 'dim_units' : constr_args['dim_units'], - 'origin' : metadata['origin'], - 'pixelsize' : metadata['pixelsize'] + "name": constr_args["name"], + "data": constr_args["data"], + "weights": metadata["weights"], + "dims": constr_args["dims"], + "dim_units": constr_args["dim_units"], + "origin": metadata["origin"], + "pixelsize": metadata["pixelsize"], } return args - - - diff --git a/py4DSTEM/braggvectors/braggvectors.py b/py4DSTEM/braggvectors/braggvectors.py index f1ff406d0..45c08b9c9 100644 --- a/py4DSTEM/braggvectors/braggvectors.py +++ b/py4DSTEM/braggvectors/braggvectors.py @@ -1,14 +1,14 @@ # Defines the BraggVectors class from py4DSTEM.data import Data -from emdfile import Custom,PointListArray,PointList,Metadata +from emdfile import Custom, PointListArray, PointList, Metadata from py4DSTEM.braggvectors.braggvector_methods import BraggVectorMethods from os.path import basename import numpy as np from warnings import warn -class BraggVectors(Custom,BraggVectorMethods,Data): +class BraggVectors(Custom, BraggVectorMethods, Data): """ Stores localized bragg scattering positions and intensities for a 4D-STEM datacube. 
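The constructor hunk below shows that a `BraggVectors` instance is little more than an Rshape-shaped `PointListArray` of raw peaks plus a calibration state; the raw and calibrated views are exposed through the `.raw` and `.cal` getters defined further down. A minimal construction sketch with hypothetical shapes (assuming the class is importable from the module path in the diff header):

```python
from py4DSTEM.braggvectors.braggvectors import BraggVectors

bv = BraggVectors(Rshape=(64, 64), Qshape=(128, 128), name="braggvectors")
print(bv.raw[0, 0])  # raw (uncalibrated) vectors at scan position (0, 0)
bv.setcal()          # sync the calibration state to whatever cals exist
```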
@@ -61,36 +61,27 @@ class BraggVectors(Custom,BraggVectorMethods,Data): """ def __init__( - self, - Rshape, - Qshape, - name = 'braggvectors', - verbose = False, - calibration = None - ): - Custom.__init__(self,name=name) - Data.__init__(self,calibration=calibration) + self, Rshape, Qshape, name="braggvectors", verbose=False, calibration=None + ): + Custom.__init__(self, name=name) + Data.__init__(self, calibration=calibration) self.Rshape = Rshape self.Qshape = Qshape self.verbose = verbose self._v_uncal = PointListArray( - dtype = [ - ('qx',np.float64), - ('qy',np.float64), - ('intensity',np.float64) - ], - shape = Rshape, - name = '_v_uncal' + dtype=[("qx", np.float64), ("qy", np.float64), ("intensity", np.float64)], + shape=Rshape, + name="_v_uncal", ) # initial calibration state self._calstate = { - "center" : False, - "ellipse" : False, - "pixel" : False, - "rotate" : False, + "center": False, + "ellipse": False, + "pixel": False, + "rotate": False, } # register with calibrations @@ -100,39 +91,34 @@ def __init__( self._set_raw_vector_getter() self._set_cal_vector_getter() - # set new raw vectors - def set_raw_vectors(self,x): - """ Given some PointListArray x of the correct shape, sets this to the raw vectors - """ - assert(isinstance(x,PointListArray)), f"Raw vectors must be set to a PointListArray, not type {type(x)}" - assert(x.shape == self.Rshape), "Shapes don't match!" + def set_raw_vectors(self, x): + """Given some PointListArray x of the correct shape, sets this to the raw vectors""" + assert isinstance( + x, PointListArray + ), f"Raw vectors must be set to a PointListArray, not type {type(x)}" + assert x.shape == self.Rshape, "Shapes don't match!" self._v_uncal = x self._set_raw_vector_getter() self._set_cal_vector_getter() - # calibration state, vector getters @property def calstate(self): return self._calstate + def _set_raw_vector_getter(self): - self._raw_vector_getter = RawVectorGetter( - braggvects = self - ) - def _set_cal_vector_getter(self): - self._cal_vector_getter = CalibratedVectorGetter( - braggvects = self - ) + self._raw_vector_getter = RawVectorGetter(braggvects=self) + def _set_cal_vector_getter(self): + self._cal_vector_getter = CalibratedVectorGetter(braggvects=self) # shape @property def shape(self): return self.Rshape - # raw vectors @property @@ -147,7 +133,6 @@ def raw(self): # use the vector getter to grab the vector return self._raw_vector_getter - # calibrated vectors @property @@ -165,15 +150,14 @@ def cal(self): # retrieve the getter and return return self._cal_vector_getter - # set calibration state def setcal( self, - center = None, - ellipse = None, - pixel = None, - rotate = None, + center=None, + ellipse=None, + pixel=None, + rotate=None, ): """ Calling @@ -201,10 +185,10 @@ def setcal( except Exception: warn("No calibrations found at .calibration; setting all cals to False") self._calstate = { - "center" : False, - "ellipse" : False, - "pixel" : False, - "rotate" : False, + "center": False, + "ellipse": False, + "pixel": False, + "rotate": False, } return @@ -220,23 +204,23 @@ def setcal( # validate requested state if center: - assert(c.get_origin() is not None), "Requested calibration not found" + assert c.get_origin() is not None, "Requested calibration not found" if ellipse: - assert(c.get_ellipse() is not None), "Requested calibration not found" + assert c.get_ellipse() is not None, "Requested calibration not found" if pixel: - assert(c.get_Q_pixel_size() is not None), "Requested calibration not found" + assert c.get_Q_pixel_size() is 
not None, "Requested calibration not found" if rotate: - assert(c.get_QR_rotflip() is not None), "Requested calibration not found" + assert c.get_QR_rotflip() is not None, "Requested calibration not found" # set the calibrations self._calstate = { - "center" : center, - "ellipse" : ellipse, - "pixel" : pixel, - "rotate" : rotate, + "center": center, + "ellipse": ellipse, + "pixel": pixel, + "rotate": rotate, } if self.verbose: - print('current calibration state: ', self.calstate) + print("current calibration state: ", self.calstate) pass def calibrate(self): @@ -245,19 +229,9 @@ def calibrate(self): """ self.setcal() - - # vector getter method - def get_vectors( - self, - scan_x, - scan_y, - center, - ellipse, - pixel, - rotate - ): + def get_vectors(self, scan_x, scan_y, center, ellipse, pixel, rotate): """ Returns the bragg vectors at the specified scan position with the specified calibration state. @@ -276,92 +250,84 @@ def get_vectors( vectors : BVects """ - ans = self._v_uncal[scan_x,scan_y].data + ans = self._v_uncal[scan_x, scan_y].data ans = self.cal._transform( - data = ans, - cal = self.calibration, - scanxy = (scan_x,scan_y), - center = center, - ellipse = ellipse, - pixel = pixel, - rotate = rotate, + data=ans, + cal=self.calibration, + scanxy=(scan_x, scan_y), + center=center, + ellipse=ellipse, + pixel=pixel, + rotate=rotate, ) return BVects(ans) - # copy def copy(self, name=None): - name = name if name is not None else self.name+"_copy" + name = name if name is not None else self.name + "_copy" braggvector_copy = BraggVectors( - self.Rshape, - self.Qshape, - name=name, - calibration = self.calibration.copy() + self.Rshape, self.Qshape, name=name, calibration=self.calibration.copy() ) - braggvector_copy.set_raw_vectors( self._v_uncal.copy() ) + braggvector_copy.set_raw_vectors(self._v_uncal.copy()) for k in self.metadata.keys(): braggvector_copy.metadata = self.metadata[k].copy() return braggvector_copy - # write - def to_h5(self,group): - """ Constructs the group, adds the bragg vector pointlists, + def to_h5(self, group): + """Constructs the group, adds the bragg vector pointlists, and adds metadata describing the shape """ - md = Metadata( name = '_braggvectors_shape' ) - md['Rshape'] = self.Rshape - md['Qshape'] = self.Qshape + md = Metadata(name="_braggvectors_shape") + md["Rshape"] = self.Rshape + md["Qshape"] = self.Qshape self.metadata = md - grp = Custom.to_h5(self,group) - + grp = Custom.to_h5(self, group) + return grp # read @classmethod - def _get_constructor_args(cls,group): - """ - """ + def _get_constructor_args(cls, group): + """ """ # Get shape metadata from the metadatabundle group - assert('metadatabundle' in group.keys()), "No metadata found, can't get Rshape and Qshape" - grp_metadata = group['metadatabundle'] - assert('_braggvectors_shape' in grp_metadata.keys()), "No _braggvectors_shape metadata found" - md = Metadata.from_h5(grp_metadata['_braggvectors_shape']) + assert ( + "metadatabundle" in group.keys() + ), "No metadata found, can't get Rshape and Qshape" + grp_metadata = group["metadatabundle"] + assert ( + "_braggvectors_shape" in grp_metadata.keys() + ), "No _braggvectors_shape metadata found" + md = Metadata.from_h5(grp_metadata["_braggvectors_shape"]) # Populate args and return kwargs = { - 'name' : basename(group.name), - 'Rshape' : md['Rshape'], - 'Qshape' : md['Qshape'] + "name": basename(group.name), + "Rshape": md["Rshape"], + "Qshape": md["Qshape"], } return kwargs - def _populate_instance(self,group): - """ - """ + def 
_populate_instance(self, group): + """ """ # Get the vectors dic = self._get_emd_attr_data(group) - assert('_v_uncal' in dic.keys()), "Uncalibrated bragg vectors not found!" - self._v_uncal = dic['_v_uncal'] + assert "_v_uncal" in dic.keys(), "Uncalibrated bragg vectors not found!" + self._v_uncal = dic["_v_uncal"] # Point the vector getters to the vectors self._set_raw_vector_getter() self._set_cal_vector_getter() - # standard output display def __repr__(self): - - space = ' '*len(self.__class__.__name__)+' ' + space = " " * len(self.__class__.__name__) + " " string = f"{self.__class__.__name__}( " string += f"A {self.shape}-shaped array of lists of bragg vectors )" return string - - - # Vector access classes @@ -374,36 +340,34 @@ class BVects: -like access to a collection of Bragg vector. """ - def __init__( - self, - data - ): - """ pointlist must have fields 'qx', 'qy', and 'intensity' - """ + def __init__(self, data): + """pointlist must have fields 'qx', 'qy', and 'intensity'""" self._data = data @property def qx(self): - return self._data['qx'] + return self._data["qx"] + @property def qy(self): - return self._data['qy'] + return self._data["qy"] + @property def I(self): - return self._data['intensity'] + return self._data["intensity"] + @property def data(self): return self._data def __repr__(self): - space = ' '*len(self.__class__.__name__)+' ' + space = " " * len(self.__class__.__name__) + " " string = f"{self.__class__.__name__}( " string += f"A set of {len(self.data)} bragg vectors." string += " Access data with .qx, .qy, .I, or .data.)" return string - class RawVectorGetter: def __init__( self, @@ -412,20 +376,19 @@ def __init__( self._bvects = braggvects self._data = braggvects._v_uncal - def __getitem__(self,pos): - x,y = pos - ans = self._data[x,y].data + def __getitem__(self, pos): + x, y = pos + ans = self._data[x, y].data return BVects(ans) def __repr__(self): - space = ' '*len(self.__class__.__name__)+' ' + space = " " * len(self.__class__.__name__) + " " string = f"{self.__class__.__name__}( " string += f"Retrieves raw bragg vectors. Get vectors for scan position x,y with [x,y]. )" return string class CalibratedVectorGetter: - def __init__( self, braggvects, @@ -433,25 +396,29 @@ def __init__( self._bvects = braggvects self._data = braggvects._v_uncal - def __getitem__(self,pos): - x,y = pos - ans = self._data[x,y].data + def __getitem__(self, pos): + x, y = pos + ans = self._data[x, y].data ans = self._transform( - data = ans, - cal = self._bvects.calibration, - scanxy = (x,y), - center = self._bvects.calstate['center'], - ellipse = self._bvects.calstate['ellipse'], - pixel = self._bvects.calstate['pixel'], - rotate = self._bvects.calstate['rotate'], + data=ans, + cal=self._bvects.calibration, + scanxy=(x, y), + center=self._bvects.calstate["center"], + ellipse=self._bvects.calstate["ellipse"], + pixel=self._bvects.calstate["pixel"], + rotate=self._bvects.calstate["rotate"], ) return BVects(ans) def __repr__(self): - space = ' '*len(self.__class__.__name__)+' ' + space = " " * len(self.__class__.__name__) + " " string = f"{self.__class__.__name__}( " string += "Retrieves calibrated Bragg vectors. Get vectors for scan position x,y with [x,y]." - string += "\n"+space+"Set which calibrations to apply with braggvectors.setcal(...). )" + string += ( + "\n" + + space + + "Set which calibrations to apply with braggvectors.setcal(...). 
)" + ) return string def _transform( @@ -463,7 +430,7 @@ def _transform( ellipse, pixel, rotate, - ): + ): """ Return a transformed copy of stractured data `data` with fields with fields 'qx','qy','intensity', applying calibrating transforms @@ -472,65 +439,61 @@ def _transform( """ ans = data.copy() - x,y = scanxy + x, y = scanxy # origin if center: - origin = cal.get_origin(x,y) - assert(origin is not None), "Requested calibration was not found!" - ans['qx'] -= origin[0] - ans['qy'] -= origin[1] - + origin = cal.get_origin(x, y) + assert origin is not None, "Requested calibration was not found!" + ans["qx"] -= origin[0] + ans["qy"] -= origin[1] # ellipse if ellipse: - ell = cal.get_ellipse(x,y) - assert(ell is not None), "Requested calibration was not found!" - a,b,theta = ell + ell = cal.get_ellipse(x, y) + assert ell is not None, "Requested calibration was not found!" + a, b, theta = ell # Get the transformation matrix - e = b/a - sint, cost = np.sin(theta-np.pi/2.), np.cos(theta-np.pi/2.) + e = b / a + sint, cost = np.sin(theta - np.pi / 2.0), np.cos(theta - np.pi / 2.0) T = np.array( - [ - [e*sint**2 + cost**2, sint*cost*(1-e)], - [sint*cost*(1-e), sint**2 + e*cost**2] - ] - ) + [ + [e * sint**2 + cost**2, sint * cost * (1 - e)], + [sint * cost * (1 - e), sint**2 + e * cost**2], + ] + ) # apply it - xyar_i = np.vstack([ans['qx'],ans['qy']]) + xyar_i = np.vstack([ans["qx"], ans["qy"]]) xyar_f = np.matmul(T, xyar_i) - ans['qx'] = xyar_f[0, :] - ans['qy'] = xyar_f[1, :] - + ans["qx"] = xyar_f[0, :] + ans["qy"] = xyar_f[1, :] # pixel size if pixel: qpix = cal.get_Q_pixel_size() - assert(qpix is not None), "Requested calibration was not found!" - ans['qx'] *= qpix - ans['qy'] *= qpix - + assert qpix is not None, "Requested calibration was not found!" + ans["qx"] *= qpix + ans["qy"] *= qpix # Q/R rotation if rotate: flip = cal.get_QR_flip() theta = cal.get_QR_rotation_degrees() - assert(flip is not None), "Requested calibration was not found!" - assert(theta is not None), "Requested calibration was not found!" + assert flip is not None, "Requested calibration was not found!" + assert theta is not None, "Requested calibration was not found!" 
# rotation matrix - R = np.array([ - [np.cos(theta), -np.sin(theta)], - [np.sin(theta), np.cos(theta)]]) + R = np.array( + [[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]] + ) # apply if flip: positions = R @ np.vstack((ans["qy"], ans["qx"])) else: positions = R @ np.vstack((ans["qx"], ans["qy"])) # update - ans['qx'] = positions[0,:] - ans['qy'] = positions[1,:] - + ans["qx"] = positions[0, :] + ans["qy"] = positions[1, :] # return - return ans \ No newline at end of file + return ans diff --git a/py4DSTEM/braggvectors/diskdetection.py b/py4DSTEM/braggvectors/diskdetection.py index fb7755349..e23b10a15 100644 --- a/py4DSTEM/braggvectors/diskdetection.py +++ b/py4DSTEM/braggvectors/diskdetection.py @@ -13,38 +13,31 @@ from py4DSTEM.braggvectors.diskdetection_aiml import find_Bragg_disks_aiml - - def find_Bragg_disks( data, template, - - radial_bksb = False, - filter_function = None, - - corrPower = 1, - sigma = None, - sigma_dp = 0, - sigma_cc = 2, - subpixel = 'multicorr', - upsample_factor = 16, - - minAbsoluteIntensity = 0, - minRelativeIntensity = 0.005, - relativeToPeak = 0, - minPeakSpacing = 60, - edgeBoundary = 20, - maxNumPeaks = 70, - - CUDA = False, - CUDA_batched = True, - distributed = None, - - ML = False, - ml_model_path = None, - ml_num_attempts = 1, - ml_batch_size = 8, - ): + radial_bksb=False, + filter_function=None, + corrPower=1, + sigma=None, + sigma_dp=0, + sigma_cc=2, + subpixel="multicorr", + upsample_factor=16, + minAbsoluteIntensity=0, + minRelativeIntensity=0.005, + relativeToPeak=0, + minPeakSpacing=60, + edgeBoundary=20, + maxNumPeaks=70, + CUDA=False, + CUDA_batched=True, + distributed=None, + ML=False, + ml_model_path=None, + ml_num_attempts=1, + ml_batch_size=8, +): """ Finds the Bragg disks in the diffraction patterns represented by `data` by cross/phase correlation with `template`.
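The hunks below reformat the dispatch logic of `find_Bragg_disks`, which routes a call to CPU, GPU, distributed, or ML implementations based on the `data` type and flags. A hedged usage sketch against the signature above, where `datacube` and `probe_kernel` are hypothetical:

```python
# cross-correlate every diffraction pattern against the probe template
braggpeaks = find_Bragg_disks(
    data=datacube,          # a py4DSTEM DataCube
    template=probe_kernel,  # probe template; corrPower=1 -> cross correlation
    corrPower=1,
    subpixel="multicorr",
    minRelativeIntensity=0.005,
    maxNumPeaks=70,
)
```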
@@ -188,12 +181,12 @@ def find_Bragg_disks( # `data` type if isinstance(data, DataCube): - mode = 'datacube' + mode = "datacube" elif isinstance(data, np.ndarray): if data.ndim == 2: - mode = 'dp' + mode = "dp" elif data.ndim == 3: - mode = 'dp_stack' + mode = "dp_stack" else: er = f"if `data` is an array, must be 2- or 3-D, not {data.ndim}-D" raise Exception(er) @@ -201,33 +194,33 @@ def find_Bragg_disks( try: # when a position (rx,ry) is passed, get those patterns # and put them in a stack - dc,rx,ry = data[0],data[1],data[2] + dc, rx, ry = data[0], data[1], data[2] # h5py datasets have different rules for slicing than # numpy arrays, so we have to do this manually if "h5py" in str(type(dc.data)): - data = np.zeros((len(rx),dc.Q_Nx,dc.Q_Ny)) + data = np.zeros((len(rx), dc.Q_Nx, dc.Q_Ny)) # no background subtraction if not radial_bksb: - for i,(x,y) in enumerate(zip(rx,ry)): - data[i] = dc.data[x,y] + for i, (x, y) in enumerate(zip(rx, ry)): + data[i] = dc.data[x, y] # with bksubtr else: - for i,(x,y) in enumerate(zip(rx,ry)): - data[i] = dc.get_radial_bksb_dp(rx,ry) + for i, (x, y) in enumerate(zip(rx, ry)): + data[i] = dc.get_radial_bksb_dp(x, y) else: # no background subtraction if not radial_bksb: - data = dc.data[np.array(rx),np.array(ry),:,:] + data = dc.data[np.array(rx), np.array(ry), :, :] # with bksubtr else: - data = np.zeros((len(rx),dc.Q_Nx,dc.Q_Ny)) - for i,(x,y) in enumerate(zip(rx,ry)): - data[i] = dc.get_radial_bksb_dp(x,y) + data = np.zeros((len(rx), dc.Q_Nx, dc.Q_Ny)) + for i, (x, y) in enumerate(zip(rx, ry)): + data[i] = dc.get_radial_bksb_dp(x, y) if data.ndim == 2: - mode = 'dp' + mode = "dp" elif data.ndim == 3: - mode = 'dp_stack' + mode = "dp_stack" except: er = f"entry {data} for `data` could not be parsed" raise Exception(er) @@ -235,132 +228,121 @@ def find_Bragg_disks( # CPU/GPU/cluster/ML-AI if ML: - mode = 'dc_ml' + mode = "dc_ml" - elif mode == 'datacube': + elif mode == "datacube": if distributed is None and CUDA == False: - mode = 'dc_CPU' + mode = "dc_CPU" elif distributed is None and CUDA == True: if CUDA_batched == False: - mode = 'dc_GPU' + mode = "dc_GPU" else: - mode = 'dc_GPU_batched' + mode = "dc_GPU_batched" else: x = _parse_distributed(distributed) connect, data_file, cluster_path, distributed_mode = x - if distributed_mode == 'dask': - mode = 'dc_dask' - elif distributed_mode == 'ipyparallel': - mode = 'dc_ipyparallel' + if distributed_mode == "dask": + mode = "dc_dask" + elif distributed_mode == "ipyparallel": + mode = "dc_ipyparallel" else: er = f"unrecognized distributed mode {distributed_mode}" raise Exception(er) # overwrite if ML selected - # select a function fn_dict = { - "dp" : _find_Bragg_disks_single, - "dp_stack" : _find_Bragg_disks_stack, - "dc_CPU" : _find_Bragg_disks_CPU, - "dc_GPU" : _find_Bragg_disks_CUDA_unbatched, - "dc_GPU_batched" : _find_Bragg_disks_CUDA_batched, - "dc_dask" : _find_Bragg_disks_dask, - "dc_ipyparallel" : _find_Bragg_disks_ipp, - "dc_ml" : find_Bragg_disks_aiml + "dp": _find_Bragg_disks_single, + "dp_stack": _find_Bragg_disks_stack, + "dc_CPU": _find_Bragg_disks_CPU, + "dc_GPU": _find_Bragg_disks_CUDA_unbatched, + "dc_GPU_batched": _find_Bragg_disks_CUDA_batched, + "dc_dask": _find_Bragg_disks_dask, + "dc_ipyparallel": _find_Bragg_disks_ipp, + "dc_ml": find_Bragg_disks_aiml, } fn = fn_dict[mode] - # prepare kwargs kws = {} # distributed kwargs if distributed is not None: - kws['connect'] = connect - kws['data_file'] = data_file - kws['cluster_path'] = cluster_path + kws["connect"] = connect +
kws["data_file"] = data_file + kws["cluster_path"] = cluster_path # ML arguments if ML == True: - kws['CUDA'] = CUDA - kws['model_path'] = ml_model_path - kws['num_attempts'] = ml_num_attempts - kws['batch_size'] = ml_batch_size + kws["CUDA"] = CUDA + kws["model_path"] = ml_model_path + kws["num_attempts"] = ml_num_attempts + kws["batch_size"] = ml_batch_size # if radial background subtraction is requested, add to args - if radial_bksb and mode=='dc_CPU': - kws['radial_bksb'] = radial_bksb - + if radial_bksb and mode == "dc_CPU": + kws["radial_bksb"] = radial_bksb # run and return ans = fn( data, template, - filter_function = filter_function, - corrPower = corrPower, - sigma_dp = sigma_dp, - sigma_cc = sigma_cc, - subpixel = subpixel, - upsample_factor = upsample_factor, - minAbsoluteIntensity = minAbsoluteIntensity, - minRelativeIntensity = minRelativeIntensity, - relativeToPeak = relativeToPeak, - minPeakSpacing = minPeakSpacing, - edgeBoundary = edgeBoundary, - maxNumPeaks = maxNumPeaks, - **kws + filter_function=filter_function, + corrPower=corrPower, + sigma_dp=sigma_dp, + sigma_cc=sigma_cc, + subpixel=subpixel, + upsample_factor=upsample_factor, + minAbsoluteIntensity=minAbsoluteIntensity, + minRelativeIntensity=minRelativeIntensity, + relativeToPeak=relativeToPeak, + minPeakSpacing=minPeakSpacing, + edgeBoundary=edgeBoundary, + maxNumPeaks=maxNumPeaks, + **kws, ) return ans - - - - # Single diffraction pattern def _find_Bragg_disks_single( DP, template, - filter_function = None, - corrPower = 1, - sigma_dp = 0, - sigma_cc = 2, - subpixel = 'poly', - upsample_factor = 16, - minAbsoluteIntensity = 0, - minRelativeIntensity = 0, - relativeToPeak = 0, - minPeakSpacing = 0, - edgeBoundary = 1, - maxNumPeaks = 100, - _return_cc = False, - _template_space = 'real' - ): - - - # apply filter function + filter_function=None, + corrPower=1, + sigma_dp=0, + sigma_cc=2, + subpixel="poly", + upsample_factor=16, + minAbsoluteIntensity=0, + minRelativeIntensity=0, + relativeToPeak=0, + minPeakSpacing=0, + edgeBoundary=1, + maxNumPeaks=100, + _return_cc=False, + _template_space="real", +): + # apply filter function er = "filter_function must be callable" - if filter_function: assert callable(filter_function), er + if filter_function: + assert callable(filter_function), er DP = DP if filter_function is None else filter_function(DP) # check for a template if template is None: cc = DP else: - - # fourier transform the template - assert _template_space in ('real','fourier') - if _template_space == 'real': + assert _template_space in ("real", "fourier") + if _template_space == "real": template_FT = np.conj(np.fft.fft2(template)) else: template_FT = template - # apply any smoothing to the data - if sigma_dp>0: - DP = gaussian_filter( DP,sigma_dp ) + if sigma_dp > 0: + DP = gaussian_filter(DP, sigma_dp) # Compute cross correlation # _returnval = 'fourier' if subpixel == 'multicorr' else 'real' @@ -368,28 +350,26 @@ def _find_Bragg_disks_single( DP, template_FT, corrPower, - 'fourier', + "fourier", ) - # Get maxima maxima = get_maxima_2D( - np.maximum(np.real(np.fft.ifft2(cc)),0), - subpixel = subpixel, - upsample_factor = upsample_factor, - sigma = sigma_cc, - minAbsoluteIntensity = minAbsoluteIntensity, - minRelativeIntensity = minRelativeIntensity, - relativeToPeak = relativeToPeak, - minSpacing = minPeakSpacing, - edgeBoundary = edgeBoundary, - maxNumPeaks = maxNumPeaks, - _ar_FT = cc + np.maximum(np.real(np.fft.ifft2(cc)), 0), + subpixel=subpixel, + upsample_factor=upsample_factor, + sigma=sigma_cc, + 
minAbsoluteIntensity=minAbsoluteIntensity, + minRelativeIntensity=minRelativeIntensity, + relativeToPeak=relativeToPeak, + minSpacing=minPeakSpacing, + edgeBoundary=edgeBoundary, + maxNumPeaks=maxNumPeaks, + _ar_FT=cc, ) # Wrap as QPoints instance - maxima = QPoints( maxima ) - + maxima = QPoints(maxima) # Return if _return_cc is True: @@ -397,10 +377,7 @@ def _find_Bragg_disks_single( return maxima - - - -#def _get_cross_correlation_FT( +# def _get_cross_correlation_FT( # DP, # template_FT, # corrPower = 1, @@ -418,162 +395,146 @@ def _find_Bragg_disks_single( # return cc - - - - # 3D stack of DPs def _find_Bragg_disks_stack( dp_stack, template, - filter_function = None, - corrPower = 1, - sigma_dp = 0, - sigma_cc = 2, - subpixel = 'poly', - upsample_factor = 16, - minAbsoluteIntensity = 0, - minRelativeIntensity = 0, - relativeToPeak = 0, - minPeakSpacing = 0, - edgeBoundary = 1, - maxNumPeaks = 100, - _template_space = 'real' - ): - + filter_function=None, + corrPower=1, + sigma_dp=0, + sigma_cc=2, + subpixel="poly", + upsample_factor=16, + minAbsoluteIntensity=0, + minRelativeIntensity=0, + relativeToPeak=0, + minPeakSpacing=0, + edgeBoundary=1, + maxNumPeaks=100, + _template_space="real", +): ans = [] for idx in range(dp_stack.shape[0]): - - dp = dp_stack[idx,:,:] - peaks =_find_Bragg_disks_single( + dp = dp_stack[idx, :, :] + peaks = _find_Bragg_disks_single( dp, template, - filter_function = filter_function, - corrPower = corrPower, - sigma_dp = sigma_dp, - sigma_cc = sigma_cc, - subpixel = subpixel, - upsample_factor = upsample_factor, - minAbsoluteIntensity = minAbsoluteIntensity, - minRelativeIntensity = minRelativeIntensity, - relativeToPeak = relativeToPeak, - minPeakSpacing = minPeakSpacing, - edgeBoundary = edgeBoundary, - maxNumPeaks = maxNumPeaks, - _template_space = _template_space, - _return_cc = False + filter_function=filter_function, + corrPower=corrPower, + sigma_dp=sigma_dp, + sigma_cc=sigma_cc, + subpixel=subpixel, + upsample_factor=upsample_factor, + minAbsoluteIntensity=minAbsoluteIntensity, + minRelativeIntensity=minRelativeIntensity, + relativeToPeak=relativeToPeak, + minPeakSpacing=minPeakSpacing, + edgeBoundary=edgeBoundary, + maxNumPeaks=maxNumPeaks, + _template_space=_template_space, + _return_cc=False, ) ans.append(peaks) return ans - - - # Whole datacube, CPU def _find_Bragg_disks_CPU( datacube, probe, - filter_function = None, - corrPower = 1, - sigma_dp = 0, - sigma_cc = 2, - subpixel = 'multicorr', - upsample_factor = 16, - minAbsoluteIntensity = 0, - minRelativeIntensity = 0.005, - relativeToPeak = 0, - minPeakSpacing = 60, - edgeBoundary = 20, - maxNumPeaks = 70, - radial_bksb = False - ): - + filter_function=None, + corrPower=1, + sigma_dp=0, + sigma_cc=2, + subpixel="multicorr", + upsample_factor=16, + minAbsoluteIntensity=0, + minRelativeIntensity=0.005, + relativeToPeak=0, + minPeakSpacing=60, + edgeBoundary=20, + maxNumPeaks=70, + radial_bksb=False, +): # Make the BraggVectors instance - braggvectors = BraggVectors( datacube.Rshape, datacube.Qshape ) - + braggvectors = BraggVectors(datacube.Rshape, datacube.Qshape) # Get the template's Fourier Transform probe_kernel_FT = np.conj(np.fft.fft2(probe)) if probe is not None else None - # Loop over all diffraction patterns # Compute and populate BraggVectors data - for (rx,ry) in tqdmnd( + for rx, ry in tqdmnd( datacube.R_Nx, datacube.R_Ny, - desc='Finding Bragg Disks', - unit='DP', - unit_scale=True - ): - + desc="Finding Bragg Disks", + unit="DP", + unit_scale=True, + ): # Get a diffraction pattern # 
without background subtraction if not radial_bksb: - dp = datacube.data[rx,ry,:,:] + dp = datacube.data[rx, ry, :, :] # and with else: - dp = datacube.get_radial_bksb_dp(rx,ry) - + dp = datacube.get_radial_bksb_dp(rx, ry) # Compute - peaks =_find_Bragg_disks_single( + peaks = _find_Bragg_disks_single( dp, - template = probe_kernel_FT, - filter_function = filter_function, - corrPower = corrPower, - sigma_dp = sigma_dp, - sigma_cc = sigma_cc, - subpixel = subpixel, - upsample_factor = upsample_factor, - minAbsoluteIntensity = minAbsoluteIntensity, - minRelativeIntensity = minRelativeIntensity, - relativeToPeak = relativeToPeak, - minPeakSpacing = minPeakSpacing, - edgeBoundary = edgeBoundary, - maxNumPeaks = maxNumPeaks, - _return_cc = False, - _template_space = 'fourier' + template=probe_kernel_FT, + filter_function=filter_function, + corrPower=corrPower, + sigma_dp=sigma_dp, + sigma_cc=sigma_cc, + subpixel=subpixel, + upsample_factor=upsample_factor, + minAbsoluteIntensity=minAbsoluteIntensity, + minRelativeIntensity=minRelativeIntensity, + relativeToPeak=relativeToPeak, + minPeakSpacing=minPeakSpacing, + edgeBoundary=edgeBoundary, + maxNumPeaks=maxNumPeaks, + _return_cc=False, + _template_space="fourier", ) # Populate data - braggvectors._v_uncal[rx,ry] = peaks - + braggvectors._v_uncal[rx, ry] = peaks # Return return braggvectors - # CUDA - unbatched def _find_Bragg_disks_CUDA_unbatched( datacube, probe, - filter_function = None, - corrPower = 1, - sigma_dp = 0, - sigma_cc = 2, - subpixel = 'multicorr', - upsample_factor = 16, - minAbsoluteIntensity = 0, - minRelativeIntensity = 0.005, - relativeToPeak = 0, - minPeakSpacing = 60, - edgeBoundary = 20, - maxNumPeaks = 70, - ): - + filter_function=None, + corrPower=1, + sigma_dp=0, + sigma_cc=2, + subpixel="multicorr", + upsample_factor=16, + minAbsoluteIntensity=0, + minRelativeIntensity=0.005, + relativeToPeak=0, + minPeakSpacing=60, + edgeBoundary=20, + maxNumPeaks=70, +): # compute from py4DSTEM.braggvectors.diskdetection_cuda import find_Bragg_disks_CUDA + peaks = find_Bragg_disks_CUDA( datacube, probe, @@ -588,38 +549,39 @@ def _find_Bragg_disks_CUDA_unbatched( minPeakSpacing=minPeakSpacing, edgeBoundary=edgeBoundary, maxNumPeaks=maxNumPeaks, - batching=False) + batching=False, + ) # Populate a BraggVectors instance and return - braggvectors = BraggVectors( datacube.Rshape, datacube.Qshape ) + braggvectors = BraggVectors(datacube.Rshape, datacube.Qshape) braggvectors._v_uncal = peaks + braggvectors._set_raw_vector_getter() + braggvectors._set_cal_vector_getter() return braggvectors - - # CUDA - batched def _find_Bragg_disks_CUDA_batched( datacube, probe, - filter_function = None, - corrPower = 1, - sigma_dp = 0, - sigma_cc = 2, - subpixel = 'multicorr', - upsample_factor = 16, - minAbsoluteIntensity = 0, - minRelativeIntensity = 0.005, - relativeToPeak = 0, - minPeakSpacing = 60, - edgeBoundary = 20, - maxNumPeaks = 70, - ): - + filter_function=None, + corrPower=1, + sigma_dp=0, + sigma_cc=2, + subpixel="multicorr", + upsample_factor=16, + minAbsoluteIntensity=0, + minRelativeIntensity=0.005, + relativeToPeak=0, + minPeakSpacing=60, + edgeBoundary=20, + maxNumPeaks=70, +): # compute from py4DSTEM.braggvectors.diskdetection_cuda import find_Bragg_disks_CUDA + peaks = find_Bragg_disks_CUDA( datacube, probe, @@ -634,17 +596,17 @@ def _find_Bragg_disks_CUDA_batched( minPeakSpacing=minPeakSpacing, edgeBoundary=edgeBoundary, maxNumPeaks=maxNumPeaks, - batching=True) + batching=True, + ) # Populate a BraggVectors instance and return - 
braggvectors = BraggVectors( datacube.Rshape, datacube.Qshape ) + braggvectors = BraggVectors(datacube.Rshape, datacube.Qshape) braggvectors._v_uncal = peaks + braggvectors._set_raw_vector_getter() + braggvectors._set_cal_vector_getter() return braggvectors - - - # Distributed - ipyparallel @@ -654,22 +616,22 @@ def _find_Bragg_disks_ipp( connect, data_file, cluster_path, - filter_function = None, - corrPower = 1, - sigma_dp = 0, - sigma_cc = 2, - subpixel = 'multicorr', - upsample_factor = 16, - minAbsoluteIntensity = 0, - minRelativeIntensity = 0.005, - relativeToPeak = 0, - minPeakSpacing = 60, - edgeBoundary = 20, - maxNumPeaks = 70, - ): - + filter_function=None, + corrPower=1, + sigma_dp=0, + sigma_cc=2, + subpixel="multicorr", + upsample_factor=16, + minAbsoluteIntensity=0, + minRelativeIntensity=0.005, + relativeToPeak=0, + minPeakSpacing=60, + edgeBoundary=20, + maxNumPeaks=70, +): # compute from py4DSTEM.braggvectors.diskdetection_parallel import find_Bragg_disks_ipp + peaks = find_Bragg_disks_ipp( datacube, probe, @@ -686,42 +648,42 @@ def _find_Bragg_disks_ipp( maxNumPeaks=maxNumPeaks, ipyparallel_client_file=connect, data_file=data_file, - cluster_path=cluster_path - ) + cluster_path=cluster_path, + ) # Populate a BraggVectors instance and return - braggvectors = BraggVectors( datacube.Rshape, datacube.Qshape ) + braggvectors = BraggVectors(datacube.Rshape, datacube.Qshape) braggvectors._v_uncal = peaks + braggvectors._set_raw_vector_getter() + braggvectors._set_cal_vector_getter() return braggvectors - - - # Distributed - dask + def _find_Bragg_disks_dask( datacube, probe, connect, data_file, cluster_path, - filter_function = None, - corrPower = 1, - sigma_dp = 0, - sigma_cc = 2, - subpixel = 'multicorr', - upsample_factor = 16, - minAbsoluteIntensity = 0, - minRelativeIntensity = 0.005, - relativeToPeak = 0, - minPeakSpacing = 60, - edgeBoundary = 20, - maxNumPeaks = 70, - ): - + filter_function=None, + corrPower=1, + sigma_dp=0, + sigma_cc=2, + subpixel="multicorr", + upsample_factor=16, + minAbsoluteIntensity=0, + minRelativeIntensity=0.005, + relativeToPeak=0, + minPeakSpacing=60, + edgeBoundary=20, + maxNumPeaks=70, +): # compute from py4DSTEM.braggvectors.diskdetection_parallel import find_Bragg_disks_dask + peaks = find_Bragg_disks_dask( datacube, probe, @@ -738,21 +700,17 @@ def _find_Bragg_disks_dask( maxNumPeaks=maxNumPeaks, dask_client_file=connect, data_file=data_file, - cluster_path=cluster_path - ) + cluster_path=cluster_path, + ) # Populate a BraggVectors instance and return - braggvectors = BraggVectors( datacube.Rshape, datacube.Qshape ) + braggvectors = BraggVectors(datacube.Rshape, datacube.Qshape) braggvectors._v_uncal = peaks + braggvectors._set_raw_vector_getter() + braggvectors._set_cal_vector_getter() return braggvectors - - - - - - def _parse_distributed(distributed): """ Parse the `distributed` dict argument to determine distribution behavior @@ -761,16 +719,17 @@ def _parse_distributed(distributed): # parse mode (ipyparallel or dask) if "ipyparallel" in distributed: - mode = 'ipyparallel' + mode = "ipyparallel" if "client_file" in distributed["ipyparallel"]: connect = distributed["ipyparallel"]["client_file"] else: - er = "Within distributed[\"ipyparallel\"], " - er += "missing key for \"client_file\"" + er = 'Within distributed["ipyparallel"], ' + er += 'missing key for "client_file"' raise KeyError(er) try: import ipyparallel as ipp + c = ipp.Client(url_file=connect, timeout=30) if len(c.ids) == 0: @@ -780,18 +739,17 @@ def 
_parse_distributed(distributed): raise ImportError("Unable to import module ipyparallel!") elif "dask" in distributed: - mode = 'dask' + mode = "dask" if "client" in distributed["dask"]: connect = distributed["dask"]["client"] else: - er = "Within distributed[\"dask\"], missing key for \"client\"" + er = 'Within distributed["dask"], missing key for "client"' raise KeyError(er) else: er = "Within distributed, you must specify 'ipyparallel' or 'dask'!" raise KeyError(er) - # parse data file if "data_file" not in distributed: er = "Missing input data file path to distributed! " @@ -810,7 +768,6 @@ def _parse_distributed(distributed): elif not os.path.exists(data_file): raise FileNotFoundError("File not found") - # parse cluster path if "cluster_path" in distributed: cluster_path = distributed["cluster_path"] @@ -833,13 +790,5 @@ def _parse_distributed(distributed): else: cluster_path = None - # return return connect, data_file, cluster_path, mode - - - - - - - diff --git a/py4DSTEM/braggvectors/diskdetection_aiml.py b/py4DSTEM/braggvectors/diskdetection_aiml.py index 6d9bea623..67df18074 100644 --- a/py4DSTEM/braggvectors/diskdetection_aiml.py +++ b/py4DSTEM/braggvectors/diskdetection_aiml.py @@ -1,7 +1,7 @@ # Functions for finding Bragg disks using AI/ML pipeline -''' +""" Functions for finding Bragg disks using an AI/ML method with tensorflow -''' +""" import os import glob @@ -17,24 +17,29 @@ from py4DSTEM.braggvectors.braggvectors import BraggVectors from py4DSTEM.data import QPoints from py4DSTEM.process.utils import get_maxima_2D + # from py4DSTEM.braggvectors import universal_threshold -def find_Bragg_disks_aiml_single_DP(DP, probe, - num_attempts = 5, - int_window_radius = 1, - predict = True, - sigma = 0, - edgeBoundary = 20, - minRelativeIntensity = 0.005, - minAbsoluteIntensity = 0, - relativeToPeak = 0, - minPeakSpacing = 60, - maxNumPeaks = 70, - subpixel = 'multicorr', - upsample_factor = 16, - filter_function = None, - peaks = None, - model_path = None): + +def find_Bragg_disks_aiml_single_DP( + DP, + probe, + num_attempts=5, + int_window_radius=1, + predict=True, + sigma=0, + edgeBoundary=20, + minRelativeIntensity=0.005, + minAbsoluteIntensity=0, + relativeToPeak=0, + minPeakSpacing=60, + maxNumPeaks=70, + subpixel="multicorr", + upsample_factor=16, + filter_function=None, + peaks=None, + model_path=None, +): """ Finds the Bragg disks in a single DP by the AI/ML method. This method utilizes FCU-Net to predict Bragg disks from diffraction images.
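The single-pattern entry point above can be exercised on its own; a minimal sketch follows. The arrays are placeholders, the keywords are taken from the signature above, and predict=True additionally needs tensorflow plus the downloadable FCU-Net weights, so this is illustrative rather than a tested call.

import numpy as np
from py4DSTEM.braggvectors.diskdetection_aiml import find_Bragg_disks_aiml_single_DP

DP = np.random.rand(256, 256)     # stand-in for a measured diffraction pattern
probe = np.random.rand(256, 256)  # stand-in for the vacuum probe image

peaks = find_Bragg_disks_aiml_single_DP(
    DP,
    probe,
    num_attempts=5,        # averages several stochastic forward passes
    predict=True,          # False skips the network and peak-finds DP directly
    subpixel="multicorr",
    upsample_factor=16,
    minPeakSpacing=60,
    maxNumPeaks=70,
)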
@@ -103,40 +108,62 @@ def find_Bragg_disks_aiml_single_DP(DP, probe, try: import tensorflow as tf except: - raise ImportError("Please install tensorflow before proceeding - please check " + "https://www.tensorflow.org/install" + "for more information") - - assert subpixel in [ 'none', 'poly', 'multicorr' ], "Unrecognized subpixel option {}, subpixel must be 'none', 'poly', or 'multicorr'".format(subpixel) + raise ImportError( + "Please install tensorflow before proceeding - please check " + + "https://www.tensorflow.org/install" + + " for more information" + ) + + assert subpixel in [ + "none", + "poly", + "multicorr", + ], "Unrecognized subpixel option {}, subpixel must be 'none', 'poly', or 'multicorr'".format( + subpixel + ) # Perform any prefiltering - if filter_function: assert callable(filter_function), "filter_function must be callable" + if filter_function: + assert callable(filter_function), "filter_function must be callable" DP = DP if filter_function is None else filter_function(DP) if predict: - assert(len(DP.shape)==2), "Dimension of single diffraction should be 2 (Qx, Qy)" - assert(len(probe.shape)==2), "Dimension of probe should be 2 (Qx, Qy)" - model = _get_latest_model(model_path = model_path) + assert ( + len(DP.shape) == 2 + ), "Dimension of single diffraction should be 2 (Qx, Qy)" + assert len(probe.shape) == 2, "Dimension of probe should be 2 (Qx, Qy)" + model = _get_latest_model(model_path=model_path) DP = tf.expand_dims(tf.expand_dims(DP, axis=0), axis=-1) probe = tf.expand_dims(tf.expand_dims(probe, axis=0), axis=-1) - prediction = np.zeros(shape = (1, DP.shape[1],DP.shape[2],1)) - - for i in tqdmnd(num_attempts, desc='Neural network is predicting atomic potential', unit='ATTEMPTS',unit_scale=True): - prediction += model.predict([DP,probe]) - print('Averaging over {} attempts \n'.format(num_attempts)) - pred = prediction[0,:,:,0]/num_attempts + prediction = np.zeros(shape=(1, DP.shape[1], DP.shape[2], 1)) + + for i in tqdmnd( + num_attempts, + desc="Neural network is predicting atomic potential", + unit="ATTEMPTS", + unit_scale=True, + ): + prediction += model.predict([DP, probe]) + print("Averaging over {} attempts \n".format(num_attempts)) + pred = prediction[0, :, :, 0] / num_attempts else: - assert(len(DP.shape)==2), "Dimension of single diffraction should be 2 (Qx, Qy)" + assert ( + len(DP.shape) == 2 + ), "Dimension of single diffraction should be 2 (Qx, Qy)" pred = DP - maxima = get_maxima_2D(pred, - sigma = sigma, - minRelativeIntensity=minRelativeIntensity, - minAbsoluteIntensity=minAbsoluteIntensity, - edgeBoundary=edgeBoundary, - relativeToPeak=relativeToPeak, - maxNumPeaks=maxNumPeaks, - minSpacing = minPeakSpacing, - subpixel=subpixel, - upsample_factor=upsample_factor) + maxima = get_maxima_2D( + pred, + sigma=sigma, + minRelativeIntensity=minRelativeIntensity, + minAbsoluteIntensity=minAbsoluteIntensity, + edgeBoundary=edgeBoundary, + relativeToPeak=relativeToPeak, + maxNumPeaks=maxNumPeaks, + minSpacing=minPeakSpacing, + subpixel=subpixel, + upsample_factor=upsample_factor, + ) # maxima_x, maxima_y, maxima_int = _integrate_disks(pred, maxima_x,maxima_y,maxima_int,int_window_radius=int_window_radius) @@ -151,22 +178,27 @@ def find_Bragg_disks_aiml_single_DP(DP, probe, return maxima -def find_Bragg_disks_aiml_selected(datacube, probe, Rx, Ry, - num_attempts = 5, - int_window_radius = 1, - batch_size = 1, - predict =True, - sigma = 0, - edgeBoundary = 20, - minRelativeIntensity = 0.005, - minAbsoluteIntensity = 0, - relativeToPeak = 0, - minPeakSpacing = 
60, - maxNumPeaks = 70, - subpixel = 'multicorr', - upsample_factor = 16, - filter_function = None, - model_path = None): +def find_Bragg_disks_aiml_selected( + datacube, + probe, + Rx, + Ry, + num_attempts=5, + int_window_radius=1, + batch_size=1, + predict=True, + sigma=0, + edgeBoundary=20, + minRelativeIntensity=0.005, + minAbsoluteIntensity=0, + relativeToPeak=0, + minPeakSpacing=60, + maxNumPeaks=70, + subpixel="multicorr", + upsample_factor=16, + filter_function=None, + model_path=None, +): """ Finds the Bragg disks in the diffraction patterns of datacube at scan positions (Rx,Ry) by AI/ML method. This method utilizes FCU-Net to predict Bragg @@ -212,7 +244,7 @@ def find_Bragg_disks_aiml_selected(datacube, probe, Rx, Ry, detected peaks are added to, and must have the appropriate coords ('qx','qy','intensity'). model_path (str): filepath for the model weights (Tensorflow model) to load from. - By default, if the model_path is not provided, py4DSTEM will search for the + By default, if the model_path is not provided, py4DSTEM will search for the latest model stored on cloud using metadata json file. It is not recommended to keep track of the model path and advised to keep this argument unchanged (None) to always search for the latest updated training model weights. @@ -227,78 +259,109 @@ def find_Bragg_disks_aiml_selected(datacube, probe, Rx, Ry, except: raise ImportError("Import Error: Please install crystal4D before proceeding") - assert(len(Rx)==len(Ry)) + assert len(Rx) == len(Ry) peaks = [] if predict: - model = _get_latest_model(model_path = model_path) - t0= time() - probe = np.expand_dims(np.repeat(np.expand_dims(probe, axis=0), - len(Rx), axis=0), axis=-1) - DP = np.expand_dims(np.expand_dims(datacube.data[Rx[0],Ry[0],:,:], axis=0), axis=-1) + model = _get_latest_model(model_path=model_path) + t0 = time() + probe = np.expand_dims( + np.repeat(np.expand_dims(probe, axis=0), len(Rx), axis=0), axis=-1 + ) + DP = np.expand_dims( + np.expand_dims(datacube.data[Rx[0], Ry[0], :, :], axis=0), axis=-1 + ) total_DP = len(Rx) - for i in range(1,len(Rx)): - DP_ = np.expand_dims(np.expand_dims(datacube.data[Rx[i],Ry[i],:,:], axis=0), axis=-1) - DP = np.concatenate([DP,DP_], axis=0) + for i in range(1, len(Rx)): + DP_ = np.expand_dims( + np.expand_dims(datacube.data[Rx[i], Ry[i], :, :], axis=0), axis=-1 + ) + DP = np.concatenate([DP, DP_], axis=0) - prediction = np.zeros(shape = (total_DP, datacube.Q_Nx, datacube.Q_Ny, 1)) + prediction = np.zeros(shape=(total_DP, datacube.Q_Nx, datacube.Q_Ny, 1)) image_num = len(Rx) - batch_num = int(image_num//batch_size) - - for att in tqdmnd(num_attempts, desc='Neural network is predicting structure factors', unit='ATTEMPTS',unit_scale=True): + batch_num = int(image_num // batch_size) + + for att in tqdmnd( + num_attempts, + desc="Neural network is predicting structure factors", + unit="ATTEMPTS", + unit_scale=True, + ): for i in range(batch_num): - prediction[i*batch_size:(i+1)*batch_size] += model.predict([DP[i*batch_size:(i+1)*batch_size],probe[i*batch_size:(i+1)*batch_size]], verbose=0) - if (i+1)*batch_size < image_num: - prediction[(i+1)*batch_size:] += model.predict([DP[(i+1)*batch_size:],probe[(i+1)*batch_size:]], verbose=0) - - prediction = prediction/num_attempts + prediction[i * batch_size : (i + 1) * batch_size] += model.predict( + [ + DP[i * batch_size : (i + 1) * batch_size], + probe[i * batch_size : (i + 1) * batch_size], + ], + verbose=0, + ) + if (i + 1) * batch_size < image_num: + prediction[(i + 1) * batch_size :] += model.predict( + 
[DP[(i + 1) * batch_size :], probe[(i + 1) * batch_size :]], + verbose=0, + ) + + prediction = prediction / num_attempts # Loop over selected diffraction patterns - for Rx in tqdmnd(image_num,desc='Finding Bragg Disks using AI/ML',unit='DP',unit_scale=True): - DP = prediction[Rx,:,:,0] - _peaks = find_Bragg_disks_aiml_single_DP(DP, probe, - int_window_radius = int_window_radius, - predict = False, - sigma = sigma, - edgeBoundary=edgeBoundary, - minRelativeIntensity=minRelativeIntensity, - minAbsoluteIntensity=minAbsoluteIntensity, - relativeToPeak=relativeToPeak, - minPeakSpacing=minPeakSpacing, - maxNumPeaks=maxNumPeaks, - subpixel=subpixel, - upsample_factor=upsample_factor, - filter_function=filter_function, - model_path=model_path) + for Rx in tqdmnd( + image_num, desc="Finding Bragg Disks using AI/ML", unit="DP", unit_scale=True + ): + DP = prediction[Rx, :, :, 0] + _peaks = find_Bragg_disks_aiml_single_DP( + DP, + probe, + int_window_radius=int_window_radius, + predict=False, + sigma=sigma, + edgeBoundary=edgeBoundary, + minRelativeIntensity=minRelativeIntensity, + minAbsoluteIntensity=minAbsoluteIntensity, + relativeToPeak=relativeToPeak, + minPeakSpacing=minPeakSpacing, + maxNumPeaks=maxNumPeaks, + subpixel=subpixel, + upsample_factor=upsample_factor, + filter_function=filter_function, + model_path=model_path, + ) peaks.append(_peaks) - t2 = time()-t0 - print("Analyzed {} diffraction patterns in {}h {}m {}s".format(image_num, int(t2/3600), - int(t2/60), int(t2%60))) + t2 = time() - t0 + print( + "Analyzed {} diffraction patterns in {}h {}m {}s".format( + image_num, int(t2 / 3600), int(t2 / 60), int(t2 % 60) + ) + ) peaks = tuple(peaks) return peaks -def find_Bragg_disks_aiml_serial(datacube, probe, - num_attempts = 5, - int_window_radius = 1, - predict =True, - batch_size = 2, - sigma = 0, - edgeBoundary = 20, - minRelativeIntensity = 0.005, - minAbsoluteIntensity = 0, - relativeToPeak = 0, - minPeakSpacing = 60, - maxNumPeaks = 70, - subpixel = 'multicorr', - upsample_factor = 16, - global_threshold = False, - minGlobalIntensity = 0.005, - metric = 'mean', - filter_function = None, - name = 'braggpeaks_raw', - model_path = None,): + +def find_Bragg_disks_aiml_serial( + datacube, + probe, + num_attempts=5, + int_window_radius=1, + predict=True, + batch_size=2, + sigma=0, + edgeBoundary=20, + minRelativeIntensity=0.005, + minAbsoluteIntensity=0, + relativeToPeak=0, + minPeakSpacing=60, + maxNumPeaks=70, + subpixel="multicorr", + upsample_factor=16, + global_threshold=False, + minGlobalIntensity=0.005, + metric="mean", + filter_function=None, + name="braggpeaks_raw", + model_path=None, +): """ Finds the Bragg disks in all diffraction patterns of datacube from AI/ML method. When hist = True, returns histogram of intensities in the entire datacube. 
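The hunk below rewrites find_Bragg_disks_aiml_serial's batched prediction; the least obvious step for review is the transpose/reshape that maps the network's flattened (R_N, Q_Nx, Q_Ny, 1) output back onto the (R_Nx, R_Ny) scan grid. A runnable toy sketch of just that bookkeeping, with illustrative sizes, mirroring the np.transpose/np.reshape calls in the hunk:

import numpy as np

R_Nx, R_Ny, Q_Nx, Q_Ny = 2, 3, 8, 8
prediction = np.zeros((R_Nx * R_Ny, Q_Nx, Q_Ny, 1))  # flattened network output

prediction = np.reshape(
    np.transpose(prediction, (0, 3, 1, 2)),  # -> (R_N, 1, Q_Nx, Q_Ny)
    (R_Nx, R_Ny, Q_Nx, Q_Ny),                # -> scan-indexed 4D array
)
assert prediction.shape == (R_Nx, R_Ny, Q_Nx, Q_Ny)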
@@ -378,85 +441,126 @@ def find_Bragg_disks_aiml_serial(datacube, probe, peaks = BraggVectors(datacube.Rshape, datacube.Qshape) # check that the filtered DP is the right size for the probe kernel: - if filter_function: assert callable(filter_function), "filter_function must be callable" - DP = datacube.data[0,0,:,:] if filter_function is None else filter_function(datacube.data[0,0,:,:]) - #assert np.all(DP.shape == probe.shape), 'Probe kernel shape must match filtered DP shape' + if filter_function: + assert callable(filter_function), "filter_function must be callable" + DP = ( + datacube.data[0, 0, :, :] + if filter_function is None + else filter_function(datacube.data[0, 0, :, :]) + ) + # assert np.all(DP.shape == probe.shape), 'Probe kernel shape must match filtered DP shape' if predict: - t0=time() - model = _get_latest_model(model_path = model_path) - probe = np.expand_dims(np.repeat(np.expand_dims(probe, axis=0), - datacube.R_N, axis=0), axis=-1) - DP = np.expand_dims(np.reshape(datacube.data, - (datacube.R_N,datacube.Q_Nx,datacube.Q_Ny)), axis = -1) - - prediction = np.zeros(shape = (datacube.R_N, datacube.Q_Nx, datacube.Q_Ny, 1)) + t0 = time() + model = _get_latest_model(model_path=model_path) + probe = np.expand_dims( + np.repeat(np.expand_dims(probe, axis=0), datacube.R_N, axis=0), axis=-1 + ) + DP = np.expand_dims( + np.reshape(datacube.data, (datacube.R_N, datacube.Q_Nx, datacube.Q_Ny)), + axis=-1, + ) + + prediction = np.zeros(shape=(datacube.R_N, datacube.Q_Nx, datacube.Q_Ny, 1)) image_num = datacube.R_N - batch_num = int(image_num//batch_size) - - for att in tqdmnd(num_attempts, desc='Neural network is predicting structure factors', unit='ATTEMPTS',unit_scale=True): + batch_num = int(image_num // batch_size) + + for att in tqdmnd( + num_attempts, + desc="Neural network is predicting structure factors", + unit="ATTEMPTS", + unit_scale=True, + ): for i in range(batch_num): - prediction[i*batch_size:(i+1)*batch_size] += model.predict([DP[i*batch_size:(i+1)*batch_size],probe[i*batch_size:(i+1)*batch_size]], verbose =0) - if (i+1)*batch_size < image_num: - prediction[(i+1)*batch_size:] += model.predict([DP[(i+1)*batch_size:],probe[(i+1)*batch_size:]], verbose =0) - - prediction = prediction/num_attempts - - prediction = np.reshape(np.transpose(prediction, (0,3,1,2)), - (datacube.R_Nx, datacube.R_Ny, datacube.Q_Nx, datacube.Q_Ny)) + prediction[i * batch_size : (i + 1) * batch_size] += model.predict( + [ + DP[i * batch_size : (i + 1) * batch_size], + probe[i * batch_size : (i + 1) * batch_size], + ], + verbose=0, + ) + if (i + 1) * batch_size < image_num: + prediction[(i + 1) * batch_size :] += model.predict( + [DP[(i + 1) * batch_size :], probe[(i + 1) * batch_size :]], + verbose=0, + ) + + prediction = prediction / num_attempts + + prediction = np.reshape( + np.transpose(prediction, (0, 3, 1, 2)), + (datacube.R_Nx, datacube.R_Ny, datacube.Q_Nx, datacube.Q_Ny), + ) # Loop over all diffraction patterns - for (Rx,Ry) in tqdmnd(datacube.R_Nx,datacube.R_Ny,desc='Finding Bragg Disks using AI/ML',unit='DP',unit_scale=True): - DP_ = prediction[Rx,Ry,:,:] - find_Bragg_disks_aiml_single_DP(DP_, probe, - num_attempts = num_attempts, - int_window_radius = int_window_radius, - predict = False, - sigma = sigma, - edgeBoundary=edgeBoundary, - minRelativeIntensity=minRelativeIntensity, - minAbsoluteIntensity=minAbsoluteIntensity, - relativeToPeak=relativeToPeak, - minPeakSpacing=minPeakSpacing, - maxNumPeaks=maxNumPeaks, - subpixel=subpixel, - upsample_factor=upsample_factor, - 
filter_function=filter_function, - peaks = peaks.vectors_uncal.get_pointlist(Rx,Ry), - model_path=model_path) - t2 = time()-t0 - print("Analyzed {} diffraction patterns in {}h {}m {}s".format(datacube.R_N, int(t2/3600), - int(t2/60), int(t2%60))) + for Rx, Ry in tqdmnd( + datacube.R_Nx, + datacube.R_Ny, + desc="Finding Bragg Disks using AI/ML", + unit="DP", + unit_scale=True, + ): + DP_ = prediction[Rx, Ry, :, :] + find_Bragg_disks_aiml_single_DP( + DP_, + probe, + num_attempts=num_attempts, + int_window_radius=int_window_radius, + predict=False, + sigma=sigma, + edgeBoundary=edgeBoundary, + minRelativeIntensity=minRelativeIntensity, + minAbsoluteIntensity=minAbsoluteIntensity, + relativeToPeak=relativeToPeak, + minPeakSpacing=minPeakSpacing, + maxNumPeaks=maxNumPeaks, + subpixel=subpixel, + upsample_factor=upsample_factor, + filter_function=filter_function, + peaks=peaks.vectors_uncal.get_pointlist(Rx, Ry), + model_path=model_path, + ) + t2 = time() - t0 + print( + "Analyzed {} diffraction patterns in {}h {}m {}s".format( + datacube.R_N, int(t2 / 3600), int(t2 / 60), int(t2 % 60) + ) + ) if global_threshold == True: from py4DSTEM.braggvectors import universal_threshold - peaks = universal_threshold(peaks, minGlobalIntensity, metric, minPeakSpacing, - maxNumPeaks) + peaks = universal_threshold( + peaks, minGlobalIntensity, metric, minPeakSpacing, maxNumPeaks + ) peaks.name = name return peaks -def find_Bragg_disks_aiml(datacube, probe, - num_attempts = 5, - int_window_radius = 1, - predict = True, - batch_size = 8, - sigma = 0, - edgeBoundary = 20, - minRelativeIntensity = 0.005, - minAbsoluteIntensity = 0, - relativeToPeak = 0, - minPeakSpacing = 60, - maxNumPeaks = 70, - subpixel = 'multicorr', - upsample_factor = 16, - name = 'braggpeaks_raw', - filter_function = None, - model_path = None, - distributed = None, - CUDA = True, - **kwargs): + +def find_Bragg_disks_aiml( + datacube, + probe, + num_attempts=5, + int_window_radius=1, + predict=True, + batch_size=8, + sigma=0, + edgeBoundary=20, + minRelativeIntensity=0.005, + minAbsoluteIntensity=0, + relativeToPeak=0, + minPeakSpacing=60, + maxNumPeaks=70, + subpixel="multicorr", + upsample_factor=16, + name="braggpeaks_raw", + filter_function=None, + model_path=None, + distributed=None, + CUDA=True, + **kwargs +): """ Finds the Bragg disks in all diffraction patterns of datacube by AI/ML method. This method utilizes FCU-Net to predict Bragg disks from diffraction images. @@ -549,10 +653,13 @@ def _parse_distributed(distributed): if "client_file" in distributed["ipyparallel"]: connect = distributed["ipyparallel"]["client_file"] else: - raise KeyError("Within distributed[\"ipyparallel\"], missing key for \"client_file\"") + raise KeyError( + 'Within distributed["ipyparallel"], missing key for "client_file"' + ) try: import ipyparallel as ipp + c = ipp.Client(url_file=connect, timeout=30) if len(c.ids) == 0: @@ -563,18 +670,25 @@ def _parse_distributed(distributed): if "client" in distributed["dask"]: connect = distributed["dask"]["client"] else: - raise KeyError("Within distributed[\"dask\"], missing key for \"client\"") + raise KeyError('Within distributed["dask"], missing key for "client"') else: raise KeyError( - "Within distributed, you must specify 'ipyparallel' or 'dask'!") + "Within distributed, you must specify 'ipyparallel' or 'dask'!" + ) if "data_file" not in distributed: - raise KeyError("Missing input data file path to distributed! Required key 'data_file'") + raise KeyError( + "Missing input data file path to distributed! 
Required key 'data_file'" + ) data_file = distributed["data_file"] if not isinstance(data_file, str): - raise TypeError("Expected string for distributed key 'data_file', received {}".format(type(data_file))) + raise TypeError( + "Expected string for distributed key 'data_file', received {}".format( + type(data_file) + ) + ) if len(data_file.strip()) == 0: raise ValueError("Empty data file path from distributed key 'data_file'") elif not os.path.exists(data_file): @@ -585,14 +699,27 @@ def _parse_distributed(distributed): if not isinstance(cluster_path, str): raise TypeError( - "distributed key 'cluster_path' must be of type str, received {}".format(type(cluster_path))) + "distributed key 'cluster_path' must be of type str, received {}".format( + type(cluster_path) + ) + ) if len(cluster_path.strip()) == 0: - raise ValueError("distributed key 'cluster_path' cannot be an empty string!") + raise ValueError( + "distributed key 'cluster_path' cannot be an empty string!" + ) elif not os.path.exists(cluster_path): - raise FileNotFoundError("distributed key 'cluster_path' does not exist: {}".format(cluster_path)) + raise FileNotFoundError( + "distributed key 'cluster_path' does not exist: {}".format( + cluster_path + ) + ) elif not os.path.isdir(cluster_path): - raise NotADirectoryError("distributed key 'cluster_path' is not a directory: {}".format(cluster_path)) + raise NotADirectoryError( + "distributed key 'cluster_path' is not a directory: {}".format( + cluster_path + ) + ) else: cluster_path = None @@ -600,96 +727,123 @@ def _parse_distributed(distributed): if distributed is None: import warnings + if not CUDA: if _check_cuda_device_available(): - warnings.warn('WARNING: CUDA = False is selected but py4DSTEM found available CUDA device to speed up. Going ahead anyway with non-CUDA mode (CPU only). You may want to abort and switch to CUDA = True to speed things up... \n') + warnings.warn( + "WARNING: CUDA = False is selected but py4DSTEM found available CUDA device to speed up. Going ahead anyway with non-CUDA mode (CPU only). You may want to abort and switch to CUDA = True to speed things up... \n" + ) if num_attempts > 1: - warnings.warn('WARNING: num_attempts > 1 will take significant amount of time with Non-CUDA mode ...') - return find_Bragg_disks_aiml_serial(datacube, - probe, - num_attempts = num_attempts, - int_window_radius = int_window_radius, - predict = predict, - batch_size = batch_size, - sigma = sigma, - edgeBoundary=edgeBoundary, - minRelativeIntensity=minRelativeIntensity, - minAbsoluteIntensity=minAbsoluteIntensity, - relativeToPeak=relativeToPeak, - minPeakSpacing=minPeakSpacing, - maxNumPeaks=maxNumPeaks, - subpixel=subpixel, - upsample_factor=upsample_factor, - model_path=model_path, - name=name, - filter_function=filter_function) + warnings.warn( + "WARNING: num_attempts > 1 will take significant amount of time with Non-CUDA mode ..." 
+ ) + return find_Bragg_disks_aiml_serial( + datacube, + probe, + num_attempts=num_attempts, + int_window_radius=int_window_radius, + predict=predict, + batch_size=batch_size, + sigma=sigma, + edgeBoundary=edgeBoundary, + minRelativeIntensity=minRelativeIntensity, + minAbsoluteIntensity=minAbsoluteIntensity, + relativeToPeak=relativeToPeak, + minPeakSpacing=minPeakSpacing, + maxNumPeaks=maxNumPeaks, + subpixel=subpixel, + upsample_factor=upsample_factor, + model_path=model_path, + name=name, + filter_function=filter_function, + ) elif _check_cuda_device_available(): - from py4DSTEM.braggvectors.diskdetection_aiml_cuda import find_Bragg_disks_aiml_CUDA - return find_Bragg_disks_aiml_CUDA(datacube, - probe, - num_attempts = num_attempts, - int_window_radius = int_window_radius, - predict = predict, - batch_size = batch_size, - sigma = sigma, - edgeBoundary=edgeBoundary, - minRelativeIntensity=minRelativeIntensity, - minAbsoluteIntensity=minAbsoluteIntensity, - relativeToPeak=relativeToPeak, - minPeakSpacing=minPeakSpacing, - maxNumPeaks=maxNumPeaks, - subpixel=subpixel, - upsample_factor=upsample_factor, - model_path=model_path, - name=name, - filter_function=filter_function) + from py4DSTEM.braggvectors.diskdetection_aiml_cuda import ( + find_Bragg_disks_aiml_CUDA, + ) + + return find_Bragg_disks_aiml_CUDA( + datacube, + probe, + num_attempts=num_attempts, + int_window_radius=int_window_radius, + predict=predict, + batch_size=batch_size, + sigma=sigma, + edgeBoundary=edgeBoundary, + minRelativeIntensity=minRelativeIntensity, + minAbsoluteIntensity=minAbsoluteIntensity, + relativeToPeak=relativeToPeak, + minPeakSpacing=minPeakSpacing, + maxNumPeaks=maxNumPeaks, + subpixel=subpixel, + upsample_factor=upsample_factor, + model_path=model_path, + name=name, + filter_function=filter_function, + ) else: import warnings - warnings.warn('WARNING: py4DSTEM attempted to speed up the process using GPUs but no CUDA enabled devices are found. Switching back to Non-CUDA (CPU only) mode (Note it will take significant amount of time to get AIML predictions for disk detection using CPUs!!!!) \n') + + warnings.warn( + "WARNING: py4DSTEM attempted to speed up the process using GPUs but no CUDA enabled devices are found. Switching back to Non-CUDA (CPU only) mode (Note it will take significant amount of time to get AIML predictions for disk detection using CPUs!!!!) \n" + ) if num_attempts > 1: - warnings.warn('WARNING: num_attempts > 1 will take significant amount of time with Non-CUDA mode ...') - return find_Bragg_disks_aiml_serial(datacube, - probe, - num_attempts = num_attempts, - int_window_radius = int_window_radius, - predict = predict, - batch_size = batch_size, - sigma = sigma, - edgeBoundary=edgeBoundary, - minRelativeIntensity=minRelativeIntensity, - minAbsoluteIntensity=minAbsoluteIntensity, - relativeToPeak=relativeToPeak, - minPeakSpacing=minPeakSpacing, - maxNumPeaks=maxNumPeaks, - subpixel=subpixel, - upsample_factor=upsample_factor, - model_path=model_path, - name=name, - filter_function=filter_function) + warnings.warn( + "WARNING: num_attempts > 1 will take significant amount of time with Non-CUDA mode ..." 
+ ) + return find_Bragg_disks_aiml_serial( + datacube, + probe, + num_attempts=num_attempts, + int_window_radius=int_window_radius, + predict=predict, + batch_size=batch_size, + sigma=sigma, + edgeBoundary=edgeBoundary, + minRelativeIntensity=minRelativeIntensity, + minAbsoluteIntensity=minAbsoluteIntensity, + relativeToPeak=relativeToPeak, + minPeakSpacing=minPeakSpacing, + maxNumPeaks=maxNumPeaks, + subpixel=subpixel, + upsample_factor=upsample_factor, + model_path=model_path, + name=name, + filter_function=filter_function, + ) elif isinstance(distributed, dict): - raise Exception("{} is not yet implemented for aiml pipeline".format(type(distributed))) + raise Exception( + "{} is not yet implemented for aiml pipeline".format(type(distributed)) + ) else: - raise Exception("Expected type dict or None for distributed, instead found : {}".format(type(distributed))) + raise Exception( + "Expected type dict or None for distributed, instead found : {}".format( + type(distributed) + ) + ) + -def _integrate_disks(DP, maxima_x,maxima_y,maxima_int,int_window_radius=1): +def _integrate_disks(DP, maxima_x, maxima_y, maxima_int, int_window_radius=1): """ Integrate DP over a circular patch of pixels of radius int_window_radius """ disks = [] img_size = DP.shape[0] - for x,y,i in zip(maxima_x,maxima_y,maxima_int): - r1,r2 = np.ogrid[-x:img_size-x, -y:img_size-y] + for x, y, i in zip(maxima_x, maxima_y, maxima_int): + r1, r2 = np.ogrid[-x : img_size - x, -y : img_size - y] mask = r1**2 + r2**2 <= int_window_radius**2 mask_arr = np.zeros((img_size, img_size)) mask_arr[mask] = 1 - disk = DP*mask_arr + disk = DP * mask_arr disks.append(np.average(disk)) try: - disks = disks/max(disks) + disks = disks / max(disks) except: pass - return (maxima_x,maxima_y,disks) + return (maxima_x, maxima_y, disks) + def _check_cuda_device_available(): """ @@ -698,14 +852,15 @@ def _check_cuda_device_available(): import tensorflow as tf - tf_recog_gpus = tf.config.experimental.list_physical_devices('GPU') + tf_recog_gpus = tf.config.experimental.list_physical_devices("GPU") - if len(tf_recog_gpus) >0: + if len(tf_recog_gpus) > 0: return True else: return False -def _get_latest_model(model_path = None): + +def _get_latest_model(model_path=None): """ get the latest tensorflow model and model weights for disk detection @@ -720,56 +875,67 @@ def _get_latest_model(model_path = None): model: Trained tensorflow model for disk detection """ import crystal4D + try: import tensorflow as tf except: - raise ImportError("Please install tensorflow before proceeding - please check " + "https://www.tensorflow.org/install" + "for more information") + raise ImportError( + "Please install tensorflow before proceeding - please check " + + "https://www.tensorflow.org/install" + + " for more information" + ) from py4DSTEM.io.google_drive_downloader import download_file_from_google_drive + tf.keras.backend.clear_session() if model_path is None: try: - os.mkdir('./tmp') + os.mkdir("./tmp") except: pass # download the json file with the meta data - download_file_from_google_drive('FCU-Net','./tmp/model_metadata.json') - with open('./tmp/model_metadata.json') as f: + download_file_from_google_drive("FCU-Net", "./tmp/model_metadata.json") + with open("./tmp/model_metadata.json") as f: metadata = json.load(f) - file_id = metadata['file_id'] - file_path = metadata['file_path'] - file_type = metadata['file_type'] + file_id = metadata["file_id"] + file_path = metadata["file_path"] + file_type = metadata["file_type"] try: - with open('./tmp/model_metadata_old.json') as 
f_old: + with open("./tmp/model_metadata_old.json") as f_old: metaold = json.load(f_old) - file_id_old = metaold['file_id'] + file_id_old = metaold["file_id"] except: file_id_old = file_id if os.path.exists(file_path) and file_id == file_id_old: - print('Latest model weight is already available in the local system. Loading the model... \n') + print( + "Latest model weight is already available in the local system. Loading the model... \n" + ) model_path = file_path - os.remove('./tmp/model_metadata_old.json') - os.rename('./tmp/model_metadata.json', './tmp/model_metadata_old.json') + os.remove("./tmp/model_metadata_old.json") + os.rename("./tmp/model_metadata.json", "./tmp/model_metadata_old.json") else: - print('Checking the latest model on the cloud... \n') + print("Checking the latest model on the cloud... \n") filename = file_path + file_type - download_file_from_google_drive(file_id,filename) + download_file_from_google_drive(file_id, filename) try: - shutil.unpack_archive(filename, './tmp' ,format="zip") + shutil.unpack_archive(filename, "./tmp", format="zip") except: pass model_path = file_path - os.rename('./tmp/model_metadata.json', './tmp/model_metadata_old.json') - print('Loading the model... \n') + os.rename("./tmp/model_metadata.json", "./tmp/model_metadata_old.json") + print("Loading the model... \n") model = tf.keras.models.load_model( model_path, - custom_objects={'lrScheduler': crystal4D.utils.utils.lrScheduler(128)}) + custom_objects={"lrScheduler": crystal4D.utils.utils.lrScheduler(128)}, + ) else: - print('Loading the user provided model... \n') - model = tf.keras.models.load_model(model_path, - custom_objects={'lrScheduler': crystal4D.utils.utils.lrScheduler(128)}) + print("Loading the user provided model... \n") + model = tf.keras.models.load_model( + model_path, + custom_objects={"lrScheduler": crystal4D.utils.utils.lrScheduler(128)}, + ) return model diff --git a/py4DSTEM/braggvectors/diskdetection_aiml_cuda.py b/py4DSTEM/braggvectors/diskdetection_aiml_cuda.py index d714feda9..d0f550dcc 100644 --- a/py4DSTEM/braggvectors/diskdetection_aiml_cuda.py +++ b/py4DSTEM/braggvectors/diskdetection_aiml_cuda.py @@ -1,7 +1,7 @@ # Functions for finding Bragg disks using AI/ML pipeline (CUDA version) -''' +""" Functions for finding Bragg disks (AI/ML) using cupy and tensorflow-gpu -''' +""" import numpy as np from time import time @@ -12,6 +12,7 @@ from py4DSTEM.data import QPoints from py4DSTEM.braggvectors.kernels import kernels from py4DSTEM.braggvectors.diskdetection_aiml import _get_latest_model + # from py4DSTEM.braggvectors.diskdetection import universal_threshold try: @@ -22,33 +23,38 @@ try: import tensorflow as tf except: - raise ImportError("Please install tensorflow before proceeding - please check " + "https://www.tensorflow.org/install" + "for more information") + raise ImportError( + "Please install tensorflow before proceeding - please check " + + "https://www.tensorflow.org/install" + + " for more information" + ) from cupyx.scipy.ndimage import gaussian_filter - - -def find_Bragg_disks_aiml_CUDA(datacube, probe, - num_attempts = 5, - int_window_radius = 1, - predict = True, - batch_size = 8, - sigma = 0, - edgeBoundary = 20, - minRelativeIntensity = 0.005, - minAbsoluteIntensity = 0, - relativeToPeak = 0, - minPeakSpacing = 60, - maxNumPeaks = 70, - subpixel = 'multicorr', - upsample_factor = 16, - global_threshold = False, - minGlobalIntensity = 0.005, - metric = 'mean', - filter_function = None, - name = 'braggpeaks_raw', - model_path=None): +def 
find_Bragg_disks_aiml_CUDA( + datacube, + probe, + num_attempts=5, + int_window_radius=1, + predict=True, + batch_size=8, + sigma=0, + edgeBoundary=20, + minRelativeIntensity=0.005, + minAbsoluteIntensity=0, + relativeToPeak=0, + minPeakSpacing=60, + maxNumPeaks=70, + subpixel="multicorr", + upsample_factor=16, + global_threshold=False, + minGlobalIntensity=0.005, + metric="mean", + filter_function=None, + name="braggpeaks_raw", + model_path=None, +): """ Finds the Bragg disks in all diffraction patterns of datacube by AI/ML method (CUDA version) This method utilizes FCU-Net to predict Bragg disks from diffraction images. @@ -123,31 +129,45 @@ def find_Bragg_disks_aiml_CUDA(datacube, probe, peaks = BraggVectors(datacube.Rshape, datacube.Qshape) # check that the filtered DP is the right size for the probe kernel: - if filter_function: assert callable(filter_function), "filter_function must be callable" - DP = datacube.data[0,0,:,:] if filter_function is None else filter_function(datacube.data[0,0,:,:]) - assert np.all(DP.shape == probe.shape), 'Probe kernel shape must match filtered DP shape' + if filter_function: + assert callable(filter_function), "filter_function must be callable" + DP = ( + datacube.data[0, 0, :, :] + if filter_function is None + else filter_function(datacube.data[0, 0, :, :]) + ) + assert np.all( + DP.shape == probe.shape + ), "Probe kernel shape must match filtered DP shape" - get_maximal_points = kernels['maximal_pts_float64'] + get_maximal_points = kernels["maximal_pts_float64"] if get_maximal_points.max_threads_per_block < DP.shape[1]: - blocks = ((np.prod(DP.shape)//get_maximal_points.max_threads_per_block + 1),) - threads = ((get_maximal_points.max_threads_per_block)) + blocks = ((np.prod(DP.shape) // get_maximal_points.max_threads_per_block + 1),) + threads = get_maximal_points.max_threads_per_block else: blocks = (DP.shape[0],) threads = (DP.shape[1],) if predict: t0 = time() - model = _get_latest_model(model_path = model_path) - prediction = np.zeros(shape = (datacube.R_N, datacube.Q_Nx, datacube.Q_Ny, 1)) + model = _get_latest_model(model_path=model_path) + prediction = np.zeros(shape=(datacube.R_N, datacube.Q_Nx, datacube.Q_Ny, 1)) image_num = datacube.R_N - batch_num = int(image_num//batch_size) + batch_num = int(image_num // batch_size) datacube_flattened = datacube.data.view() - datacube_flattened = datacube_flattened.reshape(datacube.R_N,datacube.Q_Nx,datacube.Q_Ny) - - for att in tqdmnd(num_attempts, desc='Neural network is predicting structure factors', unit='ATTEMPTS',unit_scale=True): + datacube_flattened = datacube_flattened.reshape( + datacube.R_N, datacube.Q_Nx, datacube.Q_Ny + ) + + for att in tqdmnd( + num_attempts, + desc="Neural network is predicting structure factors", + unit="ATTEMPTS", + unit_scale=True, + ): for batch_idx in range(batch_num): # the final batch may be smaller than the other ones: probes_remaining = datacube.R_N - (batch_idx * batch_size) @@ -155,73 +175,98 @@ def find_Bragg_disks_aiml_CUDA(datacube, probe, probes_remaining if probes_remaining < batch_size else batch_size ) DP = tf.expand_dims( - datacube_flattened[batch_idx*batch_size:batch_idx*batch_size + this_batch_size], - axis = -1) + datacube_flattened[ + batch_idx * batch_size : batch_idx * batch_size + + this_batch_size + ], + axis=-1, + ) _probe = tf.expand_dims( tf.repeat(tf.expand_dims(probe, axis=0), this_batch_size, axis=0), - axis = -1) - prediction[batch_idx*batch_size:batch_idx*batch_size+this_batch_size] += model.predict( - [DP,_probe]) - - print('Averaging 
over {} attempts \n'.format(num_attempts)) - prediction = prediction/num_attempts + axis=-1, + ) + prediction[ + batch_idx * batch_size : batch_idx * batch_size + this_batch_size + ] += model.predict([DP, _probe]) - prediction = np.reshape(np.transpose(prediction, (0,3,1,2)), - (datacube.R_Nx, datacube.R_Ny, datacube.Q_Nx, datacube.Q_Ny)) + print("Averaging over {} attempts \n".format(num_attempts)) + prediction = prediction / num_attempts + prediction = np.reshape( + np.transpose(prediction, (0, 3, 1, 2)), + (datacube.R_Nx, datacube.R_Ny, datacube.Q_Nx, datacube.Q_Ny), + ) # Loop over all diffraction patterns - for (Rx,Ry) in tqdmnd(datacube.R_Nx,datacube.R_Ny,desc='Finding Bragg Disks using AI/ML CUDA',unit='DP',unit_scale=True): - DP = prediction[Rx,Ry,:,:] - _find_Bragg_disks_aiml_single_DP_CUDA(DP, probe, - num_attempts = num_attempts, - int_window_radius = int_window_radius, - predict = False, - sigma = sigma, - edgeBoundary = edgeBoundary, - minRelativeIntensity = minRelativeIntensity, - minAbsoluteIntensity=minAbsoluteIntensity, - relativeToPeak = relativeToPeak, - minPeakSpacing = minPeakSpacing, - maxNumPeaks = maxNumPeaks, - subpixel = subpixel, - upsample_factor = upsample_factor, - filter_function = filter_function, - peaks = peaks.vectors_uncal.get_pointlist(Rx,Ry), - get_maximal_points = get_maximal_points, - blocks = blocks, - threads = threads) - t2 = time()-t0 - print("Analyzed {} diffraction patterns in {}h {}m {}s".format(datacube.R_N, int(t2/3600), - int(t2/60), int(t2%60))) + for Rx, Ry in tqdmnd( + datacube.R_Nx, + datacube.R_Ny, + desc="Finding Bragg Disks using AI/ML CUDA", + unit="DP", + unit_scale=True, + ): + DP = prediction[Rx, Ry, :, :] + _find_Bragg_disks_aiml_single_DP_CUDA( + DP, + probe, + num_attempts=num_attempts, + int_window_radius=int_window_radius, + predict=False, + sigma=sigma, + edgeBoundary=edgeBoundary, + minRelativeIntensity=minRelativeIntensity, + minAbsoluteIntensity=minAbsoluteIntensity, + relativeToPeak=relativeToPeak, + minPeakSpacing=minPeakSpacing, + maxNumPeaks=maxNumPeaks, + subpixel=subpixel, + upsample_factor=upsample_factor, + filter_function=filter_function, + peaks=peaks.vectors_uncal.get_pointlist(Rx, Ry), + get_maximal_points=get_maximal_points, + blocks=blocks, + threads=threads, + ) + t2 = time() - t0 + print( + "Analyzed {} diffraction patterns in {}h {}m {}s".format( + datacube.R_N, int(t2 / 3600), int(t2 / 60), int(t2 % 60) + ) + ) if global_threshold == True: from py4DSTEM.braggvectors import universal_threshold - peaks = universal_threshold(peaks, minGlobalIntensity, metric, minPeakSpacing, - maxNumPeaks) + + peaks = universal_threshold( + peaks, minGlobalIntensity, metric, minPeakSpacing, maxNumPeaks + ) peaks.name = name return peaks -def _find_Bragg_disks_aiml_single_DP_CUDA(DP, probe, - num_attempts = 5, - int_window_radius = 1, - predict = True, - sigma = 0, - edgeBoundary = 20, - minRelativeIntensity = 0.005, - minAbsoluteIntensity = 0, - relativeToPeak = 0, - minPeakSpacing = 60, - maxNumPeaks = 70, - subpixel = 'multicorr', - upsample_factor = 16, - filter_function = None, - return_cc = False, - peaks = None, - get_maximal_points = None, - blocks = None, - threads = None, - model_path=None, - **kwargs): + +def _find_Bragg_disks_aiml_single_DP_CUDA( + DP, + probe, + num_attempts=5, + int_window_radius=1, + predict=True, + sigma=0, + edgeBoundary=20, + minRelativeIntensity=0.005, + minAbsoluteIntensity=0, + relativeToPeak=0, + minPeakSpacing=60, + maxNumPeaks=70, + subpixel="multicorr", + upsample_factor=16, + 
filter_function=None, + return_cc=False, + peaks=None, + get_maximal_points=None, + blocks=None, + threads=None, + model_path=None, + **kwargs ): """ Finds the Bragg disks in single DP by AI/ML method. This method utilizes FCU-Net to predict Bragg disks from diffraction images. @@ -275,75 +320,99 @@ def _find_Bragg_disks_aiml_single_DP_CUDA(DP, probe, detected peaks are added to, and must have the appropriate coords ('qx','qy','intensity'). model_path (str): filepath for the model weights (Tensorflow model) to load from. - By default, if the model_path is not provided, py4DSTEM will search for the + By default, if the model_path is not provided, py4DSTEM will search for the latest model stored on the cloud using a metadata json file. It is not recommended to keep track of the model path; it is advised to keep this argument unchanged (None) to always search for the latest updated training model weights. Returns: peaks (PointList) the Bragg peak positions and correlation intensities - """ - assert subpixel in [ 'none', 'poly', 'multicorr' ], "Unrecognized subpixel option {}, subpixel must be 'none', 'poly', or 'multicorr'".format(subpixel) + """ + assert subpixel in [ + "none", + "poly", + "multicorr", + ], "Unrecognized subpixel option {}, subpixel must be 'none', 'poly', or 'multicorr'".format( + subpixel + ) if predict: - assert(len(DP.shape)==2), "Dimension of single diffraction should be 2 (Qx, Qy)" - assert(len(probe.shape)==2), "Dimension of Probe should be 2 (Qx, Qy)" + assert ( + len(DP.shape) == 2 + ), "Dimension of single diffraction should be 2 (Qx, Qy)" + assert len(probe.shape) == 2, "Dimension of Probe should be 2 (Qx, Qy)" - model = _get_latest_model(model_path = model_path) + model = _get_latest_model(model_path=model_path) DP = tf.expand_dims(tf.expand_dims(DP, axis=0), axis=-1) probe = tf.expand_dims(tf.expand_dims(probe, axis=0), axis=-1) - prediction = np.zeros(shape = (1, DP.shape[1],DP.shape[2],1)) - - for att in tqdmnd(num_attempts, desc='Neural network is predicting structure factors', unit='ATTEMPTS',unit_scale=True): - print('attempt {} \n'.format(att+1)) - prediction += model.predict([DP,probe]) - print('Averaging over {} attempts \n'.format(num_attempts)) - pred = cp.array(prediction[0,:,:,0]/num_attempts,dtype='float64') + prediction = np.zeros(shape=(1, DP.shape[1], DP.shape[2], 1)) + + for att in tqdmnd( + num_attempts, + desc="Neural network is predicting structure factors", + unit="ATTEMPTS", + unit_scale=True, + ): + print("attempt {} \n".format(att + 1)) + prediction += model.predict([DP, probe]) + print("Averaging over {} attempts \n".format(num_attempts)) + pred = cp.array(prediction[0, :, :, 0] / num_attempts, dtype="float64") else: - assert(len(DP.shape)==2), "Dimension of single diffraction should be 2 (Qx, Qy)" - pred = cp.array(DP if filter_function is None else filter_function(DP),dtype='float64') + assert ( + len(DP.shape) == 2 + ), "Dimension of single diffraction should be 2 (Qx, Qy)" + pred = cp.array( + DP if filter_function is None else filter_function(DP), dtype="float64" + ) # Find the maxima - maxima_x,maxima_y,maxima_int = get_maxima_2D_cp(pred, - sigma=sigma, - edgeBoundary=edgeBoundary, - minRelativeIntensity=minRelativeIntensity, - minAbsoluteIntensity=minAbsoluteIntensity, - relativeToPeak=relativeToPeak, - minSpacing=minPeakSpacing, - maxNumPeaks=maxNumPeaks, - subpixel=subpixel, - upsample_factor = upsample_factor, - get_maximal_points=get_maximal_points, - blocks=blocks, threads=threads) - - maxima_x, maxima_y, maxima_int = 
_integrate_disks_cp(pred, maxima_x,maxima_y,maxima_int,int_window_radius=int_window_radius) + maxima_x, maxima_y, maxima_int = get_maxima_2D_cp( + pred, + sigma=sigma, + edgeBoundary=edgeBoundary, + minRelativeIntensity=minRelativeIntensity, + minAbsoluteIntensity=minAbsoluteIntensity, + relativeToPeak=relativeToPeak, + minSpacing=minPeakSpacing, + maxNumPeaks=maxNumPeaks, + subpixel=subpixel, + upsample_factor=upsample_factor, + get_maximal_points=get_maximal_points, + blocks=blocks, + threads=threads, + ) + + maxima_x, maxima_y, maxima_int = _integrate_disks_cp( + pred, maxima_x, maxima_y, maxima_int, int_window_radius=int_window_radius + ) # Make peaks PointList if peaks is None: - coords = [('qx',float),('qy',float),('intensity',float)] + coords = [("qx", float), ("qy", float), ("intensity", float)] peaks = PointList(coordinates=coords) else: - assert(isinstance(peaks,PointList)) + assert isinstance(peaks, PointList) peaks.add_data_by_field((maxima_x, maxima_y, maxima_int)) return peaks -def get_maxima_2D_cp(ar, - sigma=0, - edgeBoundary=0, - minSpacing=0, - minRelativeIntensity=0, - minAbsoluteIntensity=0, - relativeToPeak=0, - maxNumPeaks=0, - subpixel='poly', - ar_FT = None, - upsample_factor=16, - get_maximal_points=None, - blocks=None, - threads=None): +def get_maxima_2D_cp( + ar, + sigma=0, + edgeBoundary=0, + minSpacing=0, + minRelativeIntensity=0, + minAbsoluteIntensity=0, + relativeToPeak=0, + maxNumPeaks=0, + subpixel="poly", + ar_FT=None, + upsample_factor=16, + get_maximal_points=None, + blocks=None, + threads=None, +): """ Finds the indices where the 2D array ar is a local maximum. Optional parameters allow blurring of the array and filtering of the output; @@ -377,16 +446,24 @@ def get_maxima_2D_cp(ar, maxima_y (ndarray) y-coords of the local maximum, sorted by intensity. 
maxima_intensity (ndarray) intensity of the local maxima """ - assert subpixel in [ 'none', 'poly', 'multicorr' ], "Unrecognized subpixel option {}, subpixel must be 'none', 'poly', or 'multicorr'".format(subpixel) + assert subpixel in [ + "none", + "poly", + "multicorr", + ], "Unrecognized subpixel option {}, subpixel must be 'none', 'poly', or 'multicorr'".format( + subpixel + ) # Get maxima ar = gaussian_filter(ar, sigma) - maxima_bool = cp.zeros_like(ar,dtype=bool) + maxima_bool = cp.zeros_like(ar, dtype=bool) sizex = ar.shape[0] sizey = ar.shape[1] - N = sizex*sizey - get_maximal_points(blocks, threads, (ar, maxima_bool, minAbsoluteIntensity, sizex, sizey, N)) - #get_maximal_points(blocks,threads,(ar,maxima_bool,sizex,sizey,N)) + N = sizex * sizey + get_maximal_points( + blocks, threads, (ar, maxima_bool, minAbsoluteIntensity, sizex, sizey, N) + ) + # get_maximal_points(blocks,threads,(ar,maxima_bool,sizex,sizey,N)) # Remove edges if edgeBoundary > 0: @@ -405,14 +482,14 @@ def get_maxima_2D_cp(ar, maxima_x, maxima_y = cp.nonzero(maxima_bool) maxima_x = maxima_x.get() maxima_y = maxima_y.get() - dtype = np.dtype([('x', float), ('y', float), ('intensity', float)]) + dtype = np.dtype([("x", float), ("y", float), ("intensity", float)]) maxima = np.zeros(len(maxima_x), dtype=dtype) - maxima['x'] = maxima_x - maxima['y'] = maxima_y + maxima["x"] = maxima_x + maxima["y"] = maxima_y ar = ar.get() - maxima['intensity'] = ar[maxima_x, maxima_y] - maxima = np.sort(maxima, order='intensity')[::-1] + maxima["intensity"] = ar[maxima_x, maxima_y] + maxima = np.sort(maxima, order="intensity")[::-1] if len(maxima) > 0: # Remove maxima which are too close @@ -420,21 +497,26 @@ def get_maxima_2D_cp(ar, deletemask = np.zeros(len(maxima), dtype=bool) for i in range(len(maxima)): if deletemask[i] == False: - tooClose = ((maxima['x'] - maxima['x'][i]) ** 2 + \ - (maxima['y'] - maxima['y'][i]) ** 2) < minSpacing ** 2 - tooClose[:i + 1] = False + tooClose = ( + (maxima["x"] - maxima["x"][i]) ** 2 + + (maxima["y"] - maxima["y"][i]) ** 2 + ) < minSpacing**2 + tooClose[: i + 1] = False deletemask[tooClose] = True maxima = np.delete(maxima, np.nonzero(deletemask)[0]) # Remove maxima which are too dim if (minRelativeIntensity > 0) & (len(maxima) > relativeToPeak): assert isinstance(relativeToPeak, (int, np.integer)) - deletemask = maxima['intensity'] / maxima['intensity'][relativeToPeak] < minRelativeIntensity + deletemask = ( + maxima["intensity"] / maxima["intensity"][relativeToPeak] + < minRelativeIntensity + ) maxima = np.delete(maxima, np.nonzero(deletemask)[0]) # Remove maxima which are too dim, absolute scale - if (minAbsoluteIntensity > 0): - deletemask = maxima['intensity'] < minAbsoluteIntensity + if minAbsoluteIntensity > 0: + deletemask = maxima["intensity"] < minAbsoluteIntensity maxima = np.delete(maxima, np.nonzero(deletemask)[0]) # Remove maxima in excess of maxNumPeaks @@ -443,45 +525,46 @@ def get_maxima_2D_cp(ar, if len(maxima) > maxNumPeaks: maxima = maxima[:maxNumPeaks] - - # Subpixel fitting + # Subpixel fitting # For all subpixel fitting, first fit 1D parabolas in x and y to 3 points (maximum, +/- 1 pixel) - if subpixel != 'none': + if subpixel != "none": for i in range(len(maxima)): - Ix1_ = ar[int(maxima['x'][i]) - 1, int(maxima['y'][i])] - Ix0 = ar[int(maxima['x'][i]), int(maxima['y'][i])] - Ix1 = ar[int(maxima['x'][i]) + 1, int(maxima['y'][i])] - Iy1_ = ar[int(maxima['x'][i]), int(maxima['y'][i]) - 1] - Iy0 = ar[int(maxima['x'][i]), int(maxima['y'][i])] - Iy1 = ar[int(maxima['x'][i]), 
int(maxima['y'][i]) + 1] + Ix1_ = ar[int(maxima["x"][i]) - 1, int(maxima["y"][i])] + Ix0 = ar[int(maxima["x"][i]), int(maxima["y"][i])] + Ix1 = ar[int(maxima["x"][i]) + 1, int(maxima["y"][i])] + Iy1_ = ar[int(maxima["x"][i]), int(maxima["y"][i]) - 1] + Iy0 = ar[int(maxima["x"][i]), int(maxima["y"][i])] + Iy1 = ar[int(maxima["x"][i]), int(maxima["y"][i]) + 1] deltax = (Ix1 - Ix1_) / (4 * Ix0 - 2 * Ix1 - 2 * Ix1_) deltay = (Iy1 - Iy1_) / (4 * Iy0 - 2 * Iy1 - 2 * Iy1_) - maxima['x'][i] += deltax - maxima['y'][i] += deltay - maxima['intensity'][i] = linear_interpolation_2D_cp(ar, maxima['x'][i], maxima['y'][i]) + maxima["x"][i] += deltax + maxima["y"][i] += deltay + maxima["intensity"][i] = linear_interpolation_2D_cp( + ar, maxima["x"][i], maxima["y"][i] + ) # Further refinement with fourier upsampling - if subpixel == 'multicorr': + if subpixel == "multicorr": if ar_FT is None: ar_FT = cp.conj(cp.fft.fft2(cp.array(ar))) else: ar_FT = cp.conj(ar_FT) - for ipeak in range(len(maxima['x'])): - xyShift = np.array((maxima['x'][ipeak],maxima['y'][ipeak])) + for ipeak in range(len(maxima["x"])): + xyShift = np.array((maxima["x"][ipeak], maxima["y"][ipeak])) # we actually have to lose some precision and go down to half-pixel # accuracy. this could also be done by a single upsampling at factor 2 # instead of get_maxima_2D_cp. xyShift[0] = np.round(xyShift[0] * 2) / 2 xyShift[1] = np.round(xyShift[1] * 2) / 2 - subShift = upsampled_correlation_cp(ar_FT,upsample_factor,xyShift) - maxima['x'][ipeak]=subShift[0] - maxima['y'][ipeak]=subShift[1] + subShift = upsampled_correlation_cp(ar_FT, upsample_factor, xyShift) + maxima["x"][ipeak] = subShift[0] + maxima["y"][ipeak] = subShift[1] - return maxima['x'], maxima['y'], maxima['intensity'] + return maxima["x"], maxima["y"], maxima["intensity"] def upsampled_correlation_cp(imageCorr, upsampleFactor, xyShift): - ''' + """ Refine the correlation peak of imageCorr around xyShift by DFT upsampling using cupy. Args: @@ -499,56 +582,66 @@ def upsampled_correlation_cp(imageCorr, upsampleFactor, xyShift): Returns: (2-element np array): Refined location of the peak in image coordinates. - ''' - - #------------------------------------------------------------------------------------- - #There are two approaches to Fourier upsampling for subpixel refinement: (a) one - #can pad an (appropriately shifted) FFT with zeros and take the inverse transform, - #or (b) one can compute the DFT by matrix multiplication using modified - #transformation matrices. The former approach is straightforward but requires - #performing the FFT algorithm (which is fast) on very large data. The latter method - #trades one speedup for a slowdown elsewhere: the matrix multiply steps are expensive - #but we operate on smaller matrices. Since we are only interested in a very small - #region of the FT around a peak of interest, we use the latter method to get - #a substantial speedup and enormous decrease in memory requirement. This - #"DFT upsampling" approach computes the transformation matrices for the matrix- - #multiply DFT around a small 1.5px wide region in the original `imageCorr`. - - #Following the matrix multiply DFT we use parabolic subpixel fitting to - #get even more precision! (below 1/upsampleFactor pixels) - - #NOTE: previous versions of multiCorr operated in two steps: using the zero- - #padding upsample method for a first-pass factor-2 upsampling, followed by the - #DFT upsampling (at whatever user-specified factor). 
I have implemented it - #differently, to better support iterating over multiple peaks. **The DFT is always - #upsampled around xyShift, which MUST be specified to HALF-PIXEL precision - #(no more, no less) to replicate the behavior of the factor-2 step.** - #(It is possible to refactor this so that peak detection is done on a Fourier - #upsampled image rather than using the parabolic subpixel and rounding as now... - #I like keeping it this way because all of the parameters and logic will be identical - #to the other subpixel methods.) - #------------------------------------------------------------------------------------- + """ + + # ------------------------------------------------------------------------------------- + # There are two approaches to Fourier upsampling for subpixel refinement: (a) one + # can pad an (appropriately shifted) FFT with zeros and take the inverse transform, + # or (b) one can compute the DFT by matrix multiplication using modified + # transformation matrices. The former approach is straightforward but requires + # performing the FFT algorithm (which is fast) on very large data. The latter method + # trades one speedup for a slowdown elsewhere: the matrix multiply steps are expensive + # but we operate on smaller matrices. Since we are only interested in a very small + # region of the FT around a peak of interest, we use the latter method to get + # a substantial speedup and enormous decrease in memory requirement. This + # "DFT upsampling" approach computes the transformation matrices for the matrix- + # multiply DFT around a small 1.5px wide region in the original `imageCorr`. + + # Following the matrix multiply DFT we use parabolic subpixel fitting to + # get even more precision! (below 1/upsampleFactor pixels) + + # NOTE: previous versions of multiCorr operated in two steps: using the zero- + # padding upsample method for a first-pass factor-2 upsampling, followed by the + # DFT upsampling (at whatever user-specified factor). I have implemented it + # differently, to better support iterating over multiple peaks. **The DFT is always + # upsampled around xyShift, which MUST be specified to HALF-PIXEL precision + # (no more, no less) to replicate the behavior of the factor-2 step.** + # (It is possible to refactor this so that peak detection is done on a Fourier + # upsampled image rather than using the parabolic subpixel and rounding as now... + # I like keeping it this way because all of the parameters and logic will be identical + # to the other subpixel methods.) 
+ # ------------------------------------------------------------------------------------- assert upsampleFactor > 2 xyShift[0] = np.round(xyShift[0] * upsampleFactor) / upsampleFactor xyShift[1] = np.round(xyShift[1] * upsampleFactor) / upsampleFactor - globalShift = np.fix(np.ceil(upsampleFactor * 1.5)/2) + globalShift = np.fix(np.ceil(upsampleFactor * 1.5) / 2) - upsampleCenter = globalShift - upsampleFactor*xyShift + upsampleCenter = globalShift - upsampleFactor * xyShift - imageCorrUpsample = cp.conj(dftUpsample_cp(imageCorr, upsampleFactor, upsampleCenter )).get() + imageCorrUpsample = cp.conj( + dftUpsample_cp(imageCorr, upsampleFactor, upsampleCenter) + ).get() xySubShift = np.unravel_index(imageCorrUpsample.argmax(), imageCorrUpsample.shape) # add a subpixel shift via parabolic fitting try: - icc = np.real(imageCorrUpsample[xySubShift[0] - 1 : xySubShift[0] + 2, xySubShift[1] - 1 : xySubShift[1] + 2]) - dx = (icc[2,1] - icc[0,1]) / (4 * icc[1,1] - 2 * icc[2,1] - 2 * icc[0,1]) - dy = (icc[1,2] - icc[1,0]) / (4 * icc[1,1] - 2 * icc[1,2] - 2 * icc[1,0]) + icc = np.real( + imageCorrUpsample[ + xySubShift[0] - 1 : xySubShift[0] + 2, + xySubShift[1] - 1 : xySubShift[1] + 2, + ] + ) + dx = (icc[2, 1] - icc[0, 1]) / (4 * icc[1, 1] - 2 * icc[2, 1] - 2 * icc[0, 1]) + dy = (icc[1, 2] - icc[1, 0]) / (4 * icc[1, 1] - 2 * icc[1, 2] - 2 * icc[1, 0]) except: - dx, dy = 0, 0 # this is the case when the peak is near the edge and one of the above values does not exist + dx, dy = ( + 0, + 0, + ) # this is the case when the peak is near the edge and one of the above values does not exist xySubShift = xySubShift - globalShift @@ -558,7 +651,7 @@ def upsampled_correlation_cp(imageCorr, upsampleFactor, xyShift): def dftUpsample_cp(imageCorr, upsampleFactor, xyShift): - ''' + """ This performs a matrix multiply DFT around a small neighboring region of the initial correlation peak. By using the matrix multiply DFT to do the Fourier upsampling, the efficiency is greatly improved. This is adapted from the subfunction dftups found in @@ -584,25 +677,32 @@ def dftUpsample_cp(imageCorr, upsampleFactor, xyShift): Returns: (ndarray): Upsampled image from region around correlation peak.
- ''' + """ imageSize = imageCorr.shape pixelRadius = 1.5 numRow = np.ceil(pixelRadius * upsampleFactor) numCol = numRow colKern = cp.exp( - (-1j * 2 * cp.pi / (imageSize[1] * upsampleFactor)) - * cp.outer( (cp.fft.ifftshift( (cp.arange(imageSize[1])) ) - cp.floor(imageSize[1]/2)), (cp.arange(numCol) - xyShift[1])) + (-1j * 2 * cp.pi / (imageSize[1] * upsampleFactor)) + * cp.outer( + (cp.fft.ifftshift((cp.arange(imageSize[1]))) - cp.floor(imageSize[1] / 2)), + (cp.arange(numCol) - xyShift[1]), + ) ) rowKern = cp.exp( - (-1j * 2 * cp.pi / (imageSize[0] * upsampleFactor)) - * cp.outer( (cp.arange(numRow) - xyShift[0]), (cp.fft.ifftshift(cp.arange(imageSize[0])) - cp.floor(imageSize[0]/2))) + (-1j * 2 * cp.pi / (imageSize[0] * upsampleFactor)) + * cp.outer( + (cp.arange(numRow) - xyShift[0]), + (cp.fft.ifftshift(cp.arange(imageSize[0])) - cp.floor(imageSize[0] / 2)), + ) ) imageUpsample = cp.real(rowKern @ imageCorr @ colKern) return imageUpsample + def linear_interpolation_2D_cp(ar, x, y): """ Calculates the 2D linear interpolation of array ar at position x,y using the four @@ -612,22 +712,27 @@ def linear_interpolation_2D_cp(ar, x, y): y0, y1 = int(np.floor(y)), int(np.ceil(y)) dx = x - x0 dy = y - y0 - return (1 - dx) * (1 - dy) * ar[x0, y0] + (1 - dx) * dy * ar[x0, y1] + dx * (1 - dy) * ar[x1, y0] + dx * dy * ar[ - x1, y1] + return ( + (1 - dx) * (1 - dy) * ar[x0, y0] + + (1 - dx) * dy * ar[x0, y1] + + dx * (1 - dy) * ar[x1, y0] + + dx * dy * ar[x1, y1] + ) + -def _integrate_disks_cp(DP, maxima_x,maxima_y,maxima_int,int_window_radius=1): +def _integrate_disks_cp(DP, maxima_x, maxima_y, maxima_int, int_window_radius=1): disks = [] DP = cp.asnumpy(DP) img_size = DP.shape[0] - for x,y,i in zip(maxima_x,maxima_y,maxima_int): - r1,r2 = np.ogrid[-x:img_size-x, -y:img_size-y] + for x, y, i in zip(maxima_x, maxima_y, maxima_int): + r1, r2 = np.ogrid[-x : img_size - x, -y : img_size - y] mask = r1**2 + r2**2 <= int_window_radius**2 mask_arr = np.zeros((img_size, img_size)) mask_arr[mask] = 1 - disk = DP*mask_arr + disk = DP * mask_arr disks.append(np.average(disk)) try: - disks = disks/max(disks) + disks = disks / max(disks) except: pass - return (maxima_x,maxima_y,disks) + return (maxima_x, maxima_y, disks) diff --git a/py4DSTEM/braggvectors/diskdetection_cuda.py b/py4DSTEM/braggvectors/diskdetection_cuda.py index b912bde48..4bbb7f488 100644 --- a/py4DSTEM/braggvectors/diskdetection_cuda.py +++ b/py4DSTEM/braggvectors/diskdetection_cuda.py @@ -129,7 +129,7 @@ def find_Bragg_disks_CUDA( # compute the batch size based on available VRAM: max_num_bytes = cp.cuda.Device().mem_info[0] # use a fudge factor to leave room for the fourier transformed data - # I have set this at 10, which results in underutilization of + # I have set this at 10, which results in underutilization of # VRAM, because this yielded better performance in my testing batch_size = max_num_bytes // (bytes_per_pattern * 10) num_batches = datacube.R_N // batch_size + 1 @@ -202,10 +202,9 @@ def find_Bragg_disks_CUDA( del batched_subcube, batched_crosscorr, subFFT, cc, ccc cp.get_default_memory_pool().free_all_blocks() - else: # Loop over all diffraction patterns - for (Rx, Ry) in tqdmnd( + for Rx, Ry in tqdmnd( datacube.R_Nx, datacube.R_Ny, desc="Finding Bragg Disks", @@ -449,7 +448,9 @@ def get_maxima_2D( sizex = ar.shape[0] sizey = ar.shape[1] N = sizex * sizey - get_maximal_points(blocks, threads, (ar, maxima_bool, minAbsoluteIntensity, sizex, sizey, N)) + get_maximal_points( + blocks, threads, (ar, maxima_bool, minAbsoluteIntensity, 
sizex, sizey, N) + ) # Remove edges if edgeBoundary > 0: @@ -485,7 +486,7 @@ def get_maxima_2D( tooClose = ( (maxima["x"] - maxima["x"][i]) ** 2 + (maxima["y"] - maxima["y"][i]) ** 2 - ) < minSpacing ** 2 + ) < minSpacing**2 tooClose[: i + 1] = False deletemask[tooClose] = True maxima = np.delete(maxima, np.nonzero(deletemask)[0]) @@ -520,8 +521,8 @@ def get_maxima_2D( Iy1 = ar[int(maxima["x"][i]), int(maxima["y"][i]) + 1] deltax = (Ix1 - Ix1_) / (4 * Ix0 - 2 * Ix1 - 2 * Ix1_) deltay = (Iy1 - Iy1_) / (4 * Iy0 - 2 * Iy1 - 2 * Iy1_) - maxima["x"][i] += deltax if np.abs(deltax) <= 1. else 0. - maxima["y"][i] += deltay if np.abs(deltay) <= 1. else 0. + maxima["x"][i] += deltax if np.abs(deltax) <= 1.0 else 0.0 + maxima["y"][i] += deltay if np.abs(deltay) <= 1.0 else 0.0 maxima["intensity"][i] = linear_interpolation_2D( ar, maxima["x"][i], maxima["y"][i] ) @@ -533,11 +534,11 @@ def get_maxima_2D( # we actually have to lose some precision and go down to half-pixel # accuracy. this could also be done by a single upsampling at factor 2 # instead of get_maxima_2D. - xyShift = cp.array(np.round(xyShift * 2.) / 2) + xyShift = cp.array(np.round(xyShift * 2.0) / 2) subShift = upsampled_correlation(ar_FT, upsample_factor, xyShift).get() - maxima["x"] = subShift[:,0] - maxima["y"] = subShift[:,1] + maxima["x"] = subShift[:, 0] + maxima["y"] = subShift[:, 1] return maxima["x"], maxima["y"], maxima["intensity"] @@ -596,9 +597,12 @@ def upsampled_correlation(imageCorr, upsampleFactor, xyShift): imageCorrUpsample = dftUpsample(imageCorr, upsampleFactor, upsampleCenter).get() - xSubShift, ySubShift = np.unravel_index(imageCorrUpsample.reshape(imageCorrUpsample.shape[0],-1).argmax(axis=1), imageCorrUpsample.shape[1:3]) + xSubShift, ySubShift = np.unravel_index( + imageCorrUpsample.reshape(imageCorrUpsample.shape[0], -1).argmax(axis=1), + imageCorrUpsample.shape[1:3], + ) - # add a subpixel shift via parabolic fitting, serially for each peak + # add a subpixel shift via parabolic fitting, serially for each peak for idx in range(xSubShift.shape[0]): try: icc = np.real( @@ -608,15 +612,23 @@ def upsampled_correlation(imageCorr, upsampleFactor, xyShift): ySubShift[idx] - 1 : ySubShift[idx] + 2, ] ) - dx = (icc[2, 1] - icc[0, 1]) / (4 * icc[1, 1] - 2 * icc[2, 1] - 2 * icc[0, 1]) - dy = (icc[1, 2] - icc[1, 0]) / (4 * icc[1, 1] - 2 * icc[1, 2] - 2 * icc[1, 0]) + dx = (icc[2, 1] - icc[0, 1]) / ( + 4 * icc[1, 1] - 2 * icc[2, 1] - 2 * icc[0, 1] + ) + dy = (icc[1, 2] - icc[1, 0]) / ( + 4 * icc[1, 1] - 2 * icc[1, 2] - 2 * icc[1, 0] + ) except: dx, dy = ( 0, 0, ) # this is the case when the peak is near the edge and one of the above values does not exist - xyShift[idx] = xyShift[idx] + (cp.array([xSubShift[idx] + dx, ySubShift[idx] + dy]) - globalShift) / upsampleFactor + xyShift[idx] = ( + xyShift[idx] + + (cp.array([xSubShift[idx] + dx, ySubShift[idx] + dy]) - globalShift) + / upsampleFactor + ) return xyShift @@ -654,24 +666,37 @@ def dftUpsample(imageCorr, upsampleFactor, xyShift): pixelRadius = 1.5 kernel_size = int(np.ceil(pixelRadius * upsampleFactor)) - colKern = cp.zeros((N_pts, imageSize[1], kernel_size),dtype=cp.complex64) # N_pts * image_size[1] * kernel_size - rowKern = cp.zeros((N_pts, kernel_size, imageSize[0]),dtype=cp.complex64) # N_pts * kernel_size * image_size[0] + colKern = cp.zeros( + (N_pts, imageSize[1], kernel_size), dtype=cp.complex64 + ) # N_pts * image_size[1] * kernel_size + rowKern = cp.zeros( + (N_pts, kernel_size, imageSize[0]), dtype=cp.complex64 + ) # N_pts * kernel_size * image_size[0] # 
Fill in the DFT arrays using the CUDA kernels multicorr_col_kernel = kernels["multicorr_col_kernel"] - blocks = ((np.prod(colKern.shape) // multicorr_col_kernel.max_threads_per_block + 1),) + blocks = ( + (np.prod(colKern.shape) // multicorr_col_kernel.max_threads_per_block + 1), + ) threads = (multicorr_col_kernel.max_threads_per_block,) - multicorr_col_kernel(blocks,threads,(colKern, xyShift, N_pts, *imageSize, upsampleFactor)) + multicorr_col_kernel( + blocks, threads, (colKern, xyShift, N_pts, *imageSize, upsampleFactor) + ) multicorr_row_kernel = kernels["multicorr_row_kernel"] - blocks = ((np.prod(rowKern.shape) // multicorr_row_kernel.max_threads_per_block + 1),) + blocks = ( + (np.prod(rowKern.shape) // multicorr_row_kernel.max_threads_per_block + 1), + ) threads = (multicorr_row_kernel.max_threads_per_block,) - multicorr_row_kernel(blocks,threads,(rowKern, xyShift, N_pts, *imageSize, upsampleFactor)) + multicorr_row_kernel( + blocks, threads, (rowKern, xyShift, N_pts, *imageSize, upsampleFactor) + ) # Apply the DFT arrays to the correlation image imageUpsample = cp.real(rowKern @ imageCorr @ colKern) return imageUpsample + @numba.jit(nopython=True) def linear_interpolation_2D(ar, x, y): """ diff --git a/py4DSTEM/braggvectors/diskdetection_parallel.py b/py4DSTEM/braggvectors/diskdetection_parallel.py index 5a6d6dc11..a1c5dc6f4 100644 --- a/py4DSTEM/braggvectors/diskdetection_parallel.py +++ b/py4DSTEM/braggvectors/diskdetection_parallel.py @@ -12,20 +12,23 @@ from emdfile import PointListArray -def _find_Bragg_disks_single_DP_FK(DP, probe_kernel_FT, - corrPower=1, - sigma=2, - edgeBoundary=20, - minRelativeIntensity=0.005, - minAbsoluteIntensity=0, - relativeToPeak=0, - minPeakSpacing=60, - maxNumPeaks=70, - subpixel='multicorr', - upsample_factor=16, - filter_function=None, - return_cc=False, - peaks=None): +def _find_Bragg_disks_single_DP_FK( + DP, + probe_kernel_FT, + corrPower=1, + sigma=2, + edgeBoundary=20, + minRelativeIntensity=0.005, + minAbsoluteIntensity=0, + relativeToPeak=0, + minPeakSpacing=60, + maxNumPeaks=70, + subpixel="multicorr", + upsample_factor=16, + filter_function=None, + return_cc=False, + peaks=None, +): """ Mirror of diskdetection.find_Bragg_disks_single_DP_FK with explicit imports for remote execution. 
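The long comment block in upsampled_correlation_cp above, and the dftUpsample_cp / dftUpsample functions it refers to, implement a matrix-multiply DFT. As a reading aid, here is a minimal NumPy-only sketch of the same construction for a single peak; the function and variable names are illustrative and not part of py4DSTEM, and `image_corr_ft` is assumed to be the (conjugated) 2D FFT of the correlation image, as in the callers above.

import numpy as np

def dft_upsample_sketch(image_corr_ft, upsample_factor, upsample_center, pixel_radius=1.5):
    # Evaluate the DFT of a Fourier-space correlation image on a small
    # ~1.5 px neighborhood around `upsample_center`, sampled upsample_factor
    # times more finely than the original grid, using two small matrix
    # multiplies instead of a zero-padded FFT of the full array.
    nx, ny = image_corr_ft.shape
    n_pts = int(np.ceil(pixel_radius * upsample_factor))
    col_kern = np.exp(
        (-1j * 2 * np.pi / (ny * upsample_factor))
        * np.outer(
            np.fft.ifftshift(np.arange(ny)) - np.floor(ny / 2),
            np.arange(n_pts) - upsample_center[1],
        )
    )  # shape (ny, n_pts)
    row_kern = np.exp(
        (-1j * 2 * np.pi / (nx * upsample_factor))
        * np.outer(
            np.arange(n_pts) - upsample_center[0],
            np.fft.ifftshift(np.arange(nx)) - np.floor(nx / 2),
        )
    )  # shape (n_pts, nx)
    # (n_pts, nx) @ (nx, ny) @ (ny, n_pts) -> small (n_pts, n_pts) patch
    return np.real(row_kern @ image_corr_ft @ col_kern)

Only the small output patch is ever materialized; taking its argmax and mapping back through globalShift and upsampleFactor, as upsampled_correlation_cp does, yields the refined peak position. This is the speed and memory trade-off the comment block describes.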
@@ -92,8 +95,13 @@ def _find_Bragg_disks_single_DP_FK(DP, probe_kernel_FT, Returns: (PointList) the Bragg peak positions and correlation intensities """ - assert subpixel in ['none', 'poly', 'multicorr'], \ - "Unrecognized subpixel option {}, subpixel must be 'none', 'poly', or 'multicorr'".format(subpixel) + assert subpixel in [ + "none", + "poly", + "multicorr", + ], "Unrecognized subpixel option {}, subpixel must be 'none', 'poly', or 'multicorr'".format( + subpixel + ) import numpy import scipy.ndimage.filters @@ -102,8 +110,10 @@ def _find_Bragg_disks_single_DP_FK(DP, probe_kernel_FT, # apply filter function: DP = DP if filter_function is None else filter_function(DP) - if subpixel == 'none': - cc = py4DSTEM.process.utils.get_cross_correlation_fk(DP, probe_kernel_FT, corrPower) + if subpixel == "none": + cc = py4DSTEM.process.utils.get_cross_correlation_fk( + DP, probe_kernel_FT, corrPower + ) cc = numpy.maximum(cc, 0) maxima_x, maxima_y, maxima_int = py4DSTEM.process.utils.get_maxima_2D( cc, @@ -114,19 +124,24 @@ def _find_Bragg_disks_single_DP_FK(DP, probe_kernel_FT, relativeToPeak=relativeToPeak, minSpacing=minPeakSpacing, maxNumPeaks=maxNumPeaks, - subpixel=False) - elif subpixel == 'poly': - cc = py4DSTEM.process.utils.get_cross_correlation_fk(DP, probe_kernel_FT, corrPower) + subpixel=False, + ) + elif subpixel == "poly": + cc = py4DSTEM.process.utils.get_cross_correlation_fk( + DP, probe_kernel_FT, corrPower + ) cc = numpy.maximum(cc, 0) maxima_x, maxima_y, maxima_int = py4DSTEM.process.utils.get_maxima_2D( - cc, sigma=sigma, + cc, + sigma=sigma, edgeBoundary=edgeBoundary, minRelativeIntensity=minRelativeIntensity, minAbsoluteIntensity=minAbsoluteIntensity, relativeToPeak=relativeToPeak, minSpacing=minPeakSpacing, maxNumPeaks=maxNumPeaks, - subpixel=True) + subpixel=True, + ) else: # Multicorr subpixel: m = numpy.fft.fft2(DP) * probe_kernel_FT @@ -135,14 +150,16 @@ def _find_Bragg_disks_single_DP_FK(DP, probe_kernel_FT, cc = numpy.maximum(numpy.real(numpy.fft.ifft2(ccc)), 0) maxima_x, maxima_y, maxima_int = py4DSTEM.process.utils.get_maxima_2D( - cc, sigma=sigma, + cc, + sigma=sigma, edgeBoundary=edgeBoundary, minRelativeIntensity=minRelativeIntensity, minAbsoluteIntensity=minAbsoluteIntensity, relativeToPeak=relativeToPeak, minSpacing=minPeakSpacing, maxNumPeaks=maxNumPeaks, - subpixel=True) + subpixel=True, + ) # Use the DFT upsample to refine the detected peaks (but not the intensity) for ipeak in range(len(maxima_x)): @@ -153,16 +170,18 @@ def _find_Bragg_disks_single_DP_FK(DP, probe_kernel_FT, xyShift[0] = numpy.round(xyShift[0] * 2) / 2 xyShift[1] = numpy.round(xyShift[1] * 2) / 2 - subShift = py4DSTEM.process.utils.multicorr.upsampled_correlation(ccc, upsample_factor, xyShift) + subShift = py4DSTEM.process.utils.multicorr.upsampled_correlation( + ccc, upsample_factor, xyShift + ) maxima_x[ipeak] = subShift[0] maxima_y[ipeak] = subShift[1] # Make peaks PointList if peaks is None: - coords = [('qx', float), ('qy', float), ('intensity', float)] + coords = [("qx", float), ("qy", float), ("intensity", float)] peaks = py4DSTEM.PointList(coordinates=coords) else: - assert (isinstance(peaks, py4DSTEM.PointList)) + assert isinstance(peaks, py4DSTEM.PointList) peaks.add_tuple_of_nparrays((maxima_x, maxima_y, maxima_int)) if return_cc: @@ -175,14 +194,14 @@ def _process_chunk(_f, start, end, path_to_static, coords, path_to_data, cluster import os import dill - with open(path_to_static, 'rb') as infile: + with open(path_to_static, "rb") as infile: inputs = dill.load(infile) # Always 
try to memory map the data file, if possible - if path_to_data.rsplit('.', 1)[-1].startswith('dm'): - datacube = py4DSTEM.read(path_to_data, load='dmmmap') - elif path_to_data.rsplit('.',1)[-1].startswith('gt'): - datacube = py4DSTEM.read(path_to_data, load='gatan_bin') + if path_to_data.rsplit(".", 1)[-1].startswith("dm"): + datacube = py4DSTEM.read(path_to_data, load="dmmmap") + elif path_to_data.rsplit(".", 1)[-1].startswith("gt"): + datacube = py4DSTEM.read(path_to_data, load="gatan_bin") else: datacube = py4DSTEM.read(path_to_data) @@ -194,27 +213,30 @@ def _process_chunk(_f, start, end, path_to_static, coords, path_to_data, cluster datacube = None path_to_output = os.path.join(cluster_path, "{}_{}.data".format(start, end)) - with open(path_to_output, 'wb') as data_file: + with open(path_to_output, "wb") as data_file: dill.dump(results, data_file) return path_to_output -def find_Bragg_disks_ipp(DP, probe, - corrPower=1, - sigma=2, - edgeBoundary=20, - minRelativeIntensity=0.005, - minAbsoluteIntensity=0, - relativeToPeak=0, - minPeakSpacing=60, - maxNumPeaks=70, - subpixel='poly', - upsample_factor=4, - filter_function=None, - ipyparallel_client_file=None, - data_file=None, - cluster_path=None): +def find_Bragg_disks_ipp( + DP, + probe, + corrPower=1, + sigma=2, + edgeBoundary=20, + minRelativeIntensity=0.005, + minAbsoluteIntensity=0, + relativeToPeak=0, + minPeakSpacing=60, + maxNumPeaks=70, + subpixel="poly", + upsample_factor=4, + filter_function=None, + ipyparallel_client_file=None, + data_file=None, + cluster_path=None, +): """ Distributed compute using IPyParallel. @@ -268,7 +290,7 @@ def find_Bragg_disks_ipp(DP, probe, DP = None # Make the peaks PointListArray - coords = [('qx', float), ('qy', float), ('intensity', float)] + coords = [("qx", float), ("qy", float), ("intensity", float)] peaks = PointListArray(coordinates=coords, shape=(R_Nx, R_Ny)) # Get the probe kernel FT @@ -294,8 +316,8 @@ def find_Bragg_disks_ipp(DP, probe, maxNumPeaks, subpixel, upsample_factor, - filter_function - ] + filter_function, + ] if cluster_path is None: cluster_path = os.getcwd() @@ -305,7 +327,7 @@ def find_Bragg_disks_ipp(DP, probe, t_00 = time() # write out static inputs path_to_inputs = os.path.join(tmpdir.name, "inputs") - with open(path_to_inputs, 'wb') as inputs_file: + with open(path_to_inputs, "wb") as inputs_file: dill.dump(inputs_list, inputs_file) t_inputs_save = time() - t_00 print("Serialize input values : {}".format(t_inputs_save)) @@ -336,7 +358,9 @@ def find_Bragg_disks_ipp(DP, probe, path_to_inputs, indices[start:end], data_file, - tmpdir.name)) + tmpdir.name, + ) + ) if end == total: break @@ -352,7 +376,7 @@ def find_Bragg_disks_ipp(DP, probe, t3 = time() for i in range(len(results)): - with open(results[i].get(), 'rb') as f: + with open(results[i].get(), "rb") as f: data_chunk = dill.load(f) for Rx, Ry, data in data_chunk: @@ -367,27 +391,33 @@ def find_Bragg_disks_ipp(DP, probe, print("Error when cleaning up temporary files: {}".format(e)) t = time() - t0 - print("Analyzed {} diffraction patterns in {}h {}m {}s".format( - R_N, int(t / 3600), int(t / 60), int(t % 60))) + print( + "Analyzed {} diffraction patterns in {}h {}m {}s".format( + R_N, int(t / 3600), int(t / 60), int(t % 60) + ) + ) return peaks -def find_Bragg_disks_dask(DP, probe, - corrPower=1, - sigma=2, - edgeBoundary=20, - minRelativeIntensity=0.005, - minAbsoluteIntensity=0, - relativeToPeak=0, - minPeakSpacing=60, - maxNumPeaks=70, - subpixel='poly', - upsample_factor=4, - filter_function=None, - 
dask_client=None, - data_file=None, - cluster_path=None): +def find_Bragg_disks_dask( + DP, + probe, + corrPower=1, + sigma=2, + edgeBoundary=20, + minRelativeIntensity=0.005, + minAbsoluteIntensity=0, + relativeToPeak=0, + minPeakSpacing=60, + maxNumPeaks=70, + subpixel="poly", + upsample_factor=4, + filter_function=None, + dask_client=None, + data_file=None, + cluster_path=None, +): """ Distributed compute using Dask. @@ -440,7 +470,7 @@ def find_Bragg_disks_dask(DP, probe, DP = None # Make the peaks PointListArray - coords = [('qx', float), ('qy', float), ('intensity', float)] + coords = [("qx", float), ("qy", float), ("intensity", float)] peaks = PointListArray(coordinates=coords, shape=(R_Nx, R_Ny)) # Get the probe kernel FT @@ -465,8 +495,8 @@ def find_Bragg_disks_dask(DP, probe, maxNumPeaks, subpixel, upsample_factor, - filter_function - ] + filter_function, + ] if cluster_path is None: cluster_path = os.getcwd() @@ -475,7 +505,7 @@ def find_Bragg_disks_dask(DP, probe, # write out static inputs path_to_inputs = os.path.join(tmpdir.name, "{}.inputs".format(dask_client.id)) - with open(path_to_inputs, 'wb') as inputs_file: + with open(path_to_inputs, "wb") as inputs_file: dill.dump(inputs_list, inputs_file) t_inputs_save = time() - t0 print("Serialize input values : {}".format(t_inputs_save)) @@ -508,7 +538,9 @@ def find_Bragg_disks_dask(DP, probe, path_to_inputs, indices[start:end], data_file, - tmpdir.name)) + tmpdir.name, + ) + ) if end == total: break @@ -521,7 +553,7 @@ def find_Bragg_disks_dask(DP, probe, # collect results for batch in distributed.as_completed(submits, with_results=True).batches(): for future, result in batch: - with open(result, 'rb') as f: + with open(result, "rb") as f: data_chunk = dill.load(f) for Rx, Ry, data in data_chunk: @@ -536,7 +568,10 @@ def find_Bragg_disks_dask(DP, probe, print("Error when cleaning up temporary files: {}".format(e)) t = time() - t0 - print("Analyzed {} diffraction patterns in {}h {}m {}s".format( - R_N, int(t / 3600), int(t / 60), int(t % 60))) + print( + "Analyzed {} diffraction patterns in {}h {}m {}s".format( + R_N, int(t / 3600), int(t / 60), int(t % 60) + ) + ) return peaks diff --git a/py4DSTEM/braggvectors/diskdetection_parallel_new.py b/py4DSTEM/braggvectors/diskdetection_parallel_new.py index 241cae2f7..c15e41732 100644 --- a/py4DSTEM/braggvectors/diskdetection_parallel_new.py +++ b/py4DSTEM/braggvectors/diskdetection_parallel_new.py @@ -10,9 +10,10 @@ from dask import delayed from dask.distributed import Client, LocalCluster from dask.diagnostics import ProgressBar -#import dask.bag as db -#import distributed +# import dask.bag as db + +# import distributed from distributed.protocol.serialize import register_serialization_family import distributed @@ -21,93 +22,101 @@ from py4DSTEM.braggvectors.diskdetection import _find_Bragg_disks_single_DP_FK - #### SERIALISERS #### -# Define Serialiser -# these are functions which allow the hdf5 objects to be passed. May not be required anymore +# Define Serialiser +# these are functions which allow the hdf5 objects to be passed. 
May not be required anymore + def dill_dumps(x): - header = {'serializer': 'dill'} + header = {"serializer": "dill"} frames = [dill.dumps(x)] return header, frames + def dill_loads(header, frames): if len(frames) > 1: - frame = ''.join(frames) + frame = "".join(frames) else: frame = frames[0] return dill.loads(frame) -# register the serialization method -#register_serialization_family('dill', dill_dumps, dill_loads) + + +# register the serialization method +# register_serialization_family('dill', dill_dumps, dill_loads) + def register_dill_serializer(): """ - This function registers the dill serializer allowing dask to work on h5py objects. - Not sure if this needs to be run and how often this need to be run. Keeping this in for now. + This function registers the dill serializer allowing dask to work on h5py objects. + Not sure if this needs to be run and how often it needs to be run. Keeping this in for now. Args: None Returns: None """ - register_serialization_family('dill', dill_dumps, dill_loads) + register_serialization_family("dill", dill_dumps, dill_loads) return None #### END OF SERIALISERS #### -#### DASK WRAPPER FUNCTION #### -# Each delayed objected is passed a 4D array, currently implementing only on 2D slices. +#### DASK WRAPPER FUNCTION #### + + +# Each delayed object is passed a 4D array, currently implementing only on 2D slices. # TODO add batching with fancy indexing - needs to run a for loop over the batch of arrays -# TODO add cuda accelerated version -# TODO add ML-AI version -def _find_Bragg_disks_single_DP_FK_dask_wrapper(arr, *args,**kwargs): +# TODO add cuda accelerated version +# TODO add ML-AI version +def _find_Bragg_disks_single_DP_FK_dask_wrapper(arr, *args, **kwargs): # This is needed as _find_Bragg_disks_single_DP_FK takes a 2D array, and these arrays have the wrong shape - return _find_Bragg_disks_single_DP_FK(arr[0,0], *args, **kwargs) + return _find_Bragg_disks_single_DP_FK(arr[0, 0], *args, **kwargs) #### END OF DASK WRAPPER FUNCTIONS #### - #### MAIN FUNCTION -# TODO add batching with fancy indexing - needs batch size, fancy indexing method -# TODO add cuda accelerated function - needs dask GPU cluster. - -def beta_parallel_disk_detection(dataset, - probe, - #rxmin=None, # these would allow selecting a sub section - #rxmax=None, - #rymin=None, - #rymax=None, - #qxmin=None, - #qxmax=None, - #qymin=None, - #qymax=None, - probe_type="FT", - dask_client= None, - dask_client_params:dict=None, - restart_dask_client=True, - close_dask_client=False, - return_dask_client=True, - *args, **kwargs): +# TODO add batching with fancy indexing - needs batch size, fancy indexing method +# TODO add cuda accelerated function - needs dask GPU cluster. + + +def beta_parallel_disk_detection( + dataset, + probe, + # rxmin=None, # these would allow selecting a sub section + # rxmax=None, + # rymin=None, + # rymax=None, + # qxmin=None, + # qxmax=None, + # qymin=None, + # qymax=None, + probe_type="FT", + dask_client=None, + dask_client_params: dict = None, + restart_dask_client=True, + close_dask_client=False, + return_dask_client=True, + *args, + **kwargs ): """ - This is not fully validated currently so may not work, please report bugs on the py4DSTEM github page. + This is not fully validated currently so may not work; please report bugs on the py4DSTEM github page. - This parallellises the disk detetection for all probe posistions. 
This can operate on either in memory or out of memory datasets - - There is an asumption that unless specifying otherwise you are parallelising on a single Local Machine. If this is not the case its probably best to pass the dask_client into the function, although you can just pass the required arguments to dask_client_params. - If no dask_client arguments are passed it will create a dask_client for a local machine - + This parallelises the disk detection for all probe positions. This can operate on either in-memory or out-of-memory datasets. + + There is an assumption that, unless specified otherwise, you are parallelising on a single local machine. If this is not the case it's probably best to pass the dask_client into the function, although you can just pass the required arguments to dask_client_params. + If no dask_client arguments are passed it will create a dask_client for a local machine + + Note: Do not pass "peaks" argument as a kwarg, like you might in "_find_Bragg_disks_single_DP_FK", as the results will be unreliable and may cause the calculation to crash. Args: dataset (py4dSTEM datacube): 4DSTEM dataset probe (ndarray): can be regular probe kernel or Fourier transformed - probe_type (str): "FT" or None + probe_type (str): "FT" or None dask_client (distributed.client.Client): dask client dask_client_params (dict): parameters to pass to dask client or dask cluster restart_dask_client (bool): if True, function will attempt to restart the dask_client. @@ -119,29 +128,36 @@ def beta_parallel_disk_detection(dataset, peaks (PointListArray): the Bragg peak positions and the correlation intensities dask_client(optional) (distributed.client.Client): dask_client for use later. """ - #TODO add asserts abotu peaks not being passed + # TODO add asserts about peaks not being passed # Dask Client stuff - #TODO how to guess at default params for client, sqrt no.cores. Something to do with the size of the diffraction patterm - # write a function which can do this. - #TODO replace dask part with a with statement for easier clean up e.g. - # with LocalCluser(params) as cluster, Client(cluster) as client: - # ... dask stuff. - #TODO add assert statements and other checks. Think about reordering opperations - - if dask_client == None: - if dask_client_params !=None: - - dask.config.set({'distributed.worker.memory.spill': False, - 'distributed.worker.memory.target': False}) + # TODO how to guess at default params for client, sqrt no.cores. Something to do with the size of the diffraction pattern + # write a function which can do this. + # TODO replace dask part with a with statement for easier clean up e.g. + # with LocalCluster(params) as cluster, Client(cluster) as client: + # ... dask stuff. + # TODO add assert statements and other checks. Think about reordering operations + + if dask_client == None: + if dask_client_params != None: + dask.config.set( + { + "distributed.worker.memory.spill": False, + "distributed.worker.memory.target": False, + } + ) cluster = LocalCluster(**dask_client_params) dask_client = Client(cluster, **dask_client_params) else: # AUTO MAGICALLY SET? # LET DASK SET? # HAVE A FUNCTION WHICH RUNS ON A SUBSET OF THE DATA TO PICK OPTIMAL VALUE? - # psutil could be used to count cores. - dask.config.set({'distributed.worker.memory.spill': False, # stops spilling to disk - 'distributed.worker.memory.target': False}) # stops spilling to disk and erroring out + # psutil could be used to count cores. 
+ dask.config.set( + { + "distributed.worker.memory.spill": False, # stops spilling to disk + "distributed.worker.memory.target": False, + } + ) # stops spilling to disk and erroring out cluster = LocalCluster() dask_client = Client(cluster) @@ -151,79 +167,97 @@ def beta_parallel_disk_detection(dataset, try: dask_client.restart() except Exception as e: - print('Could not restart dask client. Try manually restarting outside or passing "restart_dask_client=False"') # WARNING STATEMENT - return e + print( + 'Could not restart dask client. Try manually restarting outside or passing "restart_dask_client=False"' + ) # WARNING STATEMENT + return e else: pass - # Probe stuff - assert (probe.shape == dataset.data.shape[2:]), "Probe and Diffraction Pattern Shapes are Mismatched" + assert ( + probe.shape == dataset.data.shape[2:] + ), "Probe and Diffraction Pattern Shapes are Mismatched" if probe_type != "FT": - #TODO clean up and pull out redudant parts - #if probe.dtype != (np.complex128 or np.complex64 or np.complex256): - #DO FFT SHIFT THING + # TODO clean up and pull out redundant parts + # if probe.dtype != (np.complex128 or np.complex64 or np.complex256): + # DO FFT SHIFT THING probe_kernel_FT = np.conj(np.fft.fft2(probe)) - dask_probe_array = da.from_array(probe_kernel_FT, chunks=(dataset.Q_Nx, dataset.Q_Ny)) + dask_probe_array = da.from_array( + probe_kernel_FT, chunks=(dataset.Q_Nx, dataset.Q_Ny) + ) dask_probe_delayed = dask_probe_array.to_delayed() # delayed_probe_kernel_FT = delayed(probe_kernel_FT) else: probe_kernel_FT = probe - dask_probe_array = da.from_array(probe_kernel_FT, chunks=(dataset.Q_Nx, dataset.Q_Ny)) + dask_probe_array = da.from_array( + probe_kernel_FT, chunks=(dataset.Q_Nx, dataset.Q_Ny) + ) dask_probe_delayed = dask_probe_array.to_delayed() - # GET DATA - #TODO add another elif if it is a dask array then pass + # GET DATA + # TODO add another elif if it is a dask array then pass if type(dataset.data) == np.ndarray: - dask_data = da.from_array(dataset.data, chunks=(1, 1,dataset.Q_Nx, dataset.Q_Ny)) + dask_data = da.from_array( + dataset.data, chunks=(1, 1, dataset.Q_Nx, dataset.Q_Ny) + ) elif dataset.stack_pointer != None: - dask_data = da.from_array(dataset.stack_pointer, chunks=(1, 1,dataset.Q_Nx, dataset.Q_Ny)) + dask_data = da.from_array( + dataset.stack_pointer, chunks=(1, 1, dataset.Q_Nx, dataset.Q_Ny) + ) + else: print("Couldn't access the data") return None - # Convert the data to delayed + # Convert the data to delayed dataset_delayed = dask_data.to_delayed() # TODO Trim data e.g. rx,ry,qx,qy # I can pass the index values in here I should trim the probe and diffraction pattern first + # Into the meat of the function - # Into the meat of the function - - # create an empty list to which we will append the delayed functions. 
res = [] - # loop over the dataset_delayed and create a delayed function of + # loop over the dataset_delayed and create a delayed function of for x in np.ndindex(dataset_delayed.shape): - temp = delayed(_find_Bragg_disks_single_DP_FK_dask_wrapper)(dataset_delayed[x], - probe_kernel_FT=dask_probe_delayed[0,0], - #probe_kernel_FT=delayed_probe_kernel_FT, - *args, **kwargs) #passing through args from earlier or should I use - #corrPower=corrPower, - #sigma=sigma_gaussianFilter, - #edgeBoundary=edgeBoundary, - #minRelativeIntensity=minRelativeIntensity, - #minPeakSpacing=minPeakSpacing, - #maxNumPeaks=maxNumPeaks, - #subpixel='poly') + temp = delayed(_find_Bragg_disks_single_DP_FK_dask_wrapper)( + dataset_delayed[x], + probe_kernel_FT=dask_probe_delayed[0, 0], + # probe_kernel_FT=delayed_probe_kernel_FT, + *args, + **kwargs + ) # passing through args from earlier or should I use + # corrPower=corrPower, + # sigma=sigma_gaussianFilter, + # edgeBoundary=edgeBoundary, + # minRelativeIntensity=minRelativeIntensity, + # minPeakSpacing=minPeakSpacing, + # maxNumPeaks=maxNumPeaks, + # subpixel='poly') res.append(temp) - _temp_peaks = dask_client.compute(res, optimize_graph=True) # creates futures and starts computing + _temp_peaks = dask_client.compute( + res, optimize_graph=True + ) # creates futures and starts computing - output = dask_client.gather(_temp_peaks) # gather the future objects + output = dask_client.gather(_temp_peaks) # gather the future objects - coords = [('qx',float),('qy',float),('intensity',float)] + coords = [("qx", float), ("qy", float), ("intensity", float)] peaks = PointListArray(coordinates=coords, shape=dataset.data.shape[:-2]) - #temp_peaks[0][0] + # temp_peaks[0][0] # operating over a list so we need the size (0->count) and re-create the probe positions (0->rx,0->ry), - for (count,(rx, ry)) in zip([i for i in range(dataset.data[...,0,0].size)],np.ndindex(dataset.data.shape[:-2])): - #peaks.get_pointlist(rx, ry).add_pointlist(temp_peaks[0][count]) - #peaks.get_pointlist(rx, ry).add_pointlist(output[count][0]) + for count, (rx, ry) in zip( + [i for i in range(dataset.data[..., 0, 0].size)], + np.ndindex(dataset.data.shape[:-2]), + ): + # peaks.get_pointlist(rx, ry).add_pointlist(temp_peaks[0][count]) + # peaks.get_pointlist(rx, ry).add_pointlist(output[count][0]) peaks.get_pointlist(rx, ry).add_pointlist(output[count]) # Clean up - dask_client.cancel(_temp_peaks) # removes from the dask workers - del _temp_peaks # deletes the object + dask_client.cancel(_temp_peaks) # removes from the dask workers + del _temp_peaks # deletes the object if close_dask_client: dask_client.close() return peaks @@ -232,6 +266,7 @@ def beta_parallel_disk_detection(dataset, elif close_dask_client and return_dask_client == False: return peaks else: - print('Dask Client in unknown state, this may result in unpredicitable behaviour later') + print( + "Dask Client in unknown state; this may result in unpredictable behaviour later" + ) return peaks - diff --git a/py4DSTEM/braggvectors/kernels.py b/py4DSTEM/braggvectors/kernels.py index e5edaf0e2..d36ae172b 100644 --- a/py4DSTEM/braggvectors/kernels.py +++ b/py4DSTEM/braggvectors/kernels.py @@ -1,17 +1,18 @@ import cupy as cp -__all__ = ['kernels'] +__all__ = ["kernels"] kernels = {} ############################# multicorr kernels ################################# import os -with open(os.path.join(os.path.dirname(__file__), "multicorr_row_kernel.cu"), 'r') as f: - kernels['multicorr_row_kernel'] = cp.RawKernel(f.read(), 'multicorr_row_kernel') -with 
open(os.path.join(os.path.dirname(__file__), "multicorr_col_kernel.cu"), 'r') as f: - kernels['multicorr_col_kernel'] = cp.RawKernel(f.read(), 'multicorr_col_kernel') +with open(os.path.join(os.path.dirname(__file__), "multicorr_row_kernel.cu"), "r") as f: + kernels["multicorr_row_kernel"] = cp.RawKernel(f.read(), "multicorr_row_kernel") + +with open(os.path.join(os.path.dirname(__file__), "multicorr_col_kernel.cu"), "r") as f: + kernels["multicorr_col_kernel"] = cp.RawKernel(f.read(), "multicorr_col_kernel") ############################# get_maximal_points ################################ @@ -25,7 +26,7 @@ this is not considered a problem. """ -maximal_pts_float32 = r''' +maximal_pts_float32 = r""" extern "C" __global__ void maximal_pts(const float *ar, bool *out, const double minAbsoluteIntensity, const long long sizex, const long long sizey, const long long N){ int tid = blockDim.x * blockIdx.x + threadIdx.x; @@ -46,11 +47,11 @@ (val >= minAbsoluteIntensity)); } } -''' +""" -kernels['maximal_pts_float32'] = cp.RawKernel(maximal_pts_float32,'maximal_pts') +kernels["maximal_pts_float32"] = cp.RawKernel(maximal_pts_float32, "maximal_pts") -maximal_pts_float64 = r''' +maximal_pts_float64 = r""" extern "C" __global__ void maximal_pts(const double *ar, bool *out, const double minAbsoluteIntensity, const long long sizex, const long long sizey, const long long N){ int tid = blockDim.x * blockIdx.x + threadIdx.x; @@ -71,15 +72,14 @@ (val >= minAbsoluteIntensity)); } } -''' - -kernels['maximal_pts_float64'] = cp.RawKernel(maximal_pts_float64,'maximal_pts') +""" +kernels["maximal_pts_float64"] = cp.RawKernel(maximal_pts_float64, "maximal_pts") ################################ edge_boundary ###################################### -edge_boundary = r''' +edge_boundary = r""" extern "C" __global__ void edge_boundary(bool *ar, const long long edgeBoundary, const long long sizex, const long long sizey, const long long N){ @@ -92,6 +92,6 @@ } } } -''' +""" -kernels['edge_boundary'] = cp.RawKernel(edge_boundary,'edge_boundary') +kernels["edge_boundary"] = cp.RawKernel(edge_boundary, "edge_boundary") diff --git a/py4DSTEM/braggvectors/probe.py b/py4DSTEM/braggvectors/probe.py index 9195d06bd..464c2f2a4 100644 --- a/py4DSTEM/braggvectors/probe.py +++ b/py4DSTEM/braggvectors/probe.py @@ -5,12 +5,10 @@ from warnings import warn from py4DSTEM.data import DiffractionSlice, Data -from scipy.ndimage import ( - binary_opening, binary_dilation, distance_transform_edt) +from scipy.ndimage import binary_opening, binary_dilation, distance_transform_edt - -class Probe(DiffractionSlice,Data): +class Probe(DiffractionSlice, Data): """ Stores a vacuum probe. 
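Since the hunks below are formatting-only, a short usage sketch may help orient readers; it relies only on the Probe API visible in this file (generate_synthetic_probe, the probe/kernel slice properties, and the static get_probe_kernel_flat shown further down), with illustrative parameter values.

import numpy as np
from py4DSTEM.braggvectors.probe import Probe

# A synthetic vacuum probe: a disk of radius 20 px whose edge is blurred
# by a logistic sigmoid of width 4 px, on a 128x128 diffraction-space grid.
probe = Probe.generate_synthetic_probe(radius=20, width=4, Qshape=(128, 128))

# A Probe stores a (2, Q_Nx, Q_Ny) stack with 'probe' and 'kernel' slices;
# the kernel slice holds zeros until a kernel is computed and assigned.
assert probe.probe.shape == (128, 128)

# Build a 'flat' cross-correlation kernel and store it via the kernel setter.
kern = Probe.get_probe_kernel_flat(probe.probe)
probe.kernel = kern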
@@ -26,11 +24,7 @@ class Probe(DiffractionSlice,Data): """ - def __init__( - self, - data: np.ndarray, - name: Optional[str] = 'probe' - ): + def __init__(self, data: np.ndarray, name: Optional[str] = "probe"): """ Accepts: data (2D or 3D np.ndarray): the vacuum probe, or @@ -42,67 +36,50 @@ def __init__( """ # if only the probe is passed, make space for the kernel if data.ndim == 2: - data = np.stack([ - data, - np.zeros_like(data) - ]) + data = np.stack([data, np.zeros_like(data)]) # initialize as a DiffractionSlice DiffractionSlice.__init__( - self, - name = name, - data = data, - slicelabels = [ - 'probe', - 'kernel' - ] + self, name=name, data=data, slicelabels=["probe", "kernel"] ) - ## properties @property def probe(self): - return self.get_slice('probe').data + return self.get_slice("probe").data + @probe.setter - def probe(self,x): - assert(x.shape == (self.data.shape[1:])) - self.data[0,:,:] = x + def probe(self, x): + assert x.shape == (self.data.shape[1:]) + self.data[0, :, :] = x + @property def kernel(self): - return self.get_slice('kernel').data - @kernel.setter - def kernel(self,x): - assert(x.shape == (self.data.shape[1:])) - self.data[1,:,:] = x + return self.get_slice("kernel").data + @kernel.setter + def kernel(self, x): + assert x.shape == (self.data.shape[1:]) + self.data[1, :, :] = x # read @classmethod - def _get_constructor_args(cls,group): + def _get_constructor_args(cls, group): """ Returns a dictionary of args/values to pass to the class constructor """ ar_constr_args = DiffractionSlice._get_constructor_args(group) args = { - 'data' : ar_constr_args['data'], - 'name' : ar_constr_args['name'], + "data": ar_constr_args["data"], + "name": ar_constr_args["name"], } return args - - # generation methods @classmethod - def from_vacuum_data( - cls, - data, - mask = None, - threshold = 0.2, - expansion = 12, - opening = 3 - ): + def from_vacuum_data(cls, data, mask=None, threshold=0.2, expansion=12, opening=3): """ Generates and returns a vacuum probe Probe instance from either a 2D vacuum image or a 3D stack of vacuum diffraction patterns. @@ -134,9 +111,9 @@ def from_vacuum_data( probe : Probe the vacuum probe """ - assert(isinstance(data,np.ndarray)) + assert isinstance(data, np.ndarray) if data.ndim == 3: - probe = np.average(data,axis=0) + probe = np.average(data, axis=0) elif data.ndim == 2: probe = data else: @@ -145,23 +122,24 @@ def from_vacuum_data( if mask is not None: probe *= mask - mask = probe > np.max(probe)*threshold + mask = probe > np.max(probe) * threshold mask = binary_opening(mask, iterations=opening) mask = binary_dilation(mask, iterations=1) - mask = np.cos((np.pi/2)*np.minimum( - distance_transform_edt(np.logical_not(mask)) / expansion, 1))**2 + mask = ( + np.cos( + (np.pi / 2) + * np.minimum( + distance_transform_edt(np.logical_not(mask)) / expansion, 1 + ) + ) + ** 2 + ) - probe = cls(probe*mask) + probe = cls(probe * mask) return probe - @classmethod - def generate_synthetic_probe( - cls, - radius, - width, - Qshape - ): + def generate_synthetic_probe(cls, radius, width, Qshape): """ Makes a synthetic probe, with the functional form of a disk blurred by a sigmoid (a logistic function). @@ -183,21 +161,19 @@ def generate_synthetic_probe( the probe """ # Make coords - Q_Nx,Q_Ny = Qshape - qy,qx = np.meshgrid(np.arange(Q_Ny),np.arange(Q_Nx)) - qy,qx = qy - Q_Ny/2., qx-Q_Nx/2. 
- qr = np.sqrt(qx**2+qy**2) + Q_Nx, Q_Ny = Qshape + qy, qx = np.meshgrid(np.arange(Q_Ny), np.arange(Q_Nx)) + qy, qx = qy - Q_Ny / 2.0, qx - Q_Nx / 2.0 + qr = np.sqrt(qx**2 + qy**2) # Shift zero to disk edge qr = qr - radius # Calculate logistic function - probe = 1/(1+np.exp(4*qr/width)) + probe = 1 / (1 + np.exp(4 * qr / width)) return cls(probe) - - # calibration methods def measure_disk( @@ -207,7 +183,7 @@ def measure_disk( N=100, returncalc=True, data=None, - ): + ): """ Finds the center and radius of an average probe image. @@ -254,7 +230,7 @@ def measure_disk( # get binary images and compute a radius for each immax = np.max(im) - for i,val in enumerate(thresh_vals): + for i, val in enumerate(thresh_vals): mask = im > immax * val r_vals[i] = np.sqrt(np.sum(mask) / np.pi) @@ -269,33 +245,23 @@ def measure_disk( x0, y0 = get_CoM(im * mask) # Store metadata and return - ans = r,x0,y0 + ans = r, x0, y0 if data is None: try: self.calibration.set_probe_param(ans) except AttributeError: - warn(f"Couldn't store the probe parameters in metadata as no calibration was found for this Probe instance, {self}") + warn( + f"Couldn't store the probe parameters in metadata as no calibration was found for this Probe instance, {self}" + ) pass if returncalc: return ans - - - - - - - # Kernel generation methods def get_kernel( - self, - mode = 'flat', - origin = None, - data = None, - returncalc = True, - **kwargs - ): + self, mode="flat", origin=None, data=None, returncalc=True, **kwargs + ): """ Creates a cross-correlation kernel from the vacuum probe. @@ -339,22 +305,17 @@ def get_kernel( kernel : 2D array """ - modes = [ - 'flat', - 'gaussian', - 'sigmoid', - 'sigmoid_log' - ] + modes = ["flat", "gaussian", "sigmoid", "sigmoid_log"] # parse args assert mode in modes, f"mode must be in {modes}. Received {mode}" # get function function_dict = { - 'flat' : self.get_probe_kernel_flat, - 'gaussian' : self.get_probe_kernel_edge_gaussian, - 'sigmoid' : self._get_probe_kernel_edge_sigmoid_sine_squared, - 'sigmoid_log' : self._get_probe_kernel_edge_sigmoid_sine_squared + "flat": self.get_probe_kernel_flat, + "gaussian": self.get_probe_kernel_edge_gaussian, + "sigmoid": self._get_probe_kernel_edge_sigmoid_sine_squared, + "sigmoid_log": self._get_probe_kernel_edge_sigmoid_sine_squared, } fn = function_dict[mode] @@ -368,18 +329,14 @@ def get_kernel( if x is None: origin = None else: - r,x,y = x - origin = (x,y) + r, x, y = x + origin = (x, y) # get the data probe = data if data is not None else self.probe # compute - kern = fn( - probe, - origin = origin, - **kwargs - ) + kern = fn(probe, origin=origin, **kwargs) # add to the Probe self.kernel = kern @@ -388,14 +345,8 @@ def get_kernel( if returncalc: return kern - - @staticmethod - def get_probe_kernel_flat( - probe, - origin=None, - bilinear=False - ): + def get_probe_kernel_flat(probe, origin=None, bilinear=False): """ Creates a cross-correlation kernel from the vacuum probe by normalizing and shifting the center. 
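The hunks that follow only reformat this method, so the operation itself is easy to miss: the 'flat' kernel is the probe normalized to unit sum with its center of mass wrapped around to the (0, 0) corner of the array, so that FFT-based cross-correlation against a diffraction pattern peaks at each disk position. A minimal NumPy equivalent, using a whole-pixel np.roll where the real code uses get_shifted_ar for optional subpixel (bilinear) shifts:

import numpy as np
from scipy.ndimage import center_of_mass

def probe_kernel_flat_sketch(probe):
    # Normalize so the kernel sums to 1, then wrap the center of mass
    # around to the array origin (rounded to whole pixels in this sketch).
    x0, y0 = center_of_mass(probe)
    kernel = probe / np.sum(probe)
    return np.roll(kernel, (-int(round(x0)), -int(round(y0))), axis=(0, 1))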
@@ -424,12 +375,13 @@ def get_probe_kernel_flat( # Get CoM if origin is None: from py4DSTEM.process.calibration import get_probe_size - _,xCoM,yCoM = get_probe_size(probe) + + _, xCoM, yCoM = get_probe_size(probe) else: - xCoM,yCoM = origin + xCoM, yCoM = origin # Normalize - probe = probe/np.sum(probe) + probe = probe / np.sum(probe) # Shift center to corners of array probe_kernel = get_shifted_ar(probe, -xCoM, -yCoM, bilinear=bilinear) @@ -437,14 +389,13 @@ def get_probe_kernel_flat( # Return return probe_kernel - @staticmethod def get_probe_kernel_edge_gaussian( probe, sigma, origin=None, bilinear=True, - ): + ): """ Creates a cross-correlation kernel from the probe, subtracting a gaussian from the normalized probe such that the kernel integrates to @@ -476,37 +427,40 @@ def get_probe_kernel_edge_gaussian( # Get CoM if origin is None: from py4DSTEM.process.calibration import get_probe_size - _,xCoM,yCoM = get_probe_size(probe) + + _, xCoM, yCoM = get_probe_size(probe) else: - xCoM,yCoM = origin + xCoM, yCoM = origin # Shift probe to origin probe_kernel = get_shifted_ar(probe, -xCoM, -yCoM, bilinear=bilinear) # Generate normalization kernel # Coordinates - qy,qx = np.meshgrid( - np.mod(np.arange(Q_Ny) + Q_Ny//2, Q_Ny) - Q_Ny//2, - np.mod(np.arange(Q_Nx) + Q_Nx//2, Q_Nx) - Q_Nx//2) - qr2 = (qx**2 + qy**2) + qy, qx = np.meshgrid( + np.mod(np.arange(Q_Ny) + Q_Ny // 2, Q_Ny) - Q_Ny // 2, + np.mod(np.arange(Q_Nx) + Q_Nx // 2, Q_Nx) - Q_Nx // 2, + ) + qr2 = qx**2 + qy**2 # Calculate Gaussian normalization kernel - qstd2 = np.sum(qr2*probe_kernel) / np.sum(probe_kernel) - kernel_norm = np.exp(-qr2 / (2*qstd2*sigma**2)) + qstd2 = np.sum(qr2 * probe_kernel) / np.sum(probe_kernel) + kernel_norm = np.exp(-qr2 / (2 * qstd2 * sigma**2)) # Output normalized kernel - probe_kernel = probe_kernel/np.sum(probe_kernel) - kernel_norm/np.sum(kernel_norm) + probe_kernel = probe_kernel / np.sum(probe_kernel) - kernel_norm / np.sum( + kernel_norm + ) return probe_kernel - @staticmethod def get_probe_kernel_edge_sigmoid( probe, radii, origin=None, - type='sine_squared', + type="sine_squared", bilinear=True, - ): + ): """ Creates a convolution kernel from an average probe, subtracting an annular trench about the probe such that the kernel integrates to zero, then @@ -535,60 +489,61 @@ def get_probe_kernel_edge_sigmoid( from py4DSTEM.process.utils import get_shifted_ar # parse inputs - if isinstance(probe,Probe): + if isinstance(probe, Probe): probe = probe.probe - valid_types = ('logistic','sine_squared') - assert(type in valid_types), "type must be in {}".format(valid_types) + valid_types = ("logistic", "sine_squared") + assert type in valid_types, "type must be in {}".format(valid_types) Q_Nx, Q_Ny = probe.shape - ri,ro = radii + ri, ro = radii # Get CoM if origin is None: from py4DSTEM.process.calibration import get_probe_size - _,xCoM,yCoM = get_probe_size(probe) + + _, xCoM, yCoM = get_probe_size(probe) else: - xCoM,yCoM = origin + xCoM, yCoM = origin # Shift probe to origin probe_kernel = get_shifted_ar(probe, -xCoM, -yCoM, bilinear=bilinear) # Generate normalization kernel # Coordinates - qy,qx = np.meshgrid( - np.mod(np.arange(Q_Ny) + Q_Ny//2, Q_Ny) - Q_Ny//2, - np.mod(np.arange(Q_Nx) + Q_Nx//2, Q_Nx) - Q_Nx//2) + qy, qx = np.meshgrid( + np.mod(np.arange(Q_Ny) + Q_Ny // 2, Q_Ny) - Q_Ny // 2, + np.mod(np.arange(Q_Nx) + Q_Nx // 2, Q_Nx) - Q_Nx // 2, + ) qr = np.sqrt(qx**2 + qy**2) # Calculate sigmoid - if type == 'logistic': - r0 = 0.5*(ro+ri) - sigma = 0.25*(ro-ri) - sigmoid = 1/(1+np.exp((qr-r0)/sigma)) - 
elif type == 'sine_squared': + if type == "logistic": + r0 = 0.5 * (ro + ri) + sigma = 0.25 * (ro - ri) + sigmoid = 1 / (1 + np.exp((qr - r0) / sigma)) + elif type == "sine_squared": sigmoid = (qr - ri) / (ro - ri) sigmoid = np.minimum(np.maximum(sigmoid, 0.0), 1.0) - sigmoid = np.cos((np.pi/2)*sigmoid)**2 + sigmoid = np.cos((np.pi / 2) * sigmoid) ** 2 else: raise Exception("type must be in {}".format(valid_types)) # Output normalized kernel - probe_kernel = probe_kernel/np.sum(probe_kernel) - sigmoid/np.sum(sigmoid) + probe_kernel = probe_kernel / np.sum(probe_kernel) - sigmoid / np.sum(sigmoid) return probe_kernel - def _get_probe_kernel_edge_sigmoid_sine_squared( self, probe, radii, origin=None, **kwargs, - ): + ): return self.get_probe_kernel_edge_sigmoid( probe, radii, - origin = origin, - type='sine_squared', + origin=origin, + type="sine_squared", **kwargs, ) @@ -598,20 +553,7 @@ def _get_probe_kernel_edge_sigmoid_logistic( radii, origin=None, **kwargs, - ): + ): return self.get_probe_kernel_edge_sigmoid( - probe, - radii, - origin = origin, - type='logistic', - **kwargs + probe, radii, origin=origin, type="logistic", **kwargs ) - - - - - - - - - diff --git a/py4DSTEM/braggvectors/threshold.py b/py4DSTEM/braggvectors/threshold.py index 12a342b0c..c13b0a665 100644 --- a/py4DSTEM/braggvectors/threshold.py +++ b/py4DSTEM/braggvectors/threshold.py @@ -5,15 +5,9 @@ from emdfile import tqdmnd, PointListArray - - def threshold_Braggpeaks( - pointlistarray, - minRelativeIntensity, - relativeToPeak, - minPeakSpacing, - maxNumPeaks - ): + pointlistarray, minRelativeIntensity, relativeToPeak, minPeakSpacing, maxNumPeaks +): """ Takes a PointListArray of detected Bragg peaks and applies additional thresholding, returning the thresholded PointListArray. To skip a threshold, @@ -31,16 +25,26 @@ def threshold_Braggpeaks( maxNumPeaks (int): maximum number of allowed peaks per diffraction pattern """ - assert all([item in pointlistarray.dtype.fields for item in ['qx','qy','intensity']]), ( - "pointlistarray must include the coordinates 'qx', 'qy', and 'intensity'.") - for (Rx, Ry) in tqdmnd(pointlistarray.shape[0],pointlistarray.shape[1],desc='Thresholding Bragg disks',unit='DP',unit_scale=True): - pointlist = pointlistarray.get_pointlist(Rx,Ry) - pointlist.sort(coordinate='intensity', order='descending') + assert all( + [item in pointlistarray.dtype.fields for item in ["qx", "qy", "intensity"]] + ), "pointlistarray must include the coordinates 'qx', 'qy', and 'intensity'." 
+ for Rx, Ry in tqdmnd( + pointlistarray.shape[0], + pointlistarray.shape[1], + desc="Thresholding Bragg disks", + unit="DP", + unit_scale=True, + ): + pointlist = pointlistarray.get_pointlist(Rx, Ry) + pointlist.sort(coordinate="intensity", order="descending") # Remove peaks below minRelativeIntensity threshold if minRelativeIntensity is not False: - deletemask = pointlist.data['intensity']/pointlist.data['intensity'][relativeToPeak] < \ - minRelativeIntensity + deletemask = ( + pointlist.data["intensity"] + / pointlist.data["intensity"][relativeToPeak] + < minRelativeIntensity + ) pointlist.remove_points(deletemask) # Remove peaks that are too close together @@ -49,9 +53,11 @@ def threshold_Braggpeaks( deletemask = np.zeros(pointlist.length, dtype=bool) for i in range(pointlist.length): if deletemask[i] == False: - tooClose = ( (pointlist.data['qx']-pointlist.data['qx'][i])**2 + \ - (pointlist.data['qy']-pointlist.data['qy'][i])**2 ) < r2 - tooClose[:i+1] = False + tooClose = ( + (pointlist.data["qx"] - pointlist.data["qx"][i]) ** 2 + + (pointlist.data["qy"] - pointlist.data["qy"][i]) ** 2 + ) < r2 + tooClose[: i + 1] = False deletemask[tooClose] = True pointlist.remove_points(deletemask) @@ -68,11 +74,11 @@ def threshold_Braggpeaks( def universal_threshold( pointlistarray, thresh, - metric='maximum', + metric="maximum", minPeakSpacing=False, maxNumPeaks=False, - name=None - ): + name=None, +): """ Takes a PointListArray of detected Bragg peaks and applies universal thresholding, returning the thresholded PointListArray. To skip a threshold, @@ -104,56 +110,71 @@ def universal_threshold( Returns: (PointListArray): Bragg peaks thresholded by intensity. """ - assert isinstance(pointlistarray,PointListArray) - assert metric in ('maximum','average','median','manual') - assert all([item in pointlistarray.dtype.fields for item in ['qx','qy','intensity']]), ( - "pointlistarray must include the coordinates 'qx', 'qy', and 'intensity'.") + assert isinstance(pointlistarray, PointListArray) + assert metric in ("maximum", "average", "median", "manual") + assert all( + [item in pointlistarray.dtype.fields for item in ["qx", "qy", "intensity"]] + ), "pointlistarray must include the coordinates 'qx', 'qy', and 'intensity'." 
    _pointlistarray = pointlistarray.copy()
     if name is None:
-        _pointlistarray.name = pointlistarray.name+"_unithresh"
-
-    HI_array = np.zeros( (_pointlistarray.shape[0], _pointlistarray.shape[1]) )
-    for (Rx, Ry) in tqdmnd(_pointlistarray.shape[0],_pointlistarray.shape[1],desc='Thresholding Bragg disks',unit='DP',unit_scale=True):
-        pointlist = _pointlistarray.get_pointlist(Rx,Ry)
-        if pointlist.data.shape[0] == 0:
-            top_value = np.nan
-        else:
-            HI_array[Rx, Ry] = np.max(pointlist.data['intensity'])
-
-    if metric=='maximum':
-        _thresh = np.max(HI_array)*thresh
-    elif metric=='average':
-        _thresh = np.nanmean(HI_array)*thresh
-    elif metric=='median':
-        _thresh = np.median(HI_array)*thresh
+        _pointlistarray.name = pointlistarray.name + "_unithresh"
+
+    HI_array = np.zeros((_pointlistarray.shape[0], _pointlistarray.shape[1]))
+    for Rx, Ry in tqdmnd(
+        _pointlistarray.shape[0],
+        _pointlistarray.shape[1],
+        desc="Thresholding Bragg disks",
+        unit="DP",
+        unit_scale=True,
+    ):
+        pointlist = _pointlistarray.get_pointlist(Rx, Ry)
+        if pointlist.data.shape[0] > 0:
+            HI_array[Rx, Ry] = np.max(pointlist.data["intensity"])
+
+    if metric == "maximum":
+        _thresh = np.max(HI_array) * thresh
+    elif metric == "average":
+        _thresh = np.nanmean(HI_array) * thresh
+    elif metric == "median":
+        _thresh = np.median(HI_array) * thresh
     else:
         _thresh = thresh
 
-    for (Rx, Ry) in tqdmnd(_pointlistarray.shape[0],_pointlistarray.shape[1],desc='Thresholding Bragg disks',unit='DP',unit_scale=True):
-        pointlist = _pointlistarray.get_pointlist(Rx,Ry)
+    for Rx, Ry in tqdmnd(
+        _pointlistarray.shape[0],
+        _pointlistarray.shape[1],
+        desc="Thresholding Bragg disks",
+        unit="DP",
+        unit_scale=True,
+    ):
+        pointlist = _pointlistarray.get_pointlist(Rx, Ry)
 
-        # Remove peaks below minRelativeIntensity threshold
-        deletemask = pointlist.data['intensity'] < _thresh
-        pointlist.remove(deletemask)
+        # Remove peaks below the intensity threshold
+        deletemask = pointlist.data["intensity"] < _thresh
+        pointlist.remove(deletemask)
 
-        # Remove peaks that are too close together
-        if maxNumPeaks is not False:
-            r2 = minPeakSpacing**2
-            deletemask = np.zeros(pointlist.length, dtype=bool)
-            for i in range(pointlist.length):
-                if deletemask[i] == False:
-                    tooClose = ( (pointlist.data['qx']-pointlist.data['qx'][i])**2 + \
-                                 (pointlist.data['qy']-pointlist.data['qy'][i])**2 ) < r2
-                    tooClose[:i+1] = False
-                    deletemask[tooClose] = True
-            pointlist.remove_points(deletemask)
-
-        # Keep only up to maxNumPeaks
-        if maxNumPeaks is not False:
-            if maxNumPeaks < pointlist.length:
-                deletemask = np.zeros(pointlist.length, dtype=bool)
-                deletemask[maxNumPeaks:] = True
-                pointlist.remove_points(deletemask)
+        # Remove peaks that are too close together
+        if minPeakSpacing is not False:
+            r2 = minPeakSpacing**2
+            deletemask = np.zeros(pointlist.length, dtype=bool)
+            for i in range(pointlist.length):
+                if deletemask[i] == False:
+                    tooClose = (
+                        (pointlist.data["qx"] - pointlist.data["qx"][i]) ** 2
+                        + (pointlist.data["qy"] - pointlist.data["qy"][i]) ** 2
+                    ) < r2
+                    tooClose[: i + 1] = False
+                    deletemask[tooClose] = True
+            pointlist.remove_points(deletemask)
+
+        # Keep only up to maxNumPeaks
+        if maxNumPeaks is not False:
+            if maxNumPeaks < pointlist.length:
+                deletemask = np.zeros(pointlist.length, dtype=bool)
+                deletemask[maxNumPeaks:] = True
+                pointlist.remove_points(deletemask)
 
     return _pointlistarray
@@ -170,15 +191,28 @@ def get_pointlistarray_intensities(pointlistarray):
     Returns:
         (ndarray): all detected peak intensities
     """
-    assert np.all([name in pointlistarray.dtype.names for name in ['qx','qy','intensity']]), (
-        
"pointlistarray coords must include coordinates: 'qx', 'qy', 'intensity'.") - assert 'qx' in pointlistarray.dtype.names, "pointlistarray coords must include 'qx' and 'qy'" - assert 'qy' in pointlistarray.dtype.names, "pointlistarray coords must include 'qx' and 'qy'" - assert 'intensity' in pointlistarray.dtype.names, "pointlistarray coords must include 'intensity'" + assert np.all( + [name in pointlistarray.dtype.names for name in ["qx", "qy", "intensity"]] + ), "pointlistarray coords must include coordinates: 'qx', 'qy', 'intensity'." + assert ( + "qx" in pointlistarray.dtype.names + ), "pointlistarray coords must include 'qx' and 'qy'" + assert ( + "qy" in pointlistarray.dtype.names + ), "pointlistarray coords must include 'qx' and 'qy'" + assert ( + "intensity" in pointlistarray.dtype.names + ), "pointlistarray coords must include 'intensity'" first_pass = True - for (Rx, Ry) in tqdmnd(pointlistarray.shape[0],pointlistarray.shape[1],desc='Getting disk intensities',unit='DP',unit_scale=True): - pointlist = pointlistarray.get_pointlist(Rx,Ry) + for Rx, Ry in tqdmnd( + pointlistarray.shape[0], + pointlistarray.shape[1], + desc="Getting disk intensities", + unit="DP", + unit_scale=True, + ): + pointlist = pointlistarray.get_pointlist(Rx, Ry) for i in range(pointlist.length): if first_pass: peak_intensities = np.array(pointlist.data[i][2]) @@ -189,10 +223,3 @@ def get_pointlistarray_intensities(pointlistarray): temp_array = np.reshape(temp_array, 1) peak_intensities = np.append(peak_intensities, temp_array) return peak_intensities - - - - - - - diff --git a/py4DSTEM/data/__init__.py b/py4DSTEM/data/__init__.py index ff9429fe0..ac697918d 100644 --- a/py4DSTEM/data/__init__.py +++ b/py4DSTEM/data/__init__.py @@ -5,4 +5,3 @@ from py4DSTEM.data.diffractionslice import DiffractionSlice from py4DSTEM.data.realslice import RealSlice from py4DSTEM.data.qpoints import QPoints - diff --git a/py4DSTEM/data/calibration.py b/py4DSTEM/data/calibration.py index 50ec8f6f9..a31f098d4 100644 --- a/py4DSTEM/data/calibration.py +++ b/py4DSTEM/data/calibration.py @@ -5,9 +5,10 @@ from typing import Optional from warnings import warn -from emdfile import Metadata,Root +from emdfile import Metadata, Root from py4DSTEM.data.propagating_calibration import call_calibrate + class Calibration(Metadata): """ Stores calibration measurements. @@ -178,23 +179,21 @@ class Calibration(Metadata): To attach `data` to a different location in the calibration instance's tree, use `node.attach( data )`. See the Data.attach docstring. 
""" + def __init__( self, - name: Optional[str] = 'calibration', + name: Optional[str] = "calibration", root: Optional[Root] = None, - ): + ): """ Args: name (optional, str): """ - Metadata.__init__( - self, - name=name - ) + Metadata.__init__(self, name=name) # Set the root if root is None: - root = Root( name="py4DSTEM_root" ) + root = Root(name="py4DSTEM_root") self.set_root(root) # List to hold objects that will re-`calibrate` when @@ -202,26 +201,28 @@ def __init__( self._targets = [] # set initial pixel values - self['Q_pixel_size'] = 1 - self['R_pixel_size'] = 1 - self['Q_pixel_units'] = 'pixels' - self['R_pixel_units'] = 'pixels' - + self["Q_pixel_size"] = 1 + self["R_pixel_size"] = 1 + self["Q_pixel_units"] = "pixels" + self["R_pixel_units"] = "pixels" # EMD root property @property def root(self): return self._root + @root.setter def root(self): - raise Exception("Calibration.root does not support assignment; to change the root, use self.set_root") - def set_root(self,root): - assert(isinstance(root,Root)), f"root must be a Root, not type {type(root)}" - self._root = root + raise Exception( + "Calibration.root does not support assignment; to change the root, use self.set_root" + ) + def set_root(self, root): + assert isinstance(root, Root), f"root must be a Root, not type {type(root)}" + self._root = root # Attach data to the calibration instance - def attach(self,data): + def attach(self, data): """ Attach `data` to this calibration instance, placing it in the top level of the Calibration instance's tree. If `data` was in a @@ -231,12 +232,12 @@ def attach(self,data): should be, after attaching it run `self.register_target(data)`. """ from py4DSTEM.data import Data - assert(isinstance(data,Data)), f"data must be a Data instance" - self.root.attach(data) + assert isinstance(data, Data), f"data must be a Data instance" + self.root.attach(data) # Register for auto-calibration - def register_target(self,new_target): + def register_target(self, new_target): """ Register an object to recieve calls to it `calibrate` method when certain calibrations get updated @@ -244,7 +245,7 @@ def register_target(self,new_target): if new_target not in self._targets: self._targets.append(new_target) - def unregister_target(self,target): + def unregister_target(self, target): """ Unlink an object from recieving calls to `calibrate` when certain calibration values are changed @@ -256,227 +257,276 @@ def unregister_target(self,target): def targets(self): return tuple(self._targets) - - ######### Begin Calibration Metadata Params ######### # pixel size/units @call_calibrate - def set_Q_pixel_size(self,x): - self._params['Q_pixel_size'] = x + def set_Q_pixel_size(self, x): + self._params["Q_pixel_size"] = x + def get_Q_pixel_size(self): - return self._get_value('Q_pixel_size') + return self._get_value("Q_pixel_size") + # aliases @property def Q_pixel_size(self): return self.get_Q_pixel_size() + @Q_pixel_size.setter - def Q_pixel_size(self,x): + def Q_pixel_size(self, x): self.set_Q_pixel_size(x) + @property def qpixsize(self): return self.get_Q_pixel_size() + @qpixsize.setter - def qpixsize(self,x): + def qpixsize(self, x): self.set_Q_pixel_size(x) @call_calibrate - def set_R_pixel_size(self,x): - self._params['R_pixel_size'] = x + def set_R_pixel_size(self, x): + self._params["R_pixel_size"] = x + def get_R_pixel_size(self): - return self._get_value('R_pixel_size') + return self._get_value("R_pixel_size") + # aliases @property def R_pixel_size(self): return self.get_R_pixel_size() + @R_pixel_size.setter - 
-    def R_pixel_size(self,x):
+    def R_pixel_size(self, x):
         self.set_R_pixel_size(x)
+
     @property
     def rpixsize(self):
         return self.get_R_pixel_size()
+
     @rpixsize.setter
-    def rpixsize(self,x):
+    def rpixsize(self, x):
         self.set_R_pixel_size(x)
 
     @call_calibrate
-    def set_Q_pixel_units(self,x):
-        assert(x in ('pixels','A^-1','mrad')), f"Q pixel units must be 'A^-1', 'mrad' or 'pixels'."
-        self._params['Q_pixel_units'] = x
+    def set_Q_pixel_units(self, x):
+        assert x in (
+            "pixels",
+            "A^-1",
+            "mrad",
+        ), f"Q pixel units must be 'A^-1', 'mrad' or 'pixels'."
+        self._params["Q_pixel_units"] = x
+
     def get_Q_pixel_units(self):
-        return self._get_value('Q_pixel_units')
+        return self._get_value("Q_pixel_units")
+
     # aliases
     @property
     def Q_pixel_units(self):
         return self.get_Q_pixel_units()
+
     @Q_pixel_units.setter
-    def Q_pixel_units(self,x):
+    def Q_pixel_units(self, x):
         self.set_Q_pixel_units(x)
+
     @property
     def qpixunits(self):
         return self.get_Q_pixel_units()
+
     @qpixunits.setter
-    def qpixunits(self,x):
+    def qpixunits(self, x):
         self.set_Q_pixel_units(x)
 
     @call_calibrate
-    def set_R_pixel_units(self,x):
-        self._params['R_pixel_units'] = x
+    def set_R_pixel_units(self, x):
+        self._params["R_pixel_units"] = x
+
     def get_R_pixel_units(self):
-        return self._get_value('R_pixel_units')
+        return self._get_value("R_pixel_units")
+
     # aliases
     @property
     def R_pixel_units(self):
         return self.get_R_pixel_units()
+
     @R_pixel_units.setter
-    def R_pixel_units(self,x):
+    def R_pixel_units(self, x):
         self.set_R_pixel_units(x)
+
     @property
     def rpixunits(self):
         return self.get_R_pixel_units()
+
     @rpixunits.setter
-    def rpixunits(self,x):
+    def rpixunits(self, x):
         self.set_R_pixel_units(x)
 
-
     # origin
 
     # qx0,qy0
-    def set_qx0(self,x):
-        self._params['qx0'] = x
+    def set_qx0(self, x):
+        self._params["qx0"] = x
         x = np.asarray(x)
         qx0_mean = np.mean(x)
-        qx0_shift = x-qx0_mean
-        self._params['qx0_mean'] = qx0_mean
-        self._params['qx0_shift'] = qx0_shift
-    def set_qx0_mean(self,x):
-        self._params['qx0_mean'] = x
-    def get_qx0(self,rx=None,ry=None):
-        return self._get_value('qx0',rx,ry)
+        qx0_shift = x - qx0_mean
+        self._params["qx0_mean"] = qx0_mean
+        self._params["qx0_shift"] = qx0_shift
+
+    def set_qx0_mean(self, x):
+        self._params["qx0_mean"] = x
+
+    def get_qx0(self, rx=None, ry=None):
+        return self._get_value("qx0", rx, ry)
+
     def get_qx0_mean(self):
-        return self._get_value('qx0_mean')
-    def get_qx0shift(self,rx=None,ry=None):
-        return self._get_value('qx0_shift',rx,ry)
+        return self._get_value("qx0_mean")
+
+    def get_qx0shift(self, rx=None, ry=None):
+        return self._get_value("qx0_shift", rx, ry)
 
-    def set_qy0(self,x):
-        self._params['qy0'] = x
+    def set_qy0(self, x):
+        self._params["qy0"] = x
         x = np.asarray(x)
         qy0_mean = np.mean(x)
-        qy0_shift = x-qy0_mean
-        self._params['qy0_mean'] = qy0_mean
-        self._params['qy0_shift'] = qy0_shift
-    def set_qy0_mean(self,x):
-        self._params['qy0_mean'] = x
-    def get_qy0(self,rx=None,ry=None):
-        return self._get_value('qy0',rx,ry)
+        qy0_shift = x - qy0_mean
+        self._params["qy0_mean"] = qy0_mean
+        self._params["qy0_shift"] = qy0_shift
+
+    def set_qy0_mean(self, x):
+        self._params["qy0_mean"] = x
+
+    def get_qy0(self, rx=None, ry=None):
+        return self._get_value("qy0", rx, ry)
+
     def get_qy0_mean(self):
-        return self._get_value('qy0_mean')
-    def get_qy0shift(self,rx=None,ry=None):
-        return self._get_value('qy0_shift',rx,ry)
+        return self._get_value("qy0_mean")
+
+    def get_qy0shift(self, rx=None, ry=None):
+        return self._get_value("qy0_shift", rx, ry)
+
+    def set_qx0_meas(self, x):
+        self._params["qx0_meas"] = x
+
+    def get_qx0_meas(self, rx=None, ry=None):
+        return self._get_value("qx0_meas", rx, ry)
 
-    def set_qx0_meas(self,x):
-        self._params['qx0_meas'] = x
-    def get_qx0_meas(self,rx=None,ry=None):
-        return self._get_value('qx0_meas',rx,ry)
+    def set_qy0_meas(self, x):
+        self._params["qy0_meas"] = x
 
-    def set_qy0_meas(self,x):
-        self._params['qy0_meas'] = x
-    def get_qy0_meas(self,rx=None,ry=None):
-        return self._get_value('qy0_meas',rx,ry)
+    def get_qy0_meas(self, rx=None, ry=None):
+        return self._get_value("qy0_meas", rx, ry)
 
-    def set_origin_meas_mask(self,x):
-        self._params['origin_meas_mask'] = x
-    def get_origin_meas_mask(self,rx=None,ry=None):
-        return self._get_value('origin_meas_mask',rx,ry)
+    def set_origin_meas_mask(self, x):
+        self._params["origin_meas_mask"] = x
+
+    def get_origin_meas_mask(self, rx=None, ry=None):
+        return self._get_value("origin_meas_mask", rx, ry)
 
     # aliases
     @property
     def qx0(self):
         return self.get_qx0()
+
     @qx0.setter
-    def qx0(self,x):
+    def qx0(self, x):
         self.set_qx0(x)
+
     @property
     def qx0_mean(self):
         return self.get_qx0_mean()
+
     @qx0_mean.setter
-    def qx0_mean(self,x):
+    def qx0_mean(self, x):
         self.set_qx0_mean(x)
+
     @property
     def qx0shift(self):
         return self.get_qx0shift()
+
     @property
     def qy0(self):
         return self.get_qy0()
+
     @qy0.setter
-    def qy0(self,x):
+    def qy0(self, x):
         self.set_qy0(x)
+
     @property
     def qy0_mean(self):
         return self.get_qy0_mean()
+
     @qy0_mean.setter
-    def qy0_mean(self,x):
+    def qy0_mean(self, x):
         self.set_qy0_mean(x)
+
     @property
     def qy0_shift(self):
         return self.get_qy0shift()
+
     @property
     def qx0_meas(self):
         return self.get_qx0_meas()
+
     @qx0_meas.setter
-    def qx0_meas(self,x):
+    def qx0_meas(self, x):
         self.set_qx0_meas(x)
+
     @property
     def qy0_meas(self):
         return self.get_qy0_meas()
+
     @qy0_meas.setter
-    def qy0_meas(self,x):
+    def qy0_meas(self, x):
         self.set_qy0_meas(x)
+
     @property
     def origin_meas_mask(self):
         return self.get_origin_meas_mask()
+
     @origin_meas_mask.setter
-    def origin_meas_mask(self,x):
+    def origin_meas_mask(self, x):
         self.set_origin_meas_mask(x)
 
-
     # origin = (qx0,qy0)
     @call_calibrate
-    def set_origin(self,x):
+    def set_origin(self, x):
         """
         Args:
             x (2-tuple of numbers or of 2D, R-shaped arrays): the origin
         """
-        qx0,qy0 = x
+        qx0, qy0 = x
         self.set_qx0(qx0)
         self.set_qy0(qy0)
-    def get_origin(self,rx=None,ry=None):
-        qx0 = self._get_value('qx0',rx,ry)
-        qy0 = self._get_value('qy0',rx,ry)
-        ans = (qx0,qy0)
+
+    def get_origin(self, rx=None, ry=None):
+        qx0 = self._get_value("qx0", rx, ry)
+        qy0 = self._get_value("qy0", rx, ry)
+        ans = (qx0, qy0)
         if any([x is None for x in ans]):
             ans = None
         return ans
+
     def get_origin_mean(self):
-        qx0 = self._get_value('qx0_mean')
-        qy0 = self._get_value('qy0_mean')
-        return qx0,qy0
-    def get_origin_shift(self,rx=None,ry=None):
-        qx0 = self._get_value('qx0_shift',rx,ry)
-        qy0 = self._get_value('qy0_shift',rx,ry)
-        ans = (qx0,qy0)
+        qx0 = self._get_value("qx0_mean")
+        qy0 = self._get_value("qy0_mean")
+        return qx0, qy0
+
+    def get_origin_shift(self, rx=None, ry=None):
+        qx0 = self._get_value("qx0_shift", rx, ry)
+        qy0 = self._get_value("qy0_shift", rx, ry)
+        ans = (qx0, qy0)
         if any([x is None for x in ans]):
             ans = None
         return ans
 
-    def set_origin_meas(self,x):
+    def set_origin_meas(self, x):
         """
         Args:
             x (2-tuple or 3-tuple of 2D R-shaped arrays): qx0,qy0,[mask]
         """
-        qx0,qy0 = x[0],x[1]
+        qx0, qy0 = x[0], x[1]
         self.set_qx0_meas(qx0)
         self.set_qy0_meas(qy0)
         try:
@@ -484,10 +534,11 @@ def set_origin_meas(self,x):
             self.set_origin_meas_mask(m)
         except IndexError:
             pass
-    def 
get_origin_meas(self,rx=None,ry=None): - qx0 = self._get_value('qx0_meas',rx,ry) - qy0 = self._get_value('qy0_meas',rx,ry) - ans = (qx0,qy0) + + def get_origin_meas(self, rx=None, ry=None): + qx0 = self._get_value("qx0_meas", rx, ry) + qy0 = self._get_value("qy0_meas", rx, ry) + ans = (qx0, qy0) if any([x is None for x in ans]): ans = None return ans @@ -496,119 +547,138 @@ def get_origin_meas(self,rx=None,ry=None): @property def origin(self): return self.get_origin() + @origin.setter - def origin(self,x): + def origin(self, x): self.set_origin(x) + @property def origin_meas(self): return self.get_origin_meas() + @origin_meas.setter - def origin_meas(self,x): + def origin_meas(self, x): self.set_origin_meas(x) + @property def origin_shift(self): return self.get_origin_shift() - # ellipse @call_calibrate - def set_a(self,x): - self._params['a'] = x - def get_a(self,rx=None,ry=None): - return self._get_value('a',rx,ry) + def set_a(self, x): + self._params["a"] = x + + def get_a(self, rx=None, ry=None): + return self._get_value("a", rx, ry) + @call_calibrate - def set_b(self,x): - self._params['b'] = x - def get_b(self,rx=None,ry=None): - return self._get_value('b',rx,ry) + def set_b(self, x): + self._params["b"] = x + + def get_b(self, rx=None, ry=None): + return self._get_value("b", rx, ry) + @call_calibrate - def set_theta(self,x): - self._params['theta'] = x - def get_theta(self,rx=None,ry=None): - return self._get_value('theta',rx,ry) + def set_theta(self, x): + self._params["theta"] = x + + def get_theta(self, rx=None, ry=None): + return self._get_value("theta", rx, ry) @call_calibrate - def set_ellipse(self,x): + def set_ellipse(self, x): """ Args: x (3-tuple): (a,b,theta) """ - a,b,theta = x - self._params['a'] = a - self._params['b'] = b - self._params['theta'] = theta + a, b, theta = x + self._params["a"] = a + self._params["b"] = b + self._params["theta"] = theta @call_calibrate - def set_p_ellipse(self,x): + def set_p_ellipse(self, x): """ Args: x (5-tuple): (qx0,qy0,a,b,theta) NOTE: does *not* change qx0,qy0! 
""" - _,_,a,b,theta = x - self._params['a'] = a - self._params['b'] = b - self._params['theta'] = theta - def get_ellipse(self,rx=None,ry=None): - a = self.get_a(rx,ry) - b = self.get_b(rx,ry) - theta = self.get_theta(rx,ry) - ans = (a,b,theta) + _, _, a, b, theta = x + self._params["a"] = a + self._params["b"] = b + self._params["theta"] = theta + + def get_ellipse(self, rx=None, ry=None): + a = self.get_a(rx, ry) + b = self.get_b(rx, ry) + theta = self.get_theta(rx, ry) + ans = (a, b, theta) if any([x is None for x in ans]): ans = None return ans - def get_p_ellipse(self,rx=None,ry=None): - qx0,qy0 = self.get_origin(rx,ry) - a,b,theta = self.get_ellipse(rx,ry) - return (qx0,qy0,a,b,theta) + + def get_p_ellipse(self, rx=None, ry=None): + qx0, qy0 = self.get_origin(rx, ry) + a, b, theta = self.get_ellipse(rx, ry) + return (qx0, qy0, a, b, theta) # aliases @property def a(self): return self.get_a() + @a.setter - def a(self,x): + def a(self, x): self.set_a(x) + @property def b(self): return self.get_b() + @b.setter - def b(self,x): + def b(self, x): self.set_b(x) + @property def theta(self): return self.get_theta() + @theta.setter - def theta(self,x): + def theta(self, x): self.set_theta(x) + @property def p_ellipse(self): return self.get_p_ellipse() + @p_ellipse.setter - def p_ellipse(self,x): + def p_ellipse(self, x): self.set_p_ellipse(x) + @property def ellipse(self): return self.get_ellipse() + @ellipse.setter - def ellipse(self,x): + def ellipse(self, x): self.set_ellipse(x) - - # Q/R-space rotation and flip @call_calibrate - def set_QR_rotation_degrees(self,x): - self._params['QR_rotation_degrees'] = x + def set_QR_rotation_degrees(self, x): + self._params["QR_rotation_degrees"] = x + def get_QR_rotation_degrees(self): - return self._get_value('QR_rotation_degrees') + return self._get_value("QR_rotation_degrees") @call_calibrate - def set_QR_flip(self,x): - self._params['QR_flip'] = x + def set_QR_flip(self, x): + self._params["QR_flip"] = x + def get_QR_flip(self): - return self._get_value('QR_flip') + return self._get_value("QR_flip") @call_calibrate def set_QR_rotflip(self, rot_flip): @@ -618,46 +688,50 @@ def set_QR_rotflip(self, rot_flip): rot (number): rotation in degrees flip (bool): True indicates a Q/R axes flip """ - rot,flip = rot_flip - self._params['QR_rotation_degrees'] = rot - self._params['QR_flip'] = flip + rot, flip = rot_flip + self._params["QR_rotation_degrees"] = rot + self._params["QR_flip"] = flip + def get_QR_rotflip(self): rot = self.get_QR_rotation_degrees() flip = self.get_QR_flip() if rot is None or flip is None: return None - return (rot,flip) + return (rot, flip) # aliases @property def QR_rotation_degrees(self): return self.get_QR_rotation_degrees() + @QR_rotation_degrees.setter - def QR_rotation_degrees(self,x): + def QR_rotation_degrees(self, x): self.set_QR_rotation_degrees(x) + @property def QR_flip(self): return self.get_QR_flip() + @QR_flip.setter - def QR_flip(self,x): + def QR_flip(self, x): self.set_QR_flip(x) + @property def QR_rotflip(self): return self.get_QR_rotflip() + @QR_rotflip.setter - def QR_rotflip(self,x): + def QR_rotflip(self, x): self.set_QR_rotflip(x) - - - - # probe - def set_probe_semiangle(self,x): - self._params['probe_semiangle'] = x + def set_probe_semiangle(self, x): + self._params["probe_semiangle"] = x + def get_probe_semiangle(self): - return self._get_value('probe_semiangle') + return self._get_value("probe_semiangle") + def set_probe_param(self, x): """ Args: @@ -667,125 +741,128 @@ def set_probe_param(self, x): 
        self.set_probe_semiangle(probe_semiangle)
         self.set_qx0_mean(qx0)
         self.set_qy0_mean(qy0)
+
     def get_probe_param(self):
-        probe_semiangle = self._get_value('probe_semiangle')
-        qx0 = self._get_value('qx0')
-        qy0 = self._get_value('qy0')
-        ans = (probe_semiangle,qx0,qy0)
+        probe_semiangle = self._get_value("probe_semiangle")
+        qx0 = self._get_value("qx0")
+        qy0 = self._get_value("qy0")
+        ans = (probe_semiangle, qx0, qy0)
         if any([x is None for x in ans]):
             ans = None
         return ans
 
-    def set_convergence_semiangle_pixels(self,x):
-        self._params['convergence_semiangle_pixels'] = x
+    def set_convergence_semiangle_pixels(self, x):
+        self._params["convergence_semiangle_pixels"] = x
+
     def get_convergence_semiangle_pixels(self):
-        return self._get_value('convergence_semiangle_pixels')
-    def set_convergence_semiangle_mrad(self,x):
-        self._params['convergence_semiangle_mrad'] = x
+        return self._get_value("convergence_semiangle_pixels")
+
+    def set_convergence_semiangle_mrad(self, x):
+        self._params["convergence_semiangle_mrad"] = x
+
     def get_convergence_semiangle_mrad(self):
-        return self._get_value('convergence_semiangle_mrad')
-    def set_probe_center(self,x):
-        self._params['probe_center'] = x
+        return self._get_value("convergence_semiangle_mrad")
+
+    def set_probe_center(self, x):
+        self._params["probe_center"] = x
+
     def get_probe_center(self):
-        return self._get_value('probe_center')
+        return self._get_value("probe_center")
 
-    #aliases
+    # aliases
     @property
     def probe_semiangle(self):
         return self.get_probe_semiangle()
+
     @probe_semiangle.setter
-    def probe_semiangle(self,x):
+    def probe_semiangle(self, x):
         self.set_probe_semiangle(x)
+
     @property
     def probe_param(self):
         return self.get_probe_param()
+
     @probe_param.setter
-    def probe_param(self,x):
+    def probe_param(self, x):
         self.set_probe_param(x)
+
     @property
     def probe_center(self):
         return self.get_probe_center()
+
     @probe_center.setter
-    def probe_center(self,x):
+    def probe_center(self, x):
         self.set_probe_center(x)
+
     @property
     def probe_convergence_semiangle_pixels(self):
         return self.get_convergence_semiangle_pixels()
+
     @probe_convergence_semiangle_pixels.setter
-    def probe_convergence_semiangle_pixels(self,x):
+    def probe_convergence_semiangle_pixels(self, x):
         self.set_convergence_semiangle_pixels(x)
+
     @property
     def probe_convergence_semiangle_mrad(self):
         return self.get_convergence_semiangle_mrad()
+
     @probe_convergence_semiangle_mrad.setter
-    def probe_convergence_semiangle_mrad(self,x):
+    def probe_convergence_semiangle_mrad(self, x):
         self.set_convergence_semiangle_mrad(x)
 
-
-
-
-
     ######## End Calibration Metadata Params ########
 
-
-
     # calibrate targets
     @call_calibrate
     def calibrate(self):
         pass
 
-
-
     # For parameters which can have 2D or (2+n)D array values,
     # this function enables returning the value(s) at a 2D position,
     # rather than the whole array
-    def _get_value(self,p,rx=None,ry=None):
-        """ Enables returning the value of a pixel (rx,ry),
-            if these are passed and `p` is an appropriate array
+    def _get_value(self, p, rx=None, ry=None):
+        """Enables returning the value of a pixel (rx,ry),
+        if these are passed and `p` is an appropriate array
         """
         v = self._params.get(p)
         if v is None:
             return v
-        if (rx is None) or (ry is None) or (not isinstance(v,np.ndarray)):
+        if (rx is None) or (ry is None) or (not isinstance(v, np.ndarray)):
             return v
         else:
             er = f"`rx` and `ry` must be ints; got values {rx} and {ry}"
-            assert np.all([isinstance(i,(int,np.integer)) for i in (rx,ry)]), er
-            return v[rx,ry]
-
-
+            assert np.all([isinstance(i, (int, np.integer)) for i in (rx, ry)]), er
+            return v[rx, ry]
 
-    def copy(self,name=None):
-        """
-        """
-        if name is None: name = self.name+"_copy"
+    def copy(self, name=None):
+        """ """
+        if name is None:
+            name = self.name + "_copy"
         cal = Calibration(name=name)
         cal._params.update(self._params)
         return cal
 
-
     # HDF5 i/o
 
     # write is inherited from Metadata
-    def to_h5(self,group):
+    def to_h5(self, group):
         """
         Saves the metadata dictionary _params to group, then adds
         the calibration's targets list
         """
         # Add targets list to metadata
         targets = [x._treepath for x in self.targets]
-        self['_target_paths'] = targets
+        self["_target_paths"] = targets
         # Save the metadata
-        Metadata.to_h5(self,group)
-        del(self._params['_target_paths'])
+        Metadata.to_h5(self, group)
+        del self._params["_target_paths"]
 
     # read
     @classmethod
-    def from_h5(cls,group):
+    def from_h5(cls, group):
         """
         Takes a valid group for an HDF5 file object which is open in
         read mode. Determines if it's a valid Metadata representation, and
@@ -802,15 +879,11 @@ def from_h5(cls,group):
         metadata = Metadata.from_h5(group)
 
         # convert it to a Calibration instance
-        cal = Calibration(name = metadata.name)
+        cal = Calibration(name=metadata.name)
         cal._params.update(metadata._params)
 
         # return
         return cal
-
-
 
 ########## End of class ##########
-
-
diff --git a/py4DSTEM/data/data.py b/py4DSTEM/data/data.py
index 3a7b415db..ed5db2852 100644
--- a/py4DSTEM/data/data.py
+++ b/py4DSTEM/data/data.py
@@ -74,51 +74,54 @@ class Data:
 
     See also the Calibration docstring.
     """
 
-    def __init__(
-        self,
-        calibration = None
-    ):
-        assert(isinstance(self,Node)), "Data instances must inherit from Node"
-        assert(calibration is None or isinstance(calibration,Calibration)), f"calibration must be None or a Calibration instance, not type {type(calibration)}"
-
+    def __init__(self, calibration=None):
+        assert isinstance(self, Node), "Data instances must inherit from Node"
+        assert calibration is None or isinstance(
+            calibration, Calibration
+        ), f"calibration must be None or a Calibration instance, not type {type(calibration)}"
         # set up calibration + EMD tree
         if calibration is None:
             if self.root is None:
-                root = Root( name=self.name+"_root" )
-                root.tree( self )
+                root = Root(name=self.name + "_root")
+                root.tree(self)
                 self.calibration = Calibration()
-            elif 'calibration' not in self.root.metadata:
+            elif "calibration" not in self.root.metadata:
                 self.calibration = Calibration()
             else:
                 pass
         elif calibration.root is None:
             if self.root is None:
-                root = Root( name=self.name+"_root" )
+                root = Root(name=self.name + "_root")
                 root.tree(self)
                 self.calibration = calibration
-            elif 'calibration' not in self.root.metadata:
+            elif "calibration" not in self.root.metadata:
                 self.calibration = calibration
             else:
-                warnings.warn("A calibration was passed to instantiate a new Data instance, but the instance already has a calibration. The passed calibration *WAS NOT* attached. To attach the new calibration and overwrite the existing calibration, use `data.calibration = new_calibration`")
+                warnings.warn(
+                    "A calibration was passed to instantiate a new Data instance, but the instance already has a calibration. The passed calibration *WAS NOT* attached. To attach the new calibration and overwrite the existing calibration, use `data.calibration = new_calibration`"
+                )
                 pass
         else:
             if self.root is None:
                 calibration.root.tree(self)
                 self.calibration = calibration
-            elif 'calibration' not in self.root.metadata:
+            elif "calibration" not in self.root.metadata:
                 self.calibration = calibration
-                warnings.warn("A calibration was passed to instantiate a new Data instance. The Data already had a root but no calibration, and the calibration already exists in a different root. The calibration has been added and now lives in both roots, and can therefore be modified from either place!")
+                warnings.warn(
+                    "A calibration was passed to instantiate a new Data instance. The Data already had a root but no calibration, and the calibration already exists in a different root. The calibration has been added and now lives in both roots, and can therefore be modified from either place!"
+                )
             else:
-                warnings.warn("A calibration was passed to instantiate a new Data instance, however the Data already has a root and calibration, and the calibration already has a root!! The passed calibration *WAS NOT* attached. To attach the new calibration and overwrite the existing calibration, use `data.calibration = new_calibration.")
+                warnings.warn(
+                    "A calibration was passed to instantiate a new Data instance, however the Data already has a root and calibration, and the calibration already has a root!! The passed calibration *WAS NOT* attached. To attach the new calibration and overwrite the existing calibration, use `data.calibration = new_calibration`."
+                )
 
     # calibration property
 
     @property
     def calibration(self):
         try:
-            return self.root.metadata['calibration']
+            return self.root.metadata["calibration"]
         except KeyError:
             warnings.warn("No calibration metadata found in root, returning None")
             return None
@@ -128,23 +131,24 @@ def calibration(self):
 
     @calibration.setter
     def calibration(self, x):
-        assert( isinstance( x, Calibration) )
-        if 'calibration' in self.root.metadata.keys():
-            warnings.warn("A 'calibration' key already exists in root.metadata - overwriting...")
-        x.name = 'calibration'
-        self.root.metadata['calibration'] = x
-
+        assert isinstance(x, Calibration)
+        if "calibration" in self.root.metadata.keys():
+            warnings.warn(
+                "A 'calibration' key already exists in root.metadata - overwriting..."
+            )
+        x.name = "calibration"
+        self.root.metadata["calibration"] = x
 
     # transfer trees
 
-    def attach(self,node):
+    def attach(self, node):
         """
         Attach `node` to the current object's tree, attaching calibration and
         detaching calibrations as needed.
         """
-        assert(isinstance(node,Node)), f"node must be a Node, not type {type(node)}"
+        assert isinstance(node, Node), f"node must be a Node, not type {type(node)}"
         register = False
-        if hasattr(node,'calibration'):
+        if hasattr(node, "calibration"):
             if node.calibration is not None:
                 if node in node.calibration._targets:
                     register = True
@@ -155,6 +159,3 @@ def attach(self,node):
         self.graft(node)
         if register:
             self.calibration.register_target(node)
-
-
-
diff --git a/py4DSTEM/data/diffractionslice.py b/py4DSTEM/data/diffractionslice.py
index 40104cf80..4a6d1b9c2 100644
--- a/py4DSTEM/data/diffractionslice.py
+++ b/py4DSTEM/data/diffractionslice.py
@@ -4,23 +4,23 @@
 
 from emdfile import Array
 from py4DSTEM.data import Data
-from typing import Optional,Union
+from typing import Optional, Union
 
 import numpy as np
 
-
-class DiffractionSlice(Array,Data):
+class DiffractionSlice(Array, Data):
     """
     Stores a diffraction-space shaped 2D data array.
""" + def __init__( self, data: np.ndarray, - name: Optional[str] = 'diffractionslice', - units: Optional[str] = 'intensity', - slicelabels: Optional[Union[bool,list]] = None, - calibration = None - ): + name: Optional[str] = "diffractionslice", + units: Optional[str] = "intensity", + slicelabels: Optional[Union[bool, list]] = None, + calibration=None, + ): """ Accepts: data (np.ndarray): the data @@ -33,36 +33,21 @@ def __init__( """ # initialize as an Array - Array.__init__( - self, - data = data, - name = name, - units = units, - slicelabels = slicelabels - ) + Array.__init__(self, data=data, name=name, units=units, slicelabels=slicelabels) # initialize as Data - Data.__init__( - self, - calibration - ) - - - + Data.__init__(self, calibration) # read @classmethod - def _get_constructor_args(cls,group): + def _get_constructor_args(cls, group): """ Returns a dictionary of args/values to pass to the class constructor """ ar_constr_args = Array._get_constructor_args(group) args = { - 'data' : ar_constr_args['data'], - 'name' : ar_constr_args['name'], - 'units' : ar_constr_args['units'], - 'slicelabels' : ar_constr_args['slicelabels'] + "data": ar_constr_args["data"], + "name": ar_constr_args["name"], + "units": ar_constr_args["units"], + "slicelabels": ar_constr_args["slicelabels"], } return args - - - diff --git a/py4DSTEM/data/propagating_calibration.py b/py4DSTEM/data/propagating_calibration.py index a80382338..4de0c8d96 100644 --- a/py4DSTEM/data/propagating_calibration.py +++ b/py4DSTEM/data/propagating_calibration.py @@ -6,6 +6,7 @@ # This is the abstract pattern: + class call_method(object): """ A decorator which, when attached to a method of SomeClass, @@ -13,6 +14,7 @@ class call_method(object): instance's `_targets` list, following execution of the decorated function. """ + def __init__(self, func): self.func = func @@ -22,19 +24,25 @@ def __call__(self, *args, **kwargs): method, then loop through the list of targets and call their `calibrate` methods. """ - self.func(*args,**kwargs) + self.func(*args, **kwargs) some_object = args[0] - assert hasattr(some_object, "_targets"), "SomeObject object appears to be in an invalid state. _targets attribute is missing." + assert hasattr( + some_object, "_targets" + ), "SomeObject object appears to be in an invalid state. _targets attribute is missing." for target in some_object._targets: - if hasattr(target,'method') and callable(target.method): + if hasattr(target, "method") and callable(target.method): try: target.method() except Exception as err: - print(f"Attempted to call .method(), but this raised an error: {err}") + print( + f"Attempted to call .method(), but this raised an error: {err}" + ) else: # warn or pass or error out here, as needs be - #pass - warnings.warn(f"{target} is registered as a target but does not appear to have a .method() callable") + # pass + warnings.warn( + f"{target} is registered as a target but does not appear to have a .method() callable" + ) def __get__(self, instance, owner): """ @@ -47,6 +55,7 @@ def __get__(self, instance, owner): partial application of the method.) """ from functools import partial + return partial(self.__call__, instance) @@ -55,31 +64,35 @@ def __get__(self, instance, owner): # calls: calibrate() # targets: _targets + class call_calibrate(object): """ Decorated methods cause all targets in _targets to call .calibrate(). 
""" + def __init__(self, func): self.func = func def __call__(self, *args, **kwargs): - """ - """ - self.func(*args,**kwargs) + """ """ + self.func(*args, **kwargs) calibration = args[0] - assert hasattr(calibration, "_targets"), "Calibration object appears to be in an invalid state. _targets attribute is missing." + assert hasattr( + calibration, "_targets" + ), "Calibration object appears to be in an invalid state. _targets attribute is missing." for target in calibration._targets: - if hasattr(target,'calibrate') and callable(target.calibrate): + if hasattr(target, "calibrate") and callable(target.calibrate): try: target.calibrate() except Exception as err: - print(f"Attempted to calibrate object {target} but this raised an error: {err}") + print( + f"Attempted to calibrate object {target} but this raised an error: {err}" + ) else: pass def __get__(self, instance, owner): - """ - """ + """ """ from functools import partial - return partial(self.__call__, instance) + return partial(self.__call__, instance) diff --git a/py4DSTEM/data/qpoints.py b/py4DSTEM/data/qpoints.py index c29127406..8eabd3eb4 100644 --- a/py4DSTEM/data/qpoints.py +++ b/py4DSTEM/data/qpoints.py @@ -6,16 +6,18 @@ from typing import Optional import numpy as np -class QPoints(PointList,Data): + +class QPoints(PointList, Data): """ Stores a set of diffraction space points, with fields 'qx', 'qy' and 'intensity' """ + def __init__( self, data: np.ndarray, - name: Optional[str] = 'qpoints', - ): + name: Optional[str] = "qpoints", + ): """ Accepts: data (structured numpy ndarray): should have three fields, which @@ -29,47 +31,40 @@ def __init__( # initialize as a PointList PointList.__init__( self, - data = data, - name = name, + data=data, + name=name, ) # rename fields - self.fields = 'qx','qy','intensity' - + self.fields = "qx", "qy", "intensity" # properties @property def qx(self): - return self.data['qx'] + return self.data["qx"] + @property def qy(self): - return self.data['qy'] + return self.data["qy"] + @property def intensity(self): - return self.data['intensity'] + return self.data["intensity"] # aliases I = intensity - - # read # this method is not necessary but is kept for consistency of structure! @classmethod - def _get_constructor_args(cls,group): + def _get_constructor_args(cls, group): """ Returns a dictionary of args/values to pass to the class constructor """ pl_constr_args = PointList._get_constructor_args(group) args = { - 'data' : pl_constr_args['data'], - 'name' : pl_constr_args['name'], + "data": pl_constr_args["data"], + "name": pl_constr_args["name"], } return args - - - - - - diff --git a/py4DSTEM/data/realslice.py b/py4DSTEM/data/realslice.py index 205cbc1ab..2c834df4d 100644 --- a/py4DSTEM/data/realslice.py +++ b/py4DSTEM/data/realslice.py @@ -3,23 +3,23 @@ from emdfile import Array from py4DSTEM.data import Data -from typing import Optional,Union +from typing import Optional, Union import numpy as np - -class RealSlice(Array,Data): +class RealSlice(Array, Data): """ Stores a real-space shaped 2D data array. 
""" + def __init__( self, data: np.ndarray, - name: Optional[str] = 'realslice', - units: Optional[str] = 'intensity', - slicelabels: Optional[Union[bool,list]] = None, - calibration = None - ): + name: Optional[str] = "realslice", + units: Optional[str] = "intensity", + slicelabels: Optional[Union[bool, list]] = None, + calibration=None, + ): """ Accepts: data (np.ndarray): the data @@ -32,38 +32,22 @@ def __init__( """ # initialize as an Array Array.__init__( - self, - data = data, - name = name, - units = 'intensity', - slicelabels = slicelabels + self, data=data, name=name, units="intensity", slicelabels=slicelabels ) # initialize as Data - Data.__init__( - self, - calibration - ) - + Data.__init__(self, calibration) # read @classmethod - def _get_constructor_args(cls,group): + def _get_constructor_args(cls, group): """ Returns a dictionary of args/values to pass to the class constructor """ ar_constr_args = Array._get_constructor_args(group) args = { - 'data' : ar_constr_args['data'], - 'name' : ar_constr_args['name'], - 'units' : ar_constr_args['units'], - 'slicelabels' : ar_constr_args['slicelabels'] + "data": ar_constr_args["data"], + "name": ar_constr_args["name"], + "units": ar_constr_args["units"], + "slicelabels": ar_constr_args["slicelabels"], } return args - - - - - - - - diff --git a/py4DSTEM/datacube/__init__.py b/py4DSTEM/datacube/__init__.py index 881966e2f..883961fcb 100644 --- a/py4DSTEM/datacube/__init__.py +++ b/py4DSTEM/datacube/__init__.py @@ -3,5 +3,3 @@ from py4DSTEM.datacube.datacube import DataCube from py4DSTEM.datacube.virtualimage import VirtualImage from py4DSTEM.datacube.virtualdiffraction import VirtualDiffraction - - diff --git a/py4DSTEM/datacube/datacube.py b/py4DSTEM/datacube/datacube.py index ae3a82a36..4d87afdd5 100644 --- a/py4DSTEM/datacube/datacube.py +++ b/py4DSTEM/datacube/datacube.py @@ -2,9 +2,15 @@ import numpy as np from scipy.interpolate import interp1d -from scipy.ndimage import (binary_opening, binary_dilation, - distance_transform_edt, binary_fill_holes, gaussian_filter1d, gaussian_filter) -from typing import Optional,Union +from scipy.ndimage import ( + binary_opening, + binary_dilation, + distance_transform_edt, + binary_fill_holes, + gaussian_filter1d, + gaussian_filter, +) +from typing import Optional, Union from emdfile import Array, Metadata, Node, Root, tqdmnd from py4DSTEM.data import Data, Calibration @@ -17,7 +23,7 @@ class DataCube( Data, DataCubeVirtualImager, DataCubeVirtualDiffraction, - ): +): """ Storage and processing methods for 4D-STEM datasets. 
""" @@ -25,10 +31,10 @@ class DataCube( def __init__( self, data: np.ndarray, - name: Optional[str] = 'datacube', - slicelabels: Optional[Union[bool,list]] = None, - calibration: Optional[Union[Calibration,None]] = None, - ): + name: Optional[str] = "datacube", + slicelabels: Optional[Union[bool, list]] = None, + calibration: Optional[Union[Calibration, None]] = None, + ): """ Accepts: data (np.ndarray): the data @@ -45,23 +51,15 @@ def __init__( # initialize as an Array Array.__init__( self, - data = data, - name = name, - units = 'pixel intensity', - dim_names = [ - 'Rx', - 'Ry', - 'Qx', - 'Qy' - ], - slicelabels = slicelabels + data=data, + name=name, + units="pixel intensity", + dim_names=["Rx", "Ry", "Qx", "Qy"], + slicelabels=slicelabels, ) # initialize as Data - Data.__init__( - self, - calibration - ) + Data.__init__(self, calibration) # register with calibration self.calibration.register_target(self) @@ -72,9 +70,6 @@ def __init__( # polar coords self.polar = None - - - def calibrate(self): """ Calibrate the coordinate axes of the datacube. Using the calibrations @@ -82,7 +77,7 @@ def calibrate(self): to the pixel size, units and origin positions, then updates the meshgrids representing Q and R space. """ - assert(self.calibration is not None), "No calibration found!" + assert self.calibration is not None, "No calibration found!" # Get calibration values rpixsize = self.calibration.get_R_pixel_size() @@ -90,86 +85,82 @@ def calibrate(self): qpixsize = self.calibration.get_Q_pixel_size() qpixunits = self.calibration.get_Q_pixel_units() origin = self.calibration.get_origin_mean() - if origin is None or origin==(None,None): - origin = (0,0) + if origin is None or origin == (None, None): + origin = (0, 0) # Calc dim vectors - dim_rx = np.arange(self.R_Nx)*rpixsize - dim_ry = np.arange(self.R_Ny)*rpixsize - dim_qx = -origin[0] + np.arange(self.Q_Nx)*qpixsize - dim_qy = -origin[1] + np.arange(self.Q_Ny)*qpixsize + dim_rx = np.arange(self.R_Nx) * rpixsize + dim_ry = np.arange(self.R_Ny) * rpixsize + dim_qx = -origin[0] + np.arange(self.Q_Nx) * qpixsize + dim_qy = -origin[1] + np.arange(self.Q_Ny) * qpixsize # Set dim vectors - self.set_dim( - 0, - dim_rx, - units = rpixunits - ) - self.set_dim( - 1, - dim_ry, - units = rpixunits - ) - self.set_dim( - 2, - dim_qx, - units = qpixunits - ) - self.set_dim( - 3, - dim_qy, - units = qpixunits - ) + self.set_dim(0, dim_rx, units=rpixunits) + self.set_dim(1, dim_ry, units=rpixunits) + self.set_dim(2, dim_qx, units=qpixunits) + self.set_dim(3, dim_qy, units=qpixunits) # Set meshgrids - self._qxx,self._qyy = np.meshgrid( dim_qx,dim_qy ) - self._rxx,self._ryy = np.meshgrid( dim_rx,dim_ry ) - - self._qyy_raw,self._qxx_raw = np.meshgrid( np.arange(self.Q_Ny),np.arange(self.Q_Nx) ) - self._ryy_raw,self._rxx_raw = np.meshgrid( np.arange(self.R_Ny),np.arange(self.R_Nx) ) - + self._qxx, self._qyy = np.meshgrid(dim_qx, dim_qy) + self._rxx, self._ryy = np.meshgrid(dim_rx, dim_ry) + self._qyy_raw, self._qxx_raw = np.meshgrid( + np.arange(self.Q_Ny), np.arange(self.Q_Nx) + ) + self._ryy_raw, self._rxx_raw = np.meshgrid( + np.arange(self.R_Ny), np.arange(self.R_Nx) + ) # coordinate meshgrids @property def rxx(self): return self._rxx + @property def ryy(self): return self._ryy + @property def qxx(self): return self._qxx + @property def qyy(self): return self._qyy + @property def rxx_raw(self): return self._rxx_raw + @property def ryy_raw(self): return self._ryy_raw + @property def qxx_raw(self): return self._qxx_raw + @property def qyy_raw(self): return 
self._qyy_raw
 
     # coordinate meshgrids with shifted origin
-    def qxxs(self,rx,ry):
-        qx0_shift = self.calibration.get_qx0shift(rx,ry)
+    def qxxs(self, rx, ry):
+        qx0_shift = self.calibration.get_qx0shift(rx, ry)
         if qx0_shift is None:
-            raise Exception("Can't compute shifted meshgrid - origin shift is not defined")
+            raise Exception(
+                "Can't compute shifted meshgrid - origin shift is not defined"
+            )
         return self.qxx - qx0_shift
-    def qyys(self,rx,ry):
-        qy0_shift = self.calibration.get_qy0shift(rx,ry)
+
+    def qyys(self, rx, ry):
+        qy0_shift = self.calibration.get_qy0shift(rx, ry)
         if qy0_shift is None:
-            raise Exception("Can't compute shifted meshgrid - origin shift is not defined")
+            raise Exception(
+                "Can't compute shifted meshgrid - origin shift is not defined"
+            )
         return self.qyy - qy0_shift
-
-
     # shape properties
 
     ## shape
@@ -178,26 +169,30 @@ def qyys(self,rx,ry):
     @property
     def R_Nx(self):
         return self.data.shape[0]
+
     @property
     def R_Ny(self):
         return self.data.shape[1]
+
     @property
     def Q_Nx(self):
         return self.data.shape[2]
+
     @property
     def Q_Ny(self):
         return self.data.shape[3]
 
     @property
     def Rshape(self):
-        return (self.data.shape[0],self.data.shape[1])
+        return (self.data.shape[0], self.data.shape[1])
+
     @property
     def Qshape(self):
-        return (self.data.shape[2],self.data.shape[3])
+        return (self.data.shape[2], self.data.shape[3])
 
     @property
     def R_N(self):
-        return self.R_Nx*self.R_Ny
+        return self.R_Nx * self.R_Ny
 
     # aliases
     qnx = Q_Nx
@@ -208,14 +203,13 @@ def R_N(self):
     qshape = Qshape
     rn = R_N
 
-
     ## pixel size / units
 
-    # Q 
+    # Q
     @property
     def Q_pixel_size(self):
         return self.calibration.get_Q_pixel_size()
+
     @property
     def Q_pixel_units(self):
         return self.calibration.get_Q_pixel_units()
@@ -224,6 +218,7 @@ def Q_pixel_units(self):
     @property
     def R_pixel_size(self):
         return self.calibration.get_R_pixel_size()
+
     @property
     def R_pixel_units(self):
         return self.calibration.get_R_pixel_units()
@@ -234,72 +229,40 @@ def R_pixel_units(self):
     rpixsize = R_pixel_size
     rpixunit = R_pixel_units
 
-
-
-
-
     def copy(self):
         """
         Copies the datacube
         """
         from py4DSTEM import DataCube
+
         new_datacube = DataCube(
-            data = self.data.copy(),
-            name = self.name,
-            calibration = self.calibration.copy(),
-            slicelabels = self.slicelabels,
+            data=self.data.copy(),
+            name=self.name,
+            calibration=self.calibration.copy(),
+            slicelabels=self.slicelabels,
         )
 
-        Qpixsize  = new_datacube.calibration.get_Q_pixel_size()
+        Qpixsize = new_datacube.calibration.get_Q_pixel_size()
         Qpixunits = new_datacube.calibration.get_Q_pixel_units()
-        Rpixsize  = new_datacube.calibration.get_R_pixel_size()
+        Rpixsize = new_datacube.calibration.get_R_pixel_size()
         Rpixunits = new_datacube.calibration.get_R_pixel_units()
 
-        new_datacube.set_dim(
-            0,
-            [0,Rpixsize],
-            units = Rpixunits,
-            name = 'Rx'
-        )
-        new_datacube.set_dim(
-            1,
-            [0,Rpixsize],
-            units = Rpixunits,
-            name = 'Ry'
-        )
+        new_datacube.set_dim(0, [0, Rpixsize], units=Rpixunits, name="Rx")
+        new_datacube.set_dim(1, [0, Rpixsize], units=Rpixunits, name="Ry")
 
-        new_datacube.set_dim(
-            2,
-            [0,Qpixsize],
-            units = Qpixunits,
-            name = 'Qx'
-        )
-        new_datacube.set_dim(
-            3,
-            [0,Qpixsize],
-            units = Qpixunits,
-            name = 'Qy'
-        )
+        new_datacube.set_dim(2, [0, Qpixsize], units=Qpixunits, name="Qx")
+        new_datacube.set_dim(3, [0, Qpixsize], units=Qpixunits, name="Qy")
         return new_datacube
 
-
-
-
-
-
-
-
     # I/O
 
     # to_h5 is inherited from Array
 
     # read
     @classmethod
-    def _get_constructor_args(cls,group):
-        """ Construct a datacube with no calibration / metadata
-        """
+    def _get_constructor_args(cls, group):
+        """Construct a datacube with no calibration / metadata"""
         # We only need some of the Array constructors;
         # dim vector/units are passed through when Calibration
         # is loaded, and the runtime dim vectors are then set
         # in _add_root_links
         ar_args = Array._get_constructor_args(group)
 
         args = {
-            'data': ar_args['data'],
-            'name': ar_args['name'],
-            'slicelabels': ar_args['slicelabels'],
-            'calibration': None
+            "data": ar_args["data"],
+            "name": ar_args["name"],
+            "slicelabels": ar_args["slicelabels"],
+            "calibration": None,
         }
 
         return args
 
-
-    def _add_root_links(self,group):
-        """ When reading from file, link to calibration metadata,
+    def _add_root_links(self, group):
+        """When reading from file, link to calibration metadata,
         then use it to populate the datacube dim vectors
         """
         # Link to the datacube
         self.calibration._datacube = self
 
         # Populate dim vectors
-        self.calibration.set_Q_pixel_size( self.calibration.get_Q_pixel_size() )
-        self.calibration.set_R_pixel_size( self.calibration.get_R_pixel_size() )
-        self.calibration.set_Q_pixel_units( self.calibration.get_Q_pixel_units() )
-        self.calibration.set_R_pixel_units( self.calibration.get_R_pixel_units() )
+        self.calibration.set_Q_pixel_size(self.calibration.get_Q_pixel_size())
+        self.calibration.set_R_pixel_size(self.calibration.get_R_pixel_size())
+        self.calibration.set_Q_pixel_units(self.calibration.get_Q_pixel_units())
+        self.calibration.set_R_pixel_units(self.calibration.get_R_pixel_units())
 
         return
 
-
-
-
     # Class methods
 
-    def add(
-        self,
-        data,
-        name = ''
-    ):
+    def add(self, data, name=""):
         """
         Adds a block of data to the DataCube's tree. If `data` is an instance of
         an EMD/py4DSTEM class, add it to the tree. If it's a numpy array,
         turn it into an Array instance, then save to the tree.
         """
         if isinstance(data, np.ndarray):
-            data = Array(
-                data = data,
-                name = name
-            )
-        self.attach( data )
+            data = Array(data=data, name=name)
+        self.attach(data)
 
-    def set_scan_shape(
-        self,
-        Rshape
-    ):
+    def set_scan_shape(self, Rshape):
         """
         Reshape the data given the real space scan shape.
 
@@ -364,45 +313,39 @@ def set_scan_shape(
             Rshape (2-tuple)
         """
         from py4DSTEM.preprocess import set_scan_shape
-        assert len(Rshape)==2, "Rshape must have a length of 2"
-        d = set_scan_shape(self,Rshape[0],Rshape[1])
-        return d
 
+        assert len(Rshape) == 2, "Rshape must have a length of 2"
+        d = set_scan_shape(self, Rshape[0], Rshape[1])
+        return d
 
-    def swap_RQ(
-        self
-    ):
+    def swap_RQ(self):
         """
         Swaps the first and last two dimensions of the 4D datacube.
         """
         from py4DSTEM.preprocess import swap_RQ
+
         d = swap_RQ(self)
         return d
 
-    def swap_Rxy(
-        self
-    ):
+    def swap_Rxy(self):
         """
         Swaps the real space x and y coordinates.
         """
         from py4DSTEM.preprocess import swap_Rxy
+
         d = swap_Rxy(self)
         return d
 
-    def swap_Qxy(
-        self
-    ):
+    def swap_Qxy(self):
         """
         Swaps the diffraction space x and y coordinates.
         """
         from py4DSTEM.preprocess import swap_Qxy
+
         d = swap_Qxy(self)
         return d
 
-    def crop_Q(
-        self,
-        ROI
-    ):
+    def crop_Q(self, ROI):
         """
         Crops the data in diffraction space about the region specified by ROI.
@@ -410,14 +353,12 @@ def crop_Q( ROI (4-tuple): Specifies (Qx_min,Qx_max,Qy_min,Qy_max) """ from py4DSTEM.preprocess import crop_data_diffraction - assert len(ROI)==4, "Crop region `ROI` must have length 4" - d = crop_data_diffraction(self,ROI[0],ROI[1],ROI[2],ROI[3]) + + assert len(ROI) == 4, "Crop region `ROI` must have length 4" + d = crop_data_diffraction(self, ROI[0], ROI[1], ROI[2], ROI[3]) return d - def crop_R( - self, - ROI - ): + def crop_R(self, ROI): """ Crops the data in real space about the region specified by ROI. @@ -425,15 +366,12 @@ def crop_R( ROI (4-tuple): Specifies (Rx_min,Rx_max,Ry_min,Ry_max) """ from py4DSTEM.preprocess import crop_data_real - assert len(ROI)==4, "Crop region `ROI` must have length 4" - d = crop_data_real(self,ROI[0],ROI[1],ROI[2],ROI[3]) + + assert len(ROI) == 4, "Crop region `ROI` must have length 4" + d = crop_data_real(self, ROI[0], ROI[1], ROI[2], ROI[3]) return d - def bin_Q( - self, - N, - dtype = None - ): + def bin_Q(self, N, dtype=None): """ Bins the data in diffraction space by bin factor N @@ -450,14 +388,11 @@ def bin_Q( datacube : DataCube """ from py4DSTEM.preprocess import bin_data_diffraction - d = bin_data_diffraction(self,N,dtype) + + d = bin_data_diffraction(self, N, dtype) return d - def pad_Q( - self, - N = None, - output_size = None - ): + def pad_Q(self, N=None, output_size=None): """ Pads the data in diffraction space by pad factor N, or to match output_size. @@ -466,15 +401,11 @@ def pad_Q( output_size ((int,int)): the padded output size """ from py4DSTEM.preprocess import pad_data_diffraction - d = pad_data_diffraction(self,pad_factor=N,output_size=output_size) + + d = pad_data_diffraction(self, pad_factor=N, output_size=output_size) return d - def resample_Q( - self, - N = None, - output_size = None, - method='bilinear' - ): + def resample_Q(self, N=None, output_size=None, method="bilinear"): """ Resamples the data in diffraction space by resampling factor N, or to match output_size, using either 'fourier' or 'bilinear' interpolation. @@ -485,14 +416,13 @@ def resample_Q( method (str): 'fourier' or 'bilinear' (default) """ from py4DSTEM.preprocess import resample_data_diffraction - d = resample_data_diffraction(self,resampling_factor=N,output_size=output_size,method=method) + + d = resample_data_diffraction( + self, resampling_factor=N, output_size=output_size, method=method + ) return d - def bin_Q_mmap( - self, - N, - dtype=np.float32 - ): + def bin_Q_mmap(self, N, dtype=np.float32): """ Bins the data in diffraction space by bin factor N for memory mapped data @@ -501,13 +431,11 @@ def bin_Q_mmap( dtype: the data type """ from py4DSTEM.preprocess import bin_data_mmap - d = bin_data_mmap(self,N) + + d = bin_data_mmap(self, N) return d - def bin_R( - self, - N - ): + def bin_R(self, N): """ Bins the data in real space by bin factor N @@ -515,13 +443,11 @@ def bin_R( N (int): the binning factor """ from py4DSTEM.preprocess import bin_data_real - d = bin_data_real(self,N) + + d = bin_data_real(self, N) return d - def thin_R( - self, - N - ): + def thin_R(self, N): """ Reduces the data in real space by skipping every N patterns in the x and y directions. 
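Because each of these calls returns a DataCube, diffraction-space cropping, binning, padding and resampling can be staged stepwise and inspected between steps. A sketch continuing from the toy `dc` above (factors and sizes are illustrative):

    dc2 = dc.crop_Q((8, 56, 8, 56))          # keep a 48x48 diffraction-space window
    dc2 = dc2.bin_Q(2)                       # bin diffraction space by a factor of 2
    dc2 = dc2.pad_Q(output_size=(32, 32))    # pad back out to 32x32
    dc2 = dc2.resample_Q(output_size=(64, 64), method="bilinear")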
@@ -529,15 +455,11 @@ def thin_R( N (int): the thinning factor """ from py4DSTEM.preprocess import thin_data_real - d = thin_data_real(self,N) + + d = thin_data_real(self, N) return d - def filter_hot_pixels( - self, - thresh, - ind_compare=1, - return_mask=False - ): + def filter_hot_pixels(self, thresh, ind_compare=1, return_mask=False): """ This function performs pixel filtering to remove hot / bright pixels. We first compute a moving local ordering filter, applied to the mean diffraction image. This ordering filter will return a single value from the local sorted intensity @@ -556,6 +478,7 @@ def filter_hot_pixels( mask (optional, boolean Array) the bad pixel mask """ from py4DSTEM.preprocess import filter_hot_pixels + d = filter_hot_pixels( self, thresh, @@ -564,21 +487,19 @@ def filter_hot_pixels( ) return d - - # Probe def get_vacuum_probe( self, - ROI = None, - align = True, - mask = None, - threshold = 0.2, - expansion = 12, - opening = 3, - verbose = False, - returncalc = True - ): + ROI=None, + align=True, + mask=None, + threshold=0.2, + expansion=12, + opening=3, + verbose=False, + returncalc=True, + ): """ Computes a vacuum probe. @@ -625,14 +546,14 @@ def get_vacuum_probe( # parse region to use if ROI is None: - ROI = np.ones(self.Rshape,dtype=bool) - elif isinstance(ROI,tuple): - assert(len(ROI)==4), "if ROI is a tuple must be length 4" - _ROI = np.ones(self.Rshape,dtype=bool) - ROI = _ROI[ROI[0]:ROI[1],ROI[2]:ROI[3]] + ROI = np.ones(self.Rshape, dtype=bool) + elif isinstance(ROI, tuple): + assert len(ROI) == 4, "if ROI is a tuple must be length 4" + _ROI = np.ones(self.Rshape, dtype=bool) + ROI = _ROI[ROI[0] : ROI[1], ROI[2] : ROI[3]] else: - assert(isinstance(ROI,np.ndarray)) - assert(ROI.shape == self.Rshape) + assert isinstance(ROI, np.ndarray) + assert ROI.shape == self.Rshape xy = np.vstack(np.nonzero(ROI)) length = xy.shape[1] @@ -640,22 +561,30 @@ def get_vacuum_probe( if mask is None: mask = 1 else: - assert(mask.shape == self.Qshape) + assert mask.shape == self.Qshape # compute average probe - probe = self.data[xy[0,0],xy[1,0],:,:] - for n in tqdmnd(range(1,length)): - curr_DP = self.data[xy[0,n],xy[1,n],:,:] * mask + probe = self.data[xy[0, 0], xy[1, 0], :, :] + for n in tqdmnd(range(1, length)): + curr_DP = self.data[xy[0, n], xy[1, n], :, :] * mask if align: - xshift,yshift = get_shift(probe, curr_DP) + xshift, yshift = get_shift(probe, curr_DP) curr_DP = get_shifted_ar(curr_DP, xshift, yshift) - probe = probe*(n-1)/n + curr_DP/n + probe = probe * (n - 1) / n + curr_DP / n # mask - mask = probe > np.max(probe)*threshold + mask = probe > np.max(probe) * threshold mask = binary_opening(mask, iterations=opening) mask = binary_dilation(mask, iterations=1) - mask = np.cos((np.pi/2)*np.minimum(distance_transform_edt(np.logical_not(mask)) / expansion, 1))**2 + mask = ( + np.cos( + (np.pi / 2) + * np.minimum( + distance_transform_edt(np.logical_not(mask)) / expansion, 1 + ) + ) + ** 2 + ) probe *= mask # make a probe, add to tree, and return @@ -664,19 +593,17 @@ def get_vacuum_probe( if returncalc: return probe - - def get_probe_size( self, - dp = None, + dp=None, thresh_lower=0.01, thresh_upper=0.99, N=100, - plot = False, - returncal = True, - write_to_cal = True, + plot=False, + returncal=True, + write_to_cal=True, **kwargs, - ): + ): """ Gets the center and radius of the probe in the diffraction plane. 
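A sketch of the probe workflow these two methods define, assuming the scan's first rows contain vacuum; the ROI bounds and threshold are illustrative, and get_dp_mean is defined with the virtual-diffraction methods later in this diff:

    # average the patterns in a (real-space) vacuum region into a probe template
    probe = dc.get_vacuum_probe(ROI=(0, 2, 0, 8), threshold=0.2)

    # estimate probe radius and center from the stored mean pattern;
    # by default this also writes the result into dc.calibration
    dc.get_dp_mean()
    r, qx0, qy0 = dc.get_probe_size()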
@@ -718,24 +645,26 @@ def get_probe_size( * **x0**: *(float)* the x position of the central disk center * **y0**: *(float)* the y position of the central disk center """ - #perform computation + # perform computation from py4DSTEM.process.calibration import get_probe_size if dp is None: - assert('dp_mean' in self.treekeys), "calculate .get_dp_mean() or pass a `dp` arg" - DP = self.tree( 'dp_mean' ).data + assert ( + "dp_mean" in self.treekeys + ), "calculate .get_dp_mean() or pass a `dp` arg" + DP = self.tree("dp_mean").data elif type(dp) == str: - assert(dp in self.treekeys), f"mode {dp} not found in the tree" - DP = self.tree( dp ) + assert dp in self.treekeys, f"mode {dp} not found in the tree" + DP = self.tree(dp) elif type(dp) == np.ndarray: - assert(dp.shape == self.Qshape), "must be a diffraction space shape 2D array" + assert dp.shape == self.Qshape, "must be a diffraction space shape 2D array" DP = dp x = get_probe_size( DP, - thresh_lower = thresh_lower, - thresh_upper = thresh_upper, - N = N, + thresh_lower=thresh_lower, + thresh_upper=thresh_upper, + N=N, ) # try to add to calibration @@ -743,62 +672,50 @@ def get_probe_size( try: self.calibration.set_probe_param(x) except AttributeError: - raise Exception('writing to calibrations were requested, but could not be completed') + raise Exception( + "writing to calibrations was requested, but could not be completed" + ) - #plot results + # plot results if plot: from py4DSTEM.visualize import show_circles - show_circles( - DP, - (x[1], x[2]), - x[0], - vmin = 0, - vmax = 1, - **kwargs - ) + + show_circles(DP, (x[1], x[2]), x[0], vmin=0, vmax=1, **kwargs) # return if returncal: return x - # Bragg disks def find_Bragg_disks( self, - template, - data = None, - - radial_bksb = False, - filter_function = None, - - corrPower = 1, - sigma = None, - sigma_dp = 0, - sigma_cc = 2, - subpixel = 'multicorr', - upsample_factor = 16, - - minAbsoluteIntensity = 0, - minRelativeIntensity = 0.005, - relativeToPeak = 0, - minPeakSpacing = 60, - edgeBoundary = 20, - maxNumPeaks = 70, - - CUDA = False, - CUDA_batched = True, - distributed = None, - - ML = False, - ml_model_path = None, - ml_num_attempts = 1, - ml_batch_size = 8, - - name = 'braggvectors', - returncalc = True, - ): + data=None, + radial_bksb=False, + filter_function=None, + corrPower=1, + sigma=None, + sigma_dp=0, + sigma_cc=2, + subpixel="multicorr", + upsample_factor=16, + minAbsoluteIntensity=0, + minRelativeIntensity=0.005, + relativeToPeak=0, + minPeakSpacing=60, + edgeBoundary=20, + maxNumPeaks=70, + CUDA=False, + CUDA_batched=True, + distributed=None, + ML=False, + ml_model_path=None, + ml_num_attempts=1, + ml_batch_size=8, + name="braggvectors", + returncalc=True, + ): """ Finds the Bragg disks in the diffraction patterns represented by `data` by cross/phase correlation with `template`.
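A sketch of a typical disk-detection call. It assumes the `probe` from the vacuum-probe step exposes a cross-correlation kernel as `probe.kernel`, which is an assumption about the Probe class rather than something defined in this diff; parameter values are illustrative:

    braggpeaks = dc.find_Bragg_disks(
        template=probe.kernel,   # assumed attribute holding the correlation kernel
        corrPower=1,             # 1 = pure cross correlation, 0 = pure phase correlation
        sigma_cc=2,              # smooth the correlogram before peak detection
        subpixel="multicorr",
        upsample_factor=16,
        minRelativeIntensity=0.005,
        maxNumPeaks=70,
    )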
@@ -946,98 +863,87 @@ def find_Bragg_disks( elif isinstance(data, tuple): x = self, data[0], data[1] elif isinstance(data, np.ndarray): - assert data.dtype == bool, 'array must be boolean' - assert data.shape == self.Rshape, 'array must be Rspace shaped' - x = self.data[data,:,:] + assert data.dtype == bool, "array must be boolean" + assert data.shape == self.Rshape, "array must be Rspace shaped" + x = self.data[data, :, :] else: - raise Exception(f'unexpected type for `data` {type(data)}') - + raise Exception(f"unexpected type for `data` {type(data)}") # compute peaks = find_Bragg_disks( - data = x, - template = template, - - radial_bksb = radial_bksb, - filter_function = filter_function, - - corrPower = corrPower, - sigma_dp = sigma_dp, - sigma_cc = sigma_cc, - subpixel = subpixel, - upsample_factor = upsample_factor, - - minAbsoluteIntensity = minAbsoluteIntensity, - minRelativeIntensity = minRelativeIntensity, - relativeToPeak = relativeToPeak, - minPeakSpacing = minPeakSpacing, - edgeBoundary = edgeBoundary, - maxNumPeaks = maxNumPeaks, - - CUDA = CUDA, - CUDA_batched = CUDA_batched, - distributed = distributed, - ML = ML, - ml_model_path = ml_model_path, - ml_num_attempts = ml_num_attempts, - ml_batch_size = ml_batch_size, + data=x, + template=template, + radial_bksb=radial_bksb, + filter_function=filter_function, + corrPower=corrPower, + sigma_dp=sigma_dp, + sigma_cc=sigma_cc, + subpixel=subpixel, + upsample_factor=upsample_factor, + minAbsoluteIntensity=minAbsoluteIntensity, + minRelativeIntensity=minRelativeIntensity, + relativeToPeak=relativeToPeak, + minPeakSpacing=minPeakSpacing, + edgeBoundary=edgeBoundary, + maxNumPeaks=maxNumPeaks, + CUDA=CUDA, + CUDA_batched=CUDA_batched, + distributed=distributed, + ML=ML, + ml_model_path=ml_model_path, + ml_num_attempts=ml_num_attempts, + ml_batch_size=ml_batch_size, ) - if isinstance(peaks,Node): - + if isinstance(peaks, Node): # add metadata peaks.name = name peaks.metadata = Metadata( - name = 'gen_params', - data = { - #'gen_func' : - 'template' : template, - 'filter_function' : filter_function, - 'corrPower' : corrPower, - 'sigma_dp' : sigma_dp, - 'sigma_cc' : sigma_cc, - 'subpixel' : subpixel, - 'upsample_factor' : upsample_factor, - 'minAbsoluteIntensity' : minAbsoluteIntensity, - 'minRelativeIntensity' : minRelativeIntensity, - 'relativeToPeak' : relativeToPeak, - 'minPeakSpacing' : minPeakSpacing, - 'edgeBoundary' : edgeBoundary, - 'maxNumPeaks' : maxNumPeaks, - 'CUDA' : CUDA, - 'CUDA_batched' : CUDA_batched, - 'distributed' : distributed, - 'ML' : ML, - 'ml_model_path' : ml_model_path, - 'ml_num_attempts' : ml_num_attempts, - 'ml_batch_size' : ml_batch_size, - - } + name="gen_params", + data={ + #'gen_func' : + "template": template, + "filter_function": filter_function, + "corrPower": corrPower, + "sigma_dp": sigma_dp, + "sigma_cc": sigma_cc, + "subpixel": subpixel, + "upsample_factor": upsample_factor, + "minAbsoluteIntensity": minAbsoluteIntensity, + "minRelativeIntensity": minRelativeIntensity, + "relativeToPeak": relativeToPeak, + "minPeakSpacing": minPeakSpacing, + "edgeBoundary": edgeBoundary, + "maxNumPeaks": maxNumPeaks, + "CUDA": CUDA, + "CUDA_batched": CUDA_batched, + "distributed": distributed, + "ML": ML, + "ml_model_path": ml_model_path, + "ml_num_attempts": ml_num_attempts, + "ml_batch_size": ml_batch_size, + }, ) # add to tree if data is None: - self.attach( peaks ) + self.attach(peaks) # return if returncalc: return peaks - - - - def get_beamstop_mask( self, - threshold = 0.25, - distance_edge = 2.0, - 
include_edges = True, - sigma = 0, - use_max_dp = False, - scale_radial = None, - name = "mask_beamstop", - returncalc = True, - ): + threshold=0.25, + distance_edge=2.0, + include_edges=True, + sigma=0, + use_max_dp=False, + scale_radial=None, + name="mask_beamstop", + returncalc=True, + ): """ This function uses the mean diffraction pattern plus a threshold to create a beamstop mask. @@ -1067,7 +973,7 @@ def get_beamstop_mask( x = np.arange(self.data.shape[2]) * 2.0 / self.data.shape[2] y = np.arange(self.data.shape[3]) * 2.0 / self.data.shape[3] ya, xa = np.meshgrid(y - np.mean(y), x - np.mean(x)) - im_scale = 1.0 + np.sqrt(xa**2 + ya**2)*scale_radial + im_scale = 1.0 + np.sqrt(xa**2 + ya**2) * scale_radial # Get image for beamstop mask if use_max_dp: @@ -1075,11 +981,11 @@ def get_beamstop_mask( # self.get_dp_max(); # im = self.tree["dp_max"].data.astype('float') if not "dp_max" in self._branch.keys(): - self.get_dp_max(); - im = self.tree("dp_max").data.copy().astype('float') + self.get_dp_max() + im = self.tree("dp_max").data.copy().astype("float") else: if not "dp_mean" in self._branch.keys(): - self.get_dp_mean(); + self.get_dp_mean() im = self.tree("dp_mean").data.copy() # if not "dp_mean" in self.tree.keys(): @@ -1088,15 +994,15 @@ def get_beamstop_mask( # smooth and scale if needed if sigma > 0.0: - im = gaussian_filter(im, sigma, mode='nearest') + im = gaussian_filter(im, sigma, mode="nearest") if scale_radial is not None: im *= im_scale # Calculate beamstop mask int_sort = np.sort(im.ravel()) - ind = np.round(np.clip( - int_sort.shape[0]*threshold, - 0,int_sort.shape[0])).astype('int') + ind = np.round( + np.clip(int_sort.shape[0] * threshold, 0, int_sort.shape[0]) + ).astype("int") intensity_threshold = int_sort[ind] mask_beamstop = im >= intensity_threshold @@ -1106,32 +1012,28 @@ def get_beamstop_mask( # Edges if include_edges: - mask_beamstop[0,:] = False - mask_beamstop[:,0] = False - mask_beamstop[-1,:] = False - mask_beamstop[:,-1] = False - + mask_beamstop[0, :] = False + mask_beamstop[:, 0] = False + mask_beamstop[-1, :] = False + mask_beamstop[:, -1] = False # Expand mask mask_beamstop = distance_transform_edt(mask_beamstop) < distance_edge # Wrap beamstop mask in a class - x = Array( - data = mask_beamstop, - name = name - ) + x = Array(data=mask_beamstop, name=name) # Add metadata x.metadata = Metadata( - name = 'gen_params', - data = { - #'gen_func' : - 'threshold' : 0.25, - 'distance_edge' : 4.0, - 'include_edges' : True, - 'name' : "mask_beamstop", - 'returncalc' : True, - } + name="gen_params", + data={ + #'gen_func' : + "threshold": threshold, + "distance_edge": distance_edge, + "include_edges": include_edges, + "name": "mask_beamstop", + "returncalc": returncalc, + }, ) # Add to tree @@ -1141,14 +1043,7 @@ def get_beamstop_mask( if returncalc: return mask_beamstop - - - def get_radial_bkgrnd( - self, - rx, - ry, - sigma = 2 - ): + def get_radial_bkgrnd(self, rx, ry, sigma=2): """ Computes and returns a background image for the diffraction pattern at (rx,ry), populated by radial rings of constant intensity @@ -1171,52 +1066,41 @@ def get_radial_bkgrnd( The radial background """ # ensure a polar cube and origin exist - assert(self.polar is not None), "No polar datacube found!" - assert(self.calibration.get_origin() is not None), "No origin found!" + assert self.polar is not None, "No polar datacube found!" + assert self.calibration.get_origin() is not None, "No origin found!" 
# get the 1D median background - bkgrd_ma_1d = np.ma.median( self.polar.data[rx,ry], axis=0 ) + bkgrd_ma_1d = np.ma.median(self.polar.data[rx, ry], axis=0) bkgrd_1d = bkgrd_ma_1d.data bkgrd_1d[bkgrd_ma_1d.mask] = 0 # smooth - if sigma>0: + if sigma > 0: bkgrd_1d = gaussian_filter1d(bkgrd_1d, sigma) # define the 2D cartesian coordinate system origin = self.calibration.get_origin() - origin = origin[0][rx,ry],origin[1][rx,ry] - qxx,qyy = self.qxx_raw-origin[0], self.qyy_raw-origin[1] + origin = origin[0][rx, ry], origin[1][rx, ry] + qxx, qyy = self.qxx_raw - origin[0], self.qyy_raw - origin[1] # get distance qr in polar-elliptical coords ellipse = self.calibration.get_ellipse() - ellipse = (1,1,0) if ellipse is None else ellipse - a,b,theta = ellipse + ellipse = (1, 1, 0) if ellipse is None else ellipse + a, b, theta = ellipse qrr = np.sqrt( - ( (qxx*np.cos(theta)) + (qyy*np.sin(theta)) )**2 + - ( (qxx*np.sin(theta)) - (qyy*np.cos(theta)) )**2 / (b/a)**2 + ((qxx * np.cos(theta)) + (qyy * np.sin(theta))) ** 2 + + ((qxx * np.sin(theta)) - (qyy * np.cos(theta))) ** 2 / (b / a) ** 2 ) # make an interpolation function and get the 2D background - f = interp1d( - self.polar.radial_bins, - bkgrd_1d, - fill_value = 'extrapolate' - ) + f = interp1d(self.polar.radial_bins, bkgrd_1d, fill_value="extrapolate") background = f(qrr) # return return background - - - def get_radial_bksb_dp( - self, - rx, - ry, - sigma = 2 - ): + def get_radial_bksb_dp(self, rx, ry, sigma=2): """ Computes and returns the diffraction pattern at beam position (rx,ry) with a radial background subtracted. See the docstring for @@ -1238,24 +1122,22 @@ def get_radial_bksb_dp( The radial background subtracted diffraction image """ # get 2D background - background = self.get_radial_bkgrnd( rx,ry,sigma ) + background = self.get_radial_bkgrnd(rx, ry, sigma) # subtract, zero negative values, return - ans = self.data[rx,ry] - background - ans[ans<0] = 0 + ans = self.data[rx, ry] - background + ans[ans < 0] = 0 return ans - - def get_local_ave_dp( self, rx, ry, - radial_bksb = False, - sigma = 2, - braggmask = False, - braggvectors = None, - braggmask_radius = None + radial_bksb=False, + sigma=2, + braggmask=False, + braggvectors=None, + braggmask_radius=None, ): """ Computes and returns the diffraction pattern at beam position (rx,ry) @@ -1289,163 +1171,133 @@ def get_local_ave_dp( The radial background subtracted diffraction image """ # define the kernel - kernel = np.array([[1,2,1], - [2,4,2], - [1,2,1]])/16. 
+ kernel = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]]) / 16.0 # get shape and check for valid inputs - nx,ny = self.data.shape[:2] - assert(rx>=0 and rx<nx), "rx outside of scan range" - assert(ry>=0 and ry<ny), "ry outside of scan range" + nx, ny = self.data.shape[:2] + assert rx >= 0 and rx < nx, "rx outside of scan range" + assert ry >= 0 and ry < ny, "ry outside of scan range" # get the subcube, checking for edge patterns # and modifying the kernel as needed - if rx!=0 and rx!=(nx-1) and ry!=0 and ry!=(ny-1): - subcube = self.data[rx-1:rx+2,ry-1:ry+2,:,:] - elif rx==0 and ry==0: - subcube = self.data[:2,:2,:,:] - kernel = kernel[1:,1:] - elif rx==0 and ry==(ny-1): - subcube = self.data[:2,-2:,:,:] - kernel = kernel[1:,:-1] - elif rx==(nx-1) and ry==0: - subcube = self.data[-2:,:2,:,:] - kernel = kernel[:-1,1:] - elif rx==(nx-1) and ry==(ny-1): - subcube = self.data[-2:,-2:,:,:] - kernel = kernel[:-1,:-1] - elif rx==0: - subcube = self.data[:2,ry-1:ry+2,:,:] - kernel = kernel[1:,:] - elif rx==(nx-1): - subcube = self.data[-2:,ry-1:ry+2,:,:] - kernel = kernel[:-1,:] - elif ry==0: - subcube = self.data[rx-1:rx+2,:2,:,:] - kernel = kernel[:,1:] - elif ry==(ny-1): - subcube = self.data[rx-1:rx+2,-2:,:,:] - kernel = kernel[:,:-1] + if rx != 0 and rx != (nx - 1) and ry != 0 and ry != (ny - 1): + subcube = self.data[rx - 1 : rx + 2, ry - 1 : ry + 2, :, :] + elif rx == 0 and ry == 0: + subcube = self.data[:2, :2, :, :] + kernel = kernel[1:, 1:] + elif rx == 0 and ry == (ny - 1): + subcube = self.data[:2, -2:, :, :] + kernel = kernel[1:, :-1] + elif rx == (nx - 1) and ry == 0: + subcube = self.data[-2:, :2, :, :] + kernel = kernel[:-1, 1:] + elif rx == (nx - 1) and ry == (ny - 1): + subcube = self.data[-2:, -2:, :, :] + kernel = kernel[:-1, :-1] + elif rx == 0: + subcube = self.data[:2, ry - 1 : ry + 2, :, :] + kernel = kernel[1:, :] + elif rx == (nx - 1): + subcube = self.data[-2:, ry - 1 : ry + 2, :, :] + kernel = kernel[:-1, :] + elif ry == 0: + subcube = self.data[rx - 1 : rx + 2, :2, :, :] + kernel = kernel[:, 1:] + elif ry == (ny - 1): + subcube = self.data[rx - 1 : rx + 2, -2:, :, :] + kernel = kernel[:, :-1] else: - raise Exception(f'Invalid (rx,ry) = ({rx},{ry})...') + raise Exception(f"Invalid (rx,ry) = ({rx},{ry})...") # normalize the kernel kernel /= np.sum(kernel) - # compute... # ...in the simple case - if not(radial_bksb) and not(braggmask): - ans = np.tensordot(subcube,kernel,axes=((0,1),(0,1))) + if not (radial_bksb) and not (braggmask): + ans = np.tensordot(subcube, kernel, axes=((0, 1), (0, 1))) # ...with radial background subtraction - elif radial_bksb and not(braggmask): + elif radial_bksb and not (braggmask): # get position of (rx,ry) relative to kernel - _xs = 1 if rx!=0 else 0 - _ys = 1 if ry!=0 else 0 + _xs = 1 if rx != 0 else 0 + _ys = 1 if ry != 0 else 0 x0 = rx - _xs y0 = ry - _ys # compute ans = np.zeros(self.Qshape) - for (i,j),w in np.ndenumerate(kernel): + for (i, j), w in np.ndenumerate(kernel): x = x0 + i y = y0 + j - ans += self.get_radial_bksb_dp(x,y,sigma) * w + ans += self.get_radial_bksb_dp(x, y, sigma) * w # ...with bragg masking - elif not(radial_bksb) and braggmask: - assert(braggvectors is not None), "`braggvectors` must be specified or `braggmask` must be turned off!" - assert(braggmask_radius is not None), "`braggmask_radius` must be specified or `braggmask` must be turned off!" + elif not (radial_bksb) and braggmask: + assert ( + braggvectors is not None + ), "`braggvectors` must be specified or `braggmask` must be turned off!" + assert ( + braggmask_radius is not None + ), "`braggmask_radius` must be specified or `braggmask` must be turned off!"
# get position of (rx,ry) relative to kernel - _xs = 1 if rx!=0 else 0 - _ys = 1 if ry!=0 else 0 + _xs = 1 if rx != 0 else 0 + _ys = 1 if ry != 0 else 0 x0 = rx - _xs y0 = ry - _ys # compute ans = np.zeros(self.Qshape) weights = np.zeros(self.Qshape) - for (i,j),w in np.ndenumerate(kernel): + for (i, j), w in np.ndenumerate(kernel): x = x0 + i y = y0 + j - mask = self.get_braggmask( - braggvectors, - x, - y, - braggmask_radius - ) + mask = self.get_braggmask(braggvectors, x, y, braggmask_radius) weights_curr = mask * w - ans += self.data[x,y] * weights_curr + ans += self.data[x, y] * weights_curr weights += weights_curr # normalize out = np.full_like(ans, np.nan) - ans_mask = weights>0 - ans = np.divide( - ans, - weights, - out = out, - where = ans_mask - ) + ans_mask = weights > 0 + ans = np.divide(ans, weights, out=out, where=ans_mask) # make masked array - ans = np.ma.array( - data = ans, - mask = np.logical_not(ans_mask) - ) + ans = np.ma.array(data=ans, mask=np.logical_not(ans_mask)) pass # ...with both radial background subtraction and bragg masking else: - assert(braggvectors is not None), "`braggvectors` must be specified or `braggmask` must be turned off!" - assert(braggmask_radius is not None), "`braggmask_radius` must be specified or `braggmask` must be turned off!" + assert ( + braggvectors is not None + ), "`braggvectors` must be specified or `braggmask` must be turned off!" + assert ( + braggmask_radius is not None + ), "`braggmask_radius` must be specified or `braggmask` must be turned off!" # get position of (rx,ry) relative to kernel - _xs = 1 if rx!=0 else 0 - _ys = 1 if ry!=0 else 0 + _xs = 1 if rx != 0 else 0 + _ys = 1 if ry != 0 else 0 x0 = rx - _xs y0 = ry - _ys # compute ans = np.zeros(self.Qshape) weights = np.zeros(self.Qshape) - for (i,j),w in np.ndenumerate(kernel): + for (i, j), w in np.ndenumerate(kernel): x = x0 + i y = y0 + j - mask = self.get_braggmask( - braggvectors, - x, - y, - braggmask_radius - ) + mask = self.get_braggmask(braggvectors, x, y, braggmask_radius) weights_curr = mask * w - ans += self.get_radial_bksb_dp(x,y,sigma) * weights_curr + ans += self.get_radial_bksb_dp(x, y, sigma) * weights_curr weights += weights_curr # normalize out = np.full_like(ans, np.nan) - ans_mask = weights>0 - ans = np.divide( - ans, - weights, - out = out, - where = ans_mask - ) + ans_mask = weights > 0 + ans = np.divide(ans, weights, out=out, where=ans_mask) # make masked array - ans = np.ma.array( - data = ans, - mask = np.logical_not(ans_mask) - ) + ans = np.ma.array(data=ans, mask=np.logical_not(ans_mask)) pass # return return ans - - - - def get_braggmask( - self, - braggvectors, - rx, - ry, - radius - ): + def get_braggmask(self, braggvectors, rx, ry, radius): """ Returns a boolean mask which is False in a radius of `radius` around each bragg scattering vector at scan position (rx,ry). 
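How the background and masking pieces above compose, as a sketch: a locally averaged pattern at one scan position with both radial background subtraction (which, per the assertions in get_radial_bkgrnd, requires a polar datacube at dc.polar) and Bragg-disk masking; `braggpeaks` is assumed from the find_Bragg_disks sketch above:

    dp_clean = dc.get_local_ave_dp(
        rx=4, ry=4,
        radial_bksb=True,       # subtract each pattern's radial median background
        sigma=2,
        braggmask=True,         # mask pixels within braggmask_radius of each disk
        braggvectors=braggpeaks,
        braggmask_radius=8,
    )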
@@ -1466,11 +1318,11 @@ def get_braggmask( mask : boolean ndarray """ # allocate space - mask = np.ones( self.Qshape, dtype=bool ) + mask = np.ones(self.Qshape, dtype=bool) # get the vectors - vects = braggvectors.raw[rx,ry] + vects = braggvectors.raw[rx, ry] # loop for idx in range(len(vects.data)): - qr = np.hypot(self.qxx_raw-vects.qx[idx], self.qyy_raw-vects.qy[idx]) - mask = np.logical_and(mask, qr>radius) - return mask \ No newline at end of file + qr = np.hypot(self.qxx_raw - vects.qx[idx], self.qyy_raw - vects.qy[idx]) + mask = np.logical_and(mask, qr > radius) + return mask diff --git a/py4DSTEM/datacube/virtualdiffraction.py b/py4DSTEM/datacube/virtualdiffraction.py index f211a35a9..65665728d 100644 --- a/py4DSTEM/datacube/virtualdiffraction.py +++ b/py4DSTEM/datacube/virtualdiffraction.py @@ -3,26 +3,27 @@ # * DataCubeVirtualDiffraction - methods inherited by DataCube for virt diffraction import numpy as np -import dask.array as da from typing import Optional import inspect -from emdfile import tqdmnd,Metadata -from py4DSTEM.data import Calibration, DiffractionSlice, Data -from py4DSTEM.visualize.show import show +from emdfile import tqdmnd, Metadata +from py4DSTEM.data import DiffractionSlice, Data +from py4DSTEM.preprocess import get_shifted_ar # Virtual diffraction container class -class VirtualDiffraction(DiffractionSlice,Data): + +class VirtualDiffraction(DiffractionSlice, Data): """ Stores a diffraction-space shaped 2D image with metadata indicating how this image was generated from a datacube. """ + def __init__( self, data: np.ndarray, - name: Optional[str] = 'virtualdiffraction', - ): + name: Optional[str] = "virtualdiffraction", + ): """ Args: data (np.ndarray) : the 2D data @@ -34,40 +35,40 @@ def __init__( # initialize as a DiffractionSlice DiffractionSlice.__init__( self, - data = data, - name = name, + data=data, + name=name, ) # read @classmethod - def _get_constructor_args(cls,group): + def _get_constructor_args(cls, group): """ Returns a dictionary of args/values to pass to the class constructor """ ar_constr_args = DiffractionSlice._get_constructor_args(group) args = { - 'data' : ar_constr_args['data'], - 'name' : ar_constr_args['name'], + "data": ar_constr_args["data"], + "name": ar_constr_args["name"], } return args # DataCube virtual diffraction methods -class DataCubeVirtualDiffraction: + +class DataCubeVirtualDiffraction: def __init__(self): pass def get_virtual_diffraction( self, method, - mask = None, - shift_center = False, - subpixel = False, - verbose = True, - name = 'virtual_diffraction', - returncalc = True + mask=None, + shift_center=False, + subpixel=False, + verbose=True, + name="virtual_diffraction", + returncalc=True, ): """ Function to calculate virtual diffraction images.
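A sketch of the interface documented below, assuming `dc` from the earlier sketches and a calibrated origin (required when shift_center is True); the boolean real-space mask is illustrative:

    roi = np.zeros(dc.Rshape, dtype=bool)
    roi[:4, :4] = True                     # restrict to one corner of the scan

    dp_mean_roi = dc.get_virtual_diffraction(
        method="mean",
        mask=roi,
        shift_center=True,   # re-center each pattern on the calibrated origin
        subpixel=False,      # integer rolls; True uses get_shifted_ar instead
    )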
@@ -101,182 +102,213 @@ def get_virtual_diffraction( diff_im : DiffractionImage """ # parse inputs - assert method in ('max', 'median', 'mean'), 'check doc strings for supported types' - assert(mask is None or mask.shape == self.Rshape), "mask must be None or real-space shaped" - + assert method in ( + "max", + "median", + "mean", + ), "check doc strings for supported types" + assert ( + mask is None or mask.shape == self.Rshape + ), "mask must be None or real-space shaped" - # Calculate + # Calculate # ...with no center shifting if shift_center == False: - # ...for the whole pattern if mask is None: - if method == 'mean': - virtual_diffraction = np.mean(self.data, axis=(0,1)) - elif method == 'max': - virtual_diffraction = np.max(self.data, axis=(0,1)) + if method == "mean": + virtual_diffraction = np.mean(self.data, axis=(0, 1)) + elif method == "max": + virtual_diffraction = np.max(self.data, axis=(0, 1)) else: - virtual_diffraction = np.median(self.data, axis=(0,1)) + virtual_diffraction = np.median(self.data, axis=(0, 1)) # ...for boolean masks elif mask.dtype == bool: mask_indices = np.nonzero(mask) - if method == 'mean': + if method == "mean": virtual_diffraction = np.mean( - self.data[mask_indices[0],mask_indices[1],:,:], axis=0) - elif method == 'max': + self.data[mask_indices[0], mask_indices[1], :, :], axis=0 + ) + elif method == "max": virtual_diffraction = np.max( - self.data[mask_indices[0],mask_indices[1],:,:], axis=0) + self.data[mask_indices[0], mask_indices[1], :, :], axis=0 + ) else: virtual_diffraction = np.median( - self.data[mask_indices[0],mask_indices[1],:,:], axis=0) + self.data[mask_indices[0], mask_indices[1], :, :], axis=0 + ) # ...for complex and floating point masks else: # allocate space - if mask.dtype == 'complex': - virtual_diffraction = np.zeros(self.Qshape, dtype='complex') + if mask.dtype == "complex": + virtual_diffraction = np.zeros(self.Qshape, dtype="complex") else: virtual_diffraction = np.zeros(self.Qshape) # set computation method - if method == 'mean': + if method == "mean": fn = np.sum - elif method == 'max': + elif method == "max": fn = np.max else: fn = np.median # loop - for qx,qy in tqdmnd( + for qx, qy in tqdmnd( self.Q_Nx, self.Q_Ny, - disable = not verbose, + disable=not verbose, ): - virtual_diffraction[qx,qy] = fn( np.squeeze(self.data[:,:,qx,qy])*mask ) + virtual_diffraction[qx, qy] = fn( + np.squeeze(self.data[:, :, qx, qy]) * mask + ) # normalize weighted means - if method == 'mean': + if method == "mean": virtual_diffraction /= np.sum(mask) - # ...with center shifting else: - assert method in ('max', 'mean'),\ - "only 'mean' and 'max' are supported for center-shifted virtual diffraction" + assert method in ( + "max", + "mean", + ), "only 'mean' and 'max' are supported for center-shifted virtual diffraction" # Get calibration metadata - assert(self.calibration.get_origin() is not None), "origin is not calibrated" + assert self.calibration.get_origin() is not None, "origin is not calibrated" x0, y0 = self.calibration.get_origin() x0_mean, y0_mean = self.calibration.get_origin_mean() # get shifts - qx_shift = x0_mean-x0 - qy_shift = y0_mean-y0 - - - # ...for integer shifts - if not subpixel: + qx_shift = x0_mean - x0 + qy_shift = y0_mean - y0 + if subpixel is False: # round shifts -> int qx_shift = qx_shift.round().astype(int) qy_shift = qy_shift.round().astype(int) - # ...for boolean masks and unmasked - if mask is None or mask.dtype==bool: - # get scan points - mask = np.ones(self.Rshape,dtype=bool) if mask is None else mask - 
mask_indices = np.nonzero(mask) - # allocate space - virtual_diffraction = np.zeros(self.Qshape) - # loop - for rx,ry in zip(mask_indices[0],mask_indices[1]): - # get shifted DP + # ...for boolean masks and unmasked + if mask is None or mask.dtype == bool: + # get scan points + mask = np.ones(self.Rshape, dtype=bool) if mask is None else mask + mask_indices = np.nonzero(mask) + # allocate space + virtual_diffraction = np.zeros(self.Qshape) + # loop + for rx, ry in zip(mask_indices[0], mask_indices[1]): + # get shifted DP + if subpixel: + DP = get_shifted_ar( + self.data[ + rx, + ry, + :, + :, + ], + qx_shift[rx, ry], + qy_shift[rx, ry], + ) + else: DP = np.roll( - self.data[rx,ry, :,:,], - (qx_shift[rx,ry], qy_shift[rx,ry]), - axis=(0,1), - ) - # compute - if method == 'mean': - virtual_diffraction += DP - elif method == 'max': - virtual_diffraction = np.maximum(virtual_diffraction, DP) - # normalize means - if method == 'mean': - virtual_diffraction /= len(mask_indices[0]) - - # ...for floating point and complex masks + self.data[ + rx, + ry, + :, + :, + ], + (qx_shift[rx, ry], qy_shift[rx, ry]), + axis=(0, 1), + ) + # compute + if method == "mean": + virtual_diffraction += DP + elif method == "max": + virtual_diffraction = np.maximum(virtual_diffraction, DP) + # normalize means + if method == "mean": + virtual_diffraction /= len(mask_indices[0]) + + # ...for floating point and complex masks + else: + # allocate space + if mask.dtype == "complex": + virtual_diffraction = np.zeros(self.Qshape, dtype="complex") else: - # allocate space - if mask.dtype == 'complex': - virtual_diffraction = np.zeros(self.Qshape, dtype = 'complex') + virtual_diffraction = np.zeros(self.Qshape) + # loop + for rx, ry in tqdmnd( + self.R_Nx, + self.R_Ny, + disable=not verbose, + ): + # get shifted DP + if subpixel: + DP = get_shifted_ar( + self.data[ + rx, + ry, + :, + :, + ], + qx_shift[rx, ry], + qy_shift[rx, ry], + ) else: - virtual_diffraction = np.zeros(self.Qshape) - # loop - for rx,ry in tqdmnd( - self.R_Nx, - self.R_Ny, - disable = not verbose, - ): - # get shifted DP DP = np.roll( - self.data[rx,ry, :,:,], - (qx_shift[rx,ry], qy_shift[rx,ry]), - axis=(0,1), - ) - # compute - w = mask[rx,ry] - if method == 'mean': - virtual_diffraction += DP*w - elif method == 'max': - virtual_diffraction = np.maximum(virtual_diffraction, DP*w) - if method == 'mean': - virtual_diffraction /= np.sum(mask) - - # TODO subpixel shifting - else: - raise Exception("subpixel shifting has not been implemented yet!") - pass - + self.data[ + rx, + ry, + :, + :, + ], + (qx_shift[rx, ry], qy_shift[rx, ry]), + axis=(0, 1), + ) + + # compute + w = mask[rx, ry] + if method == "mean": + virtual_diffraction += DP * w + elif method == "max": + virtual_diffraction = np.maximum(virtual_diffraction, DP * w) + if method == "mean": + virtual_diffraction /= np.sum(mask) # wrap, add to tree, and return # wrap in DiffractionImage - ans = VirtualDiffraction( - data = virtual_diffraction, - name = name - ) + ans = VirtualDiffraction(data=virtual_diffraction, name=name) # add the args used to gen this dp as metadata ans.metadata = Metadata( - name='gen_params', - data = { - '_calling_method' : inspect.stack()[0][3], - '_calling_class' : __class__.__name__, - 'method' : method, - 'mask' : mask, - 'shift_center' : shift_center, - 'subpixel' : subpixel, - 'verbose' : verbose, - 'name' : name, - 'returncalc' : returncalc - } + name="gen_params", + data={ + "_calling_method": inspect.stack()[0][3], + "_calling_class": __class__.__name__, + "method": 
method, "mask": mask, "shift_center": shift_center, "subpixel": subpixel, "verbose": verbose, "name": name, "returncalc": returncalc, }, ) # add to the tree - self.attach( ans ) + self.attach(ans) # return if returncalc: return ans - - # additional interfaces def get_dp_max( self, - returncalc = True, - ): + returncalc=True, + ): """ Calculates the max diffraction pattern. @@ -293,19 +325,19 @@ def get_dp_max( max_dp : VirtualDiffraction """ return self.get_virtual_diffraction( - method = 'max', - mask = None, - shift_center = False, - subpixel = False, - verbose = True, - name = 'dp_max', - returncalc = True + method="max", + mask=None, + shift_center=False, + subpixel=False, + verbose=True, + name="dp_max", + returncalc=True, ) def get_dp_mean( self, - returncalc = True, - ): + returncalc=True, + ): """ Calculates the mean diffraction pattern. @@ -322,19 +354,19 @@ def get_dp_mean( mean_dp : VirtualDiffraction """ return self.get_virtual_diffraction( - method = 'mean', - mask = None, - shift_center = False, - subpixel = False, - verbose = True, - name = 'dp_mean', - returncalc = True + method="mean", + mask=None, + shift_center=False, + subpixel=False, + verbose=True, + name="dp_mean", + returncalc=True, ) def get_dp_median( self, - returncalc = True, - ): + returncalc=True, + ): """ Calculates the median diffraction pattern. @@ -351,12 +383,11 @@ def get_dp_median( median_dp : VirtualDiffraction """ return self.get_virtual_diffraction( - method = 'median', - mask = None, - shift_center = False, - subpixel = False, - verbose = True, - name = 'dp_median', - returncalc = True + method="median", + mask=None, + shift_center=False, + subpixel=False, + verbose=True, + name="dp_median", + returncalc=True, ) - diff --git a/py4DSTEM/datacube/virtualimage.py b/py4DSTEM/datacube/virtualimage.py index ad6344c7d..50a297914 100644 --- a/py4DSTEM/datacube/virtualimage.py +++ b/py4DSTEM/datacube/virtualimage.py @@ -9,25 +9,27 @@ from typing import Optional import inspect -from emdfile import tqdmnd,Metadata +from emdfile import tqdmnd, Metadata from py4DSTEM.data import Calibration, RealSlice, Data, DiffractionSlice -from py4DSTEM.visualize.show import show - +from py4DSTEM.preprocess import get_shifted_ar +from py4DSTEM.visualize import show # Virtual image container class -class VirtualImage(RealSlice,Data): + +class VirtualImage(RealSlice, Data): """ A container for storing virtual image data and metadata, including the real-space shaped 2D image and metadata indicating how this image was generated from a datacube.
""" + def __init__( self, data: np.ndarray, - name: Optional[str] = 'virtualimage', - ): + name: Optional[str] = "virtualimage", + ): """ Parameters ---------- @@ -39,49 +41,46 @@ def __init__( # initialize as a RealSlice RealSlice.__init__( self, - data = data, - name = name, + data=data, + name=name, ) # read @classmethod - def _get_constructor_args(cls,group): + def _get_constructor_args(cls, group): """ Returns a dictionary of args/values to pass to the class constructor """ ar_constr_args = RealSlice._get_constructor_args(group) args = { - 'data' : ar_constr_args['data'], - 'name' : ar_constr_args['name'], + "data": ar_constr_args["data"], + "name": ar_constr_args["name"], } return args - - - # DataCube virtual imaging methods -class DataCubeVirtualImager: +class DataCubeVirtualImager: def __init__(self): pass - def get_virtual_image( self, mode, geometry, - centered = False, - calibrated = False, - shift_center = False, - verbose = True, - dask = False, - return_mask = False, - name = 'virtual_image', - returncalc = True, - test_config = False - ): + centered=False, + calibrated=False, + shift_center=False, + subpixel=False, + verbose=True, + dask=False, + return_mask=False, + name="virtual_image", + returncalc=True, + test_config=False, + ): """ Calculate a virtual image. @@ -141,6 +140,8 @@ def get_virtual_image( position and the mean origin position over all patterns, rounded to the nearest integer for speed. Default is False. If `shift_center` is True, `centered` is automatically set to True. + subpixel : bool + if True, applies subpixel shifts to virtual image verbose : bool toggles a progress bar dask : bool @@ -169,22 +170,28 @@ def get_virtual_image( virt_im : VirtualImage (optional, if returncalc is True) """ # parse inputs - assert mode in ('point', 'circle', 'circular', 'annulus', 'annular', 'rectangle', 'square', 'rectangular', 'mask'),\ - 'check doc strings for supported modes' - if shift_center == True: - centered = True + assert mode in ( + "point", + "circle", + "circular", + "annulus", + "annular", + "rectangle", + "square", + "rectangular", + "mask", + ), "check doc strings for supported modes" + if test_config: - for x,y in zip(['centered','calibrated','shift_center'], - [centered,calibrated,shift_center]): + for x, y in zip( + ["centered", "calibrated", "shift_center"], + [centered, calibrated, shift_center], + ): print(f"{x} = {y}") # Get geometry g = self.get_calibrated_detector_geometry( - self.calibration, - mode, - geometry, - centered, - calibrated + self.calibration, mode, geometry, centered, calibrated ) # Get mask @@ -193,39 +200,39 @@ def get_virtual_image( if return_mask == True and shift_center == False: return mask - # Calculate virtual image # no center shifting if shift_center == False: - # single CPU if not dask: - # allocate space - if mask.dtype == 'complex': - virtual_image = np.zeros(self.Rshape, dtype = 'complex') + if mask.dtype == "complex": + virtual_image = np.zeros(self.Rshape, dtype="complex") else: virtual_image = np.zeros(self.Rshape) # compute - for rx,ry in tqdmnd( + for rx, ry in tqdmnd( self.R_Nx, self.R_Ny, - disable = not verbose, + disable=not verbose, ): - virtual_image[rx,ry] = np.sum(self.data[rx,ry]*mask) + virtual_image[rx, ry] = np.sum(self.data[rx, ry] * mask) - # dask + # dask if dask == True: - # set up a generalized universal function for dask distribution - def _apply_mask_dask(self,mask): - virtual_image = np.sum(np.multiply(self.data,mask), dtype=np.float64) + def _apply_mask_dask(self, mask): + virtual_image = 
np.sum( + np.multiply(self.data, mask), dtype=np.float64 + ) + apply_mask_dask = da.as_gufunc( - _apply_mask_dask,signature='(i,j),(i,j)->()', + _apply_mask_dask, + signature="(i,j),(i,j)->()", output_dtypes=np.float64, - axes=[(2,3),(0,1),()], - vectorize=True + axes=[(2, 3), (0, 1), ()], + vectorize=True, ) # compute @@ -233,101 +240,110 @@ def _apply_mask_dask(self,mask): # with center shifting else: - # get shifts - assert(self.calibration.get_origin_shift() is not None), "origin need to be calibrated" - qx_shift,qy_shift = self.calibration.get_origin_shift() - qx_shift = qx_shift.round().astype(int) - qy_shift = qy_shift.round().astype(int) + assert ( + self.calibration.get_origin_shift() is not None + ), "origin needs to be calibrated" + qx_shift, qy_shift = self.calibration.get_origin_shift() + if subpixel is False: + qx_shift = qx_shift.round().astype(int) + qy_shift = qy_shift.round().astype(int) # if return_mask is True, get+return the mask and skip the computation if return_mask is not False: try: - rx,ry = return_mask + rx, ry = return_mask except TypeError: - raise Exception(f"if `shift_center=True`, return_mask must be a 2-tuple of ints or False, but revieced inpute value of {return_mask}") - _mask = np.roll( - mask, - (qx_shift[rx,ry], qy_shift[rx,ry]), - axis=(0,1) - ) + raise Exception( + f"if `shift_center=True`, return_mask must be a 2-tuple of \ + ints or False, but received input value of {return_mask}" + ) + if subpixel: + _mask = get_shifted_ar( + mask, qx_shift[rx, ry], qy_shift[rx, ry], bilinear=True + ) + else: + _mask = np.roll( + mask, (qx_shift[rx, ry], qy_shift[rx, ry]), axis=(0, 1) + ) return _mask # allocate space - if mask.dtype == 'complex': - virtual_image = np.zeros(self.Rshape, dtype = 'complex') + if mask.dtype == "complex": + virtual_image = np.zeros(self.Rshape, dtype="complex") else: virtual_image = np.zeros(self.Rshape) # loop - for rx,ry in tqdmnd( + for rx, ry in tqdmnd( self.R_Nx, self.R_Ny, - disable = not verbose, + disable=not verbose, ): # get shifted mask - _mask = np.roll( - mask, - (qx_shift[rx,ry], qy_shift[rx,ry]), - axis=(0,1) - ) + if subpixel: + _mask = get_shifted_ar( + mask, qx_shift[rx, ry], qy_shift[rx, ry], bilinear=True + ) + else: + _mask = np.roll( + mask, (qx_shift[rx, ry], qy_shift[rx, ry]), axis=(0, 1) + ) # add to output array - virtual_image[rx,ry] = np.sum(self.data[rx,ry]*_mask) - + virtual_image[rx, ry] = np.sum(self.data[rx, ry] * _mask) # data handling # wrap with a py4dstem class ans = VirtualImage( - data = virtual_image, - name = name, + data=virtual_image, + name=name, ) # add generating params as metadata ans.metadata = Metadata( - name = 'gen_params', - data = { - '_calling_method' : inspect.stack()[0][3], - '_calling_class' : __class__.__name__, - 'mode' : mode, - 'geometry' : geometry, - 'centered' : centered, - 'calibrated' : calibrated, - 'shift_center' : shift_center, - 'verbose' : verbose, - 'dask' : dask, - 'return_mask' : return_mask, - 'name' : name, - 'returncalc' : True, - 'test_config' : test_config - } + name="gen_params", + data={ + "_calling_method": inspect.stack()[0][3], + "_calling_class": __class__.__name__, + "mode": mode, + "geometry": geometry, + "centered": centered, + "calibrated": calibrated, + "shift_center": shift_center, + "subpixel": subpixel, + "verbose": verbose, + "dask": dask, + "return_mask": return_mask, + "name": name, + "returncalc": True, + "test_config": test_config, + }, ) # add to the tree - self.attach( ans ) + self.attach(ans) # return if returncalc: return ans - - -
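A sketch of the detector-geometry convention used above: for 'annulus' mode the geometry is ((qx0, qy0), (r_inner, r_outer)), given in raw detector pixels when `centered` and `calibrated` are False. The position_detector method that follows can preview the same mask before committing to the computation; values are illustrative:

    dc.position_detector(mode="annulus", geometry=((32, 32), (12, 28)))  # visual check

    adf = dc.get_virtual_image(
        mode="annulus",
        geometry=((32, 32), (12, 28)),
        centered=False,
        calibrated=False,
        shift_center=False,
    )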
# Position detector def position_detector( self, mode, geometry, - data = None, - centered = None, - calibrated = None, - shift_center = False, - scan_position = None, - invert = False, - color = 'r', - alpha = 0.7, - **kwargs + data=None, + centered=None, + calibrated=None, + shift_center=False, + subpixel=True, + scan_position=None, + invert=False, + color="r", + alpha=0.7, + **kwargs, ): """ Position a virtual detector by displaying a mask over a diffraction @@ -363,6 +379,8 @@ def position_detector( regardless of the value of `data` (enabling e.g. overlaying the mask for a specific scan position on a max or mean diffraction image.) + subpixel : bool + if True, applies subpixel shifts to virtual image invert : bool if True, invert the masked pixel (i.e. pixels *outside* the detector are overlaid with a mask) @@ -376,13 +394,22 @@ def position_detector( # parse inputs # mode - assert mode in ('point', 'circle', 'circular', 'annulus', 'annular', 'rectangle', 'square', 'rectangular', 'mask'),\ - 'check doc strings for supported modes' + assert mode in ( + "point", + "circle", + "circular", + "annulus", + "annular", + "rectangle", + "square", + "rectangular", + "mask", + ), "check doc strings for supported modes" # data if data is None: image = None - keys = ['dp_mean','dp_max','dp_median'] + keys = ["dp_mean", "dp_max", "dp_median"] for k in keys: try: image = self.tree(k) @@ -390,84 +417,91 @@ def position_detector( except: pass if image is None: - image = self[0,0] + image = self[0, 0] elif isinstance(data, np.ndarray): - assert(data.shape == self.Qshape), f"Can't position a detector over an image with a shape that is different from diffraction space. Diffraction space in this dataset has shape {self.Qshape} but the image passed has shape {data.shape}" + assert ( + data.shape == self.Qshape + ), f"Can't position a detector over an image with a shape that is different \ + from diffraction space. Diffraction space in this dataset has shape {self.Qshape} \ + but the image passed has shape {data.shape}" image = data elif isinstance(data, DiffractionSlice): - assert(data.shape == self.Qshape), f"Can't position a detector over an image with a shape that is different from diffraction space. Diffraction space in this dataset has shape {self.Qshape} but the image passed has shape {data.shape}" + assert ( + data.shape == self.Qshape + ), f"Can't position a detector over an image with a shape that is different \ + from diffraction space. Diffraction space in this dataset has shape {self.Qshape} \ + but the image passed has shape {data.shape}" image = data.data - elif isinstance(data,tuple): - rx,ry = data[:2] - image = self[rx,ry] + elif isinstance(data, tuple): + rx, ry = data[:2] + image = self[rx, ry] else: - raise Exception(f"Invalid argument passed to `data`. Expected None or np.ndarray or tuple, not type {type(data)}") + raise Exception( + f"Invalid argument passed to `data`. Expected None or np.ndarray or \ + tuple, not type {type(data)}" + ) # shift center if shift_center is None: shift_center = False elif shift_center == True: - assert(isinstance(data,tuple)), "If shift_center is set to True, `data` should be a 2-tuple (rx,ry). To shift the detector mask while using some other input for `data`, set `shift_center` to a 2-tuple (rx,ry)" - elif isinstance(shift_center,tuple): - rx,ry = shift_center[:2] + assert isinstance( + data, tuple + ), "If shift_center is set to True, `data` should be a 2-tuple (rx,ry). 
\ + To shift the detector mask while using some other input for `data`, \ + set `shift_center` to a 2-tuple (rx,ry)" + elif isinstance(shift_center, tuple): + rx, ry = shift_center[:2] shift_center = True else: shift_center = False - # Get the mask # Get geometry g = self.get_calibrated_detector_geometry( - calibration = self.calibration, - mode = mode, - geometry = geometry, - centered = centered, - calibrated = calibrated + calibration=self.calibration, + mode=mode, + geometry=geometry, + centered=centered, + calibrated=calibrated, ) # Get mask mask = self.make_detector(image.shape, mode, g) - if not(invert): + if not (invert): mask = np.logical_not(mask) # Shift center if shift_center: try: - rx,ry + rx, ry except NameError: - raise Exception("if `shift_center` is True then `data` must be the 3-tuple (DataCube,rx,ry)") + raise Exception( + "if `shift_center` is True then `data` must be the 3-tuple (DataCube,rx,ry)" + ) # get shifts - assert(self.calibration.get_origin_shift() is not None), "origin shifts need to be calibrated" - qx_shift,qy_shift = self.calibration.cal.get_origin_shift() - qx_shift = int(np.round(qx_shift[rx,ry])) - qy_shift = int(np.round(qy_shift[rx,ry])) - mask = np.roll( - mask, - (qx_shift, qy_shift), - axis=(0,1) - ) + assert ( + self.calibration.get_origin_shift() is not None + ), "origin shifts need to be calibrated" + qx_shift, qy_shift = self.calibration.get_origin_shift() + if subpixel: + mask = get_shifted_ar( + mask, qx_shift[rx, ry], qy_shift[rx, ry], bilinear=True + ) + else: + qx_shift = int(np.round(qx_shift[rx, ry])) + qy_shift = int(np.round(qy_shift[rx, ry])) + mask = np.roll(mask, (qx_shift, qy_shift), axis=(0, 1)) # Show - show( - image, - mask = mask, - mask_color = color, - mask_alpha = alpha, - **kwargs - ) + show(image, mask=mask, mask_color=color, mask_alpha=alpha, **kwargs) return - - @staticmethod def get_calibrated_detector_geometry( - calibration, - mode, - geometry, - centered, - calibrated - ): + calibration, mode, geometry, centered, calibrated + ): """ Determine the detector geometry in pixels, given some mode and geometry in calibrated units, where the calibration state is specified by { @@ -496,10 +530,12 @@ def get_calibrated_detector_geometry( # Parse inputs g = geometry if calibration is None: - assert calibrated is False and centered is False, "No calibration found - set a calibration or set `centered` and `calibrated` to False" + assert ( + calibrated is False and centered is False + ), "No calibration found - set a calibration or set `centered` and `calibrated` to False" return g else: - assert(isinstance(calibration, Calibration)) + assert isinstance(calibration, Calibration) cal = calibration # Get calibration metadata @@ -508,46 +544,53 @@ def get_calibrated_detector_geometry( x0_mean, y0_mean = cal.get_origin_mean() if calibrated: - assert cal['Q_pixel_units'] == 'A^-1', \ - 'check calibration - must be calibrated in A^-1 to use `calibrated=True`' + assert ( + cal["Q_pixel_units"] == "A^-1" + ), "check calibration - must be calibrated in A^-1 to use `calibrated=True`" unit_conversion = cal.get_Q_pixel_size() - # Convert units into detector pixels # Shift center if centered == True: - if mode == 'point': + if mode == "point": g = (g[0] + x0_mean, g[1] + y0_mean) - if mode in('circle', 'circular', 'annulus', 'annular'): + if mode in ("circle", "circular", "annulus", "annular"): g = ((g[0][0] + x0_mean, g[0][1] + y0_mean), g[1]) - if mode in('rectangle', 'square', 'rectangular') : - g = (g[0] + x0_mean, g[1] + x0_mean, g[2] + 
y0_mean, g[3] + y0_mean) + if mode in ("rectangle", "square", "rectangular"): + g = (g[0] + x0_mean, g[1] + x0_mean, g[2] + y0_mean, g[3] + y0_mean) # Scale by the detector pixel size if calibrated == True: - if mode == 'point': - g = (g[0]/unit_conversion, g[1]/unit_conversion) - if mode in('circle', 'circular'): - g = ((g[0][0]/unit_conversion, g[0][1]/unit_conversion), - (g[1]/unit_conversion)) - if mode in('annulus', 'annular'): - g = ((g[0][0]/unit_conversion, g[0][1]/unit_conversion), - (g[1][0]/unit_conversion, g[1][1]/unit_conversion)) - if mode in('rectangle', 'square', 'rectangular') : - g = (g[0]/unit_conversion, g[1]/unit_conversion, - g[2]/unit_conversion, g[3]/unit_conversion) + if mode == "point": + g = (g[0] / unit_conversion, g[1] / unit_conversion) + if mode in ("circle", "circular"): + g = ( + (g[0][0] / unit_conversion, g[0][1] / unit_conversion), + (g[1] / unit_conversion), + ) + if mode in ("annulus", "annular"): + g = ( + (g[0][0] / unit_conversion, g[0][1] / unit_conversion), + (g[1][0] / unit_conversion, g[1][1] / unit_conversion), + ) + if mode in ("rectangle", "square", "rectangular"): + g = ( + g[0] / unit_conversion, + g[1] / unit_conversion, + g[2] / unit_conversion, + g[3] / unit_conversion, + ) return g - @staticmethod def make_detector( shape, mode, geometry, ): - ''' + """ Generate a 2D mask representing a detector function. Parameters @@ -564,31 +607,41 @@ def make_detector( Returns ------- detector_mask : 2d array - ''' + """ g = geometry - #point mask - if mode == 'point': - assert(isinstance(g,tuple) and len(g)==2), 'specify qx and qy as tuple (qx, qy)' + # point mask + if mode == "point": + assert ( + isinstance(g, tuple) and len(g) == 2 + ), "specify qx and qy as tuple (qx, qy)" mask = np.zeros(shape, dtype=bool) qx = int(g[0]) qy = int(g[1]) - mask[qx,qy] = 1 + mask[qx, qy] = 1 - #circular mask - if mode in('circle', 'circular'): - assert(isinstance(g,tuple) and len(g)==2 and len(g[0])==2 and isinstance(g[1],(float,int))), \ - 'specify qx, qy, radius_i as ((qx, qy), radius)' + # circular mask + if mode in ("circle", "circular"): + assert ( + isinstance(g, tuple) + and len(g) == 2 + and len(g[0]) == 2 + and isinstance(g[1], (float, int)) + ), "specify qx, qy, radius_i as ((qx, qy), radius)" qxa, qya = np.indices(shape) mask = (qxa - g[0][0]) ** 2 + (qya - g[0][1]) ** 2 < g[1] ** 2 - #annular mask - if mode in('annulus', 'annular'): - assert(isinstance(g,tuple) and len(g)==2 and len(g[0])==2 and len(g[1])==2), \ - 'specify qx, qy, radius_i, radius_0 as ((qx, qy), (radius_i, radius_o))' + # annular mask + if mode in ("annulus", "annular"): + assert ( + isinstance(g, tuple) + and len(g) == 2 + and len(g[0]) == 2 + and len(g[1]) == 2 + ), "specify qx, qy, radius_i, radius_o as ((qx, qy), (radius_i, radius_o))" assert g[1][1] > g[1][0], "Inner radius must be smaller than outer radius" @@ -597,10 +650,11 @@ def make_detector( mask2 = (qxa - g[0][0]) ** 2 + (qya - g[0][1]) ** 2 < g[1][1] ** 2 mask = np.logical_and(mask1, mask2) - #rectangle mask - if mode in('rectangle', 'square', 'rectangular') : - assert(isinstance(g,tuple) and len(g)==4), \ - 'specify x_min, x_max, y_min, y_max as (x_min, x_max, y_min, y_max)' + # rectangle mask + if mode in ("rectangle", "square", "rectangular"): + assert ( + isinstance(g, tuple) and len(g) == 4 + ), "specify x_min, x_max, y_min, y_max as (x_min, x_max, y_min, y_max)" mask = np.zeros(shape, dtype=bool) xmin = int(np.round(g[0])) @@ -610,16 +664,13 @@ def make_detector( mask[xmin:xmax, ymin:ymax] = 1 - #flexible mask -
if mode == 'mask': - assert type(g) == np.ndarray, '`geometry` type should be `np.ndarray`' - assert (g.shape == shape), 'mask and diffraction pattern shapes do not match' + # flexible mask + if mode == "mask": + assert type(g) == np.ndarray, "`geometry` type should be `np.ndarray`" + assert g.shape == shape, "mask and diffraction pattern shapes do not match" mask = g return mask - - - # TODO where should this go? def make_bragg_mask( self, @@ -629,10 +680,10 @@ def make_bragg_mask( radius, origin, max_q, - return_sum = True, + return_sum=True, **kwargs, - ): - ''' + ): + """ Creates and returns a mask consisting of circular disks about the points of a 2D lattice. @@ -648,43 +699,39 @@ def make_bragg_mask( Returns: (2 or 3D array) the mask - ''' + """ nas = np.asarray - g1,g2,origin = nas(g1),nas(g2),nas(origin) + g1, g2, origin = nas(g1), nas(g2), nas(origin) # Get N,M, the maximum indices to tile out to L1 = np.sqrt(np.sum(g1**2)) - H = int(max_q/L1) + 1 - L2 = np.hypot(-g2[0]*g1[1],g2[1]*g1[0])/np.sqrt(np.sum(g1**2)) - K = int(max_q/L2) + 1 + H = int(max_q / L1) + 1 + L2 = np.hypot(-g2[0] * g1[1], g2[1] * g1[0]) / np.sqrt(np.sum(g1**2)) + K = int(max_q / L2) + 1 # Compute number of points N = 0 - for h in range(-H,H+1): - for k in range(-K,K+1): - v = h*g1 + k*g2 + for h in range(-H, H + 1): + for k in range(-K, K + 1): + v = h * g1 + k * g2 if np.sqrt(v.dot(v)) < max_q: N += 1 - #create mask + # create mask mask = np.zeros((Qshape[0], Qshape[1], N), dtype=bool) N = 0 - for h in range(-H,H+1): - for k in range(-K,K+1): - v = h*g1 + k*g2 + for h in range(-H, H + 1): + for k in range(-K, K + 1): + v = h * g1 + k * g2 if np.sqrt(v.dot(v)) < max_q: center = origin + v - mask[:,:,N] = self.make_detector( + mask[:, :, N] = self.make_detector( Qshape, - mode = 'circle', - geometry = (center, radius), + mode="circle", + geometry=(center, radius), ) N += 1 - if return_sum: - mask = np.sum(mask, axis = 2) + mask = np.sum(mask, axis=2) return mask - - - diff --git a/py4DSTEM/io/__init__.py b/py4DSTEM/io/__init__.py index 80b591f8a..fa7cd099e 100644 --- a/py4DSTEM/io/__init__.py +++ b/py4DSTEM/io/__init__.py @@ -1,4 +1,3 @@ - # read / write from py4DSTEM.io.importfile import import_file from py4DSTEM.io.read import read @@ -7,6 +6,3 @@ # google downloader from py4DSTEM.io.google_drive_downloader import gdrive_download, get_sample_file_ids - - - diff --git a/py4DSTEM/io/filereaders/empad.py b/py4DSTEM/io/filereaders/empad.py index 98b515863..25c0a113b 100644 --- a/py4DSTEM/io/filereaders/empad.py +++ b/py4DSTEM/io/filereaders/empad.py @@ -49,6 +49,7 @@ def read_empad(filename, mem="RAM", binfactor=1, metadata=False, **kwargs): data_shape = kwargs["EMPAD_shape"] else: import os + filesize = os.path.getsize(fPath) pattern_size = row * col * 4 # 4 bytes per pixel N_patterns = filesize / pattern_size diff --git a/py4DSTEM/io/filereaders/read_K2.py b/py4DSTEM/io/filereaders/read_K2.py index c03de98d3..61405a437 100644 --- a/py4DSTEM/io/filereaders/read_K2.py +++ b/py4DSTEM/io/filereaders/read_K2.py @@ -4,6 +4,7 @@ from collections.abc import Sequence import numpy as np + try: import numba as nb except ImportError: @@ -44,8 +45,12 @@ def read_gatan_K2_bin(fp, mem="MEMMAP", binfactor=1, metadata=False, **kwargs): return None block_sync = kwargs.get("K2_sync_block_IDs", True) - NR = kwargs.get("K2_hidden_stripe_noise_reduction",True) - return DataCube(data=K2DataArray(fp,sync_block_IDs=block_sync, hidden_stripe_noise_reduction=NR)) + NR = kwargs.get("K2_hidden_stripe_noise_reduction", True) + return 
DataCube( + data=K2DataArray( + fp, sync_block_IDs=block_sync, hidden_stripe_noise_reduction=NR + ) + ) class K2DataArray(Sequence): @@ -78,7 +83,9 @@ class K2DataArray(Sequence): into memory. To reduce RAM pressure, only call small slices or loop over each diffraction pattern. """ - def __init__(self, filepath, sync_block_IDs = True, hidden_stripe_noise_reduction=True): + def __init__( + self, filepath, sync_block_IDs=True, hidden_stripe_noise_reduction=True + ): from ncempy.io import dm import os import glob @@ -133,7 +140,10 @@ def __init__(self, filepath, sync_block_IDs = True, hidden_stripe_noise_reductio ("pad1", np.void, 5), ("shutter", ">u1"), ("pad2", np.void, 6), - ("block", ">u4",), + ( + "block", + ">u4", + ), ("pad4", np.void, 4), ("frame", ">u4"), ("coords", ">u2", (4,)), @@ -225,7 +235,7 @@ def mean(self, axis=None, dtype=None, out=None, keepdims=False): # handle average DP if axis == (0, 1): avgDP = np.zeros((self.shape[2], self.shape[3])) - for (Ry, Rx) in tqdmnd(self.shape[1], self.shape[0]): + for Ry, Rx in tqdmnd(self.shape[1], self.shape[0]): avgDP += self[Rx, Ry, :, :] return avgDP / (self.shape[0] * self.shape[1]) @@ -233,7 +243,7 @@ def mean(self, axis=None, dtype=None, out=None, keepdims=False): # handle average image if axis == (2, 3): avgImg = np.zeros((self.shape[0], self.shape[1])) - for (Ry, Rx) in tqdmnd(self.shape[1], self.shape[0]): + for Ry, Rx in tqdmnd(self.shape[1], self.shape[0]): avgImg[Rx, Ry] = np.mean(self[Rx, Ry, :, :]) return avgImg @@ -243,7 +253,7 @@ def sum(self, axis=None, dtype=None, out=None, keepdims=False): # handle average DP if axis == (0, 1): sumDP = np.zeros((self.shape[2], self.shape[3])) - for (Ry, Rx) in tqdmnd(self.shape[1], self.shape[0]): + for Ry, Rx in tqdmnd(self.shape[1], self.shape[0]): sumDP += self[Rx, Ry, :, :] return sumDP @@ -251,7 +261,7 @@ def sum(self, axis=None, dtype=None, out=None, keepdims=False): # handle average image if axis == (2, 3): sumImg = np.zeros((self.shape[0], self.shape[1])) - for (Ry, Rx) in tqdmnd(self.shape[1], self.shape[0]): + for Ry, Rx in tqdmnd(self.shape[1], self.shape[0]): sumImg[Rx, Ry] = np.sum(self[Rx, Ry, :, :]) return sumImg @@ -261,7 +271,7 @@ def max(self, axis=None, out=None): # handle average DP if axis == (0, 1): maxDP = np.zeros((self.shape[2], self.shape[3])) - for (Ry, Rx) in tqdmnd(self.shape[1], self.shape[0]): + for Ry, Rx in tqdmnd(self.shape[1], self.shape[0]): maxDP = np.maximum(maxDP, self[Rx, Ry, :, :]) return maxDP @@ -269,7 +279,7 @@ def max(self, axis=None, out=None): # handle average image if axis == (2, 3): maxImg = np.zeros((self.shape[0], self.shape[1])) - for (Ry, Rx) in tqdmnd(self.shape[1], self.shape[0]): + for Ry, Rx in tqdmnd(self.shape[1], self.shape[0]): maxImg[Rx, Ry] = np.max(self[Rx, Ry, :, :]) return maxImg @@ -281,13 +291,15 @@ def _attach_to_files(self): # Synchronize to the magic sync word # First, open the file in binary mode and read ~1 MB - with open(binName, 'rb') as f: + with open(binName, "rb") as f: s = f.read(1_000_000) # Scan the chunk and find everywhere the sync word appears - sync = [s.find(b'\xff\xff\x00\x55'),] + sync = [ + s.find(b"\xff\xff\x00\x55"), + ] while sync[-1] >= 0: - sync.append(s.find(b'\xff\xff\x00\x55',sync[-1]+1)) + sync.append(s.find(b"\xff\xff\x00\x55", sync[-1] + 1)) # Since the sync word can conceivably occur within the data region, # check that there is another sync word 22360 bytes away @@ -296,7 +308,9 @@ def _attach_to_files(self): sync_idx += 1 if sync_idx > 0: - print(f"Beginning file {i} at offset 
{sync[sync_idx]} due to incomplete data block!") + print( + f"Beginning file {i} at offset {sync[sync_idx]} due to incomplete data block!" + ) # Start the memmap at the offset of the sync byte self._bin_files[i] = np.memmap( @@ -505,7 +519,9 @@ def _write_to_hdf5(self, group): # ======= UTILITIES OUTSIDE THE CLASS ======# import sys -if 'numba' in sys.modules: + +if "numba" in sys.modules: + @nb.njit(nb.int16[::1](nb.uint8[::1]), fastmath=False, parallel=False) def _convert_uint12(data_chunk): """ @@ -532,7 +548,9 @@ def _convert_uint12(data_chunk): DP = out.astype(np.int16) return DP + else: + def _convert_uint12(data_chunk): """ data_chunk is a contigous 1D array of uint8 data) @@ -558,6 +576,3 @@ def _convert_uint12(data_chunk): DP = out.astype(np.int16) return DP - - - diff --git a/py4DSTEM/io/filereaders/read_abTEM.py b/py4DSTEM/io/filereaders/read_abTEM.py index 1fec9e73e..805439023 100644 --- a/py4DSTEM/io/filereaders/read_abTEM.py +++ b/py4DSTEM/io/filereaders/read_abTEM.py @@ -2,6 +2,7 @@ from py4DSTEM.data import DiffractionSlice, RealSlice from py4DSTEM.datacube import DataCube + def read_abTEM( filename, mem="RAM", @@ -37,7 +38,6 @@ def read_abTEM( assert len(data.shape) in (2, 4), "abtem reader supports only 4D and 2D data" if len(data.shape) == 4: - datacube = DataCube(data=data) datacube.calibration.set_R_pixel_size(sampling[0]) diff --git a/py4DSTEM/io/filereaders/read_arina.py b/py4DSTEM/io/filereaders/read_arina.py index 323b5643f..6f7c463d2 100644 --- a/py4DSTEM/io/filereaders/read_arina.py +++ b/py4DSTEM/io/filereaders/read_arina.py @@ -13,7 +13,6 @@ def read_arina( dtype_bin: float = None, flatfield: np.ndarray = None, ): - """ File reader for arina 4D-STEM datasets Args: diff --git a/py4DSTEM/io/filereaders/read_dm.py b/py4DSTEM/io/filereaders/read_dm.py index bc41695e1..617529708 100644 --- a/py4DSTEM/io/filereaders/read_dm.py +++ b/py4DSTEM/io/filereaders/read_dm.py @@ -9,13 +9,7 @@ from py4DSTEM.preprocess.utils import bin2D -def read_dm( - filepath, - name="dm_dataset", - mem="RAM", - binfactor=1, - **kwargs - ): +def read_dm(filepath, name="dm_dataset", mem="RAM", binfactor=1, **kwargs): """ Read a digital micrograph 4D-STEM file. @@ -38,7 +32,6 @@ def read_dm( # open the file with dm.fileDM(filepath, on_memory=False) as dmFile: - # loop through datasets looking for one with more than 2D # This is needed because: # NCEM TitanX files store 4D data in a 3D array @@ -112,7 +105,7 @@ def read_dm( _mmap = dmFile.getMemmap(dataset_index) # get the dtype for the binned data - dtype = kwargs.get("dtype", _mmap[0,0].dtype) + dtype = kwargs.get("dtype", _mmap[0, 0].dtype) if titan_shape is not None: # NCEM TitanX tags were found diff --git a/py4DSTEM/io/filereaders/read_mib.py b/py4DSTEM/io/filereaders/read_mib.py index e9dccff90..079c9d1bd 100644 --- a/py4DSTEM/io/filereaders/read_mib.py +++ b/py4DSTEM/io/filereaders/read_mib.py @@ -1,19 +1,21 @@ # Read the mib file captured using the Merlin detector -# Author: Tara Mishra, tara.matsci@gmail. +# Author: Tara Mishra, tara.matsci@gmail. # Based on the PyXEM load_mib module https://github.com/pyxem/pyxem/blob/563a3bb5f3233f46cd3e57f3cd6f9ddf7af55ad0/pyxem/utils/io_utils.py import numpy as np from py4DSTEM.datacube import DataCube import os + def load_mib( file_path, mem="MEMMAP", binfactor=1, reshape=True, flip=True, - scan = (256,256), - **kwargs): + scan=(256, 256), + **kwargs +): """ Read a MIB file and return as py4DSTEM DataCube. 
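For orientation, a minimal usage sketch of the `load_mib` reader whose signature is reformatted above; the file path and scan shape are illustrative assumptions, not part of this patch:

    from py4DSTEM.io.filereaders.read_mib import load_mib

    # Hypothetical path and scan shape; `scan` must match the real-space
    # dimensions of the acquisition for the reshape into (Rx, Ry, Qx, Qy).
    datacube = load_mib(
        "scan_data.mib",   # hypothetical file
        mem="RAM",         # load fully into RAM instead of memory-mapping
        scan=(128, 128),   # real-space scan shape
    )
    print(datacube.data.shape)  # -> (128, 128, Qx, Qy)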
@@ -48,19 +50,20 @@ def load_mib( data = data[:, hdr_bits:] if header["raw"] == "MIB": - data = data.reshape(depth,width,height) + data = data.reshape(depth, width, height) else: - print('Data type not supported as MIB reader') + print("Data type not supported as MIB reader") if reshape: - data = data.reshape(scan[0],scan[1],width,height) + data = data.reshape(scan[0], scan[1], width, height) if mem == "RAM": - data = np.array(data) # Load entire dataset into RAM + data = np.array(data) # Load entire dataset into RAM py4dstem_data = DataCube(data=data) return py4dstem_data + def manageHeader(fname): """Get necessary information from the header of the .mib file. Parameters @@ -84,7 +87,6 @@ def manageHeader(fname): Header += str(aByte.decode("ascii")) # This gets rid of the header while aByte and ord(aByte) != 0: - aByte = input.read(1) Header += str(aByte.decode("ascii")) @@ -120,6 +122,7 @@ def manageHeader(fname): return hdr + def parse_hdr(fp): """Parse information from mib file header info from _manageHeader function. Parameters @@ -217,6 +220,7 @@ def parse_hdr(fp): return hdr_info + def get_mib_memmap(fp, mmap_mode="r"): """Reads the binary mib file into a numpy memmap object and returns as dask array object. Parameters @@ -259,6 +263,7 @@ def get_mib_memmap(fp, mmap_mode="r"): data_mem = np.memmap(fp, offset=read_offset, dtype=data_type, mode=mmap_mode) return data_mem + def get_mib_depth(hdr_info, fp): """Determine the total number of frames based on .mib file size. Parameters @@ -290,7 +295,6 @@ def get_mib_depth(hdr_info, fp): file_size = os.path.getsize(fp[:-3] + "mib") if hdr_info["raw"] == "R64": - single_frame = mib_file_size_dict.get(str(hdr_info["Counter Depth (number)"])) depth = int(file_size / single_frame) elif hdr_info["raw"] == "MIB": @@ -306,6 +310,7 @@ def get_mib_depth(hdr_info, fp): return depth + def get_hdr_bits(hdr_info): """Gets the number of character bits for the header for each frame given the data type. 
Parameters @@ -348,4 +353,3 @@ def get_hdr_bits(hdr_info): hdr_bits = int(hdr_info["data offset"] * hdr_multiplier) return hdr_bits - diff --git a/py4DSTEM/io/google_drive_downloader.py b/py4DSTEM/io/google_drive_downloader.py index 86ad1a9f4..5b53f19ae 100644 --- a/py4DSTEM/io/google_drive_downloader.py +++ b/py4DSTEM/io/google_drive_downloader.py @@ -1,199 +1,185 @@ import gdown import os +import warnings ### File IDs # single files file_ids = { - 'sample_diffraction_pattern' : ( - 'a_diffraction_pattern.h5', - '1ymYMnuDC0KV6dqduxe2O1qafgSd0jjnU', + "sample_diffraction_pattern": ( + "a_diffraction_pattern.h5", + "1ymYMnuDC0KV6dqduxe2O1qafgSd0jjnU", ), - 'Au_sim' : ( - 'Au_sim.h5', - '1PmbCYosA1eYydWmmZebvf6uon9k_5g_S', + "Au_sim": ( + "Au_sim.h5", + "1PmbCYosA1eYydWmmZebvf6uon9k_5g_S", ), - 'carbon_nanotube' : ( - 'carbon_nanotube.h5', - '1bHv3u61Cr-y_GkdWHrJGh1lw2VKmt3UM', + "carbon_nanotube": ( + "carbon_nanotube.h5", + "1bHv3u61Cr-y_GkdWHrJGh1lw2VKmt3UM", ), - 'Si_SiGe_exp' : ( - 'Si_SiGe_exp.h5', - '1fXNYSGpe6w6E9RBA-Ai_owZwoj3w8PNC', + "Si_SiGe_exp": ( + "Si_SiGe_exp.h5", + "1fXNYSGpe6w6E9RBA-Ai_owZwoj3w8PNC", ), - 'Si_SiGe_probe' : ( - 'Si_SiGe_probe.h5', - '141Tv0YF7c5a-MCrh3CkY_w4FgWtBih80', + "Si_SiGe_probe": ( + "Si_SiGe_probe.h5", + "141Tv0YF7c5a-MCrh3CkY_w4FgWtBih80", ), - 'Si_SiGe_EELS_strain' : ( - 'Si_SiGe_EELS_strain.h5', - '1klkecq8IuEOYB-bXchO7RqOcgCl4bmDJ', + "Si_SiGe_EELS_strain": ( + "Si_SiGe_EELS_strain.h5", + "1klkecq8IuEOYB-bXchO7RqOcgCl4bmDJ", ), - 'AuAgPd_wire' : ( - 'AuAgPd_wire.h5', - '1OQYW0H6VELsmnLTcwicP88vo2V5E3Oyt', + "AuAgPd_wire": ( + "AuAgPd_wire.h5", + "1OQYW0H6VELsmnLTcwicP88vo2V5E3Oyt", ), - 'AuAgPd_wire_probe' : ( - 'AuAgPd_wire_probe.h5', - '17OduUKpxVBDumSK_VHtnc2XKkaFVN8kq', + "AuAgPd_wire_probe": ( + "AuAgPd_wire_probe.h5", + "17OduUKpxVBDumSK_VHtnc2XKkaFVN8kq", ), - 'polycrystal_2D_WS2' : ( - 'polycrystal_2D_WS2.h5', - '1AWB3-UTPiTR9dgrEkNFD7EJYsKnbEy0y', + "polycrystal_2D_WS2": ( + "polycrystal_2D_WS2.h5", + "1AWB3-UTPiTR9dgrEkNFD7EJYsKnbEy0y", ), - 'WS2cif' : ( - 'WS2.cif', - '13zBl6aFExtsz_sew-L0-_ALYJfcgHKjo', + "WS2cif": ( + "WS2.cif", + "13zBl6aFExtsz_sew-L0-_ALYJfcgHKjo", ), - 'polymers' : ( - 'polymers.h5', - '1lK-TAMXN1MpWG0Q3_4vss_uEZgW2_Xh7', + "polymers": ( + "polymers.h5", + "1lK-TAMXN1MpWG0Q3_4vss_uEZgW2_Xh7", ), - 'vac_probe' : ( - 'vac_probe.h5', - '1QTcSKzZjHZd1fDimSI_q9_WsAU25NIXe', + "vac_probe": ( + "vac_probe.h5", + "1QTcSKzZjHZd1fDimSI_q9_WsAU25NIXe", ), - 'small_dm3_3Dstack' : ( - 'small_dm3_3Dstack.dm3', - '1B-xX3F65JcWzAg0v7f1aVwnawPIfb5_o' + "small_dm3_3Dstack": ("small_dm3_3Dstack.dm3", "1B-xX3F65JcWzAg0v7f1aVwnawPIfb5_o"), + "FCU-Net": ( + "filename.name", + "1-KX0saEYfhZ9IJAOwabH38PCVtfXidJi", ), - 'FCU-Net' : ( - 'filename.name', - '1-KX0saEYfhZ9IJAOwabH38PCVtfXidJi', - ), - 'small_datacube' : ( - 'small_datacube.dm4', + "small_datacube": ( + "small_datacube.dm4", # TODO - change this file to something smaller - ideally e.g. 
shape (4,8,256,256) ~= 4.2MB' - '1QTcSKzZjHZd1fDimSI_q9_WsAU25NIXe' - ), - 'legacy_v0.9' : ( - 'legacy_v0.9_simAuNanoplatelet_bin.h5', - '1AIRwpcj87vK3ubLaKGj1UiYXZByD2lpu' + "1QTcSKzZjHZd1fDimSI_q9_WsAU25NIXe", ), - 'legacy_v0.13' : ( - 'legacy_v0.13.h5', - '1VEqUy0Gthama7YAVkxwbjQwdciHpx8rA' + "legacy_v0.9": ( + "legacy_v0.9_simAuNanoplatelet_bin.h5", + "1AIRwpcj87vK3ubLaKGj1UiYXZByD2lpu", ), - 'legacy_v0.14' : ( - 'legacy_v0.14.h5', - '1eOTEJrpHnNv9_DPrWgZ4-NTN21UbH4aR', + "legacy_v0.13": ("legacy_v0.13.h5", "1VEqUy0Gthama7YAVkxwbjQwdciHpx8rA"), + "legacy_v0.14": ( + "legacy_v0.14.h5", + "1eOTEJrpHnNv9_DPrWgZ4-NTN21UbH4aR", ), - 'test_realslice_io' : ( - 'test_realslice_io.h5', - '1siH80-eRJwG5R6AnU4vkoqGWByrrEz1y' + "test_realslice_io": ("test_realslice_io.h5", "1siH80-eRJwG5R6AnU4vkoqGWByrrEz1y"), + "test_arina_master": ( + "STO_STEM_bench_20us_master.h5", + "1q_4IjFuWRkw5VM84NhxrNTdIq4563BOC", ), - 'test_arina_master' : ( - 'STO_STEM_bench_20us_master.h5', - '1q_4IjFuWRkw5VM84NhxrNTdIq4563BOC' + "test_arina_01": ( + "STO_STEM_bench_20us_data_000001.h5", + "1_3Dbm22-hV58iffwK9x-3vqJUsEXZBFQ", ), - 'test_arina_01' : ( - 'STO_STEM_bench_20us_data_000001.h5', - '1_3Dbm22-hV58iffwK9x-3vqJUsEXZBFQ' + "test_arina_02": ( + "STO_STEM_bench_20us_data_000002.h5", + "1x29RzHLnCzP0qthLhA1kdlUQ09ENViR8", ), - 'test_arina_02' : ( - 'STO_STEM_bench_20us_data_000002.h5', - '1x29RzHLnCzP0qthLhA1kdlUQ09ENViR8' + "test_arina_03": ( + "STO_STEM_bench_20us_data_000003.h5", + "1qsbzdEVD8gt4DYKnpwjfoS_Mg4ggObAA", ), - 'test_arina_03' : ( - 'STO_STEM_bench_20us_data_000003.h5', - '1qsbzdEVD8gt4DYKnpwjfoS_Mg4ggObAA' + "test_arina_04": ( + "STO_STEM_bench_20us_data_000004.h5", + "1Lcswld0Y9fNBk4-__C9iJbc854BuHq-h", ), - 'test_arina_04' : ( - 'STO_STEM_bench_20us_data_000004.h5', - '1Lcswld0Y9fNBk4-__C9iJbc854BuHq-h' + "test_arina_05": ( + "STO_STEM_bench_20us_data_000005.h5", + "13YTO2ABsTK5nObEr7RjOZYCV3sEk3gt9", ), - 'test_arina_05' : ( - 'STO_STEM_bench_20us_data_000005.h5', - '13YTO2ABsTK5nObEr7RjOZYCV3sEk3gt9' + "test_arina_06": ( + "STO_STEM_bench_20us_data_000006.h5", + "1RywPXt6HRbCvjgjSuYFf60QHWlOPYXwy", ), - 'test_arina_06' : ( - 'STO_STEM_bench_20us_data_000006.h5', - '1RywPXt6HRbCvjgjSuYFf60QHWlOPYXwy' + "test_arina_07": ( + "STO_STEM_bench_20us_data_000007.h5", + "1GRoBecCvAUeSIujzsPywv1vXKSIsNyoT", ), - 'test_arina_07' : ( - 'STO_STEM_bench_20us_data_000007.h5', - '1GRoBecCvAUeSIujzsPywv1vXKSIsNyoT' + "test_arina_08": ( + "STO_STEM_bench_20us_data_000008.h5", + "1sTFuuvgKbTjZz1lVUfkZbbTDTQmwqhuU", ), - 'test_arina_08' : ( - 'STO_STEM_bench_20us_data_000008.h5', - '1sTFuuvgKbTjZz1lVUfkZbbTDTQmwqhuU' + "test_arina_09": ( + "STO_STEM_bench_20us_data_000009.h5", + "1JmBiMg16iMVfZ5wz8z_QqcNPVRym1Ezh", ), - 'test_arina_09' : ( - 'STO_STEM_bench_20us_data_000009.h5', - '1JmBiMg16iMVfZ5wz8z_QqcNPVRym1Ezh' + "test_arina_10": ( + "STO_STEM_bench_20us_data_000010.h5", + "1_90xAfclNVwMWwQ-YKxNNwBbfR1nfHoB", ), - 'test_arina_10' : ( - 'STO_STEM_bench_20us_data_000010.h5', - '1_90xAfclNVwMWwQ-YKxNNwBbfR1nfHoB' + "test_strain": ( + "downsample_Si_SiGe_analysis_braggdisks_cal.h5", + "1bYgDdAlnWHyFmY-SwN3KVpMutWBI5MhP", ), - 'test_strain' : ( - 'downsample_Si_SiGe_analysis_braggdisks_cal.h5', - '1bYgDdAlnWHyFmY-SwN3KVpMutWBI5MhP' - ) } # collections of files collection_ids = { - 'tutorials' : ( - 'Au_sim', - 'carbon_nanotube', - 'Si_SiGe_exp', - 'Si_SiGe_probe', - 'Si_SiGe_EELS_strain', - 'AuAgPd_wire', - 'AuAgPd_wire_probe', - 'polycrystal_2D_WS2', - 'WS2cif', - 'polymers', - 'vac_probe', - ), - 'test_io' : ( - 
'small_dm3_3Dstack', - 'vac_probe', - 'legacy_v0.9', - 'legacy_v0.13', - 'legacy_v0.14', - 'test_realslice_io', - ), - 'test_arina' : ( - 'test_arina_master', - 'test_arina_01', - 'test_arina_02', - 'test_arina_03', - 'test_arina_04', - 'test_arina_05', - 'test_arina_06', - 'test_arina_07', - 'test_arina_08', - 'test_arina_09', - 'test_arina_10', - ), - 'test_braggvectors' : ( - 'Au_sim', - ), - 'strain' : ( - 'test_strain', - ) + "tutorials": ( + "Au_sim", + "carbon_nanotube", + "Si_SiGe_exp", + "Si_SiGe_probe", + "Si_SiGe_EELS_strain", + "AuAgPd_wire", + "AuAgPd_wire_probe", + "polycrystal_2D_WS2", + "WS2cif", + "polymers", + "vac_probe", + ), + "test_io": ( + "small_dm3_3Dstack", + "vac_probe", + "legacy_v0.9", + "legacy_v0.13", + "legacy_v0.14", + "test_realslice_io", + ), + "test_arina": ( + "test_arina_master", + "test_arina_01", + "test_arina_02", + "test_arina_03", + "test_arina_04", + "test_arina_05", + "test_arina_06", + "test_arina_07", + "test_arina_08", + "test_arina_09", + "test_arina_10", + ), + "test_braggvectors": ("Au_sim",), + "strain": ("test_strain",), } -def get_sample_file_ids(): - return { - 'files' : file_ids.keys(), - 'collections' : collection_ids.keys() - } +def get_sample_file_ids(): + return {"files": file_ids.keys(), "collections": collection_ids.keys()} ### Downloader + def gdrive_download( id_, - destination = None, - overwrite = False, - filename = None, - verbose = True, - ): + destination=None, + overwrite=False, + filename=None, + verbose=True, +): """ Downloads a file or collection of files from google drive. @@ -225,34 +211,61 @@ def gdrive_download( # parse destination if destination is None: destination = os.getcwd() - assert(os.path.exists(destination)), f"`destination` must exist on filesystem. Received {destination}" + assert os.path.exists( + destination + ), f"`destination` must exist on filesystem. Received {destination}" # download single files if id_ not in collection_ids: - # assign the name and id - kwargs = { - 'fuzzy' : True - } + kwargs = {"fuzzy": True} if id_ in file_ids: f = file_ids[id_] filename = f[0] - kwargs['id'] = f[1] + kwargs["id"] = f[1] + + # if its not in the list of files we expect + + # TODO simplify the logic here else: - filename = 'gdrivedownload.file' if filename is None else filename - kwargs['url'] = id_ + filename = "gdrivedownload.file" if filename is None else filename + # check if its a url + if id_.startswith("http"): + # check the url is the correct format i.e. https://drive.google.com/uc?id= + # and not https://drive.google.com/file/d/ + # if correct format + if "uc?id=" in id_: + kwargs["url"] = id_ + # if incorrect format, strip the google ID from the URL + # making http/https agnostic + elif "drive.google.com/file/d/" in id_: + # warn the user the the url syntax was incorrect and this is making a guess + warnings.warn( + f"URL provided {id_} was not in the correct format https://drive.google.com/uc?id=, attempting to interpret link and download the file. 
Most likely a URL with this format was provided https://drive.google.com/file/d/" + ) + # try stripping + stripped_id = id_.split("/")[-1] + # Currently the length of the google drive IDs appears to always be 33 characters + # check for length and warn if it appears malformed, if so raise warning and the ID it guessed + if len(stripped_id) != 33: + warnings.warn( + f"Guessed ID {stripped_id}: appears to be in the wrong length (not 33 characters), attempting download" + ) + kwargs["id"] = stripped_id + # if its just a Google Drive string + else: + kwargs["id"] = id_ # download - kwargs['output'] = os.path.join(destination, filename) - if not(overwrite) and os.path.exists(kwargs['output']): + kwargs["output"] = os.path.join(destination, filename) + if not (overwrite) and os.path.exists(kwargs["output"]): if verbose: print(f"A file already exists at {kwargs['output']}, skipping...") else: - gdown.download( **kwargs ) + gdown.download(**kwargs) # download a collections of files else: - # set destination destination = os.path.join(destination, id_) if not os.path.exists(destination): @@ -260,19 +273,11 @@ def gdrive_download( # loop for x in collection_ids[id_]: - file_name,file_id = file_ids[x] + file_name, file_id = file_ids[x] output = os.path.join(destination, file_name) # download - if not(overwrite) and os.path.exists(output): + if not (overwrite) and os.path.exists(output): if verbose: print(f"A file already exists at {output}, skipping...") else: - gdown.download( - id = file_id, - output = output, - fuzzy = True - ) - - - - + gdown.download(id=file_id, output=output, fuzzy=True) diff --git a/py4DSTEM/io/legacy/__init__.py b/py4DSTEM/io/legacy/__init__.py index e50808f0c..ee340a7d4 100644 --- a/py4DSTEM/io/legacy/__init__.py +++ b/py4DSTEM/io/legacy/__init__.py @@ -1,4 +1,3 @@ from py4DSTEM.io.legacy.read_legacy_13 import * from py4DSTEM.io.legacy.read_legacy_12 import * from py4DSTEM.io.legacy.read_utils import * - diff --git a/py4DSTEM/io/legacy/legacy12/__init__.py b/py4DSTEM/io/legacy/legacy12/__init__.py index 37efaf18d..2370b8ca6 100644 --- a/py4DSTEM/io/legacy/legacy12/__init__.py +++ b/py4DSTEM/io/legacy/legacy12/__init__.py @@ -3,5 +3,3 @@ from .read_v0_7 import read_v0_7 from .read_v0_9 import read_v0_9 from .read_v0_12 import read_v0_12 - - diff --git a/py4DSTEM/io/legacy/legacy12/read_utils_v0_12.py b/py4DSTEM/io/legacy/legacy12/read_utils_v0_12.py index 0ef3e4366..a8a646e8d 100644 --- a/py4DSTEM/io/legacy/legacy12/read_utils_v0_12.py +++ b/py4DSTEM/io/legacy/legacy12/read_utils_v0_12.py @@ -4,68 +4,77 @@ import numpy as np from py4DSTEM.io.legacy.read_utils import is_py4DSTEM_file -def get_py4DSTEM_dataobject_info(filepath, topgroup='4DSTEM_experiment'): - """ Returns a numpy structured array with basic metadata for all contained dataobjects. - Keys for the info array are: 'index','type','shape','name'. + +def get_py4DSTEM_dataobject_info(filepath, topgroup="4DSTEM_experiment"): + """Returns a numpy structured array with basic metadata for all contained dataobjects. + Keys for the info array are: 'index','type','shape','name'. 
""" - assert(is_py4DSTEM_file(filepath)), "Error: not recognized as a py4DSTEM file" - with h5py.File(filepath,'r') as f: - assert(topgroup in f.keys()), "Error: unrecognized topgroup" + assert is_py4DSTEM_file(filepath), "Error: not recognized as a py4DSTEM file" + with h5py.File(filepath, "r") as f: + assert topgroup in f.keys(), "Error: unrecognized topgroup" i = 0 l_md = [] - with h5py.File(filepath,'r') as f: - grp_dc = f[topgroup+'/data/datacubes/'] - grp_cdc = f[topgroup+'/data/counted_datacubes/'] - grp_ds = f[topgroup+'/data/diffractionslices/'] - grp_rs = f[topgroup+'/data/realslices/'] - grp_pl = f[topgroup+'/data/pointlists/'] - grp_pla = f[topgroup+'/data/pointlistarrays/'] - grp_coords = f[topgroup+'/data/coordinates/'] - N = len(grp_dc)+len(grp_cdc)+len(grp_ds)+len(grp_rs)+len(grp_pl)+len(grp_pla)+len(grp_coords) - info = np.zeros(N,dtype=[('index',int),('type','U16'),('shape',tuple),('name','U64')]) + with h5py.File(filepath, "r") as f: + grp_dc = f[topgroup + "/data/datacubes/"] + grp_cdc = f[topgroup + "/data/counted_datacubes/"] + grp_ds = f[topgroup + "/data/diffractionslices/"] + grp_rs = f[topgroup + "/data/realslices/"] + grp_pl = f[topgroup + "/data/pointlists/"] + grp_pla = f[topgroup + "/data/pointlistarrays/"] + grp_coords = f[topgroup + "/data/coordinates/"] + N = ( + len(grp_dc) + + len(grp_cdc) + + len(grp_ds) + + len(grp_rs) + + len(grp_pl) + + len(grp_pla) + + len(grp_coords) + ) + info = np.zeros( + N, + dtype=[("index", int), ("type", "U16"), ("shape", tuple), ("name", "U64")], + ) for name in sorted(grp_dc.keys()): - shape = grp_dc[name+'/data/'].shape - dtype = 'DataCube' - info[i] = i,dtype,shape,name + shape = grp_dc[name + "/data/"].shape + dtype = "DataCube" + info[i] = i, dtype, shape, name i += 1 for name in sorted(grp_cdc.keys()): # TODO - shape = grp_cdc[name+'/data/'].shape - dtype = 'CountedDataCube' - info[i] = i,dtype,shape,name + shape = grp_cdc[name + "/data/"].shape + dtype = "CountedDataCube" + info[i] = i, dtype, shape, name i += 1 for name in sorted(grp_ds.keys()): - shape = grp_ds[name+'/data/'].shape - dtype = 'DiffractionSlice' - info[i] = i,dtype,shape,name + shape = grp_ds[name + "/data/"].shape + dtype = "DiffractionSlice" + info[i] = i, dtype, shape, name i += 1 for name in sorted(grp_rs.keys()): - shape = grp_rs[name+'/data/'].shape - dtype = 'RealSlice' - info[i] = i,dtype,shape,name + shape = grp_rs[name + "/data/"].shape + dtype = "RealSlice" + info[i] = i, dtype, shape, name i += 1 for name in sorted(grp_pl.keys()): coordinates = list(grp_pl[name].keys()) - length = grp_pl[name+'/'+coordinates[0]+'/data'].shape[0] - shape = (len(coordinates),length) - dtype = 'PointList' - info[i] = i,dtype,shape,name + length = grp_pl[name + "/" + coordinates[0] + "/data"].shape[0] + shape = (len(coordinates), length) + dtype = "PointList" + info[i] = i, dtype, shape, name i += 1 for name in sorted(grp_pla.keys()): - ar_shape = grp_pla[name+'/data'].shape - pla_dtype = h5py.check_vlen_dtype(grp_pla[name+'/data'].dtype) + ar_shape = grp_pla[name + "/data"].shape + pla_dtype = h5py.check_vlen_dtype(grp_pla[name + "/data"].dtype) N_coords = len(pla_dtype) - shape = (ar_shape[0],ar_shape[1],N_coords,-1) - dtype = 'PointListArray' - info[i] = i,dtype,shape,name + shape = (ar_shape[0], ar_shape[1], N_coords, -1) + dtype = "PointListArray" + info[i] = i, dtype, shape, name i += 1 for name in sorted(grp_coords.keys()): - shape=0 #TODO? - dtype = 'Coordinates' - info[i] = i,dtype,shape,name + shape = 0 # TODO? 
+ dtype = "Coordinates" + info[i] = i, dtype, shape, name i += 1 return info - - - diff --git a/py4DSTEM/io/legacy/legacy12/read_utils_v0_5.py b/py4DSTEM/io/legacy/legacy12/read_utils_v0_5.py index d7df6da44..1e868c4b7 100644 --- a/py4DSTEM/io/legacy/legacy12/read_utils_v0_5.py +++ b/py4DSTEM/io/legacy/legacy12/read_utils_v0_5.py @@ -4,56 +4,57 @@ import numpy as np from py4DSTEM.io.legacy.read_utils import is_py4DSTEM_file -def get_py4DSTEM_dataobject_info(fp, topgroup='4DSTEM_experiment'): - """ Returns a numpy structured array with basic metadata for all contained dataobjects. - Keys for the info array are: 'index','type','shape','name'. + +def get_py4DSTEM_dataobject_info(fp, topgroup="4DSTEM_experiment"): + """Returns a numpy structured array with basic metadata for all contained dataobjects. + Keys for the info array are: 'index','type','shape','name'. """ - assert(is_py4DSTEM_file(fp)), "Error: not recognized as a py4DSTEM file" - with h5py.File(fp,'r') as f: - assert(topgroup in f.keys()), "Error: unrecognized topgroup" + assert is_py4DSTEM_file(fp), "Error: not recognized as a py4DSTEM file" + with h5py.File(fp, "r") as f: + assert topgroup in f.keys(), "Error: unrecognized topgroup" i = 0 l_md = [] - with h5py.File(fp,'r') as f: - grp_dc = f[topgroup+'/data/datacubes/'] - grp_ds = f[topgroup+'/data/diffractionslices/'] - grp_rs = f[topgroup+'/data/realslices/'] - grp_pl = f[topgroup+'/data/pointlists/'] - grp_pla = f[topgroup+'/data/pointlistarrays/'] - N = len(grp_dc)+len(grp_ds)+len(grp_rs)+len(grp_pl)+len(grp_pla) - info = np.zeros(N,dtype=[('index',int),('type','U16'),('shape',tuple),('name','U64')]) + with h5py.File(fp, "r") as f: + grp_dc = f[topgroup + "/data/datacubes/"] + grp_ds = f[topgroup + "/data/diffractionslices/"] + grp_rs = f[topgroup + "/data/realslices/"] + grp_pl = f[topgroup + "/data/pointlists/"] + grp_pla = f[topgroup + "/data/pointlistarrays/"] + N = len(grp_dc) + len(grp_ds) + len(grp_rs) + len(grp_pl) + len(grp_pla) + info = np.zeros( + N, + dtype=[("index", int), ("type", "U16"), ("shape", tuple), ("name", "U64")], + ) for name in sorted(grp_dc.keys()): - shape = grp_dc[name+'/datacube/'].shape - dtype = 'DataCube' - info[i] = i,dtype,shape,name + shape = grp_dc[name + "/datacube/"].shape + dtype = "DataCube" + info[i] = i, dtype, shape, name i += 1 for name in sorted(grp_ds.keys()): - shape = grp_ds[name+'/diffractionslice/'].shape - dtype = 'DiffractionSlice' - info[i] = i,dtype,shape,name + shape = grp_ds[name + "/diffractionslice/"].shape + dtype = "DiffractionSlice" + info[i] = i, dtype, shape, name i += 1 for name in sorted(grp_rs.keys()): - shape = grp_rs[name+'/realslice/'].shape - dtype = 'RealSlice' - info[i] = i,dtype,shape,name + shape = grp_rs[name + "/realslice/"].shape + dtype = "RealSlice" + info[i] = i, dtype, shape, name i += 1 for name in sorted(grp_pl.keys()): coordinates = list(grp_pl[name].keys()) - length = grp_pl[name+'/'+coordinates[0]+'/pointlist'].shape[0] - shape = (len(coordinates),length) - dtype = 'PointList' - info[i] = i,dtype,shape,name + length = grp_pl[name + "/" + coordinates[0] + "/pointlist"].shape[0] + shape = (len(coordinates), length) + dtype = "PointList" + info[i] = i, dtype, shape, name i += 1 for name in sorted(grp_pla.keys()): l = list(grp_pla[name]) - ar = np.array([l[j].split('_') for j in range(len(l))]).astype(int) - ar_shape = (np.max(ar[:,0])+1,np.max(ar[:,1])+1) - N_coords = len(list(grp_pla[name+'/0_0'])) - shape = (ar_shape[0],ar_shape[1],N_coords,-1) - dtype = 'PointListArray' - info[i] = 
i,dtype,shape,name + ar = np.array([l[j].split("_") for j in range(len(l))]).astype(int) + ar_shape = (np.max(ar[:, 0]) + 1, np.max(ar[:, 1]) + 1) + N_coords = len(list(grp_pla[name + "/0_0"])) + shape = (ar_shape[0], ar_shape[1], N_coords, -1) + dtype = "PointListArray" + info[i] = i, dtype, shape, name i += 1 return info - - - diff --git a/py4DSTEM/io/legacy/legacy12/read_utils_v0_6.py b/py4DSTEM/io/legacy/legacy12/read_utils_v0_6.py index 51dd3533d..79cd7f048 100644 --- a/py4DSTEM/io/legacy/legacy12/read_utils_v0_6.py +++ b/py4DSTEM/io/legacy/legacy12/read_utils_v0_6.py @@ -4,55 +4,57 @@ import numpy as np from py4DSTEM.io.legacy.read_utils import is_py4DSTEM_file -def get_py4DSTEM_dataobject_info(fp, topgroup='4DSTEM_experiment'): - """ Returns a numpy structured array with basic metadata for all contained dataobjects. - Keys for the info array are: 'index','type','shape','name'. + +def get_py4DSTEM_dataobject_info(fp, topgroup="4DSTEM_experiment"): + """Returns a numpy structured array with basic metadata for all contained dataobjects. + Keys for the info array are: 'index','type','shape','name'. """ - assert(is_py4DSTEM_file(fp)), "Error: not recognized as a py4DSTEM file" - with h5py.File(fp,'r') as f: - assert(topgroup in f.keys()), "Error: unrecognized topgroup" + assert is_py4DSTEM_file(fp), "Error: not recognized as a py4DSTEM file" + with h5py.File(fp, "r") as f: + assert topgroup in f.keys(), "Error: unrecognized topgroup" i = 0 l_md = [] - with h5py.File(fp,'r') as f: - grp_dc = f[topgroup+'/data/datacubes/'] - grp_ds = f[topgroup+'/data/diffractionslices/'] - grp_rs = f[topgroup+'/data/realslices/'] - grp_pl = f[topgroup+'/data/pointlists/'] - grp_pla = f[topgroup+'/data/pointlistarrays/'] - N = len(grp_dc)+len(grp_ds)+len(grp_rs)+len(grp_pl)+len(grp_pla) - info = np.zeros(N,dtype=[('index',int),('type','U16'),('shape',tuple),('name','U64')]) + with h5py.File(fp, "r") as f: + grp_dc = f[topgroup + "/data/datacubes/"] + grp_ds = f[topgroup + "/data/diffractionslices/"] + grp_rs = f[topgroup + "/data/realslices/"] + grp_pl = f[topgroup + "/data/pointlists/"] + grp_pla = f[topgroup + "/data/pointlistarrays/"] + N = len(grp_dc) + len(grp_ds) + len(grp_rs) + len(grp_pl) + len(grp_pla) + info = np.zeros( + N, + dtype=[("index", int), ("type", "U16"), ("shape", tuple), ("name", "U64")], + ) for name in sorted(grp_dc.keys()): - shape = grp_dc[name+'/data/'].shape - dtype = 'DataCube' - info[i] = i,dtype,shape,name + shape = grp_dc[name + "/data/"].shape + dtype = "DataCube" + info[i] = i, dtype, shape, name i += 1 for name in sorted(grp_ds.keys()): - shape = grp_ds[name+'/data/'].shape - dtype = 'DiffractionSlice' - info[i] = i,dtype,shape,name + shape = grp_ds[name + "/data/"].shape + dtype = "DiffractionSlice" + info[i] = i, dtype, shape, name i += 1 for name in sorted(grp_rs.keys()): - shape = grp_rs[name+'/data/'].shape - dtype = 'RealSlice' - info[i] = i,dtype,shape,name + shape = grp_rs[name + "/data/"].shape + dtype = "RealSlice" + info[i] = i, dtype, shape, name i += 1 for name in sorted(grp_pl.keys()): coordinates = list(grp_pl[name].keys()) - length = grp_pl[name+'/'+coordinates[0]+'/data'].shape[0] - shape = (len(coordinates),length) - dtype = 'PointList' - info[i] = i,dtype,shape,name + length = grp_pl[name + "/" + coordinates[0] + "/data"].shape[0] + shape = (len(coordinates), length) + dtype = "PointList" + info[i] = i, dtype, shape, name i += 1 for name in sorted(grp_pla.keys()): l = list(grp_pla[name]) - ar = np.array([l[j].split('_') for j in 
range(len(l))]).astype(int) - ar_shape = (np.max(ar[:,0])+1,np.max(ar[:,1])+1) - N_coords = len(list(grp_pla[name+'/0_0'])) - shape = (ar_shape[0],ar_shape[1],N_coords,-1) - dtype = 'PointListArray' - info[i] = i,dtype,shape,name + ar = np.array([l[j].split("_") for j in range(len(l))]).astype(int) + ar_shape = (np.max(ar[:, 0]) + 1, np.max(ar[:, 1]) + 1) + N_coords = len(list(grp_pla[name + "/0_0"])) + shape = (ar_shape[0], ar_shape[1], N_coords, -1) + dtype = "PointListArray" + info[i] = i, dtype, shape, name i += 1 return info - - diff --git a/py4DSTEM/io/legacy/legacy12/read_utils_v0_7.py b/py4DSTEM/io/legacy/legacy12/read_utils_v0_7.py index 4f6dc11a2..56b09059d 100644 --- a/py4DSTEM/io/legacy/legacy12/read_utils_v0_7.py +++ b/py4DSTEM/io/legacy/legacy12/read_utils_v0_7.py @@ -4,55 +4,56 @@ import numpy as np from py4DSTEM.io.legacy.read_utils import is_py4DSTEM_file -def get_py4DSTEM_dataobject_info(fp, topgroup='4DSTEM_experiment'): - """ Returns a numpy structured array with basic metadata for all contained dataobjects. - Keys for the info array are: 'index','type','shape','name'. + +def get_py4DSTEM_dataobject_info(fp, topgroup="4DSTEM_experiment"): + """Returns a numpy structured array with basic metadata for all contained dataobjects. + Keys for the info array are: 'index','type','shape','name'. """ - assert(is_py4DSTEM_file(fp)), "Error: not recognized as a py4DSTEM file" - with h5py.File(fp,'r') as f: - assert(topgroup in f.keys()), "Error: unrecognized topgroup" + assert is_py4DSTEM_file(fp), "Error: not recognized as a py4DSTEM file" + with h5py.File(fp, "r") as f: + assert topgroup in f.keys(), "Error: unrecognized topgroup" i = 0 l_md = [] - with h5py.File(fp,'r') as f: - grp_dc = f[topgroup+'/data/datacubes/'] - grp_ds = f[topgroup+'/data/diffractionslices/'] - grp_rs = f[topgroup+'/data/realslices/'] - grp_pl = f[topgroup+'/data/pointlists/'] - grp_pla = f[topgroup+'/data/pointlistarrays/'] - N = len(grp_dc)+len(grp_ds)+len(grp_rs)+len(grp_pl)+len(grp_pla) - info = np.zeros(N,dtype=[('index',int),('type','U16'),('shape',tuple),('name','U64')]) + with h5py.File(fp, "r") as f: + grp_dc = f[topgroup + "/data/datacubes/"] + grp_ds = f[topgroup + "/data/diffractionslices/"] + grp_rs = f[topgroup + "/data/realslices/"] + grp_pl = f[topgroup + "/data/pointlists/"] + grp_pla = f[topgroup + "/data/pointlistarrays/"] + N = len(grp_dc) + len(grp_ds) + len(grp_rs) + len(grp_pl) + len(grp_pla) + info = np.zeros( + N, + dtype=[("index", int), ("type", "U16"), ("shape", tuple), ("name", "U64")], + ) for name in sorted(grp_dc.keys()): - shape = grp_dc[name+'/data/'].shape - dtype = 'DataCube' - info[i] = i,dtype,shape,name + shape = grp_dc[name + "/data/"].shape + dtype = "DataCube" + info[i] = i, dtype, shape, name i += 1 for name in sorted(grp_ds.keys()): - shape = grp_ds[name+'/data/'].shape - dtype = 'DiffractionSlice' - info[i] = i,dtype,shape,name + shape = grp_ds[name + "/data/"].shape + dtype = "DiffractionSlice" + info[i] = i, dtype, shape, name i += 1 for name in sorted(grp_rs.keys()): - shape = grp_rs[name+'/data/'].shape - dtype = 'RealSlice' - info[i] = i,dtype,shape,name + shape = grp_rs[name + "/data/"].shape + dtype = "RealSlice" + info[i] = i, dtype, shape, name i += 1 for name in sorted(grp_pl.keys()): coordinates = list(grp_pl[name].keys()) - length = grp_pl[name+'/'+coordinates[0]+'/data'].shape[0] - shape = (len(coordinates),length) - dtype = 'PointList' - info[i] = i,dtype,shape,name + length = grp_pl[name + "/" + coordinates[0] + "/data"].shape[0] + shape = 
(len(coordinates), length) + dtype = "PointList" + info[i] = i, dtype, shape, name i += 1 for name in sorted(grp_pla.keys()): - ar = np.array(grp_pla[name+'/data']) + ar = np.array(grp_pla[name + "/data"]) ar_shape = ar.shape - N_coords = len(ar[0,0].dtype) - shape = (ar_shape[0],ar_shape[1],N_coords,-1) - dtype = 'PointListArray' - info[i] = i,dtype,shape,name + N_coords = len(ar[0, 0].dtype) + shape = (ar_shape[0], ar_shape[1], N_coords, -1) + dtype = "PointListArray" + info[i] = i, dtype, shape, name i += 1 return info - - - diff --git a/py4DSTEM/io/legacy/legacy12/read_utils_v0_9.py b/py4DSTEM/io/legacy/legacy12/read_utils_v0_9.py index 4f6dc11a2..56b09059d 100644 --- a/py4DSTEM/io/legacy/legacy12/read_utils_v0_9.py +++ b/py4DSTEM/io/legacy/legacy12/read_utils_v0_9.py @@ -4,55 +4,56 @@ import numpy as np from py4DSTEM.io.legacy.read_utils import is_py4DSTEM_file -def get_py4DSTEM_dataobject_info(fp, topgroup='4DSTEM_experiment'): - """ Returns a numpy structured array with basic metadata for all contained dataobjects. - Keys for the info array are: 'index','type','shape','name'. + +def get_py4DSTEM_dataobject_info(fp, topgroup="4DSTEM_experiment"): + """Returns a numpy structured array with basic metadata for all contained dataobjects. + Keys for the info array are: 'index','type','shape','name'. """ - assert(is_py4DSTEM_file(fp)), "Error: not recognized as a py4DSTEM file" - with h5py.File(fp,'r') as f: - assert(topgroup in f.keys()), "Error: unrecognized topgroup" + assert is_py4DSTEM_file(fp), "Error: not recognized as a py4DSTEM file" + with h5py.File(fp, "r") as f: + assert topgroup in f.keys(), "Error: unrecognized topgroup" i = 0 l_md = [] - with h5py.File(fp,'r') as f: - grp_dc = f[topgroup+'/data/datacubes/'] - grp_ds = f[topgroup+'/data/diffractionslices/'] - grp_rs = f[topgroup+'/data/realslices/'] - grp_pl = f[topgroup+'/data/pointlists/'] - grp_pla = f[topgroup+'/data/pointlistarrays/'] - N = len(grp_dc)+len(grp_ds)+len(grp_rs)+len(grp_pl)+len(grp_pla) - info = np.zeros(N,dtype=[('index',int),('type','U16'),('shape',tuple),('name','U64')]) + with h5py.File(fp, "r") as f: + grp_dc = f[topgroup + "/data/datacubes/"] + grp_ds = f[topgroup + "/data/diffractionslices/"] + grp_rs = f[topgroup + "/data/realslices/"] + grp_pl = f[topgroup + "/data/pointlists/"] + grp_pla = f[topgroup + "/data/pointlistarrays/"] + N = len(grp_dc) + len(grp_ds) + len(grp_rs) + len(grp_pl) + len(grp_pla) + info = np.zeros( + N, + dtype=[("index", int), ("type", "U16"), ("shape", tuple), ("name", "U64")], + ) for name in sorted(grp_dc.keys()): - shape = grp_dc[name+'/data/'].shape - dtype = 'DataCube' - info[i] = i,dtype,shape,name + shape = grp_dc[name + "/data/"].shape + dtype = "DataCube" + info[i] = i, dtype, shape, name i += 1 for name in sorted(grp_ds.keys()): - shape = grp_ds[name+'/data/'].shape - dtype = 'DiffractionSlice' - info[i] = i,dtype,shape,name + shape = grp_ds[name + "/data/"].shape + dtype = "DiffractionSlice" + info[i] = i, dtype, shape, name i += 1 for name in sorted(grp_rs.keys()): - shape = grp_rs[name+'/data/'].shape - dtype = 'RealSlice' - info[i] = i,dtype,shape,name + shape = grp_rs[name + "/data/"].shape + dtype = "RealSlice" + info[i] = i, dtype, shape, name i += 1 for name in sorted(grp_pl.keys()): coordinates = list(grp_pl[name].keys()) - length = grp_pl[name+'/'+coordinates[0]+'/data'].shape[0] - shape = (len(coordinates),length) - dtype = 'PointList' - info[i] = i,dtype,shape,name + length = grp_pl[name + "/" + coordinates[0] + "/data"].shape[0] + shape = 
(len(coordinates), length) + dtype = "PointList" + info[i] = i, dtype, shape, name i += 1 for name in sorted(grp_pla.keys()): - ar = np.array(grp_pla[name+'/data']) + ar = np.array(grp_pla[name + "/data"]) ar_shape = ar.shape - N_coords = len(ar[0,0].dtype) - shape = (ar_shape[0],ar_shape[1],N_coords,-1) - dtype = 'PointListArray' - info[i] = i,dtype,shape,name + N_coords = len(ar[0, 0].dtype) + shape = (ar_shape[0], ar_shape[1], N_coords, -1) + dtype = "PointListArray" + info[i] = i, dtype, shape, name i += 1 return info - - - diff --git a/py4DSTEM/io/legacy/legacy12/read_v0_12.py b/py4DSTEM/io/legacy/legacy12/read_v0_12.py index 470e64075..44aa86b6a 100644 --- a/py4DSTEM/io/legacy/legacy12/read_v0_12.py +++ b/py4DSTEM/io/legacy/legacy12/read_v0_12.py @@ -3,12 +3,14 @@ import h5py import numpy as np from os.path import splitext, exists -from py4DSTEM.io.legacy.read_utils import is_py4DSTEM_file, get_py4DSTEM_topgroups, get_py4DSTEM_version, version_is_geq -from py4DSTEM.io.legacy.legacy12.read_utils_v0_12 import get_py4DSTEM_dataobject_info -from emdfile import ( - PointList, - PointListArray +from py4DSTEM.io.legacy.read_utils import ( + is_py4DSTEM_file, + get_py4DSTEM_topgroups, + get_py4DSTEM_version, + version_is_geq, ) +from py4DSTEM.io.legacy.legacy12.read_utils_v0_12 import get_py4DSTEM_dataobject_info +from emdfile import PointList, PointListArray from py4DSTEM.data import ( DiffractionSlice, RealSlice, @@ -16,6 +18,7 @@ from py4DSTEM.datacube import DataCube from emdfile import tqdmnd + def read_v0_12(fp, **kwargs): """ File reader for files written by py4DSTEM v0.12. Precise behavior is detemined by which @@ -57,131 +60,149 @@ def read_v0_12(fp, **kwargs): will return a length two tuple, the first element being a list of 3 DataObject instances and the second a MetaData instance. """ - assert(exists(fp)), "Error: specified filepath does not exist" - assert(is_py4DSTEM_file(fp)), "Error: {} isn't recognized as a py4DSTEM file.".format(fp) + assert exists(fp), "Error: specified filepath does not exist" + assert is_py4DSTEM_file( + fp + ), "Error: {} isn't recognized as a py4DSTEM file.".format(fp) # For HDF5 files containing multiple valid EMD type 2 files, disambiguate desired data tgs = get_py4DSTEM_topgroups(fp) - if 'topgroup' in kwargs.keys(): - tg = kwargs['topgroup'] - assert(tg in tgs), "Error: specified topgroup, {}, not found.".format(tg) + if "topgroup" in kwargs.keys(): + tg = kwargs["topgroup"] + assert tg in tgs, "Error: specified topgroup, {}, not found.".format(tg) else: - if len(tgs)==1: + if len(tgs) == 1: tg = tgs[0] else: - print("Multiple topgroups detected. Please specify one by passing the 'topgroup' keyword argument.") + print( + "Multiple topgroups detected. Please specify one by passing the 'topgroup' keyword argument." 
+ ) print("") print("Topgroups found:") for tg in tgs: print(tg) - return None,None + return None, None version = get_py4DSTEM_version(fp, tg) - assert(version_is_geq(version,(0,12,0))), "File must be v0.12+" - _data_id = 'data_id' in kwargs.keys() # Flag indicating if data was requested + assert version_is_geq(version, (0, 12, 0)), "File must be v0.12+" + _data_id = "data_id" in kwargs.keys() # Flag indicating if data was requested # If metadata is requested - if 'metadata' in kwargs.keys(): - if kwargs['metadata']: + if "metadata" in kwargs.keys(): + if kwargs["metadata"]: raise NotImplementedError("Legacy metadata reader missing...") # return metadata_from_h5(fp, tg) # If data is requested - elif 'data_id' in kwargs.keys(): - data_id = kwargs['data_id'] - assert(isinstance(data_id,(int,np.int_,str,list,tuple))), "Error: data must be specified with strings or integers only." - if not isinstance(data_id,(int,np.int_,str)): - assert(all([isinstance(d,(int,np.int_,str)) for d in data_id])), "Error: data must be specified with strings or integers only." + elif "data_id" in kwargs.keys(): + data_id = kwargs["data_id"] + assert isinstance( + data_id, (int, np.int_, str, list, tuple) + ), "Error: data must be specified with strings or integers only." + if not isinstance(data_id, (int, np.int_, str)): + assert all( + [isinstance(d, (int, np.int_, str)) for d in data_id] + ), "Error: data must be specified with strings or integers only." # Parse optional arguments - if 'mem' in kwargs.keys(): - mem = kwargs['mem'] - assert(mem in ('RAM','MEMMAP', 'DASK')) + if "mem" in kwargs.keys(): + mem = kwargs["mem"] + assert mem in ("RAM", "MEMMAP", "DASK") else: - mem='RAM' - if 'binfactor' in kwargs.keys(): - binfactor = kwargs['binfactor'] - assert(isinstance(binfactor,(int,np.int_))) + mem = "RAM" + if "binfactor" in kwargs.keys(): + binfactor = kwargs["binfactor"] + assert isinstance(binfactor, (int, np.int_)) else: - binfactor=1 - if 'dtype' in kwargs.keys(): - bindtype = kwargs['dtype'] - assert(isinstance(bindtype,type)) + binfactor = 1 + if "dtype" in kwargs.keys(): + bindtype = kwargs["dtype"] + assert isinstance(bindtype, type) else: bindtype = None - return get_data(fp,tg,data_id,mem,binfactor,bindtype) + return get_data(fp, tg, data_id, mem, binfactor, bindtype) # If no data is requested else: - print_py4DSTEM_file(fp,tg) + print_py4DSTEM_file(fp, tg) return ###### Get data ###### -def get_data(filepath,tg,data_id,mem='RAM',binfactor=1,bindtype=None): - """ Accepts a filepath to a valid py4DSTEM file and an int/str/list specifying data, and returns the data. - """ - if isinstance(data_id,(int,np.int_)): - return get_data_from_int(filepath,tg,data_id,mem=mem,binfactor=binfactor,bindtype=bindtype) - elif isinstance(data_id,str): - return get_data_from_str(filepath,tg,data_id,mem=mem,binfactor=binfactor,bindtype=bindtype) - else: - return get_data_from_list(filepath,tg,data_id) -def get_data_from_int(filepath,tg,data_id,mem='RAM',binfactor=1,bindtype=None): - """ Accepts a filepath to a valid py4DSTEM file and an integer specifying data, and returns the data. 
- """ - assert(isinstance(data_id,(int,np.int_))) - with h5py.File(filepath,'r') as f: - grp_dc = f[tg+'/data/datacubes/'] - grp_cdc = f[tg+'/data/counted_datacubes/'] - grp_ds = f[tg+'/data/diffractionslices/'] - grp_rs = f[tg+'/data/realslices/'] - grp_pl = f[tg+'/data/pointlists/'] - grp_pla = f[tg+'/data/pointlistarrays/'] - grp_coords = f[tg+'/data/coordinates/'] - grps = [grp_dc,grp_cdc,grp_ds,grp_rs,grp_pl,grp_pla,grp_coords] +def get_data(filepath, tg, data_id, mem="RAM", binfactor=1, bindtype=None): + """Accepts a filepath to a valid py4DSTEM file and an int/str/list specifying data, and returns the data.""" + if isinstance(data_id, (int, np.int_)): + return get_data_from_int( + filepath, tg, data_id, mem=mem, binfactor=binfactor, bindtype=bindtype + ) + elif isinstance(data_id, str): + return get_data_from_str( + filepath, tg, data_id, mem=mem, binfactor=binfactor, bindtype=bindtype + ) + else: + return get_data_from_list(filepath, tg, data_id) + + +def get_data_from_int(filepath, tg, data_id, mem="RAM", binfactor=1, bindtype=None): + """Accepts a filepath to a valid py4DSTEM file and an integer specifying data, and returns the data.""" + assert isinstance(data_id, (int, np.int_)) + with h5py.File(filepath, "r") as f: + grp_dc = f[tg + "/data/datacubes/"] + grp_cdc = f[tg + "/data/counted_datacubes/"] + grp_ds = f[tg + "/data/diffractionslices/"] + grp_rs = f[tg + "/data/realslices/"] + grp_pl = f[tg + "/data/pointlists/"] + grp_pla = f[tg + "/data/pointlistarrays/"] + grp_coords = f[tg + "/data/coordinates/"] + grps = [grp_dc, grp_cdc, grp_ds, grp_rs, grp_pl, grp_pla, grp_coords] Ns = np.cumsum([len(grp.keys()) for grp in grps]) - i = np.nonzero(data_id 0: for field in fields: data[field] = np.array(group[field]) # Make the PointList - pl = PointList( - data=data, - name=basename(group.name)) + pl = PointList(data=data, name=basename(group.name)) # Add additional metadata _read_metadata(pl, group) @@ -602,15 +545,11 @@ def PointList_from_h5(group:h5py.Group): return pl - ## POINTLISTARRAY # write -def PointListArray_to_h5( - pointlistarray, - group - ): +def PointListArray_to_h5(pointlistarray, group): """ Takes a valid HDF5 group for an HDF5 file object which is open in write or append mode. Writes a new group with a name given by this @@ -622,27 +561,23 @@ def PointListArray_to_h5( ## Write grp = group.create_group(pointlistarray.name) - grp.attrs.create("emd_group_type",3) # this tag indicates a PointListArray - grp.attrs.create("py4dstem_class",pointlistarray.__class__.__name__) + grp.attrs.create("emd_group_type", 3) # this tag indicates a PointListArray + grp.attrs.create("py4dstem_class", pointlistarray.__class__.__name__) # Add metadata dtype = h5py.special_dtype(vlen=pointlistarray.dtype) - dset = grp.create_dataset( - "data", - pointlistarray.shape, - dtype - ) + dset = grp.create_dataset("data", pointlistarray.shape, dtype) # Add data - for (i,j) in tqdmnd(dset.shape[0],dset.shape[1]): - dset[i,j] = pointlistarray[i,j].data + for i, j in tqdmnd(dset.shape[0], dset.shape[1]): + dset[i, j] = pointlistarray[i, j].data # Add additional metadata _write_metadata(pointlistarray, grp) # read -def PointListArray_from_h5(group:h5py.Group): +def PointListArray_from_h5(group: h5py.Group): """ Takes a valid HDF5 group for an HDF5 file object which is open in read mode, and a name. 
Determines if a valid PointListArray object of this name exists @@ -654,30 +589,29 @@ def PointListArray_from_h5(group:h5py.Group): Returns: A PointListArray instance """ - from py4DSTEM.io.legacy.legacy13.v13_emd_classes.pointlistarray import PointListArray + from py4DSTEM.io.legacy.legacy13.v13_emd_classes.pointlistarray import ( + PointListArray, + ) from os.path import basename er = f"Group {group} is not a valid EMD PointListArray group" - assert("emd_group_type" in group.attrs.keys()), er - assert(group.attrs["emd_group_type"] == EMD_group_types['PointListArray']), er - + assert "emd_group_type" in group.attrs.keys(), er + assert group.attrs["emd_group_type"] == EMD_group_types["PointListArray"], er - # Get the DataSet - dset = group['data'] - dtype = h5py.check_vlen_dtype( dset.dtype ) + # Get the DataSet + dset = group["data"] + dtype = h5py.check_vlen_dtype(dset.dtype) shape = dset.shape # Initialize a PointListArray - pla = PointListArray( - dtype=dtype, - shape=shape, - name=basename(group.name) - ) + pla = PointListArray(dtype=dtype, shape=shape, name=basename(group.name)) # Add data - for (i,j) in tqdmnd(shape[0],shape[1],desc="Reading PointListArray",unit="PointList"): + for i, j in tqdmnd( + shape[0], shape[1], desc="Reading PointListArray", unit="PointList" + ): try: - pla[i,j].add(dset[i,j]) + pla[i, j].add(dset[i, j]) except ValueError: pass @@ -687,27 +621,22 @@ def PointListArray_from_h5(group:h5py.Group): return pla - - - - # Metadata helper functions -def _write_metadata(obj,grp): + +def _write_metadata(obj, grp): items = obj._metadata.items() - if len(items)>0: - grp_metadata = grp.create_group('_metadata') - for name,md in items: + if len(items) > 0: + grp_metadata = grp.create_group("_metadata") + for name, md in items: obj._metadata[name].name = name obj._metadata[name].to_h5(grp_metadata) + def _read_metadata(obj, grp): try: - grp_metadata = grp['_metadata'] + grp_metadata = grp["_metadata"] for key in grp_metadata.keys(): obj.metadata = Metadata_from_h5(grp_metadata[key]) except KeyError: pass - - - diff --git a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/metadata.py b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/metadata.py index 4631a20bf..d430528e1 100644 --- a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/metadata.py +++ b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/metadata.py @@ -17,13 +17,11 @@ class Metadata: >>> val = meta['param'] If the parameter has not been set, the getter methods return None. 
""" - def __init__( - self, - name: Optional[str] ='metadata' - ): + + def __init__(self, name: Optional[str] = "metadata"): """ - Args: - name (Optional, string): + Args: + name (Optional, string): """ self.name = name self.tree = Tree() @@ -31,59 +29,52 @@ def __init__( # create parameter dictionary self._params = {} - - ### __get/setitem__ - def __getitem__(self,x): + def __getitem__(self, x): return self._params[x] - def __setitem__(self,k,v): - self._params[k] = v + def __setitem__(self, k, v): + self._params[k] = v @property def keys(self): return self._params.keys() - - def copy(self,name=None): - """ - """ - if name is None: name = self.name+"_copy" + def copy(self, name=None): + """ """ + if name is None: + name = self.name + "_copy" md = Metadata(name=name) md._params.update(self._params) return md - - def __repr__(self): - - space = ' '*len(self.__class__.__name__)+' ' + space = " " * len(self.__class__.__name__) + " " string = f"{self.__class__.__name__}( A Metadata instance called '{self.name}', containing the following fields:" string += "\n" maxlen = 0 for k in self._params.keys(): - if len(k)>maxlen: maxlen=len(k) + if len(k) > maxlen: + maxlen = len(k) - for k,v in self._params.items(): - if isinstance(v,np.ndarray): + for k, v in self._params.items(): + if isinstance(v, np.ndarray): v = f"{v.ndim}D-array" - string += "\n"+space+f"{k}:{(maxlen-len(k)+3)*' '}{str(v)}" + string += "\n" + space + f"{k}:{(maxlen-len(k)+3)*' '}{str(v)}" string += "\n)" return string - - # HDF5 read/write - def to_h5(self,group): + def to_h5(self, group): from py4DSTEM.io.legacy.legacy13.v13_emd_classes.io import Metadata_to_h5 - Metadata_to_h5(self,group) + + Metadata_to_h5(self, group) def from_h5(group): from py4DSTEM.io.legacy.legacy13.v13_emd_classes.io import Metadata_from_h5 - return Metadata_from_h5(group) - + return Metadata_from_h5(group) diff --git a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/pointlist.py b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/pointlist.py index 55d3518f7..c7f0c7fc1 100644 --- a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/pointlist.py +++ b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/pointlist.py @@ -16,13 +16,14 @@ class PointList: A wrapper around structured numpy arrays, with read/write functionality in/out of py4DSTEM formatted HDF5 files. """ + def __init__( self, data: np.ndarray, - name: Optional[str] = 'pointlist', - ): + name: Optional[str] = "pointlist", + ): """ - Instantiate a PointList. + Instantiate a PointList. Args: data (structured numpy ndarray): the data; the dtype of this array will specify the fields of the PointList. 
@@ -41,11 +42,11 @@ def __init__( if not hasattr(self, "_metadata"): self._metadata = {} - # properties @property def dtype(self): return self._dtype + @dtype.setter def dtype(self, dtype): self._dtype = dtype @@ -53,6 +54,7 @@ def dtype(self, dtype): @property def fields(self): return self._fields + @fields.setter def fields(self, x): self.data.dtype.names = x @@ -66,8 +68,6 @@ def types(self): def length(self): return np.atleast_1d(self.data).shape[0] - - ## Add, remove, sort data def add(self, data): @@ -78,43 +78,40 @@ def add(self, data): self.data = np.append(self.data, data) def remove(self, mask): - """ Removes points wherever mask==True - """ - assert np.atleast_1d(mask).shape[0] == self.length, "deletemask must be same length as the data" + """Removes points wherever mask==True""" + assert ( + np.atleast_1d(mask).shape[0] == self.length + ), "deletemask must be same length as the data" inds = mask.nonzero()[0] self.data = np.delete(self.data, inds) - def sort(self, field, order='descending'): + def sort(self, field, order="descending"): """ Sorts the point list according to field, which must be a field in self.dtype. order should be 'descending' or 'ascending'. """ assert field in self.fields - assert (order=='descending') or (order=='ascending') - if order=='ascending': + assert (order == "descending") or (order == "ascending") + if order == "ascending": self.data = np.sort(self.data, order=field) else: self.data = np.sort(self.data, order=field)[::-1] - ## Copy, copy+modify PointList def copy(self, name=None): - """ Returns a copy of the PointList. If name=None, sets to `{name}_copy` - """ - name = name if name is not None else self.name+"_copy" + """Returns a copy of the PointList. If name=None, sets to `{name}_copy`""" + name = name if name is not None else self.name + "_copy" - pl = PointList( - data = np.copy(self.data), - name = name) + pl = PointList(data=np.copy(self.data), name=name) - for k,v in self.metadata.items(): + for k, v in self.metadata.items(): pl.metadata = v.copy(name=k) return pl - def add_fields(self, new_fields, name=''): + def add_fields(self, new_fields, name=""): """ Creates a copy of the PointList, but with additional fields given by new_fields. 
Args: @@ -122,10 +119,10 @@ def add_fields(self, new_fields, name=''): name: a name for the new pointlist """ dtype = [] - for f,t in zip(self.fields,self.types): - dtype.append((f,t)) - for f,t in new_fields: - dtype.append((f,t)) + for f, t in zip(self.fields, self.types): + dtype.append((f, t)) + for f, t in new_fields: + dtype.append((f, t)) data = np.zeros(self.length, dtype=dtype) for f in self.fields: @@ -143,61 +140,54 @@ def add_data_by_field(self, data, fields=None): """ if data[0].ndim == 0: - L = 1, + L = (1,) else: L = data[0].shape[0] - newdata = np.zeros(L,dtype=self.dtype) + newdata = np.zeros(L, dtype=self.dtype) _fields = self.fields if fields is None else fields - for d,f in zip(data, _fields): + for d, f in zip(data, _fields): newdata[f] = d - self.data = np.append(self.data,newdata) - + self.data = np.append(self.data, newdata) # set up metadata property @property def metadata(self): return self._metadata + @metadata.setter - def metadata(self,x): - assert(isinstance(x,Metadata)) + def metadata(self, x): + assert isinstance(x, Metadata) self._metadata[x.name] = x - - ## Representation to standard output def __repr__(self): - - space = ' '*len(self.__class__.__name__)+' ' + space = " " * len(self.__class__.__name__) + " " string = f"{self.__class__.__name__}( A length {self.length} PointList called '{self.name}'," - string += "\n"+space+f"with {len(self.fields)} fields:" + string += "\n" + space + f"with {len(self.fields)} fields:" string += "\n" - space2 = max([len(field) for field in self.fields])+3 - for f,t in zip(self.fields,self.types): - string += "\n"+space+f"{f}{(space2-len(f))*' '}({str(t)})" + space2 = max([len(field) for field in self.fields]) + 3 + for f, t in zip(self.fields, self.types): + string += "\n" + space + f"{f}{(space2-len(f))*' '}({str(t)})" string += "\n)" return string - # Slicing def __getitem__(self, v): return self.data[v] - - # HDF5 read/write - def to_h5(self,group): + def to_h5(self, group): from py4DSTEM.io.legacy.legacy13.v13_emd_classes.io import PointList_to_h5 - PointList_to_h5(self,group) + + PointList_to_h5(self, group) def from_h5(group): from py4DSTEM.io.legacy.legacy13.v13_emd_classes.io import PointList_from_h5 - return PointList_from_h5(group) - - + return PointList_from_h5(group) diff --git a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/pointlistarray.py b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/pointlistarray.py index 7898bccb9..c246672bd 100644 --- a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/pointlistarray.py +++ b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/pointlistarray.py @@ -7,18 +7,20 @@ from py4DSTEM.io.legacy.legacy13.v13_emd_classes.metadata import Metadata from py4DSTEM.io.legacy.legacy13.v13_emd_classes.pointlist import PointList + class PointListArray: """ An 2D array of PointLists which share common coordinates. """ + def __init__( self, dtype, shape, - name: Optional[str] = 'pointlistarray', - ): + name: Optional[str] = "pointlistarray", + ): """ - Creates an empty PointListArray. + Creates an empty PointListArray. 
Args: dtype: the dtype of the numpy structured arrays which will comprise the data of each PointList @@ -41,9 +43,13 @@ def __init__( self._metadata = {} # Populate with empty PointLists - self._pointlists = [[PointList(data=np.zeros(0,dtype=self.dtype), name=f"{i},{j}") - for j in range(self.shape[1])] for i in range(self.shape[0])] - + self._pointlists = [ + [ + PointList(data=np.zeros(0, dtype=self.dtype), name=f"{i},{j}") + for j in range(self.shape[1]) + ] + for i in range(self.shape[0]) + ] ## get/set pointlists @@ -57,40 +63,35 @@ def get_pointlist(self, i, j, name=None): return pl def __getitem__(self, tup): - l = len(tup) if isinstance(tup,tuple) else 1 - assert(l==2), f"Expected 2 slice values, recieved {l}" - return self.get_pointlist(tup[0],tup[1]) + l = len(tup) if isinstance(tup, tuple) else 1 + assert l == 2, f"Expected 2 slice values, recieved {l}" + return self.get_pointlist(tup[0], tup[1]) def __setitem__(self, tup, pointlist): - l = len(tup) if isinstance(tup,tuple) else 1 - assert(l==2), f"Expected 2 slice values, recieved {l}" - assert(pointlist.fields == self.fields), "fields must match" + l = len(tup) if isinstance(tup, tuple) else 1 + assert l == 2, f"Expected 2 slice values, recieved {l}" + assert pointlist.fields == self.fields, "fields must match" self._pointlists[tup[0]][tup[1]] = pointlist - - ## Make copies - def copy(self, name=''): + def copy(self, name=""): """ Returns a copy of itself. """ - new_pla = PointListArray( - dtype=self.dtype, - shape=self.shape, - name=name) + new_pla = PointListArray(dtype=self.dtype, shape=self.shape, name=name) for i in range(new_pla.shape[0]): for j in range(new_pla.shape[1]): - pl = new_pla.get_pointlist(i,j) - pl.add(np.copy(self.get_pointlist(i,j).data)) + pl = new_pla.get_pointlist(i, j) + pl.add(np.copy(self.get_pointlist(i, j).data)) - for k,v in self.metadata.items(): + for k, v in self.metadata.items(): new_pla.metadata = v.copy(name=k) return new_pla - def add_fields(self, new_fields, name=''): + def add_fields(self, new_fields, name=""): """ Creates a copy of the PointListArray, but with additional fields given by new_fields. 
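
A minimal sketch of the (i, j)-indexed access these methods implement, with an illustrative dtype and shape:

```python
import numpy as np
from py4DSTEM.io.legacy.legacy13.v13_emd_classes.pointlistarray import PointListArray

dtype = [("qx", np.float64), ("qy", np.float64), ("intensity", np.float64)]
pla = PointListArray(dtype=dtype, shape=(4, 5), name="braggpeaks")

# each (i, j) position starts as an empty PointList sharing this dtype
pla[0, 0].add(np.ones(2, dtype=dtype))
assert pla[0, 0].length == 2
```
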
@@ -99,66 +100,61 @@ def add_fields(self, new_fields, name=''): name: a name for the new pointlist """ dtype = [] - for f,t in zip(self.fields,self.types): - dtype.append((f,t)) - for f,t in new_fields: - dtype.append((f,t)) + for f, t in zip(self.fields, self.types): + dtype.append((f, t)) + for f, t in new_fields: + dtype.append((f, t)) - new_pla = PointListArray( - dtype=dtype, - shape=self.shape, - name=name) + new_pla = PointListArray(dtype=dtype, shape=self.shape, name=name) for i in range(new_pla.shape[0]): for j in range(new_pla.shape[1]): # Copy old data into a new structured array - pl_old = self.get_pointlist(i,j) + pl_old = self.get_pointlist(i, j) data = np.zeros(pl_old.length, np.dtype(dtype)) for f in self.fields: data[f] = np.copy(pl_old.data[f]) # Write into new pointlist - pl_new = new_pla.get_pointlist(i,j) + pl_new = new_pla.get_pointlist(i, j) pl_new.add(data) return new_pla - # set up metadata property @property def metadata(self): return self._metadata + @metadata.setter - def metadata(self,x): - assert(isinstance(x,Metadata)) + def metadata(self, x): + assert isinstance(x, Metadata) self._metadata[x.name] = x - ## Representation to standard output def __repr__(self): - - space = ' '*len(self.__class__.__name__)+' ' + space = " " * len(self.__class__.__name__) + " " string = f"{self.__class__.__name__}( A shape {self.shape} PointListArray called '{self.name}'," - string += "\n"+space+f"with {len(self.fields)} fields:" + string += "\n" + space + f"with {len(self.fields)} fields:" string += "\n" - space2 = max([len(field) for field in self.fields])+3 - for f,t in zip(self.fields,self.types): - string += "\n"+space+f"{f}{(space2-len(f))*' '}({str(t)})" + space2 = max([len(field) for field in self.fields]) + 3 + for f, t in zip(self.fields, self.types): + string += "\n" + space + f"{f}{(space2-len(f))*' '}({str(t)})" string += "\n)" return string - - # HDF5 read/write - def to_h5(self,group): + def to_h5(self, group): from py4DSTEM.io.legacy.legacy13.v13_emd_classes.io import PointListArray_to_h5 - PointListArray_to_h5(self,group) - def from_h5(group): - from py4DSTEM.io.legacy.legacy13.v13_emd_classes.io import PointListArray_from_h5 - return PointListArray_from_h5(group) + PointListArray_to_h5(self, group) + def from_h5(group): + from py4DSTEM.io.legacy.legacy13.v13_emd_classes.io import ( + PointListArray_from_h5, + ) + return PointListArray_from_h5(group) diff --git a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/root.py b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/root.py index dd3ed0ed3..c5137d9f4 100644 --- a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/root.py +++ b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/root.py @@ -10,54 +10,44 @@ class Root: """ A class serving as a container for Trees """ - def __init__( - self, - name: Optional[str] ='root' - ): + + def __init__(self, name: Optional[str] = "root"): """ - Args: - name (Optional, string): + Args: + name (Optional, string): """ self.name = name self.tree = Tree() - ### __get/setitem__ - def __getitem__(self,x): + def __getitem__(self, x): return self.tree[x] - def __setitem__(self,k,v): - self.tree[k] = v + def __setitem__(self, k, v): + self.tree[k] = v @property def keys(self): return self.tree.keys() - - def __repr__(self): - - space = ' '*len(self.__class__.__name__)+' ' + space = " " * len(self.__class__.__name__) + " " string = f"{self.__class__.__name__}( A Root instance called '{self.name}', containing the following top-level object instances:" string += "\n" - for k,v in self.tree._tree.items(): - 
string += "\n"+space+f" {k} \t\t ({v.__class__.__name__})" + for k, v in self.tree._tree.items(): + string += "\n" + space + f" {k} \t\t ({v.__class__.__name__})" string += "\n)" return string - - # HDF5 read/write - def to_h5(self,group): + def to_h5(self, group): from py4DSTEM.io.legacy.legacy13.v13_emd_classes.io import Root_to_h5 - Root_to_h5(self,group) + + Root_to_h5(self, group) def from_h5(group): from py4DSTEM.io.legacy.legacy13.v13_emd_classes.io import Root_from_h5 - return Root_from_h5(group) - - - + return Root_from_h5(group) diff --git a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/tree.py b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/tree.py index 76f4ee211..51f124122 100644 --- a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/tree.py +++ b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/tree.py @@ -3,33 +3,28 @@ class Tree: - def __init__(self): self._tree = {} - - def __setitem__(self, key, value): self._tree[key] = value - - - def __getitem__(self,x): - l = x.split('/') + def __getitem__(self, x): + l = x.split("/") try: - l.remove('') - l.remove('') + l.remove("") + l.remove("") except ValueError: pass return self._getitem_from_list(l) - def _getitem_from_list(self,x): + def _getitem_from_list(self, x): if len(x) == 0: raise Exception("invalid slice value to tree") k = x.pop(0) er = f"{k} not found in tree - check keys" - assert(k in self._tree.keys()), er + assert k in self._tree.keys(), er if len(x) == 0: return self._tree[k] @@ -37,60 +32,48 @@ def _getitem_from_list(self,x): tree = self._tree[k].tree return tree._getitem_from_list(x) - - - - def __repr__(self): - space = ' '*len(self.__class__.__name__)+' ' + space = " " * len(self.__class__.__name__) + " " string = f"{self.__class__.__name__}( An object tree containing the following top-level object instances:" string += "\n" - for k,v in self._tree.items(): - string += "\n"+space+f" {k} \t\t ({v.__class__.__name__})" + for k, v in self._tree.items(): + string += "\n" + space + f" {k} \t\t ({v.__class__.__name__})" string += "\n)" return string def keys(self): return self._tree.keys() - - - def print(self): """ Prints the tree contents to screen. 
""" - print('/') + print("/") self._print_tree_to_screen(self) - print('\n') + print("\n") def _print_tree_to_screen(self, tree, tablevel=0, linelevels=[]): - """ - """ + """ """ if tablevel not in linelevels: linelevels.append(tablevel) keys = [k for k in tree.keys()] - #keys = [k for k in keys if k != 'metadata'] + # keys = [k for k in keys if k != 'metadata'] N = len(keys) - for i,k in enumerate(keys): - string = '' - string += '|' if 0 in linelevels else '' + for i, k in enumerate(keys): + string = "" + string += "|" if 0 in linelevels else "" for idx in range(tablevel): - l = '|' if idx+1 in linelevels else '' - string += '\t'+l - #print(string) - print(string+'--'+k) - if i == N-1: + l = "|" if idx + 1 in linelevels else "" + string += "\t" + l + # print(string) + print(string + "--" + k) + if i == N - 1: linelevels.remove(tablevel) try: self._print_tree_to_screen( - tree[k].tree, - tablevel=tablevel+1, - linelevels=linelevels) + tree[k].tree, tablevel=tablevel + 1, linelevels=linelevels + ) except AttributeError: pass pass - - - diff --git a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/__init__.py b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/__init__.py index 7263fb441..19317f040 100644 --- a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/__init__.py +++ b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/__init__.py @@ -8,6 +8,3 @@ from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.qpoints import * from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.braggvectors import * from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.io import * - - - diff --git a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/braggvectors.py b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/braggvectors.py index f04bfec02..4e51bdebf 100644 --- a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/braggvectors.py +++ b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/braggvectors.py @@ -1,7 +1,7 @@ # Defines the BraggVectors class -from typing import Optional,Union +from typing import Optional, Union import numpy as np import h5py @@ -10,7 +10,6 @@ from py4DSTEM.io.legacy.legacy13.v13_emd_classes.metadata import Metadata - class BraggVectors: """ Stores bragg scattering information for a 4D datacube. @@ -26,14 +25,7 @@ class BraggVectors: retrieve the positiona and intensity of the scattering. 
""" - - def __init__( - self, - Rshape, - Qshape, - name = 'braggvectors' - ): - + def __init__(self, Rshape, Qshape, name="braggvectors"): self.name = name self.Rshape = Rshape self.shape = self.Rshape @@ -42,21 +34,16 @@ def __init__( self.tree = Tree() if not hasattr(self, "_metadata"): self._metadata = {} - if 'braggvectors' not in self._metadata.keys(): - self.metadata = Metadata( name='braggvectors' ) - self.metadata['braggvectors']['Qshape'] = self.Qshape + if "braggvectors" not in self._metadata.keys(): + self.metadata = Metadata(name="braggvectors") + self.metadata["braggvectors"]["Qshape"] = self.Qshape self._v_uncal = PointListArray( - dtype = [ - ('qx',np.float64), - ('qy',np.float64), - ('intensity',np.float64) - ], - shape = Rshape, - name = '_v_uncal' + dtype=[("qx", np.float64), ("qy", np.float64), ("intensity", np.float64)], + shape=Rshape, + name="_v_uncal", ) - @property def vectors(self): try: @@ -72,45 +59,37 @@ def vectors_uncal(self): @property def metadata(self): return self._metadata + @metadata.setter - def metadata(self,x): - assert(isinstance(x,Metadata)) + def metadata(self, x): + assert isinstance(x, Metadata) self._metadata[x.name] = x - - ## Representation to standard output def __repr__(self): - - space = ' '*len(self.__class__.__name__)+' ' + space = " " * len(self.__class__.__name__) + " " string = f"{self.__class__.__name__}( " string += f"A {self.shape}-shaped array of lists of bragg vectors )" return string - - # HDF5 read/write # write - def to_h5(self,group): - from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.io import BraggVectors_to_h5 - BraggVectors_to_h5(self,group) + def to_h5(self, group): + from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.io import ( + BraggVectors_to_h5, + ) + BraggVectors_to_h5(self, group) # read def from_h5(group): - from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.io import BraggVectors_from_h5 - return BraggVectors_from_h5(group) - + from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.io import ( + BraggVectors_from_h5, + ) + return BraggVectors_from_h5(group) ############ END OF CLASS ########### - - - - - - - diff --git a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/calibration.py b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/calibration.py index 05b3dbe4b..cbf4cd1fe 100644 --- a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/calibration.py +++ b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/calibration.py @@ -3,49 +3,50 @@ from typing import Optional from py4DSTEM.io.legacy.legacy13.v13_emd_classes.metadata import Metadata + class Calibration(Metadata): - """ - """ + """ """ + def __init__( self, - name: Optional[str] ='calibration', - ): + name: Optional[str] = "calibration", + ): """ Args: name (optional, str): """ - Metadata.__init__( - self, - name=name) + Metadata.__init__(self, name=name) self.set_Q_pixel_size(1) self.set_R_pixel_size(1) - self.set_Q_pixel_units('pixels') - self.set_R_pixel_units('pixels') + self.set_Q_pixel_units("pixels") + self.set_R_pixel_units("pixels") + def set_Q_pixel_size(self, x): + self._params["Q_pixel_size"] = x - - def set_Q_pixel_size(self,x): - self._params['Q_pixel_size'] = x def get_Q_pixel_size(self): - return self._get_value('Q_pixel_size') - def set_R_pixel_size(self,x): - self._params['R_pixel_size'] = x + return self._get_value("Q_pixel_size") + + def set_R_pixel_size(self, x): + self._params["R_pixel_size"] = x + def get_R_pixel_size(self): - return self._get_value('R_pixel_size') - def set_Q_pixel_units(self,x): - pix = 
('pixels','A^-1','mrad') - assert(x in pix), f"{x} must be in {pix}" - self._params['Q_pixel_units'] = x - def get_Q_pixel_units(self): - return self._get_value('Q_pixel_units') - def set_R_pixel_units(self,x): - self._params['R_pixel_units'] = x - def get_R_pixel_units(self): - return self._get_value('R_pixel_units') + return self._get_value("R_pixel_size") + def set_Q_pixel_units(self, x): + pix = ("pixels", "A^-1", "mrad") + assert x in pix, f"{x} must be in {pix}" + self._params["Q_pixel_units"] = x + def get_Q_pixel_units(self): + return self._get_value("Q_pixel_units") + def set_R_pixel_units(self, x): + self._params["R_pixel_units"] = x + + def get_R_pixel_units(self): + return self._get_value("R_pixel_units") # HDF5 read/write @@ -53,12 +54,11 @@ def get_R_pixel_units(self): # read def from_h5(group): - from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.io import Calibration_from_h5 - return Calibration_from_h5(group) - + from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.io import ( + Calibration_from_h5, + ) + return Calibration_from_h5(group) ########## End of class ########## - - diff --git a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/datacube.py b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/datacube.py index 6cc359115..422d47bc6 100644 --- a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/datacube.py +++ b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/datacube.py @@ -4,10 +4,11 @@ from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.calibration import Calibration from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.parenttree import ParentTree -from typing import Optional,Union +from typing import Optional, Union import numpy as np import h5py + class DataCube(Array): """ Stores 4D-STEM datasets. @@ -16,14 +17,14 @@ class DataCube(Array): def __init__( self, data: np.ndarray, - name: Optional[str] = 'datacube', - R_pixel_size: Optional[Union[float,list]] = 1, - R_pixel_units: Optional[Union[str,list]] = 'pixels', - Q_pixel_size: Optional[Union[float,list]] = 1, - Q_pixel_units: Optional[Union[str,list]] = 'pixels', - slicelabels: Optional[Union[bool,list]] = None, + name: Optional[str] = "datacube", + R_pixel_size: Optional[Union[float, list]] = 1, + R_pixel_units: Optional[Union[str, list]] = "pixels", + Q_pixel_size: Optional[Union[float, list]] = 1, + Q_pixel_units: Optional[Union[str, list]] = "pixels", + slicelabels: Optional[Union[bool, list]] = None, calibration: Optional = None, - ): + ): """ Accepts: data (np.ndarray): the data @@ -46,32 +47,15 @@ def __init__( # initialize as an Array Array.__init__( self, - data = data, - name = name, - units = 'pixel intensity', - dims = [ - R_pixel_size, - R_pixel_size, - Q_pixel_size, - Q_pixel_size - ], - dim_units = [ - R_pixel_units, - R_pixel_units, - Q_pixel_units, - Q_pixel_units - ], - dim_names = [ - 'Rx', - 'Ry', - 'Qx', - 'Qy' - ], - slicelabels = slicelabels + data=data, + name=name, + units="pixel intensity", + dims=[R_pixel_size, R_pixel_size, Q_pixel_size, Q_pixel_size], + dim_units=[R_pixel_units, R_pixel_units, Q_pixel_units, Q_pixel_units], + dim_names=["Rx", "Ry", "Qx", "Qy"], + slicelabels=slicelabels, ) - - # make a tree # we're overwriting the emd Tree with the py4DSTEM Tree # which knows how to track the parent datacube @@ -79,13 +63,10 @@ def __init__( self.tree = ParentTree(self, Calibration()) # set size/units - self.tree['calibration'].set_R_pixel_size( R_pixel_size ) - self.tree['calibration'].set_R_pixel_units( R_pixel_units ) - self.tree['calibration'].set_Q_pixel_size( 
Q_pixel_size ) - self.tree['calibration'].set_Q_pixel_units( Q_pixel_units ) - - - + self.tree["calibration"].set_R_pixel_size(R_pixel_size) + self.tree["calibration"].set_R_pixel_units(R_pixel_units) + self.tree["calibration"].set_Q_pixel_size(Q_pixel_size) + self.tree["calibration"].set_Q_pixel_units(Q_pixel_units) ## properties @@ -93,27 +74,30 @@ def __init__( @property def R_Nx(self): return self.data.shape[0] + @property def R_Ny(self): return self.data.shape[1] + @property def Q_Nx(self): return self.data.shape[2] + @property def Q_Ny(self): return self.data.shape[3] @property def Rshape(self): - return (self.data.shape[0],self.data.shape[1]) + return (self.data.shape[0], self.data.shape[1]) + @property def Qshape(self): - return (self.data.shape[2],self.data.shape[3]) + return (self.data.shape[2], self.data.shape[3]) @property def R_N(self): - return self.R_Nx*self.R_Ny - + return self.R_Nx * self.R_Ny # pixel sizes/units @@ -121,61 +105,67 @@ def R_N(self): @property def R_pixel_size(self): return self.calibration.get_R_pixel_size() + @R_pixel_size.setter - def R_pixel_size(self,x): - if type(x) is not list: x = [x,x] - self.set_dim(0,[0,x[0]]) - self.set_dim(1,[0,x[1]]) + def R_pixel_size(self, x): + if type(x) is not list: + x = [x, x] + self.set_dim(0, [0, x[0]]) + self.set_dim(1, [0, x[1]]) self.calibration.set_R_pixel_size(x) + @property def R_pixel_units(self): return self.calibration.get_R_pixel_units() + @R_pixel_units.setter - def R_pixel_units(self,x): - if type(x) is not list: x = [x,x] + def R_pixel_units(self, x): + if type(x) is not list: + x = [x, x] self.dim_units[0] = x[0] self.dim_units[1] = x[1] self.calibration.set_R_pixel_units(x) - # Q + # Q @property def Q_pixel_size(self): return self.calibration.get_Q_pixel_size() + @Q_pixel_size.setter - def Q_pixel_size(self,x): - if type(x) is not list: x = [x,x] - self.set_dim(2,[0,x[0]]) - self.set_dim(3,[0,x[1]]) + def Q_pixel_size(self, x): + if type(x) is not list: + x = [x, x] + self.set_dim(2, [0, x[0]]) + self.set_dim(3, [0, x[1]]) self.calibration.set_Q_pixel_size(x) + @property def Q_pixel_units(self): return self.calibration.get_Q_pixel_units() + @Q_pixel_units.setter - def Q_pixel_units(self,x): - if type(x) is not list: x = [x,x] + def Q_pixel_units(self, x): + if type(x) is not list: + x = [x, x] self.dim_units[2] = x[0] self.dim_units[3] = x[1] self.calibration.set_Q_pixel_units(x) - # calibration @property def calibration(self): - return self.tree['calibration'] + return self.tree["calibration"] + @calibration.setter def calibration(self, x): - assert( isinstance( x, Calibration)) - self.tree['calibration'] = x - - + assert isinstance(x, Calibration) + self.tree["calibration"] = x # for parent datacube tracking def track_parent(self, x): x._parent = self x.calibration = self.calibration - - # HDF5 read/write # write is inherited from Array @@ -183,13 +173,8 @@ def track_parent(self, x): # read def from_h5(group): from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.io import DataCube_from_h5 - return DataCube_from_h5(group) + return DataCube_from_h5(group) ############ END OF CLASS ########### - - - - - diff --git a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/diffractionslice.py b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/diffractionslice.py index 6df0f4981..b32877a4a 100644 --- a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/diffractionslice.py +++ b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/diffractionslice.py @@ -3,20 +3,22 @@ from 
py4DSTEM.io.legacy.legacy13.v13_emd_classes.array import Array -from typing import Optional,Union +from typing import Optional, Union import numpy as np import h5py + class DiffractionSlice(Array): """ Stores a diffraction-space shaped 2D data array. """ + def __init__( self, data: np.ndarray, - name: Optional[str] = 'diffractionslice', - slicelabels: Optional[Union[bool,list]] = None - ): + name: Optional[str] = "diffractionslice", + slicelabels: Optional[Union[bool, list]] = None, + ): """ Accepts: data (np.ndarray): the data @@ -28,30 +30,20 @@ def __init__( # initialize as an Array Array.__init__( - self, - data = data, - name = name, - units = 'intensity', - slicelabels = slicelabels + self, data=data, name=name, units="intensity", slicelabels=slicelabels ) - - # HDF5 read/write # write inherited from Array # read def from_h5(group): - from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.io import DiffractionSlice_from_h5 - return DiffractionSlice_from_h5(group) - - - + from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.io import ( + DiffractionSlice_from_h5, + ) + return DiffractionSlice_from_h5(group) ############ END OF CLASS ########### - - - diff --git a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/io.py b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/io.py index d15fa9dce..2556ebe8f 100644 --- a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/io.py +++ b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/io.py @@ -4,17 +4,26 @@ import h5py from os.path import basename -from py4DSTEM.io.legacy.legacy13.v13_emd_classes.io import Array_from_h5, Metadata_from_h5 +from py4DSTEM.io.legacy.legacy13.v13_emd_classes.io import ( + Array_from_h5, + Metadata_from_h5, +) from py4DSTEM.io.legacy.legacy13.v13_emd_classes.io import PointList_from_h5 -from py4DSTEM.io.legacy.legacy13.v13_emd_classes.io import PointListArray_from_h5, PointListArray_to_h5 -from py4DSTEM.io.legacy.legacy13.v13_emd_classes.io import _write_metadata, _read_metadata - +from py4DSTEM.io.legacy.legacy13.v13_emd_classes.io import ( + PointListArray_from_h5, + PointListArray_to_h5, +) +from py4DSTEM.io.legacy.legacy13.v13_emd_classes.io import ( + _write_metadata, + _read_metadata, +) # Calibration + # read -def Calibration_from_h5(group:h5py.Group): +def Calibration_from_h5(group: h5py.Group): """ Takes a valid HDF5 group for an HDF5 file object which is open in read mode. Determines if it's a valid Metadata representation, and @@ -29,6 +38,7 @@ def Calibration_from_h5(group:h5py.Group): cal = Calibration_from_Metadata(cal) return cal + def Calibration_from_Metadata(metadata): """ Constructs a Calibration object with the dict entries of a Metadata object @@ -39,21 +49,18 @@ def Calibration_from_Metadata(metadata): """ from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.calibration import Calibration - cal = Calibration(name = metadata.name) + cal = Calibration(name=metadata.name) cal._params.update(metadata._params) return cal - - - - # DataCube # read -def DataCube_from_h5(group:h5py.Group): + +def DataCube_from_h5(group: h5py.Group): """ Takes a valid HDF5 group for an HDF5 file object which is open in read mode. Determines if an Array object of this name exists inside this group, @@ -68,6 +75,7 @@ def DataCube_from_h5(group:h5py.Group): datacube = DataCube_from_Array(datacube) return datacube + def DataCube_from_Array(array): """ Converts an Array to a DataCube. 
@@ -77,36 +85,35 @@ def DataCube_from_Array(array): datacube (DataCube) """ from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.datacube import DataCube - assert(array.rank == 4), "Array must have 4 dimensions" + + assert array.rank == 4, "Array must have 4 dimensions" array.__class__ = DataCube try: - R_pixel_size = array.dims[0][1]-array.dims[0][0] + R_pixel_size = array.dims[0][1] - array.dims[0][0] except IndexError: R_pixel_size = 1 try: - Q_pixel_size = array.dims[2][1]-array.dims[2][0] + Q_pixel_size = array.dims[2][1] - array.dims[2][0] except IndexError: Q_pixel_size = 1 array.__init__( - data = array.data, - name = array.name, - R_pixel_size = R_pixel_size, - R_pixel_units = array.dim_units[0], - Q_pixel_size = Q_pixel_size, - Q_pixel_units = array.dim_units[2], - slicelabels = array.slicelabels + data=array.data, + name=array.name, + R_pixel_size=R_pixel_size, + R_pixel_units=array.dim_units[0], + Q_pixel_size=Q_pixel_size, + Q_pixel_units=array.dim_units[2], + slicelabels=array.slicelabels, ) return array - - - # DiffractionSlice # read -def DiffractionSlice_from_h5(group:h5py.Group): + +def DiffractionSlice_from_h5(group: h5py.Group): """ Takes a valid HDF5 group for an HDF5 file object which is open in read mode. Determines if it's a valid Array, and if so loads and @@ -129,24 +136,22 @@ def DiffractionSlice_from_Array(array): Returns: (DiffractionSlice) """ - from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.diffractionslice import DiffractionSlice - assert(array.rank == 2), "Array must have 2 dimensions" - array.__class__ = DiffractionSlice - array.__init__( - data = array.data, - name = array.name, - slicelabels = array.slicelabels + from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.diffractionslice import ( + DiffractionSlice, ) - return array - + assert array.rank == 2, "Array must have 2 dimensions" + array.__class__ = DiffractionSlice + array.__init__(data=array.data, name=array.name, slicelabels=array.slicelabels) + return array # RealSlice # read -def RealSlice_from_h5(group:h5py.Group): + +def RealSlice_from_h5(group: h5py.Group): """ Takes a valid HDF5 group for an HDF5 file object which is open in read mode. Determines if it's a valid Array, and if so loads and @@ -174,25 +179,19 @@ def RealSlice_from_Array(array): (RealSlice) """ from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.realslice import RealSlice - assert(array.rank == 2), "Array must have 2 dimensions" + + assert array.rank == 2, "Array must have 2 dimensions" array.__class__ = RealSlice - array.__init__( - data = array.data, - name = array.name, - slicelabels = array.slicelabels - ) + array.__init__(data=array.data, name=array.name, slicelabels=array.slicelabels) return array - - - - # VirtualDiffraction # read -def VirtualDiffraction_from_h5(group:h5py.Group): + +def VirtualDiffraction_from_h5(group: h5py.Group): """ Takes a valid HDF5 group for an HDF5 file object which is open in read mode. 
Determines if it's a valid Array, and if so loads and @@ -215,44 +214,45 @@ def VirtualDiffraction_from_Array(array): Returns: (VirtualDiffraction) """ - from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.virtualdiffraction import VirtualDiffraction - assert(array.rank == 2), "Array must have 2 dimensions" + from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.virtualdiffraction import ( + VirtualDiffraction, + ) + + assert array.rank == 2, "Array must have 2 dimensions" # get diffraction image metadata try: - md = array.metadata['virtualdiffraction'] - method = md['method'] - mode = md['mode'] - geometry = md['geometry'] - shift_center = md['shift_center'] + md = array.metadata["virtualdiffraction"] + method = md["method"] + mode = md["mode"] + geometry = md["geometry"] + shift_center = md["shift_center"] except KeyError: print("Warning: VirtualDiffraction metadata could not be found") - method = '' - mode = '' - geometry = '' - shift_center = '' - + method = "" + mode = "" + geometry = "" + shift_center = "" # instantiate as a DiffractionImage array.__class__ = VirtualDiffraction array.__init__( - data = array.data, - name = array.name, - method = method, - mode = mode, - geometry = geometry, - shift_center = shift_center, + data=array.data, + name=array.name, + method=method, + mode=mode, + geometry=geometry, + shift_center=shift_center, ) return array - - # VirtualImage # read -def VirtualImage_from_h5(group:h5py.Group): + +def VirtualImage_from_h5(group: h5py.Group): """ Takes a valid HDF5 group for an HDF5 file object which is open in read mode. Determines if it's a valid Array, and if so loads and @@ -275,44 +275,46 @@ def VirtualImage_from_Array(array): Returns: (VirtualImage) """ - from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.virtualimage import VirtualImage - assert(array.rank == 2), "Array must have 2 dimensions" + from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.virtualimage import ( + VirtualImage, + ) + + assert array.rank == 2, "Array must have 2 dimensions" # get diffraction image metadata try: - md = array.metadata['virtualimage'] - mode = md['mode'] - geo = md['geometry'] - centered = md._params.get('centered',None) - calibrated = md._params.get('calibrated',None) - shift_center = md._params.get('shift_center',None) - dask = md._params.get('dask',None) + md = array.metadata["virtualimage"] + mode = md["mode"] + geo = md["geometry"] + centered = md._params.get("centered", None) + calibrated = md._params.get("calibrated", None) + shift_center = md._params.get("shift_center", None) + dask = md._params.get("dask", None) except KeyError: er = "VirtualImage metadata could not be found" raise Exception(er) - # instantiate as a DiffractionImage array.__class__ = VirtualImage array.__init__( - data = array.data, - name = array.name, - mode = mode, - geometry = geo, - centered = centered, - calibrated = calibrated, - shift_center = shift_center, - dask = dask + data=array.data, + name=array.name, + mode=mode, + geometry=geo, + centered=centered, + calibrated=calibrated, + shift_center=shift_center, + dask=dask, ) return array - # Probe # read -def Probe_from_h5(group:h5py.Group): + +def Probe_from_h5(group: h5py.Group): """ Takes a valid HDF5 group for an HDF5 file object which is open in read mode. 
Determines if it's a valid Array, and if so loads and @@ -336,10 +338,11 @@ def Probe_from_Array(array): (Probe) """ from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.probe import Probe - assert(array.rank == 2), "Array must have 2 dimensions" + + assert array.rank == 2, "Array must have 2 dimensions" # get diffraction image metadata try: - md = array.metadata['probe'] + md = array.metadata["probe"] kwargs = {} for k in md.keys: v = md[k] @@ -348,23 +351,18 @@ def Probe_from_Array(array): er = "Probe metadata could not be found" raise Exception(er) - # instantiate as a DiffractionImage array.__class__ = Probe - array.__init__( - data = array.data, - name = array.name, - **kwargs - ) + array.__init__(data=array.data, name=array.name, **kwargs) return array - # QPoints # Reading -def QPoints_from_h5(group:h5py.Group): + +def QPoints_from_h5(group: h5py.Group): """ Takes a valid HDF5 group for an HDF5 file object which is open in read mode. Determines if it's a valid QPoints instance, and if so @@ -388,25 +386,20 @@ def QPoints_from_PointList(pointlist): (QPoints) """ from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.qpoints import QPoints + pointlist.__class__ = QPoints pointlist.__init__( - data = pointlist.data, - name = pointlist.name, + data=pointlist.data, + name=pointlist.name, ) return pointlist - - - # BraggVectors # write -def BraggVectors_to_h5( - braggvectors, - group - ): +def BraggVectors_to_h5(braggvectors, group): """ Takes a valid HDF5 group for an HDF5 file object which is open in write or append mode. Writes a new group with a name given by this @@ -418,23 +411,17 @@ def BraggVectors_to_h5( ## Write grp = group.create_group(braggvectors.name) - grp.attrs.create("emd_group_type", 4) # this tag indicates a Custom type + grp.attrs.create("emd_group_type", 4) # this tag indicates a Custom type grp.attrs.create("py4dstem_class", braggvectors.__class__.__name__) # Ensure that the PointListArrays have the appropriate names braggvectors._v_uncal.name = "_v_uncal" # Add vectors - PointListArray_to_h5( - braggvectors._v_uncal, - grp - ) + PointListArray_to_h5(braggvectors._v_uncal, grp) try: braggvectors._v_cal.name = "_v_cal" - PointListArray_to_h5( - braggvectors._v_cal, - grp - ) + PointListArray_to_h5(braggvectors._v_cal, grp) except AttributeError: pass @@ -443,7 +430,7 @@ def BraggVectors_to_h5( # read -def BraggVectors_from_h5(group:h5py.Group): +def BraggVectors_from_h5(group: h5py.Group): """ Takes a valid HDF5 group for an HDF5 file object which is open in read mode, and a name. 
Determines if a valid BraggVectors object of this name exists inside @@ -455,34 +442,31 @@ def BraggVectors_from_h5(group:h5py.Group): Returns: A BraggVectors instance """ - from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.braggvectors import BraggVectors + from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.braggvectors import ( + BraggVectors, + ) er = f"Group {group} is not a valid BraggVectors group" - assert("emd_group_type" in group.attrs.keys()), er - assert(group.attrs["emd_group_type"] == 4), er - + assert "emd_group_type" in group.attrs.keys(), er + assert group.attrs["emd_group_type"] == 4, er # Get uncalibrated peak - v_uncal = PointListArray_from_h5(group['_v_uncal']) + v_uncal = PointListArray_from_h5(group["_v_uncal"]) # Get Qshape metadata try: - grp_metadata = group['_metadata'] - Qshape = Metadata_from_h5(grp_metadata['braggvectors'])['Qshape'] + grp_metadata = group["_metadata"] + Qshape = Metadata_from_h5(grp_metadata["braggvectors"])["Qshape"] except KeyError: raise Exception("could not read Qshape") # Set up BraggVectors - braggvectors = BraggVectors( - v_uncal.shape, - Qshape = Qshape, - name = basename(group.name) - ) + braggvectors = BraggVectors(v_uncal.shape, Qshape=Qshape, name=basename(group.name)) braggvectors._v_uncal = v_uncal # Add calibrated peaks, if they're there try: - v_cal = PointListArray_from_h5(group['_v_cal']) + v_cal = PointListArray_from_h5(group["_v_cal"]) braggvectors._v_cal = v_cal except KeyError: pass @@ -491,13 +475,3 @@ def BraggVectors_from_h5(group:h5py.Group): _read_metadata(braggvectors, group) return braggvectors - - - - - - - - - - diff --git a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/parenttree.py b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/parenttree.py index 7696d5616..cb3c7853e 100644 --- a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/parenttree.py +++ b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/parenttree.py @@ -8,7 +8,6 @@ class ParentTree(Tree): - def __init__(self, parent, calibration): """ Creates a tree which is aware of and can point objects @@ -16,42 +15,35 @@ def __init__(self, parent, calibration): `parent` is typically a DataCube, but need not be. `calibration` should be a Calibration instance. 
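
A minimal sketch of the auto-wrapping behavior in `__setitem__` below; the `Parent` class is a hypothetical stand-in (in practice the parent is typically a DataCube):

```python
import numpy as np
from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.calibration import Calibration
from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.parenttree import ParentTree

class Parent:                            # hypothetical stand-in parent
    pass

parent = Parent()
parent.calibration = Calibration()
parent.tree = ParentTree(parent, parent.calibration)

# a bare ndarray is wrapped in an Array and linked back to the parent
parent.tree["mean_image"] = np.zeros((32, 32))
wrapped = parent.tree["mean_image"]      # an Array with ._parent set
```
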
""" - assert(isinstance(calibration, Calibration)) + assert isinstance(calibration, Calibration) Tree.__init__(self) - self._tree['calibration'] = calibration + self._tree["calibration"] = calibration self._parent = parent - - def __setitem__(self, key, value): if isinstance(value, ndarray): - value = Array( - data = value, - name = key - ) + value = Array(data=value, name=key) self._tree[key] = value value._parent = self._parent value.calibration = self._parent.calibration - - - def __getitem__(self,x): - l = x.split('/') + def __getitem__(self, x): + l = x.split("/") try: - l.remove('') - l.remove('') + l.remove("") + l.remove("") except ValueError: pass return self._getitem_from_list(l) - def _getitem_from_list(self,x): + def _getitem_from_list(self, x): if len(x) == 0: raise Exception("invalid slice value to tree") k = x.pop(0) er = f"{k} not found in tree - check keys" - assert(k in self._tree.keys()), er + assert k in self._tree.keys(), er if len(x) == 0: return self._tree[k] @@ -59,64 +51,48 @@ def _getitem_from_list(self,x): tree = self._tree[k].tree return tree._getitem_from_list(x) - - - - def __repr__(self): - space = ' '*len(self.__class__.__name__)+' ' + space = " " * len(self.__class__.__name__) + " " string = f"{self.__class__.__name__}( An object tree containing the following top-level object instances:" string += "\n" - for k,v in self._tree.items(): - string += "\n"+space+f" {k} \t\t ({v.__class__.__name__})" + for k, v in self._tree.items(): + string += "\n" + space + f" {k} \t\t ({v.__class__.__name__})" string += "\n)" return string def keys(self): return self._tree.keys() - - - def print(self): """ Prints the tree contents to screen. """ - print('/') + print("/") self._print_tree_to_screen(self) - print('\n') + print("\n") def _print_tree_to_screen(self, tree, tablevel=0, linelevels=[]): - """ - """ + """ """ if tablevel not in linelevels: linelevels.append(tablevel) keys = [k for k in tree.keys()] - #keys = [k for k in keys if k != 'metadata'] + # keys = [k for k in keys if k != 'metadata'] N = len(keys) - for i,k in enumerate(keys): - string = '' - string += '|' if 0 in linelevels else '' + for i, k in enumerate(keys): + string = "" + string += "|" if 0 in linelevels else "" for idx in range(tablevel): - l = '|' if idx+1 in linelevels else '' - string += '\t'+l - #print(string) - print(string+'--'+k) - if i == N-1: + l = "|" if idx + 1 in linelevels else "" + string += "\t" + l + # print(string) + print(string + "--" + k) + if i == N - 1: linelevels.remove(tablevel) try: self._print_tree_to_screen( - tree[k].tree, - tablevel=tablevel+1, - linelevels=linelevels) + tree[k].tree, tablevel=tablevel + 1, linelevels=linelevels + ) except AttributeError: pass pass - - - - - - - diff --git a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/probe.py b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/probe.py index 424c9a221..cd1c7d9d9 100644 --- a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/probe.py +++ b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/probe.py @@ -2,23 +2,21 @@ # and cross-correlation kernels derived from them from py4DSTEM.io.legacy.legacy13.v13_emd_classes.array import Array, Metadata -from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.diffractionslice import DiffractionSlice +from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.diffractionslice import ( + DiffractionSlice, +) -from typing import Optional,Union +from typing import Optional, Union import numpy as np import h5py + class Probe(DiffractionSlice): """ Stores a vacuum probe. 
""" - def __init__( - self, - data: np.ndarray, - name: Optional[str] = 'probe', - **kwargs - ): + def __init__(self, data: np.ndarray, name: Optional[str] = "probe", **kwargs): """ Accepts: data (2D or 3D np.ndarray): the vacuum probe, or @@ -29,50 +27,38 @@ def __init__( """ # if only the probe is passed, make space for the kernel if data.ndim == 2: - data = np.dstack([ - data, - np.zeros_like(data) - ]) + data = np.dstack([data, np.zeros_like(data)]) # initialize as a DiffractionSlice DiffractionSlice.__init__( - self, - name = name, - data = data, - slicelabels = [ - 'probe', - 'kernel' - ] + self, name=name, data=data, slicelabels=["probe", "kernel"] ) # Set metadata - md = Metadata(name='probe') - for k,v in kwargs.items(): + md = Metadata(name="probe") + for k, v in kwargs.items(): md[k] = v self.metadata = md - - ## properties @property def probe(self): - return self.get_slice('probe').data + return self.get_slice("probe").data + @probe.setter - def probe(self,x): - assert(x.shape == (self.data.shape[:2])) - self.data[:,:,0] = x + def probe(self, x): + assert x.shape == (self.data.shape[:2]) + self.data[:, :, 0] = x + @property def kernel(self): - return self.get_slice('kernel').data - @kernel.setter - def kernel(self,x): - assert(x.shape == (self.data.shape[:2])) - self.data[:,:,1] = x - - - + return self.get_slice("kernel").data + @kernel.setter + def kernel(self, x): + assert x.shape == (self.data.shape[:2]) + self.data[:, :, 1] = x # HDF5 read/write @@ -81,16 +67,8 @@ def kernel(self,x): # read def from_h5(group): from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.io import Probe_from_h5 - return Probe_from_h5(group) - - - + return Probe_from_h5(group) ############ END OF CLASS ########### - - - - - diff --git a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/qpoints.py b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/qpoints.py index b9c7bd1d5..3429c4c8d 100644 --- a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/qpoints.py +++ b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/qpoints.py @@ -2,20 +2,22 @@ from py4DSTEM.io.legacy.legacy13.v13_emd_classes.pointlist import PointList -from typing import Optional,Union +from typing import Optional, Union import numpy as np import h5py + class QPoints(PointList): """ Stores a set of diffraction space points, with fields 'qx', 'qy' and 'intensity' """ + def __init__( self, data: np.ndarray, - name: Optional[str] = 'qpoints', - ): + name: Optional[str] = "qpoints", + ): """ Accepts: data (structured numpy ndarray): should have three fields, which @@ -28,25 +30,24 @@ def __init__( # initialize as a PointList PointList.__init__( self, - data = data, - name = name, + data=data, + name=name, ) # rename fields - self.fields = 'qx','qy','intensity' - + self.fields = "qx", "qy", "intensity" @property def qx(self): - return self.data['qx'] + return self.data["qx"] + @property def qy(self): - return self.data['qy'] + return self.data["qy"] + @property def intensity(self): - return self.data['intensity'] - - + return self.data["intensity"] # HDF5 read/write @@ -55,15 +56,8 @@ def intensity(self): # read def from_h5(group): from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.io import QPoints_from_h5 - return QPoints_from_h5(group) - - - + return QPoints_from_h5(group) ############ END OF CLASS ########### - - - - diff --git a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/realslice.py b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/realslice.py index 82231ef37..367401055 100644 --- 
a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/realslice.py +++ b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/realslice.py @@ -1,21 +1,23 @@ from py4DSTEM.io.legacy.legacy13.v13_emd_classes.array import Array -from typing import Optional,Union +from typing import Optional, Union import numpy as np import h5py + class RealSlice(Array): """ Stores a real-space shaped 2D data array. """ + def __init__( self, data: np.ndarray, - name: Optional[str] = 'realslice', - pixel_size: Optional[Union[float,list]] = 1, - pixel_units: Optional[Union[str,list]] = 'pixels', - slicelabels: Optional[Union[bool,list]] = None - ): + name: Optional[str] = "realslice", + pixel_size: Optional[Union[float, list]] = 1, + pixel_units: Optional[Union[str, list]] = "pixels", + slicelabels: Optional[Union[bool, list]] = None, + ): """ Accepts: data (np.ndarray): the data @@ -28,28 +30,27 @@ def __init__( A new RealSlice instance """ # expand pixel inputs to include 2 dimensions - if type(pixel_size) is not list: pixel_size = [pixel_size,pixel_size] - if type(pixel_units) is not list: pixel_units = [pixel_units,pixel_units] + if type(pixel_size) is not list: + pixel_size = [pixel_size, pixel_size] + if type(pixel_units) is not list: + pixel_units = [pixel_units, pixel_units] # initialize as an Array Array.__init__( self, - data = data, - name = name, - units = 'intensity', - dims = [ + data=data, + name=name, + units="intensity", + dims=[ pixel_size[0], pixel_size[1], ], - dim_units = [ + dim_units=[ pixel_units[0], pixel_units[1], ], - dim_names = [ - 'Rx', - 'Ry' - ], - slicelabels = slicelabels + dim_names=["Rx", "Ry"], + slicelabels=slicelabels, ) # setup the size/units with getter/setters @@ -59,40 +60,38 @@ def __init__( @property def pixel_size(self): return self._pixel_size + @pixel_size.setter - def pixel_size(self,x): - if type(x) is not list: x = [x,x] - self.set_dim(0,[0,x[0]]) - self.set_dim(1,[0,x[1]]) + def pixel_size(self, x): + if type(x) is not list: + x = [x, x] + self.set_dim(0, [0, x[0]]) + self.set_dim(1, [0, x[1]]) self._pixel_size = x + @property def pixel_units(self): return self._pixel_units + @pixel_units.setter - def pixel_units(self,x): - if type(x) is not list: x = [x,x] + def pixel_units(self, x): + if type(x) is not list: + x = [x, x] self.dim_units[0] = x[0] self.dim_units[1] = x[1] self._pixel_units = x - - # HDF5 read/write # write inherited from Array # read def from_h5(group): - from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.io import RealSlice_from_h5 - return RealSlice_from_h5(group) - - - + from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.io import ( + RealSlice_from_h5, + ) + return RealSlice_from_h5(group) ############ END OF CLASS ########### - - - - diff --git a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/virtualdiffraction.py b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/virtualdiffraction.py index 40211a3f7..188f1d646 100644 --- a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/virtualdiffraction.py +++ b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/virtualdiffraction.py @@ -1,28 +1,32 @@ # Defines the VirtualDiffraction class, which stores 2D, diffraction-shaped data # with metadata about how it was created -from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.diffractionslice import DiffractionSlice +from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.diffractionslice import ( + DiffractionSlice, +) from py4DSTEM.io.legacy.legacy13.v13_emd_classes.metadata import Metadata -from typing import Optional,Union +from typing import 
Optional, Union import numpy as np import h5py + class VirtualDiffraction(DiffractionSlice): """ Stores a diffraction-space shaped 2D image with metadata indicating how this image was generated from a datacube. """ + def __init__( self, data: np.ndarray, - name: Optional[str] = 'diffractionimage', + name: Optional[str] = "diffractionimage", method: Optional[str] = None, mode: Optional[str] = None, - geometry: Optional[Union[tuple,np.ndarray]] = None, + geometry: Optional[Union[tuple, np.ndarray]] = None, calibrated: Optional[bool] = False, - shift_center: bool = False - ): + shift_center: bool = False, + ): """ Args: data (np.ndarray) : the 2D data @@ -63,36 +67,29 @@ def __init__( # initialize as a DiffractionSlice DiffractionSlice.__init__( self, - data = data, - name = name, + data=data, + name=name, ) # Set metadata - md = Metadata(name='virtualdiffraction') - md['method'] = method - md['mode'] = mode - md['geometry'] = geometry - md['shift_center'] = shift_center + md = Metadata(name="virtualdiffraction") + md["method"] = method + md["mode"] = mode + md["geometry"] = geometry + md["shift_center"] = shift_center self.metadata = md - - # HDF5 read/write # write inherited from Array # read def from_h5(group): - from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.io import VirtualDiffraction_from_h5 - return VirtualDiffraction_from_h5(group) - - - + from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.io import ( + VirtualDiffraction_from_h5, + ) + return VirtualDiffraction_from_h5(group) ############ END OF CLASS ########### - - - - diff --git a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/virtualimage.py b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/virtualimage.py index 563936378..4d6c38845 100644 --- a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/virtualimage.py +++ b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/virtualimage.py @@ -4,26 +4,28 @@ from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.realslice import RealSlice from py4DSTEM.io.legacy.legacy13.v13_emd_classes.metadata import Metadata -from typing import Optional,Union +from typing import Optional, Union import numpy as np import h5py + class VirtualImage(RealSlice): """ Stores a real-space shaped 2D image with metadata indicating how this image was generated from a datacube. 
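
A minimal sketch showing how the generating parameters land in metadata; the mode and geometry values are illustrative assumptions:

```python
import numpy as np
from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.virtualimage import VirtualImage

im = VirtualImage(
    data=np.zeros((100, 100)),
    mode="circle",
    geometry=((32, 32), 10),   # e.g. a center and radius, purely illustrative
)
assert im.metadata["virtualimage"]["mode"] == "circle"
```
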
""" + def __init__( self, data: np.ndarray, - name: Optional[str] = 'virtualimage', + name: Optional[str] = "virtualimage", mode: Optional[str] = None, - geometry: Optional[Union[tuple,np.ndarray]] = None, + geometry: Optional[Union[tuple, np.ndarray]] = None, centered: Optional[bool] = False, calibrated: Optional[bool] = False, shift_center: Optional[bool] = False, - dask: Optional[bool] = False - ): + dask: Optional[bool] = False, + ): """ Args: data (np.ndarray) : the 2D data @@ -68,38 +70,31 @@ def __init__( # initialize as a RealSlice RealSlice.__init__( self, - data = data, - name = name, + data=data, + name=name, ) # Set metadata - md = Metadata(name='virtualimage') - md['mode'] = mode - md['geometry'] = geometry - md['centered'] = centered - md['calibrated'] = calibrated - md['shift_center'] = shift_center - md['dask'] = dask + md = Metadata(name="virtualimage") + md["mode"] = mode + md["geometry"] = geometry + md["centered"] = centered + md["calibrated"] = calibrated + md["shift_center"] = shift_center + md["dask"] = dask self.metadata = md - # HDF5 read/write # write inherited from Array # read def from_h5(group): - from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.io import VirtualImage_from_h5 - return VirtualImage_from_h5(group) - - - + from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.io import ( + VirtualImage_from_h5, + ) + return VirtualImage_from_h5(group) ############ END OF CLASS ########### - - - - - diff --git a/py4DSTEM/io/legacy/legacy13/v13_to_14.py b/py4DSTEM/io/legacy/legacy13/v13_to_14.py index 18c08c777..650529b22 100644 --- a/py4DSTEM/io/legacy/legacy13/v13_to_14.py +++ b/py4DSTEM/io/legacy/legacy13/v13_to_14.py @@ -11,7 +11,7 @@ Metadata as Metadata13, Array as Array13, PointList as PointList13, - PointListArray as PointListArray13 + PointListArray as PointListArray13, ) from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes import ( Calibration as Calibration13, @@ -22,20 +22,13 @@ VirtualImage as VirtualImage13, Probe as Probe13, QPoints as QPoints13, - BraggVectors as BraggVectors13 + BraggVectors as BraggVectors13, ) - # v14 imports -from emdfile import ( - Root, - Metadata, - Array, - PointList, - PointListArray -) +from emdfile import Root, Metadata, Array, PointList, PointListArray from py4DSTEM.data import ( Calibration, @@ -50,9 +43,7 @@ ) - - -def v13_to_14( v13tree, v13cal ): +def v13_to_14(v13tree, v13cal): """ Converts a v13 data tree to a v14 data tree """ @@ -64,16 +55,16 @@ def v13_to_14( v13tree, v13cal ): node = _v13_to_14_cls(v13tree) # handle the root - if isinstance(node,Root): + if isinstance(node, Root): root = node elif node.root is None: - root = Root( name=node.name ) + root = Root(name=node.name) root.tree(node) else: root = node.root # populate tree - _populate_tree(v13tree,node,root) + _populate_tree(v13tree, node, root) # add calibration if v13cal is not None: @@ -84,19 +75,16 @@ def v13_to_14( v13tree, v13cal ): return node - -def _populate_tree(node13,node14,root14): +def _populate_tree(node13, node14, root14): for key in node13.tree.keys(): newnode13 = node13.tree[key] newnode14 = _v13_to_14_cls(newnode13) # skip calibrations and metadata - if isinstance(newnode14,Metadata): + if isinstance(newnode14, Metadata): pass else: - node14.tree(newnode14,force=True) - _populate_tree(newnode13,newnode14,root14) - - + node14.tree(newnode14, force=True) + _populate_tree(newnode13, newnode14, root14) def _v13_to_14_cls(obj): @@ -105,37 +93,35 @@ def _v13_to_14_cls(obj): including metadata. 
""" - assert(isinstance(obj, ( - Root13, - Metadata13, - Array13, - PointList13, - PointListArray13, - Calibration13, - DataCube13, - DiffractionSlice13, - VirtualDiffraction13, - RealSlice13, - VirtualImage13, - Probe13, - QPoints13, - BraggVectors13 - ))), f"obj must be a v13 class instance, not type {type(obj)}" - + assert isinstance( + obj, + ( + Root13, + Metadata13, + Array13, + PointList13, + PointListArray13, + Calibration13, + DataCube13, + DiffractionSlice13, + VirtualDiffraction13, + RealSlice13, + VirtualImage13, + Probe13, + QPoints13, + BraggVectors13, + ), + ), f"obj must be a v13 class instance, not type {type(obj)}" if isinstance(obj, Root13): - x = Root( name=obj.name ) + x = Root(name=obj.name) elif isinstance(obj, Calibration13): - x = Calibration( name=obj.name ) - x._params.update( obj._params ) + x = Calibration(name=obj.name) + x._params.update(obj._params) elif isinstance(obj, DataCube13): - x = DataCube( - name = obj.name, - data = obj.data, - slicelabels = obj.slicelabels - ) + x = DataCube(name=obj.name, data=obj.data, slicelabels=obj.slicelabels) elif isinstance(obj, DiffractionSlice13): if obj.is_stack: @@ -143,17 +129,11 @@ def _v13_to_14_cls(obj): else: data = obj.data x = DiffractionSlice( - name = obj.name, - data = data, - units = obj.units, - slicelabels = obj.slicelabels + name=obj.name, data=data, units=obj.units, slicelabels=obj.slicelabels ) elif isinstance(obj, VirtualDiffraction13): - x = VirtualDiffraction( - name = obj.name, - data = obj.data - ) + x = VirtualDiffraction(name=obj.name, data=obj.data) elif isinstance(obj, RealSlice13): if obj.is_stack: @@ -161,106 +141,81 @@ def _v13_to_14_cls(obj): else: data = obj.data x = RealSlice( - name = obj.name, - data = data, - units = obj.units, - slicelabels = obj.slicelabels + name=obj.name, data=data, units=obj.units, slicelabels=obj.slicelabels ) pass elif isinstance(obj, VirtualImage13): - x = VirtualImage( - name = obj.name, - data = obj.data - ) + x = VirtualImage(name=obj.name, data=obj.data) pass elif isinstance(obj, Probe13): from py4DSTEM.braggvectors import Probe - x = Probe( - name = obj.name, - data = obj.data - ) + + x = Probe(name=obj.name, data=obj.data) elif isinstance(obj, QPoints13): - x = PointList( - name = obj.name, - data = obj.data - ) + x = PointList(name=obj.name, data=obj.data) elif isinstance(obj, BraggVectors13): from py4DSTEM.braggvectors import BraggVectors - x = BraggVectors( - name = obj.name, - Rshape = obj.Rshape, - Qshape = obj.Qshape - ) + + x = BraggVectors(name=obj.name, Rshape=obj.Rshape, Qshape=obj.Qshape) x._v_uncal = obj._v_uncal - if hasattr(obj,'_v_cal'): + if hasattr(obj, "_v_cal"): x._v_cal = obj._v_cal elif isinstance(obj, Metadata13): - x = Metadata( name=obj.name ) - x._params.update( obj._params ) + x = Metadata(name=obj.name) + x._params.update(obj._params) elif isinstance(obj, Array13): - # prepare arguments if obj.is_stack: data = np.rollaxis(obj.data, axis=2) else: data = obj.data - args = { - 'name' : obj.name, - 'data' : data - } - if hasattr(obj,'units'): args['units'] = obj.units - if hasattr(obj,'dim_names'): args['dim_names'] = obj.dim_names - if hasattr(obj,'dim_units'): args['dim_units'] = obj.dim_units - if hasattr(obj,'slicelabels'): args['slicelabels'] = obj.slicelabels - if hasattr(obj,'dims'): + args = {"name": obj.name, "data": data} + if hasattr(obj, "units"): + args["units"] = obj.units + if hasattr(obj, "dim_names"): + args["dim_names"] = obj.dim_names + if hasattr(obj, "dim_units"): + args["dim_units"] = obj.dim_units + if 
hasattr(obj, "slicelabels"): + args["slicelabels"] = obj.slicelabels + if hasattr(obj, "dims"): dims = [] for dim in obj.dims: dims.append(dim) - args['dims'] = dims + args["dims"] = dims # get the array - x = Array( - **args - ) + x = Array(**args) elif isinstance(obj, PointList13): - x = PointList( - name = obj.name, - data = obj.data - ) + x = PointList(name=obj.name, data=obj.data) elif isinstance(obj, PointListArray13): - x = PointListArray( - name = obj.name, - dtype = obj.dtype, - shape = obj.shape - ) - for idx,jdx in tqdmnd( - x.shape[0],x.shape[1], - desc='transferring PointListArray v13->14', - unit='foolishness'): - x[idx,jdx] = obj[idx,jdx] + x = PointListArray(name=obj.name, dtype=obj.dtype, shape=obj.shape) + for idx, jdx in tqdmnd( + x.shape[0], + x.shape[1], + desc="transferring PointListArray v13->14", + unit="foolishness", + ): + x[idx, jdx] = obj[idx, jdx] else: raise Exception(f"Unexpected object type {type(obj)}") - - # Handle metadata - if hasattr(obj,'metadata'): + if hasattr(obj, "metadata"): for key in obj.metadata.keys(): md = obj.metadata[key] - dm = Metadata( name=md.name ) - dm._params.update( md._params ) + dm = Metadata(name=md.name) + dm._params.update(md._params) x.metadata = dm - # Return return x - diff --git a/py4DSTEM/io/legacy/read_legacy_12.py b/py4DSTEM/io/legacy/read_legacy_12.py index c385c6427..40bfcfc94 100644 --- a/py4DSTEM/io/legacy/read_legacy_12.py +++ b/py4DSTEM/io/legacy/read_legacy_12.py @@ -10,13 +10,10 @@ read_v0_9, read_v0_7, read_v0_6, - read_v0_5 + read_v0_5, ) - - - def read_legacy12(filepath, **kwargs): """ File reader for older legacy py4DSTEM (v<0.13) formated HDF5 files. @@ -55,18 +52,19 @@ def read_legacy12(filepath, **kwargs): * Otherwise, a single DataObject or list of DataObjects are returned, based on the value of the argument data_id. """ - assert(exists(filepath)), "Error: specified filepath does not exist" - assert(is_py4DSTEM_file(filepath)), "Error: {} isn't recognized as a py4DSTEM file.".format(filepath) - + assert exists(filepath), "Error: specified filepath does not exist" + assert is_py4DSTEM_file( + filepath + ), "Error: {} isn't recognized as a py4DSTEM file.".format(filepath) # For HDF5 files containing multiple valid EMD type 2 files (i.e. 
py4DSTEM files), # disambiguate desired data tgs = get_py4DSTEM_topgroups(filepath) - if 'topgroup' in kwargs.keys(): - tg = kwargs['topgroup'] - #assert(tg in tgs), "Error: specified topgroup, {}, not found.".format(tg) + if "topgroup" in kwargs.keys(): + tg = kwargs["topgroup"] + # assert(tg in tgs), "Error: specified topgroup, {}, not found.".format(tg) else: - if len(tgs)==1: + if len(tgs) == 1: tg = tgs[0] else: print("Multiple topgroups were found -- please specify one:") @@ -75,17 +73,21 @@ def read_legacy12(filepath, **kwargs): print(tg) return - # Get py4DSTEM version and call the appropriate read function version = get_py4DSTEM_version(filepath, tg) - if version_is_geq(version,(0,12,0)): return read_v0_12(filepath, **kwargs) - elif version_is_geq(version,(0,9,0)): return read_v0_9(filepath, **kwargs) - elif version_is_geq(version,(0,7,0)): return read_v0_7(filepath, **kwargs) - elif version_is_geq(version,(0,6,0)): return read_v0_6(filepath, **kwargs) - elif version_is_geq(version,(0,5,0)): return read_v0_5(filepath, **kwargs) + if version_is_geq(version, (0, 12, 0)): + return read_v0_12(filepath, **kwargs) + elif version_is_geq(version, (0, 9, 0)): + return read_v0_9(filepath, **kwargs) + elif version_is_geq(version, (0, 7, 0)): + return read_v0_7(filepath, **kwargs) + elif version_is_geq(version, (0, 6, 0)): + return read_v0_6(filepath, **kwargs) + elif version_is_geq(version, (0, 5, 0)): + return read_v0_5(filepath, **kwargs) else: - raise Exception('Support for legacy v{}.{}.{} files is no longer available.'.format(version[0],version[1],version[2])) - - - - + raise Exception( + "Support for legacy v{}.{}.{} files is no longer available.".format( + version[0], version[1], version[2] + ) + ) diff --git a/py4DSTEM/io/legacy/read_legacy_13.py b/py4DSTEM/io/legacy/read_legacy_13.py index 56e931ea9..04da1e65a 100644 --- a/py4DSTEM/io/legacy/read_legacy_13.py +++ b/py4DSTEM/io/legacy/read_legacy_13.py @@ -3,7 +3,7 @@ import h5py import numpy as np import warnings -from os.path import exists,basename,dirname,join +from os.path import exists, basename, dirname, join from typing import Optional, Union from py4DSTEM.io.legacy.read_utils import is_py4DSTEM_version13 @@ -16,25 +16,17 @@ VirtualImage, Probe, QPoints, - BraggVectors -) -from py4DSTEM.io.legacy.legacy13 import ( - Root, - Metadata, - Array, - PointList, - PointListArray + BraggVectors, ) +from py4DSTEM.io.legacy.legacy13 import Root, Metadata, Array, PointList, PointListArray from py4DSTEM.io.legacy.legacy13 import v13_to_14 - - def read_legacy13( filepath, root: Optional[str] = None, - tree: Optional[Union[bool,str]] = True, - ): + tree: Optional[Union[bool, str]] = True, +): """ File reader for legacy py4DSTEM (v=0.13.x) formatted HDF5 files. @@ -63,42 +55,47 @@ def read_legacy13( (the data) """ # Check that filepath is valid - assert(exists(filepath)), "Error: specified filepath does not exist" - assert(is_py4DSTEM_version13(filepath)), f"Error: {filepath} isn't recognized as a v13 py4DSTEM file." + assert exists(filepath), "Error: specified filepath does not exist" + assert is_py4DSTEM_version13( + filepath + ), f"Error: {filepath} isn't recognized as a v13 py4DSTEM file."
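The read_legacy12 dispatch above relies on version_is_geq checking newest-first, so the first branch that matches picks the reader. Since versions are (major, minor, release) tuples, the test reduces to lexicographic tuple ordering; a minimal standalone sketch (not the module's own implementation, which appears later in this diff, but equivalent in boolean context):

    def version_geq(current, minimum):
        # Python compares tuples lexicographically, which matches the
        # (major, minor, release) ordering the legacy readers rely on.
        return tuple(current) >= tuple(minimum)

    assert version_geq((0, 12, 3), (0, 12, 0))      # handled by read_v0_12
    assert not version_geq((0, 9, 9), (0, 12, 0))   # falls through to an older reader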
if root is None: # check if there is a single object in the file # if so, set root to that file; otherwise raise an Exception or Warning - with h5py.File(filepath,'r') as f: + with h5py.File(filepath, "r") as f: l1keys = list(f.keys()) - if len(l1keys)==0: - raise Exception('No top level groups found in this HDF5 file!') - elif len(l1keys)>1: - warnings.warn('Multiple top level groups found; please specify. Returning group names.') + if len(l1keys) == 0: + raise Exception("No top level groups found in this HDF5 file!") + elif len(l1keys) > 1: + warnings.warn( + "Multiple top level groups found; please specify. Returning group names." + ) return l1keys else: l2keys = list(f[l1keys[0]].keys()) - if len(l2keys)==0: - raise Exception('No top level data blocks found in this HDF5 file!') - elif len(l2keys)>1: - warnings.warn('Multiple top level data blocks found; please specify. Returning h5 paths to top level data blocks.') - return [join(l1keys[0],k) for k in l2keys] + if len(l2keys) == 0: + raise Exception("No top level data blocks found in this HDF5 file!") + elif len(l2keys) > 1: + warnings.warn( + "Multiple top level data blocks found; please specify. Returning h5 paths to top level data blocks." + ) + return [join(l1keys[0], k) for k in l2keys] else: - root = join(l1keys[0],l2keys[0]) - #this is a windows fix - root = root.replace("\\","/") - + root = join(l1keys[0], l2keys[0]) + # this is a windows fix + root = root.replace("\\", "/") # Open file - with h5py.File(filepath,'r') as f: - + with h5py.File(filepath, "r") as f: # open the selected group try: group_data = f[root] except KeyError: - raise Exception(f"the provided root {root} is not a valid path to a recognized data group") + raise Exception( + f"the provided root {root} is not a valid path to a recognized data group" + ) # Read data if tree is True: @@ -107,7 +104,7 @@ def read_legacy13( elif tree is False: data = _read_without_tree(group_data) - elif tree == 'noroot': + elif tree == "noroot": data = _read_without_root(group_data) else: @@ -116,22 +113,19 @@ def read_legacy13( # Read calibration cal = _read_calibration(group_data) - # convert version 13 -> 14 - data = v13_to_14(data,cal) + data = v13_to_14(data, cal) return data - - # utilities -def _read_without_tree(grp): +def _read_without_tree(grp): # handle empty datasets - if grp.attrs['emd_group_type'] == 'root': + if grp.attrs["emd_group_type"] == "root": data = Root( - name = basename(grp.name), + name=basename(grp.name), ) return data @@ -144,66 +138,53 @@ def _read_without_tree(grp): def _read_with_tree(grp): data = _read_without_tree(grp) - _populate_tree( - data.tree, - grp - ) + _populate_tree(data.tree, grp) return data def _read_without_root(grp): root = Root() - _populate_tree( - root.tree, - grp - ) + _populate_tree(root.tree, grp) return root def _read_calibration(grp): - keys = [k for k in grp.keys() if isinstance(grp[k],h5py.Group)] + keys = [k for k in grp.keys() if isinstance(grp[k], h5py.Group)] keys = [k for k in keys if (_get_v13_class(grp[k]) == Calibration)] - if len(keys)>0: + if len(keys) > 0: k = keys[0] cal = Calibration.from_h5(grp[k]) return cal else: name = dirname(grp.name) - if name != '/': + if name != "/": grp_upstream = grp.file[dirname(grp.name)] return _read_calibration(grp_upstream) else: return None -def _populate_tree(tree,grp): - keys = [k for k in grp.keys() if isinstance(grp[k],h5py.Group)] - keys = [k for k in keys if (k[0] != '_' and not _get_v13_class( - grp[k]) == Calibration)] +def _populate_tree(tree, grp): + keys = [k for k 
in grp.keys() if isinstance(grp[k], h5py.Group)] + keys = [ + k for k in keys if (k[0] != "_" and not _get_v13_class(grp[k]) == Calibration) + ] for key in keys: - tree[key] = _read_without_tree( - grp[key] - ) - _populate_tree( - tree[key].tree, - grp[key] - ) + tree[key] = _read_without_tree(grp[key]) + _populate_tree(tree[key].tree, grp[key]) pass - - - - def print_v13h5_tree(filepath, show_metadata=False): """ Prints the contents of an h5 file from a filepath. """ - with h5py.File(filepath,'r') as f: - print('/') + with h5py.File(filepath, "r") as f: + print("/") print_v13h5pyFile_tree(f, show_metadata=show_metadata) - print('\n') + print("\n") + def print_v13h5pyFile_tree(f, tablevel=0, linelevels=[], show_metadata=False): """ @@ -211,63 +192,57 @@ def print_v13h5pyFile_tree(f, tablevel=0, linelevels=[], show_metadata=False): """ if tablevel not in linelevels: linelevels.append(tablevel) - keys = [k for k in f.keys() if isinstance(f[k],h5py.Group)] + keys = [k for k in f.keys() if isinstance(f[k], h5py.Group)] if not show_metadata: - keys = [k for k in keys if k != '_metadata'] + keys = [k for k in keys if k != "_metadata"] N = len(keys) - for i,k in enumerate(keys): - string = '' - string += '|' if 0 in linelevels else '' + for i, k in enumerate(keys): + string = "" + string += "|" if 0 in linelevels else "" for idx in range(tablevel): - l = '|' if idx+1 in linelevels else '' - string += '\t'+l - print(string+'--'+k) - if i == N-1: + l = "|" if idx + 1 in linelevels else "" + string += "\t" + l + print(string + "--" + k) + if i == N - 1: linelevels.remove(tablevel) print_v13h5pyFile_tree( f[k], - tablevel=tablevel+1, + tablevel=tablevel + 1, linelevels=linelevels, - show_metadata=show_metadata) + show_metadata=show_metadata, + ) pass - - - - - - def _get_v13_class(grp): - lookup = { - 'Metadata' : Metadata, - 'Array' : Array, - 'PointList' : PointList, - 'PointListArray' : PointListArray, - 'Calibration' : Calibration, - 'DataCube' : DataCube, - 'DiffractionSlice' : DiffractionSlice, - 'VirtualDiffraction' : VirtualDiffraction, - 'DiffractionImage' : VirtualDiffraction, - 'RealSlice' : RealSlice, - 'VirtualImage' : VirtualImage, - 'Probe' : Probe, - 'QPoints' : QPoints, - 'BraggVectors' : BraggVectors + "Metadata": Metadata, + "Array": Array, + "PointList": PointList, + "PointListArray": PointListArray, + "Calibration": Calibration, + "DataCube": DataCube, + "DiffractionSlice": DiffractionSlice, + "VirtualDiffraction": VirtualDiffraction, + "DiffractionImage": VirtualDiffraction, + "RealSlice": RealSlice, + "VirtualImage": VirtualImage, + "Probe": Probe, + "QPoints": QPoints, + "BraggVectors": BraggVectors, } - if 'py4dstem_class' in grp.attrs: - classname = grp.attrs['py4dstem_class'] - elif 'emd_group_type' in grp.attrs: - emd_group_type = grp.attrs['emd_group_type'] + if "py4dstem_class" in grp.attrs: + classname = grp.attrs["py4dstem_class"] + elif "emd_group_type" in grp.attrs: + emd_group_type = grp.attrs["emd_group_type"] classname = { - 'root' : 'root', - 0 : Metadata, - 1 : Array, - 2 : PointList, - 3 : PointListArray, + "root": "root", + 0: Metadata, + 1: Array, + 2: PointList, + 3: PointListArray, }[emd_group_type] else: warnings.warn(f"Can't determine class type of H5 group {grp}; skipping...") @@ -278,12 +253,3 @@ def _get_v13_class(grp): except KeyError: warnings.warn(f"Can't determine class type of H5 group {grp}; skipping...") return None - - - - - - - - - diff --git a/py4DSTEM/io/legacy/read_utils.py b/py4DSTEM/io/legacy/read_utils.py index 761ce01e2..7cd48cde7 
100644 --- a/py4DSTEM/io/legacy/read_utils.py +++ b/py4DSTEM/io/legacy/read_utils.py @@ -3,100 +3,104 @@ import h5py import numpy as np + def get_py4DSTEM_topgroups(filepath): - """ Returns a list of toplevel groups in an HDF5 file which are valid py4DSTEM file trees. - """ + """Returns a list of toplevel groups in an HDF5 file which are valid py4DSTEM file trees.""" topgroups = [] - with h5py.File(filepath,'r') as f: + with h5py.File(filepath, "r") as f: for key in f.keys(): - if 'emd_group_type' in f[key].attrs: + if "emd_group_type" in f[key].attrs: topgroups.append(key) return topgroups + def is_py4DSTEM_version13(filepath): - """ Returns True for data written by a py4DSTEM v0.13.x release. - """ - with h5py.File(filepath,'r') as f: + """Returns True for data written by a py4DSTEM v0.13.x release.""" + with h5py.File(filepath, "r") as f: for k in f.keys(): if "emd_group_type" in f[k].attrs: - if f[k].attrs["emd_group_type"] == 'root': - if all([x in f[k].attrs for x in ("version_major","version_minor")]): - if (int(f[k].attrs["version_major"]),int(f[k].attrs["version_minor"])) == (0,13): + if f[k].attrs["emd_group_type"] == "root": + if all( + [x in f[k].attrs for x in ("version_major", "version_minor")] + ): + if ( + int(f[k].attrs["version_major"]), + int(f[k].attrs["version_minor"]), + ) == (0, 13): return True return False + def is_py4DSTEM_file(filepath): - """ Returns True iff filepath points to a py4DSTEM formatted (EMD type 2) file. - """ + """Returns True iff filepath points to a py4DSTEM formatted (EMD type 2) file.""" if is_py4DSTEM_version13(filepath): return True else: try: topgroups = get_py4DSTEM_topgroups(filepath) - if len(topgroups)>0: + if len(topgroups) > 0: return True else: return False except OSError: return False -def get_py4DSTEM_version(filepath, topgroup='4DSTEM_experiment'): - """ Returns the version (major,minor,release) of a py4DSTEM file. - """ - assert(is_py4DSTEM_file(filepath)), "Error: not recognized as a py4DSTEM file" - with h5py.File(filepath,'r') as f: - version_major = int(f[topgroup].attrs['version_major']) - version_minor = int(f[topgroup].attrs['version_minor']) - if 'version_release' in f[topgroup].attrs.keys(): - version_release = int(f[topgroup].attrs['version_release']) + +def get_py4DSTEM_version(filepath, topgroup="4DSTEM_experiment"): + """Returns the version (major,minor,release) of a py4DSTEM file.""" + assert is_py4DSTEM_file(filepath), "Error: not recognized as a py4DSTEM file" + with h5py.File(filepath, "r") as f: + version_major = int(f[topgroup].attrs["version_major"]) + version_minor = int(f[topgroup].attrs["version_minor"]) + if "version_release" in f[topgroup].attrs.keys(): + version_release = int(f[topgroup].attrs["version_release"]) else: version_release = 0 return version_major, version_minor, version_release -def get_UUID(filepath, topgroup='4DSTEM_experiment'): - """ Returns the UUID of a py4DSTEM file, or if unavailable returns -1. 
- """ - assert(is_py4DSTEM_file(filepath)), "Error: not recognized as a py4DSTEM file" - with h5py.File(filepath,'r') as f: + +def get_UUID(filepath, topgroup="4DSTEM_experiment"): + """Returns the UUID of a py4DSTEM file, or if unavailable returns -1.""" + assert is_py4DSTEM_file(filepath), "Error: not recognized as a py4DSTEM file" + with h5py.File(filepath, "r") as f: if topgroup in f.keys(): - if 'UUID' in f[topgroup].attrs: - return f[topgroup].attrs['UUID'] + if "UUID" in f[topgroup].attrs: + return f[topgroup].attrs["UUID"] return -1 -def version_is_geq(current,minimum): - """ Returns True iff current version (major,minor,release) is greater than or equal to minimum." - """ - if current[0]>minimum[0]: + +def version_is_geq(current, minimum): + """Returns True iff current version (major,minor,release) is greater than or equal to minimum." """ + if current[0] > minimum[0]: return True - elif current[0]==minimum[0]: - if current[1]>minimum[1]: + elif current[0] == minimum[0]: + if current[1] > minimum[1]: return True - elif current[1]==minimum[1]: - if current[2]>=minimum[2]: + elif current[1] == minimum[1]: + if current[2] >= minimum[2]: return True else: return False else: return False -def get_N_dataobjects(filepath, topgroup='4DSTEM_experiment'): - """ Returns a 7-tuple of ints with the numbers of: DataCubes, CountedDataCubes, - DiffractionSlices, RealSlices, PointLists, PointListArrays, total DataObjects. + +def get_N_dataobjects(filepath, topgroup="4DSTEM_experiment"): + """Returns a 7-tuple of ints with the numbers of: DataCubes, CountedDataCubes, + DiffractionSlices, RealSlices, PointLists, PointListArrays, total DataObjects. """ - assert(is_py4DSTEM_file(filepath)), "Error: not recognized as a py4DSTEM file" - with h5py.File(filepath,'r') as f: - assert(topgroup in f.keys()), "Error: unrecognized topgroup" - N_dc = len(f[topgroup]['data/datacubes'].keys()) - N_cdc = len(f[topgroup]['data/counted_datacubes'].keys()) - N_ds = len(f[topgroup]['data/diffractionslices'].keys()) - N_rs = len(f[topgroup]['data/realslices'].keys()) - N_pl = len(f[topgroup]['data/pointlists'].keys()) - N_pla = len(f[topgroup]['data/pointlistarrays'].keys()) + assert is_py4DSTEM_file(filepath), "Error: not recognized as a py4DSTEM file" + with h5py.File(filepath, "r") as f: + assert topgroup in f.keys(), "Error: unrecognized topgroup" + N_dc = len(f[topgroup]["data/datacubes"].keys()) + N_cdc = len(f[topgroup]["data/counted_datacubes"].keys()) + N_ds = len(f[topgroup]["data/diffractionslices"].keys()) + N_rs = len(f[topgroup]["data/realslices"].keys()) + N_pl = len(f[topgroup]["data/pointlists"].keys()) + N_pla = len(f[topgroup]["data/pointlistarrays"].keys()) try: - N_coords = len(f[topgroup]['data/coordinates'].keys()) + N_coords = len(f[topgroup]["data/coordinates"].keys()) except: N_coords = 0 - N_do = N_dc+N_cdc+N_ds+N_rs+N_pl+N_pla+N_coords - return N_dc,N_cdc,N_ds,N_rs,N_pl,N_pla,N_coords,N_do - - + N_do = N_dc + N_cdc + N_ds + N_rs + N_pl + N_pla + N_coords + return N_dc, N_cdc, N_ds, N_rs, N_pl, N_pla, N_coords, N_do diff --git a/py4DSTEM/io/parsefiletype.py b/py4DSTEM/io/parsefiletype.py index 1838f89b6..84b53e4dc 100644 --- a/py4DSTEM/io/parsefiletype.py +++ b/py4DSTEM/io/parsefiletype.py @@ -11,7 +11,7 @@ def _parse_filetype(fp): - """ + """ Accepts a path to a data file, and returns the file type as a string. 
""" _, fext = splitext(fp) @@ -55,32 +55,35 @@ def _parse_filetype(fp): else: raise Exception(f"Unrecognized file extension {fext}.") + def _is_arina(filepath): """ Check if an h5 file is an Arina file. """ - with h5py.File(filepath,'r') as f: + with h5py.File(filepath, "r") as f: try: - assert("entry" in f.keys()) + assert "entry" in f.keys() except AssertionError: return False try: - assert("NX_class" in f["entry"].attrs.keys()) + assert "NX_class" in f["entry"].attrs.keys() except AssertionError: return False return True + def _is_abTEM(filepath): """ - Check if an h5 file is an abTEM file. + Check if an h5 file is an abTEM file. """ - with h5py.File(filepath,'r') as f: + with h5py.File(filepath, "r") as f: try: - assert("array" in f.keys()) + assert "array" in f.keys() except AssertionError: return False return True + def _is_arina(filepath): """ Check if an h5 file is an Arina file. diff --git a/py4DSTEM/io/read.py b/py4DSTEM/io/read.py index bab555eaf..6dc4ce37e 100644 --- a/py4DSTEM/io/read.py +++ b/py4DSTEM/io/read.py @@ -33,8 +33,9 @@ def read( https://github.com/py4dstem/emdfile. To read file written by older verions of py4DSTEM, different keyword - arguments should be passed. See the docstring for - `py4DSTEM.io.native.legacy.read_py4DSTEM_legacy` for further details. + arguments should be passed. See the docstring for + `py4DSTEM.io.native.legacy.read_py4DSTEM_legacy` for a complete list. + For example, `data_id` may need to be specified to select dataset. Args: filepath (str or Path): the file path @@ -80,7 +81,6 @@ def read( # EMD 1.0 formatted files (py4DSTEM v0.14+) if filetype == "emd": - # check version version = emd._get_EMD_version(filepath) if verbose: @@ -139,7 +139,6 @@ def read( # read v13 if legacy.is_py4DSTEM_version13(filepath): - # load the data if verbose: print("Legacy py4DSTEM version 13 file detected. Reading...") @@ -155,7 +154,6 @@ def read( # read <= v12 else: - # parse the root/data_id from the datapath arg if datapath is not None: datapath = datapath.split("/") diff --git a/py4DSTEM/io/save.py b/py4DSTEM/io/save.py index ce0076724..148f246d5 100644 --- a/py4DSTEM/io/save.py +++ b/py4DSTEM/io/save.py @@ -1,13 +1,8 @@ from emdfile import save as _save import warnings -def save( - filepath, - data, - mode='w', - emdpath=None, - tree=True - ): + +def save(filepath, data, mode="w", emdpath=None, tree=True): """ Saves data to an EMD 1.0 formatted HDF5 file at filepath. 
@@ -19,22 +14,13 @@ def save( cal = None with warnings.catch_warnings(): - warnings.simplefilter('ignore') - if hasattr(data,'calibration') and data.calibration is not None: + warnings.simplefilter("ignore") + if hasattr(data, "calibration") and data.calibration is not None: cal = data.calibration - rp = '/'.join(data._treepath.split('/')[:-1]) - cal['_root_treepath'] = rp + rp = "/".join(data._treepath.split("/")[:-1]) + cal["_root_treepath"] = rp - _save( - filepath, - data = data, - mode = mode, - emdpath = emdpath, - tree = tree - ) + _save(filepath, data=data, mode=mode, emdpath=emdpath, tree=tree) if cal is not None: - del(cal._params['_root_treepath']) - - - + del cal._params["_root_treepath"] diff --git a/py4DSTEM/preprocess/__init__.py b/py4DSTEM/preprocess/__init__.py index 22374c0a7..4ba0bdee0 100644 --- a/py4DSTEM/preprocess/__init__.py +++ b/py4DSTEM/preprocess/__init__.py @@ -3,4 +3,3 @@ from py4DSTEM.preprocess.darkreference import * from py4DSTEM.preprocess.electroncount import * from py4DSTEM.preprocess.radialbkgrd import * - diff --git a/py4DSTEM/preprocess/darkreference.py b/py4DSTEM/preprocess/darkreference.py index a2ab83f4c..a23d4271b 100644 --- a/py4DSTEM/preprocess/darkreference.py +++ b/py4DSTEM/preprocess/darkreference.py @@ -4,6 +4,7 @@ #### Subtract darkreference from datacube frame at (Rx,Ry) #### + def get_bksbtr_DP(datacube, darkref, Rx, Ry): """ Returns a background subtracted diffraction pattern. @@ -16,14 +17,19 @@ def get_bksbtr_DP(datacube, darkref, Rx, Ry): Returns: (ndarray) the background subtracted diffraction pattern """ - assert darkref.shape==(datacube.Q_Nx,datacube.Q_Ny), "background must have shape (datacube.Q_Nx, datacube.Q_Ny)" - return datacube.data[Rx,Ry,:,:].astype(float) - darkref.astype(float) + assert darkref.shape == ( + datacube.Q_Nx, + datacube.Q_Ny, + ), "background must have shape (datacube.Q_Nx, datacube.Q_Ny)" + return datacube.data[Rx, Ry, :, :].astype(float) - darkref.astype(float) #### Get dark reference #### -def get_darkreference(datacube, N_frames, width_x=0, width_y=0, side_x='end', - side_y='end'): + +def get_darkreference( + datacube, N_frames, width_x=0, width_y=0, side_x="end", side_y="end" +): """ Gets a dark reference image. @@ -51,25 +57,36 @@ def get_darkreference(datacube, N_frames, width_x=0, width_y=0, side_x='end', (ndarray): a 2D ndarray of shape (datacube.Q_Nx, datacube.Q_Ny) giving the background. """ - if width_x==0 and width_y==0: - print("Warning: either width_x or width_y should be a positive integer. Returning an empty dark reference.") - return np.zeros((datacube.Q_Nx,datacube.Q_Ny)) - elif width_x==0: - return get_background_streaks_y(datacube=datacube, N_frames=N_frames, - width=width_y,side=side_y) - elif width_y==0: - return get_background_streaks_x(datacube=datacube, N_frames=N_frames, - width=width_x,side=side_x) + if width_x == 0 and width_y == 0: + print( + "Warning: either width_x or width_y should be a positive integer. Returning an empty dark reference."
+ ) + return np.zeros((datacube.Q_Nx, datacube.Q_Ny)) + elif width_x == 0: + return get_background_streaks_y( + datacube=datacube, N_frames=N_frames, width=width_y, side=side_y + ) + elif width_y == 0: + return get_background_streaks_x( + datacube=datacube, N_frames=N_frames, width=width_x, side=side_x + ) else: - darkref_x = get_background_streaks_x(datacube=datacube, N_frames=N_frames, - width=width_x,side=side_x) - darkref_y = get_background_streaks_y(datacube=datacube, N_frames=N_frames, - width=width_y,side=side_y) - return darkref_x + darkref_y - (np.mean(darkref_x)*width_x + \ - np.mean(darkref_y)*width_y)/(width_x+width_y) - # Mean has been added twice; subtract one off - -def get_background_streaks(datacube, N_frames, width, side='end', direction='x'): + darkref_x = get_background_streaks_x( + datacube=datacube, N_frames=N_frames, width=width_x, side=side_x + ) + darkref_y = get_background_streaks_y( + datacube=datacube, N_frames=N_frames, width=width_y, side=side_y + ) + return ( + darkref_x + + darkref_y + - (np.mean(darkref_x) * width_x + np.mean(darkref_y) * width_y) + / (width_x + width_y) + ) + # Mean has been added twice; subtract one off + + +def get_background_streaks(datacube, N_frames, width, side="end", direction="x"): """ Gets background streaking in either the x- or y-direction, by finding the average of a strip of pixels along the edge of the detector over a random selection of @@ -92,78 +109,92 @@ def get_background_streaks(datacube, N_frames, width, side='end', direction='x') (ndarray): a 2D ndarray of shape (datacube.Q_Nx,datacube.Q_Ny), giving the the x- or y-direction background streaking. """ - assert ((direction=='x') or (direction=='y')), "direction must be 'x' or 'y'." - if direction=='x': - return get_background_streaks_x(datacube=datacube, N_frames=N_frames, width=width, side=side) + assert (direction == "x") or (direction == "y"), "direction must be 'x' or 'y'." + if direction == "x": + return get_background_streaks_x( + datacube=datacube, N_frames=N_frames, width=width, side=side + ) else: - return get_background_streaks_y(datacube=datacube, N_frames=N_frames, width=width, side=side) + return get_background_streaks_y( + datacube=datacube, N_frames=N_frames, width=width, side=side + ) -def get_background_streaks_x(datacube, width, N_frames, side='start'): + +def get_background_streaks_x(datacube, width, N_frames, side="start"): """ Gets background streaking, by finding the average of a strip of pixels along the y-edge of the detector over a random selection of diffraction patterns. See docstring for get_background_streaks() for more info. """ - assert N_frames <= datacube.R_Nx*datacube.R_Ny, "N_frames must be less than or equal to the total number of diffraction patterns." - assert ((side=='start') or (side=='end')), "side must be 'start' or 'end'." + assert ( + N_frames <= datacube.R_Nx * datacube.R_Ny + ), "N_frames must be less than or equal to the total number of diffraction patterns." + assert (side == "start") or (side == "end"), "side must be 'start' or 'end'." 
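The body that follows estimates detector streaking by summing a width-pixel strip at the frame edge over N_frames randomly chosen scan positions, collapsing it to a single row of per-column means, and broadcasting that row across the full detector. A compact numpy restatement on synthetic data (all sizes and values hypothetical):

    import numpy as np

    rng = np.random.default_rng(0)
    data = rng.poisson(5.0, size=(8, 8, 64, 64))   # toy (R_Nx, R_Ny, Q_Nx, Q_Ny) cube
    width, N_frames = 4, 16

    frames = data.reshape(-1, 64, 64)              # flatten the scan positions
    picks = rng.choice(frames.shape[0], size=N_frames, replace=False)
    refstrip = frames[picks, :width, :].astype(float).sum(axis=0)   # (width, Q_Ny)

    # per-column streak estimate, then broadcast down the Q_Nx axis
    streaks = refstrip.sum(axis=0) // width // N_frames
    darkref = np.broadcast_to(streaks[np.newaxis, :], (64, 64))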
# Get random subset of DPs - indices = np.arange(datacube.R_Nx*datacube.R_Ny) + indices = np.arange(datacube.R_Nx * datacube.R_Ny) np.random.shuffle(indices) indices = indices[:N_frames] - indices_x, indices_y = np.unravel_index(indices, (datacube.R_Nx,datacube.R_Ny)) + indices_x, indices_y = np.unravel_index(indices, (datacube.R_Nx, datacube.R_Ny)) # Make a reference strip array refstrip = np.zeros((width, datacube.Q_Ny)) - if side=='start': + if side == "start": for i in range(N_frames): - refstrip += datacube.data[indices_x[i], indices_y[i], :width, :].astype(float) + refstrip += datacube.data[indices_x[i], indices_y[i], :width, :].astype( + float + ) else: for i in range(N_frames): - refstrip += datacube.data[indices_x[i], indices_y[i], -width:, :].astype(float) + refstrip += datacube.data[indices_x[i], indices_y[i], -width:, :].astype( + float + ) # Calculate mean and return 1D array of streaks bkgrnd_streaks = np.sum(refstrip, axis=0) // width // N_frames # Broadcast to 2D array - darkref = np.zeros((datacube.Q_Nx,datacube.Q_Ny)) - darkref += bkgrnd_streaks[np.newaxis,:] + darkref = np.zeros((datacube.Q_Nx, datacube.Q_Ny)) + darkref += bkgrnd_streaks[np.newaxis, :] return darkref -def get_background_streaks_y(datacube, N_frames, width, side='start'): + +def get_background_streaks_y(datacube, N_frames, width, side="start"): """ Gets background streaking, by finding the average of a strip of pixels along the x-edge of the detector over a random selection of diffraction patterns. See docstring for get_background_streaks_1D() for more info. """ - assert N_frames <= datacube.R_Nx*datacube.R_Ny, "N_frames must be less than or equal to the total number of diffraction patterns." - assert ((side=='start') or (side=='end')), "side must be 'start' or 'end'." + assert ( + N_frames <= datacube.R_Nx * datacube.R_Ny + ), "N_frames must be less than or equal to the total number of diffraction patterns." + assert (side == "start") or (side == "end"), "side must be 'start' or 'end'." 
# Get random subset of DPs - indices = np.arange(datacube.R_Nx*datacube.R_Ny) + indices = np.arange(datacube.R_Nx * datacube.R_Ny) np.random.shuffle(indices) indices = indices[:N_frames] - indices_x, indices_y = np.unravel_index(indices, (datacube.R_Nx,datacube.R_Ny)) + indices_x, indices_y = np.unravel_index(indices, (datacube.R_Nx, datacube.R_Ny)) # Make a reference strip array refstrip = np.zeros((datacube.Q_Nx, width)) - if side=='start': + if side == "start": for i in range(N_frames): - refstrip += datacube.data[indices_x[i], indices_y[i], :, :width].astype(float) + refstrip += datacube.data[indices_x[i], indices_y[i], :, :width].astype( + float + ) else: for i in range(N_frames): - refstrip += datacube.data[indices_x[i], indices_y[i], :, -width:].astype(float) + refstrip += datacube.data[indices_x[i], indices_y[i], :, -width:].astype( + float + ) # Calculate mean and return 1D array of streaks bkgrnd_streaks = np.sum(refstrip, axis=1) // width // N_frames # Broadcast to 2D array - darkref = np.zeros((datacube.Q_Nx,datacube.Q_Ny)) - darkref += bkgrnd_streaks[:,np.newaxis] + darkref = np.zeros((datacube.Q_Nx, datacube.Q_Ny)) + darkref += bkgrnd_streaks[:, np.newaxis] return darkref - - - - diff --git a/py4DSTEM/preprocess/electroncount.py b/py4DSTEM/preprocess/electroncount.py index ff931fa81..7a498a061 100644 --- a/py4DSTEM/preprocess/electroncount.py +++ b/py4DSTEM/preprocess/electroncount.py @@ -10,12 +10,17 @@ from emdfile import PointListArray from py4DSTEM.preprocess.utils import get_maxima_2D, bin2D -def electron_count(datacube, darkreference, Nsamples=40, - thresh_bkgrnd_Nsigma=4, - thresh_xray_Nsigma=10, - binfactor = 1, - sub_pixel=True, - output='pointlist'): + +def electron_count( + datacube, + darkreference, + Nsamples=40, + thresh_bkgrnd_Nsigma=4, + thresh_xray_Nsigma=10, + binfactor=1, + sub_pixel=True, + output="pointlist", +): """ Performs electron counting. 
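The hunk below applies a two-sided test to each dark-subtracted frame: a pixel registers as an electron only if it rises above the background threshold, stays below the X-ray threshold, and survives the local-maximum filter. The core masking logic, restated with a scipy maximum filter standing in for py4DSTEM's get_maxima_2D (a stand-in only, not the library routine):

    import numpy as np
    from scipy.ndimage import maximum_filter

    def count_frame(frame, darkref, thresh_bkgrnd, thresh_xray):
        # Dark-subtract, then keep pixels inside the (background, X-ray) window.
        working = frame.astype(np.int16) - darkref
        events = (working > thresh_bkgrnd) & (thresh_xray > working)
        # Reduce clusters to single strikes by keeping only 3x3 local maxima.
        events &= working == maximum_filter(working, size=3)
        return np.nonzero(events)   # (qx, qy) strike coordinates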
@@ -54,73 +59,82 @@ def electron_count(datacube, darkreference, Nsamples=40, True indicating electron strikes """ assert isinstance(output, str), "output must be a str" - assert output in ['pointlist', 'datacube'], "output must be 'pointlist' or 'datacube'" + assert output in [ + "pointlist", + "datacube", + ], "output must be 'pointlist' or 'datacube'" # Get dimensions - R_Nx,R_Ny,Q_Nx,Q_Ny = np.shape(datacube) + R_Nx, R_Ny, Q_Nx, Q_Ny = np.shape(datacube) # Get threshholds - print('Calculating threshholds') + print("Calculating threshholds") thresh_bkgrnd, thresh_xray = calculate_thresholds( - datacube, - darkreference, - Nsamples=Nsamples, - thresh_bkgrnd_Nsigma=thresh_bkgrnd_Nsigma, - thresh_xray_Nsigma=thresh_xray_Nsigma) + datacube, + darkreference, + Nsamples=Nsamples, + thresh_bkgrnd_Nsigma=thresh_bkgrnd_Nsigma, + thresh_xray_Nsigma=thresh_xray_Nsigma, + ) # Save to a new datacube - if output=='datacube': - counted = np.ones((R_Nx,R_Ny,Q_Nx//binfactor,Q_Ny//binfactor)) + if output == "datacube": + counted = np.ones((R_Nx, R_Ny, Q_Nx // binfactor, Q_Ny // binfactor)) # Loop through frames for Rx in range(R_Nx): for Ry in range(R_Ny): - frame = datacube[Rx,Ry,:,:].astype(np.int16) # Get frame from file - workingarray = frame-darkreference # Subtract dark ref from frame - events = workingarray>thresh_bkgrnd # Threshold electron events - events *= thresh_xray>workingarray + frame = datacube[Rx, Ry, :, :].astype(np.int16) # Get frame from file + workingarray = frame - darkreference # Subtract dark ref from frame + events = workingarray > thresh_bkgrnd # Threshold electron events + events *= thresh_xray > workingarray ## Keep events which are greater than all NN pixels ## - events = get_maxima_2D(workingarray*events) + events = get_maxima_2D(workingarray * events) - if(binfactor>1): + if binfactor > 1: # Perform binning - counted[Rx,Ry,:,:]=bin2D(events, factor=binfactor) + counted[Rx, Ry, :, :] = bin2D(events, factor=binfactor) else: - counted[Rx,Ry,:,:]=events + counted[Rx, Ry, :, :] = events return counted # Save to a PointListArray else: - coordinates = [('qx',int),('qy',int)] - pointlistarray = PointListArray(coordinates=coordinates, shape=(R_Nx,R_Ny)) + coordinates = [("qx", int), ("qy", int)] + pointlistarray = PointListArray(coordinates=coordinates, shape=(R_Nx, R_Ny)) # Loop through frames for Rx in range(R_Nx): for Ry in range(R_Ny): - frame = datacube[Rx,Ry,:,:].astype(np.int16) # Get frame from file - workingarray = frame-darkreference # Subtract dark ref from frame - events = workingarray>thresh_bkgrnd # Threshold electron events - events *= thresh_xray>workingarray + frame = datacube[Rx, Ry, :, :].astype(np.int16) # Get frame from file + workingarray = frame - darkreference # Subtract dark ref from frame + events = workingarray > thresh_bkgrnd # Threshold electron events + events *= thresh_xray > workingarray ## Keep events which are greater than all NN pixels ## - events = get_maxima_2D(workingarray*events) + events = get_maxima_2D(workingarray * events) # Perform binning - if(binfactor>1): - events=bin2D(events, factor=binfactor) + if binfactor > 1: + events = bin2D(events, factor=binfactor) # Save to PointListArray - x,y = np.nonzero(events) - pointlist = pointlistarray.get_pointlist(Rx,Ry) - pointlist.add_tuple_of_nparrays((x,y)) + x, y = np.nonzero(events) + pointlist = pointlistarray.get_pointlist(Rx, Ry) + pointlist.add_tuple_of_nparrays((x, y)) return pointlistarray -def electron_count_GPU(datacube, darkreference, Nsamples=40, - thresh_bkgrnd_Nsigma=4, - 
thresh_xray_Nsigma=10, - binfactor = 1, - sub_pixel=True, - output='pointlist'): + +def electron_count_GPU( + datacube, + darkreference, + Nsamples=40, + thresh_bkgrnd_Nsigma=4, + thresh_xray_Nsigma=10, + binfactor=1, + sub_pixel=True, + output="pointlist", +): """ Performs electron counting on the GPU. @@ -130,77 +144,105 @@ def electron_count_GPU(datacube, darkreference, Nsamples=40, """ import torch import dm + assert isinstance(output, str), "output must be a str" - assert output in ['pointlist', 'datacube'], "output must be 'pointlist' or 'datacube'" + assert output in [ + "pointlist", + "datacube", + ], "output must be 'pointlist' or 'datacube'" # Get dimensions - R_Nx,R_Ny,Q_Nx,Q_Ny = np.shape(datacube) + R_Nx, R_Ny, Q_Nx, Q_Ny = np.shape(datacube) # Get threshholds - print('Calculating threshholds') - thresh_bkgrnd, thresh_xray = calculate_thresholds(datacube, - darkreference, - Nsamples=Nsamples, - thresh_bkgrnd_Nsigma=thresh_bkgrnd_Nsigma, - thresh_xray_Nsigma=thresh_xray_Nsigma) + print("Calculating threshholds") + thresh_bkgrnd, thresh_xray = calculate_thresholds( + datacube, + darkreference, + Nsamples=Nsamples, + thresh_bkgrnd_Nsigma=thresh_bkgrnd_Nsigma, + thresh_xray_Nsigma=thresh_xray_Nsigma, + ) # Make a torch device object, to interface numpy with the GPU # Put a few arrays on it - dark reference, counted image - device = torch.device('cuda') + device = torch.device("cuda") darkref = torch.from_numpy(darkreference.astype(np.int16)).to(device) - counted = torch.ones(R_Nx,R_Ny,Q_Nx//binfactor,Q_Ny//binfactor,dtype=torch.short).to(device) + counted = torch.ones( + R_Nx, R_Ny, Q_Nx // binfactor, Q_Ny // binfactor, dtype=torch.short + ).to(device) # Loop through frames for Rx in range(R_Nx): for Ry in range(R_Ny): - frame = datacube[Rx,Ry,:,:].astype(np.int16) # Get frame from file - gframe = torch.from_numpy(frame).to(device) # Move frame to GPU - workingarray = gframe-darkref # Subtract dark ref from frame - events = workingarray>thresh_bkgrnd # Threshold electron events - events = thresh_xray>workingarray + frame = datacube[Rx, Ry, :, :].astype(np.int16) # Get frame from file + gframe = torch.from_numpy(frame).to(device) # Move frame to GPU + workingarray = gframe - darkref # Subtract dark ref from frame + events = workingarray > thresh_bkgrnd # Threshold electron events + events = thresh_xray > workingarray ## Keep events which are greater than all NN pixels ## - #Check pixel is greater than all adjacent pixels - log = workingarray[1:-1,:]>workingarray[0:-2,:] - events[1:-1,:] = events[1:-1,:] & log - log = workingarray[0:-2,:]>workingarray[1:-1,:] - events[0:-2,:] = events[0:-2,:] & log - log = workingarray[:,1:-1]>workingarray[:,0:-2] - events[:,1:-1] = events[:,1:-1] & log - log = workingarray[:,0:-2]>workingarray[:,1:-1] - events[:,0:-2] = events[:,0:-2] & log - #Check pixel is greater than adjacent diagonal pixels - log = workingarray[1:-1,1:-1]>workingarray[0:-2,0:-2] - events[1:-1,1:-1] = events[1:-1,1:-1] & log - log = workingarray[0:-2,1:-1]>workingarray[1:-1,0:-2] - events[0:-2,1:-1] = events[0:-2,1:-1] & log - log = workingarray[1:-1,0:-2]>workingarray[0:-2,1:-1] - events[2:-1,0:-2] = events[1:-1,0:-2] & log - log = workingarray[0:-2,0:-2]>workingarray[1:-1,1:-1] - events[0:-2,0:-2] = events[0:-2,0:-2] & log - - if(binfactor>1): + # Check pixel is greater than all adjacent pixels + log = workingarray[1:-1, :] > workingarray[0:-2, :] + events[1:-1, :] = events[1:-1, :] & log + log = workingarray[0:-2, :] > workingarray[1:-1, :] + events[0:-2, :] = events[0:-2, 
:] & log + log = workingarray[:, 1:-1] > workingarray[:, 0:-2] + events[:, 1:-1] = events[:, 1:-1] & log + log = workingarray[:, 0:-2] > workingarray[:, 1:-1] + events[:, 0:-2] = events[:, 0:-2] & log + # Check pixel is greater than adjacent diagonal pixels + log = workingarray[1:-1, 1:-1] > workingarray[0:-2, 0:-2] + events[1:-1, 1:-1] = events[1:-1, 1:-1] & log + log = workingarray[0:-2, 1:-1] > workingarray[1:-1, 0:-2] + events[0:-2, 1:-1] = events[0:-2, 1:-1] & log + log = workingarray[1:-1, 0:-2] > workingarray[0:-2, 1:-1] + events[2:-1, 0:-2] = events[1:-1, 0:-2] & log + log = workingarray[0:-2, 0:-2] > workingarray[1:-1, 1:-1] + events[0:-2, 0:-2] = events[0:-2, 0:-2] & log + + if binfactor > 1: # Perform binning on GPU in torch_bin function - counted[Rx,Ry,:,:]=torch.transpose(torch_bin(events.type(torch.cuda.ShortTensor), - device,factor=binfactor),0,1).flip(0).flip(1) + counted[Rx, Ry, :, :] = ( + torch.transpose( + torch_bin( + events.type(torch.cuda.ShortTensor), + device, + factor=binfactor, + ), + 0, + 1, + ) + .flip(0) + .flip(1) + ) else: # I'm not sure I understand this - we're flipping coordinates to match what? # TODO: check array flipping - may vary by camera - counted[Rx,Ry,:,:]=torch.transpose(events.type(torch.cuda.ShortTensor),0,1).flip(0).flip(1) + counted[Rx, Ry, :, :] = ( + torch.transpose(events.type(torch.cuda.ShortTensor), 0, 1) + .flip(0) + .flip(1) + ) - if output=='datacube': + if output == "datacube": return counted.cpu().numpy() else: return counted_datacube_to_pointlistarray(counted) + ####### Support functions ######## -def calculate_thresholds(datacube, darkreference, - Nsamples=20, - thresh_bkgrnd_Nsigma=4, - thresh_xray_Nsigma=10, - return_params=False): + +def calculate_thresholds( + datacube, + darkreference, + Nsamples=20, + thresh_bkgrnd_Nsigma=4, + thresh_xray_Nsigma=10, + return_params=False, +): """ Calculate the upper and lower thresholds for thresholding what to register as an electron count. @@ -237,47 +279,49 @@ def calculate_thresholds(datacube, darkreference, * **popt**: returned iff return_params==True. The fit gaussian parameters, (A, mu, sigma). 
""" - R_Nx,R_Ny,Q_Nx,Q_Ny = datacube.shape + R_Nx, R_Ny, Q_Nx, Q_Ny = datacube.shape # Select random set of frames - nframes = R_Nx*R_Ny + nframes = R_Nx * R_Ny samples = np.arange(nframes) np.random.shuffle(samples) samples = samples[:Nsamples] # Get frames and subtract dark references - sample = np.zeros((Q_Nx,Q_Ny,Nsamples),dtype=np.int16) + sample = np.zeros((Q_Nx, Q_Ny, Nsamples), dtype=np.int16) for i in range(Nsamples): - sample[:,:,i] = datacube[samples[i]//R_Nx,samples[i]%R_Ny,:,:] - sample[:,:,i] -= darkreference + sample[:, :, i] = datacube[samples[i] // R_Nx, samples[i] % R_Ny, :, :] + sample[:, :, i] -= darkreference sample = np.ravel(sample) # Flatten array # Get upper (X-ray) threshold mean = np.mean(sample) stddev = np.std(sample) - thresh_xray = mean+thresh_xray_Nsigma*stddev + thresh_xray = mean + thresh_xray_Nsigma * stddev # Make a histogram - binmax = min(int(np.ceil(np.amax(sample))),int(mean+thresh_xray*stddev)) - binmin = max(int(np.ceil(np.amin(sample))),int(mean-thresh_xray*stddev)) - step = max(1,(binmax-binmin)//1000) - bins = np.arange(binmin,binmax,step=step,dtype=np.int16) - n, bins = np.histogram(sample,bins=bins) + binmax = min(int(np.ceil(np.amax(sample))), int(mean + thresh_xray * stddev)) + binmin = max(int(np.ceil(np.amin(sample))), int(mean - thresh_xray * stddev)) + step = max(1, (binmax - binmin) // 1000) + bins = np.arange(binmin, binmax, step=step, dtype=np.int16) + n, bins = np.histogram(sample, bins=bins) # Define Guassian to fit to, with parameters p: # p[0] is amplitude # p[1] is the mean # p[2] is std deviation - fitfunc = lambda p, x: p[0]*np.exp(-0.5*np.square((x-p[1])/p[2])) - errfunc = lambda p, x, y: fitfunc(p, x) - y # Error for scipy's optimize routine + fitfunc = lambda p, x: p[0] * np.exp(-0.5 * np.square((x - p[1]) / p[2])) + errfunc = lambda p, x, y: fitfunc(p, x) - y # Error for scipy's optimize routine # Get initial guess - p0 = [n.max(),(bins[n.argmax()+1]-bins[n.argmax()])/2,np.std(sample)] - p1,success = optimize.leastsq(errfunc,p0[:],args=(bins[:-1],n)) # Use the scipy optimize routine - p1[1] += 0.5 #Add a half to account for integer bin width + p0 = [n.max(), (bins[n.argmax() + 1] - bins[n.argmax()]) / 2, np.std(sample)] + p1, success = optimize.leastsq( + errfunc, p0[:], args=(bins[:-1], n) + ) # Use the scipy optimize routine + p1[1] += 0.5 # Add a half to account for integer bin width # Set lower threshhold for electron counts to count - thresh_bkgrnd = p1[1]+p1[2]*thresh_bkgrnd_Nsigma + thresh_bkgrnd = p1[1] + p1[2] * thresh_bkgrnd_Nsigma if return_params: return thresh_bkgrnd, thresh_xray, n, bins, p1 @@ -285,7 +329,7 @@ def calculate_thresholds(datacube, darkreference, return thresh_bkgrnd, thresh_xray -def torch_bin(array,device,factor=2): +def torch_bin(array, device, factor=2): """ Bin data on the GPU using torch. 
@@ -300,19 +344,20 @@ def torch_bin(array,device,factor=2): import torch - x,y = array.shape - binx,biny = x//factor,y//factor - xx,yy = binx*factor,biny*factor + x, y = array.shape + binx, biny = x // factor, y // factor + xx, yy = binx * factor, biny * factor # Make a binned array on the device - binned_ar = torch.zeros(biny,binx,device=device,dtype = array.dtype) + binned_ar = torch.zeros(biny, binx, device=device, dtype=array.dtype) # Collect pixel sums into new bins for ix in range(factor): for iy in range(factor): - binned_ar += array[0+ix:xx+ix:factor,0+iy:yy+iy:factor] + binned_ar += array[0 + ix : xx + ix : factor, 0 + iy : yy + iy : factor] return binned_ar + def counted_datacube_to_pointlistarray(counted_datacube, subpixel=False): """ Converts an electron counted datacube to PointListArray. @@ -325,23 +370,24 @@ def counted_datacube_to_pointlistarray(counted_datacube, subpixel=False): (PointListArray): a PointListArray of electron strike events """ # Get shape, initialize PointListArray - R_Nx,R_Ny,Q_Nx,Q_Ny = counted_datacube.shape + R_Nx, R_Ny, Q_Nx, Q_Ny = counted_datacube.shape if subpixel: - coordinates = [('qx',float),('qy',float)] + coordinates = [("qx", float), ("qy", float)] else: - coordinates = [('qx',int),('qy',int)] - pointlistarray = PointListArray(coordinates=coordinates, shape=(R_Nx,R_Ny)) + coordinates = [("qx", int), ("qy", int)] + pointlistarray = PointListArray(coordinates=coordinates, shape=(R_Nx, R_Ny)) # Loop through frames, adding electron counts to the PointListArray for each. for Rx in range(R_Nx): for Ry in range(R_Ny): - frame = counted_datacube[Rx,Ry,:,:] - x,y = np.nonzero(frame) - pointlist = pointlistarray.get_pointlist(Rx,Ry) - pointlist.add_tuple_of_nparrays((x,y)) + frame = counted_datacube[Rx, Ry, :, :] + x, y = np.nonzero(frame) + pointlist = pointlistarray.get_pointlist(Rx, Ry) + pointlist.add_tuple_of_nparrays((x, y)) return pointlistarray + def counted_pointlistarray_to_datacube(counted_pointlistarray, shape, subpixel=False): """ Converts an electron counted PointListArray to a datacube. @@ -355,28 +401,26 @@ def counted_pointlistarray_to_datacube(counted_pointlistarray, shape, subpixel=F Returns: (4D array of bools): a 4D array of bools, with true indicating an electron strike. """ - assert len(shape)==4 - assert subpixel==False, "subpixel mode not presently supported." - R_Nx,R_Ny,Q_Nx,Q_Ny = shape - counted_datacube = np.zeros((R_Nx,R_Nx,Q_Nx,Q_Ny),dtype=bool) + assert len(shape) == 4 + assert subpixel == False, "subpixel mode not presently supported." + R_Nx, R_Ny, Q_Nx, Q_Ny = shape + counted_datacube = np.zeros((R_Nx, R_Ny, Q_Nx, Q_Ny), dtype=bool) # Loop through frames, adding electron counts to the datacube for each.
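counted_pointlistarray_to_datacube inverts the conversion above, scattering each frame's (qx, qy) strike coordinates back into a boolean 4D cube with a fancy-indexed assignment. A self-contained round trip, with a plain dict standing in for PointListArray (a stand-in only):

    import numpy as np

    R_Nx, R_Ny, Q_Nx, Q_Ny = 2, 3, 8, 8
    counts = {(rx, ry): (np.array([1, 4]), np.array([2, 7]))
              for rx in range(R_Nx) for ry in range(R_Ny)}

    cube = np.zeros((R_Nx, R_Ny, Q_Nx, Q_Ny), dtype=bool)
    for (rx, ry), (qx, qy) in counts.items():
        cube[rx, ry, qx, qy] = True   # scatter this scan position's strikes

    assert cube[0, 0, 1, 2] and cube[1, 2, 4, 7]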
for Rx in range(R_Nx): for Ry in range(R_Ny): - pointlist = counted_pointlistarray.get_pointlist(Rx,Ry) - counted_datacube[Rx,Ry,pointlist.data['qx'],pointlist.data['qy']] = True + pointlist = counted_pointlistarray.get_pointlist(Rx, Ry) + counted_datacube[Rx, Ry, pointlist.data["qx"], pointlist.data["qy"]] = True return counted_datacube - -if __name__=="__main__": - +if __name__ == "__main__": from py4DSTEM.process.preprocess import get_darkreference from py4DSTEM.io import DataCube, save from ncempy.io import dm - dm4_filepath = 'Capture25.dm4' + dm4_filepath = "Capture25.dm4" # Parameters for dark reference determination drwidth = 100 @@ -387,29 +431,29 @@ def counted_pointlistarray_to_datacube(counted_pointlistarray, shape, subpixel=F thresh_xray_Nsigma = 30 binfactor = 1 subpixel = False - output = 'pointlist' - + output = "pointlist" # Get memory mapped 4D datacube from dm file - datacube = dm.dmReader(dm4_filepath,dSetNum=0,verbose=False)['data'] - datacube = np.moveaxis(datacube,(0,1),(2,3)) + datacube = dm.dmReader(dm4_filepath, dSetNum=0, verbose=False)["data"] + datacube = np.moveaxis(datacube, (0, 1), (2, 3)) # Get dark reference - darkreference = 1 # TODO: get_darkreference(datacube = ...! - - electron_counted_data = electron_count(datacube, darkreference, Nsamples=Nsamples, - thresh_bkgrnd_Nsigma=thresh_bkgrnd_Nsigma, - thresh_xray_Nsigma=thresh_xray_Nsigma, - binfactor=binfactor, - sub_pixel=True, - output='pointlist') + darkreference = 1 # TODO: get_darkreference(datacube = ...! + + electron_counted_data = electron_count( + datacube, + darkreference, + Nsamples=Nsamples, + thresh_bkgrnd_Nsigma=thresh_bkgrnd_Nsigma, + thresh_xray_Nsigma=thresh_xray_Nsigma, + binfactor=binfactor, + sub_pixel=True, + output="pointlist", + ) # For outputting datacubes, wrap counted into a py4DSTEM DataCube - if output=='datacube': + if output == "datacube": electron_counted_data = DataCube(data=electron_counted_data) - output_path = dm4_filepath.replace('.dm4','.h5') + output_path = dm4_filepath.replace(".dm4", ".h5") save(electron_counted_data, output_path) - - - diff --git a/py4DSTEM/preprocess/preprocess.py b/py4DSTEM/preprocess/preprocess.py index 9b67392d1..fb4983622 100644 --- a/py4DSTEM/preprocess/preprocess.py +++ b/py4DSTEM/preprocess/preprocess.py @@ -29,14 +29,8 @@ def set_scan_shape(datacube, R_Nx, R_Ny): # set dim vectors Rpixsize = datacube.calibration.get_R_pixel_size() Rpixunits = datacube.calibration.get_R_pixel_units() - datacube.set_dim( - 0, - [0,Rpixsize], - units = Rpixunits) - datacube.set_dim( - 1, - [0,Rpixsize], - units = Rpixunits) + datacube.set_dim(0, [0, Rpixsize], units=Rpixunits) + datacube.set_dim(1, [0, Rpixsize], units=Rpixunits) # return return datacube @@ -73,30 +67,10 @@ def swap_RQ(datacube): Rpixunits = datacube.calibration.get_R_pixel_units() Qpixsize = datacube.calibration.get_Q_pixel_size() Qpixunits = datacube.calibration.get_Q_pixel_units() - datacube.set_dim( - 0, - [0,Rpixsize], - units = Rpixunits, - name = 'Rx' - ) - datacube.set_dim( - 1, - [0,Rpixsize], - units = Rpixunits, - name = 'Ry' - ) - datacube.set_dim( - 2, - [0,Qpixsize], - units = Qpixunits, - name = 'Qx' - ) - datacube.set_dim( - 3, - [0,Qpixsize], - units = Qpixunits, - name = 'Qy' - ) + datacube.set_dim(0, [0, Rpixsize], units=Rpixunits, name="Rx") + datacube.set_dim(1, [0, Rpixsize], units=Rpixunits, name="Ry") + datacube.set_dim(2, [0, Qpixsize], units=Qpixunits, name="Qx") + datacube.set_dim(3, [0, Qpixsize], units=Qpixunits, name="Qy") # return return datacube @@ -120,18 +94,8 
@@ def swap_Rxy(datacube): # set dim vectors Rpixsize = datacube.calibration.get_R_pixel_size() Rpixunits = datacube.calibration.get_R_pixel_units() - datacube.set_dim( - 0, - [0,Rpixsize], - units = Rpixunits, - name = 'Rx' - ) - datacube.set_dim( - 1, - [0,Rpixsize], - units = Rpixunits, - name = 'Ry' - ) + datacube.set_dim(0, [0, Rpixsize], units=Rpixunits, name="Rx") + datacube.set_dim(1, [0, Rpixsize], units=Rpixunits, name="Ry") # return return datacube @@ -165,18 +129,8 @@ def crop_data_diffraction(datacube, crop_Qx_min, crop_Qx_max, crop_Qy_min, crop_ # set dim vectors Qpixsize = datacube.calibration.get_Q_pixel_size() Qpixunits = datacube.calibration.get_Q_pixel_units() - datacube.set_dim( - 2, - [0,Qpixsize], - units = Qpixunits, - name = 'Qx' - ) - datacube.set_dim( - 3, - [0,Qpixsize], - units = Qpixunits, - name = 'Qy' - ) + datacube.set_dim(2, [0, Qpixsize], units=Qpixunits, name="Qx") + datacube.set_dim(3, [0, Qpixsize], units=Qpixunits, name="Qy") # return return datacube @@ -191,28 +145,14 @@ def crop_data_real(datacube, crop_Rx_min, crop_Rx_max, crop_Ry_min, crop_Ry_max) # set dim vectors Rpixsize = datacube.calibration.get_R_pixel_size() Rpixunits = datacube.calibration.get_R_pixel_units() - datacube.set_dim( - 0, - [0,Rpixsize], - units = Rpixunits, - name = 'Rx' - ) - datacube.set_dim( - 1, - [0,Rpixsize], - units = Rpixunits, - name = 'Ry' - ) + datacube.set_dim(0, [0, Rpixsize], units=Rpixunits, name="Rx") + datacube.set_dim(1, [0, Rpixsize], units=Rpixunits, name="Ry") # return return datacube -def bin_data_diffraction( - datacube, - bin_factor, - dtype=None - ): +def bin_data_diffraction(datacube, bin_factor, dtype=None): """ Performs diffraction space binning of data by bin_factor. @@ -226,8 +166,7 @@ def bin_data_diffraction( """ # validate inputs - assert(type(bin_factor) is int - ), f"Error: binning factor {bin_factor} is not an int." + assert type(bin_factor) is int, f"Error: binning factor {bin_factor} is not an int." 
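The reshape-and-sum just below is the standard numpy idiom for block binning: split each detector axis into (Q // b, b) and sum the two length-b axes. A worked toy example (shapes and dtypes hypothetical):

    import numpy as np

    dc = np.ones((4, 4, 128, 128), dtype=np.uint16)   # toy 4D-STEM datacube
    b = 4
    binned = (
        dc.reshape(4, 4, 128 // b, b, 128 // b, b)
        .sum(axis=(3, 5))        # collapse the two length-b block axes
        .astype(np.uint32)       # widen so b*b-pixel sums cannot overflow
    )
    assert binned.shape == (4, 4, 32, 32)
    assert binned[0, 0, 0, 0] == b * b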
if bin_factor == 1: return datacube if dtype is None: @@ -253,37 +192,29 @@ def bin_data_diffraction( ] # bin - datacube.data = datacube.data.reshape( - R_Nx, - R_Ny, - int(Q_Nx / bin_factor), - bin_factor, - int(Q_Ny / bin_factor), - bin_factor, - ).sum(axis=(3, 5)).astype(dtype) + datacube.data = ( + datacube.data.reshape( + R_Nx, + R_Ny, + int(Q_Nx / bin_factor), + bin_factor, + int(Q_Ny / bin_factor), + bin_factor, + ) + .sum(axis=(3, 5)) + .astype(dtype) + ) # set dim vectors Qpixsize = datacube.calibration.get_Q_pixel_size() * bin_factor Qpixunits = datacube.calibration.get_Q_pixel_units() - # set dims - datacube.set_dim( - 2, - [0,Qpixsize], - units = Qpixunits, - name = 'Qx' - ) - datacube.set_dim( - 3, - [0,Qpixsize], - units = Qpixunits, - name = 'Qy' - ) + datacube.set_dim(2, [0, Qpixsize], units=Qpixunits, name="Qx") + datacube.set_dim(3, [0, Qpixsize], units=Qpixunits, name="Qy") # set calibration pixel size datacube.calibration.set_Q_pixel_size(Qpixsize) - # return return datacube @@ -300,7 +231,11 @@ def bin_data_mmap(datacube, bin_factor, dtype=np.float32): # get shape R_Nx, R_Ny, Q_Nx, Q_Ny = ( - datacube.R_Nx, datacube.R_Ny, datacube.Q_Nx, datacube.Q_Ny) + datacube.R_Nx, + datacube.R_Ny, + datacube.Q_Nx, + datacube.Q_Ny, + ) # allocate space data = np.zeros( ( @@ -319,18 +254,8 @@ def bin_data_mmap(datacube, bin_factor, dtype=np.float32): # set dim vectors Qpixsize = datacube.calibration.get_Q_pixel_size() * bin_factor Qpixunits = datacube.calibration.get_Q_pixel_units() - datacube.set_dim( - 2, - [0,Qpixsize], - units = Qpixunits, - name = 'Qx' - ) - datacube.set_dim( - 3, - [0,Qpixsize], - units = Qpixunits, - name = 'Qy' - ) + datacube.set_dim(2, [0, Qpixsize], units=Qpixunits, name="Qx") + datacube.set_dim(3, [0, Qpixsize], units=Qpixunits, name="Qy") # set calibration pixel size datacube.calibration.set_Q_pixel_size(Qpixsize) @@ -343,8 +268,7 @@ def bin_data_real(datacube, bin_factor): Performs real space binning of data by bin_factor. """ # validate inputs - assert(type(bin_factor) is int - ), f"Bin factor {bin_factor} is not an int." + assert type(bin_factor) is int, f"Bin factor {bin_factor} is not an int."
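Throughout these binning routines the calibration bookkeeping follows one invariant: binning by b multiplies the pixel size by b and divides the pixel count by b, so the calibrated field of view is unchanged. A quick consistency check (numbers hypothetical; reading the two-element [0, Qpixsize] passed to set_dim above as an origin-and-step pair is an assumption about the emdfile dim convention):

    Q_Nx, q_pix, b = 512, 0.02, 4          # pixels, size per pixel, bin factor
    Q_Nx_binned, q_pix_binned = Q_Nx // b, q_pix * b

    # detector extent in calibrated units is preserved by binning
    assert Q_Nx * q_pix == Q_Nx_binned * q_pix_binned

    dim = [0, q_pix_binned]                # origin-and-step pair, as passed to set_dim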
if bin_factor <= 1: return datacube @@ -379,18 +303,8 @@ def bin_data_real(datacube, bin_factor): # set dim vectors Rpixsize = datacube.calibration.get_R_pixel_size() * bin_factor Rpixunits = datacube.calibration.get_R_pixel_units() - datacube.set_dim( - 0, - [0,Rpixsize], - units = Rpixunits, - name = 'Rx' - ) - datacube.set_dim( - 1, - [0,Rpixsize], - units = Rpixunits, - name = 'Ry' - ) + datacube.set_dim(0, [0, Rpixsize], units=Rpixunits, name="Rx") + datacube.set_dim(1, [0, Rpixsize], units=Rpixunits, name="Ry") # set calibration pixel size datacube.calibration.set_R_pixel_size(Rpixsize) @@ -414,7 +328,6 @@ def thin_data_real(datacube, thinning_factor): # populate data for rx, ry in tqdmnd(Rshapef[0], Rshapef[1]): - rx0 = rx * thinning_factor ry0 = ry * thinning_factor data[rx, ry, :, :] = datacube[rx0, ry0, :, :] @@ -424,18 +337,8 @@ def thin_data_real(datacube, thinning_factor): # set dim vectors Rpixsize = datacube.calibration.get_R_pixel_size() * thinning_factor Rpixunits = datacube.calibration.get_R_pixel_units() - datacube.set_dim( - 0, - [0,Rpixsize], - units = Rpixunits, - name = 'Rx' - ) - datacube.set_dim( - 1, - [0,Rpixsize], - units = Rpixunits, - name = 'Ry' - ) + datacube.set_dim(0, [0, Rpixsize], units=Rpixunits, name="Rx") + datacube.set_dim(1, [0, Rpixsize], units=Rpixunits, name="Ry") # set calibration pixel size datacube.calibration.set_R_pixel_size(Rpixsize) @@ -516,10 +419,7 @@ def filter_hot_pixels(datacube, thresh, ind_compare=1, return_mask=False): axis=0, ) # arry of the ind_compare'th pixel intensity - diff_compare = np.reshape( - diff_local_med[-ind_compare - 1, :], - shape - ) + diff_compare = np.reshape(diff_local_med[-ind_compare - 1, :], shape) # Generate mask mask = diff_mean - diff_compare > thresh @@ -535,20 +435,20 @@ def filter_hot_pixels(datacube, thresh, ind_compare=1, return_mask=False): # Otherwise, apply filtering # Get masked indices - x_ma,y_ma = np.nonzero(mask) + x_ma, y_ma = np.nonzero(mask) # Get local windows for each masked pixel - xslices,yslices = [],[] - for xm,ym in zip(x_ma,y_ma): - xslice,yslice = slice(xm-1,xm+2),slice(ym-1,ym+2) - if xslice.start<0: - xslice = slice(0,xslice.stop) - elif xslice.stop>shape[0]: - xslice = slice(xslice.start,shape[0]) - if yslice.start<0: - yslice = slice(0,yslice.stop) - elif yslice.stop>shape[1]: - yslice = slice(yslice.start,shape[1]) + xslices, yslices = [], [] + for xm, ym in zip(x_ma, y_ma): + xslice, yslice = slice(xm - 1, xm + 2), slice(ym - 1, ym + 2) + if xslice.start < 0: + xslice = slice(0, xslice.stop) + elif xslice.stop > shape[0]: + xslice = slice(xslice.start, shape[0]) + if yslice.start < 0: + yslice = slice(0, yslice.stop) + elif yslice.stop > shape[1]: + yslice = slice(yslice.start, shape[1]) xslices.append(xslice) yslices.append(yslice) @@ -556,19 +456,12 @@ def filter_hot_pixels(datacube, thresh, ind_compare=1, return_mask=False): for ax, ay in tqdmnd( *(datacube.R_Nx, datacube.R_Ny), desc="Cleaning pixels", unit=" images" ): - for xm,ym,xs,ys in zip( - x_ma, - y_ma, - xslices, - yslices - ): - datacube.data[ax, ay, xm, ym] = np.median( - datacube.data[ax, ay, xs, ys] - ) + for xm, ym, xs, ys in zip(x_ma, y_ma, xslices, yslices): + datacube.data[ax, ay, xm, ym] = np.median(datacube.data[ax, ay, xs, ys]) # Calculate local 3x3 median images - #im_med = median_filter(datacube.data[ax, ay, :, :], size=3, mode="nearest") - #datacube.data[ax, ay, :, :][mask] = im_med[mask] + # im_med = median_filter(datacube.data[ax, ay, :, :], size=3, mode="nearest") + # datacube.data[ax, ay, :, 
:][mask] = im_med[mask] # Return if return_mask is True: @@ -652,13 +545,14 @@ def resample_data_diffraction( if not resampling_factor: resampling_factor = output_size[0] / old_size[2] if datacube.calibration.get_Q_pixel_size() is not None: - datacube.calibration.set_Q_pixel_size(datacube.calibration.get_Q_pixel_size() / resampling_factor) + datacube.calibration.set_Q_pixel_size( + datacube.calibration.get_Q_pixel_size() / resampling_factor + ) elif method == "bilinear": from scipy.ndimage import zoom if resampling_factor is not None: - if output_size is not None: raise ValueError( "Only one of 'resampling_factor' or 'output_size' can be specified." @@ -669,7 +563,6 @@ def resample_data_diffraction( resampling_factor = np.tile(resampling_factor, 2) else: - if output_size is None: raise ValueError( "At-least one of 'resampling_factor' or 'output_size' must be specified." @@ -684,7 +577,9 @@ def resample_data_diffraction( resampling_factor = np.concatenate(((1, 1), resampling_factor)) datacube.data = zoom(datacube.data, resampling_factor, order=1) - datacube.calibration.set_Q_pixel_size(datacube.calibration.get_Q_pixel_size() / resampling_factor[2]) + datacube.calibration.set_Q_pixel_size( + datacube.calibration.get_Q_pixel_size() / resampling_factor[2] + ) else: raise ValueError( @@ -701,7 +596,6 @@ def pad_data_diffraction(datacube, pad_factor=None, output_size=None): Qx, Qy = datacube.shape[-2:] if pad_factor is not None: - if output_size is not None: raise ValueError( "Only one of 'pad_factor' or 'output_size' can be specified." @@ -720,7 +614,6 @@ def pad_data_diffraction(datacube, pad_factor=None, output_size=None): pad_ky = (pad_ky, pad_ky) else: - if output_size is None: raise ValueError( "At-least one of 'pad_factor' or 'output_size' must be specified." 
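The resampling hunks above keep the calibration consistent by dividing the stored Q pixel size by the resampling factor: upsampling a pattern by `f` leaves the covered q-range unchanged but spreads it over `f` times as many pixels. A short sanity check of that bookkeeping, using `scipy.ndimage.zoom` as in the bilinear branch (illustrative numbers only):

```python
import numpy as np
from scipy.ndimage import zoom

dp = np.ones((8, 8))                  # stand-in diffraction pattern
f = 2.0                               # resampling factor
dp_up = zoom(dp, f, order=1)          # bilinear interpolation, as above
q_pixel_size = 0.02                   # pretend calibration, in A^-1 per pixel
print(dp_up.shape, q_pixel_size / f)  # (16, 16) 0.01 -- same q-range, finer grid
```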
@@ -751,32 +644,12 @@ def pad_data_diffraction(datacube, pad_factor=None, output_size=None): datacube.data = np.pad(datacube.data, pad_width=pad_width, mode="constant") - Qpixsize = datacube.calibration.get_Q_pixel_size() + Qpixsize = datacube.calibration.get_Q_pixel_size() Qpixunits = datacube.calibration.get_Q_pixel_units() - datacube.set_dim( - 2, - [0,Qpixsize], - units = Qpixunits, - name = 'Qx' - ) - datacube.set_dim( - 3, - [0,Qpixsize], - units = Qpixunits, - name = 'Qy' - ) + datacube.set_dim(2, [0, Qpixsize], units=Qpixunits, name="Qx") + datacube.set_dim(3, [0, Qpixsize], units=Qpixunits, name="Qy") datacube.calibrate() return datacube - - - - - - - - - - diff --git a/py4DSTEM/preprocess/radialbkgrd.py b/py4DSTEM/preprocess/radialbkgrd.py index 71d035768..e0d402fe3 100644 --- a/py4DSTEM/preprocess/radialbkgrd.py +++ b/py4DSTEM/preprocess/radialbkgrd.py @@ -6,20 +6,21 @@ from scipy.interpolate import interp1d from scipy.signal import savgol_filter + ## Create look up table for background subtraction def get_1D_polar_background( data, p_ellipse, - center = None, + center=None, maskUpdateIter=3, - min_relative_threshold = 4, - smoothing = False, - smoothingWindowSize = 3, - smoothingPolyOrder = 4, - smoothing_log = True, - min_background_value=1E-3, - return_polararr=False - ): + min_relative_threshold=4, + smoothing=False, + smoothingWindowSize=3, + smoothingPolyOrder=4, + smoothing_log=True, + min_background_value=1e-3, + return_polararr=False, +): """ Gets the median polar background for a diffraction pattern @@ -62,66 +63,65 @@ """ from py4DSTEM.process.utils import cartesian_to_polarelliptical_transform - # assert data is proper form + # assert data is proper form assert isinstance(smoothing, bool), "Smoothing must be bool" - assert smoothingWindowSize%2==1, 'Smoothing window must be odd' + assert smoothingWindowSize % 2 == 1, "Smoothing window must be odd" assert isinstance(return_polararr, bool), "return_polararr must be bool" # Prepare ellipse params if center is not None: - p_ellipse = tuple[center[0],center[1],p_ellipse[2],p_ellipse[3],p_ellipse[4]] + p_ellipse = (center[0], center[1], p_ellipse[2], p_ellipse[3], p_ellipse[4]) # Compute Polar Transform - polarData, rr, tt = cartesian_to_polarelliptical_transform(data,p_ellipse) + polarData, rr, tt = cartesian_to_polarelliptical_transform(data, p_ellipse) # Crop polar data to maximum distance which contains information from original image - if (polarData.mask.sum(axis = (0))==polarData.shape[0]).any(): - ii = polarData.data.shape[1]-1 - while(polarData.mask[:,ii].all()==True): - ii = ii-1 - maximalDistance = ii - polarData = polarData[:,0:maximalDistance] - r_bins = rr[0,0:maximalDistance] + if (polarData.mask.sum(axis=(0)) == polarData.shape[0]).any(): + ii = polarData.data.shape[1] - 1 + while polarData.mask[:, ii].all() == True: + ii = ii - 1 + maximalDistance = ii + polarData = polarData[:, 0:maximalDistance] + r_bins = rr[0, 0:maximalDistance] else: - r_bins = rr[0,:] + r_bins = rr[0, :] # Iteratively mask off high intensity peaks maskPolar = np.copy(polarData.mask) - background1D = np.ma.median(polarData, axis = 0) - for ii in range(maskUpdateIter+1): + background1D = np.ma.median(polarData, axis=0) + for ii in range(maskUpdateIter + 1): if ii > 0: - maskUpdate = np.logical_or(maskPolar, - polarData/background1D > min_relative_threshold) - # Prevent entire columns from being masked off - colMaskMin = np.all(maskUpdate, axis = 0) # Detect columns that are empty - 
maskUpdate[:,colMaskMin] = polarData.mask[:,colMaskMin] # reset empty columns to values of previous iterations - polarData.mask = maskUpdate # Update Mask - + maskUpdate = np.logical_or( + maskPolar, polarData / background1D > min_relative_threshold + ) + # Prevent entire columns from being masked off + colMaskMin = np.all(maskUpdate, axis=0) # Detect columns that are empty + maskUpdate[:, colMaskMin] = polarData.mask[ + :, colMaskMin + ] # reset empty columns to values of previous iterations + polarData.mask = maskUpdate # Update Mask background1D = np.maximum(background1D, min_background_value) if smoothing == True: - if smoothing_log==True: + if smoothing_log == True: background1D = np.log(background1D) - background1D = savgol_filter(background1D, - smoothingWindowSize, - smoothingPolyOrder) + background1D = savgol_filter( + background1D, smoothingWindowSize, smoothingPolyOrder + ) - if smoothing_log==True: + if smoothing_log == True: background1D = np.exp(background1D) - if return_polararr ==True: - return(background1D, r_bins, polarData) + if return_polararr == True: + return (background1D, r_bins, polarData) else: - return(background1D, r_bins) + return (background1D, r_bins) -#Create 2D Background -def get_2D_polar_background( - data, - background1D, - r_bins, - p_ellipse, - center = None - ): + +# Create 2D Background +def get_2D_polar_background(data, background1D, r_bins, p_ellipse, center=None): """ Gets 2D polar elliptical background from linear 1D background @@ -147,32 +147,28 @@ ndarray 2D polar elliptical median background image """ - assert r_bins.shape==background1D.shape, "1D background and r_bins must be same length" + assert ( + r_bins.shape == background1D.shape + ), "1D background and r_bins must be same length" # Prepare ellipse params - qx0,qy0,a,b,theta = p_ellipse + qx0, qy0, a, b, theta = p_ellipse if center is not None: - qx0,qy0 = center + qx0, qy0 = center # Define centered 2D cartesian coordinate system yc, xc = np.meshgrid( - np.arange(0,data.shape[1])-qy0, - np.arange(0,data.shape[0])-qx0 + np.arange(0, data.shape[1]) - qy0, np.arange(0, data.shape[0]) - qx0 ) # Calculate the semimajor axis distance for each point in the 2D array r = np.sqrt( - ((xc*np.cos(theta)+yc*np.sin(theta))**2)+ - (((xc*np.sin(theta)-yc*np.cos(theta))**2)/((b/a)**2)) + ((xc * np.cos(theta) + yc * np.sin(theta)) ** 2) + + (((xc * np.sin(theta) - yc * np.cos(theta)) ** 2) / ((b / a) ** 2)) ) - # Create a 2D eliptical background using linear interpolation - f = interp1d( - r_bins, - background1D, - fill_value = 'extrapolate' - ) + # Create a 2D elliptical background using linear interpolation + f = interp1d(r_bins, background1D, fill_value="extrapolate") background2D = f(r) - return(background2D) - + return background2D diff --git a/py4DSTEM/preprocess/utils.py b/py4DSTEM/preprocess/utils.py index 66d9dfe1a..0c76f35a7 100644 --- a/py4DSTEM/preprocess/utils.py +++ b/py4DSTEM/preprocess/utils.py @@ -2,6 +2,7 @@ import numpy as np from scipy.ndimage import gaussian_filter + try: import cupy as cp except ImportError: @@ -32,16 +33,16 @@ def bin2D(array, factor, dtype=np.float64): # Collect pixel sums into new bins for ix in range(factor): for iy in range(factor): - binned_ar += array[0 + ix:xx + ix:factor, 0 + iy:yy + iy:factor] + binned_ar += array[0 + ix : xx + ix : factor, 0 + iy : yy + iy : factor] return binned_ar def make_Fourier_coords2D(Nx, Ny, pixelSize=1): """ Generates Fourier coordinates for a (Nx,Ny)-shaped 2D array. 
- Specifying the pixelSize argument sets a unit size. - """ - if hasattr(pixelSize, '__len__'): + Specifying the pixelSize argument sets a unit size. + """ + if hasattr(pixelSize, "__len__"): assert len(pixelSize) == 2, "pixelSize must either be a scalar or have length 2" pixelSize_x = pixelSize[0] pixelSize_y = pixelSize[1] @@ -55,8 +56,6 @@ def make_Fourier_coords2D(Nx, Ny, pixelSize=1): return qx, qy - - def get_shifted_ar(ar, xshift, yshift, periodic=True, bilinear=False, device="cpu"): """ Shifts array ar by the shift vector (xshift,yshift), using the either @@ -121,12 +120,10 @@ def get_shifted_ar(ar, xshift, yshift, periodic=True, bilinear=False, device="cp return shifted_ar - - def get_maxima_2D( ar, - subpixel = 'poly', - upsample_factor = 16, + subpixel="poly", + upsample_factor=16, sigma=0, minAbsoluteIntensity=0, minRelativeIntensity=0, @@ -135,7 +132,7 @@ def get_maxima_2D( edgeBoundary=1, maxNumPeaks=1, _ar_FT=None, - ): +): """ Finds the maximal points of a 2D array. @@ -161,27 +158,29 @@ def get_maxima_2D( """ from py4DSTEM.process.utils.multicorr import upsampled_correlation - subpixel_modes = ( - 'pixel', - 'poly', - 'multicorr' - ) + subpixel_modes = ("pixel", "poly", "multicorr") er = f"Unrecognized subpixel option {subpixel}. Must be in {subpixel_modes}" assert subpixel in subpixel_modes, er # gaussian filtering - ar = ar if sigma<=0 else gaussian_filter(ar, sigma) + ar = ar if sigma <= 0 else gaussian_filter(ar, sigma) # local pixelwise maxima - maxima_bool = \ - (ar >= np.roll(ar, (-1, 0), axis=(0, 1))) & (ar > np.roll(ar, (1, 0), axis=(0, 1))) & \ - (ar >= np.roll(ar, (0, -1), axis=(0, 1))) & (ar > np.roll(ar, (0, 1), axis=(0, 1))) & \ - (ar >= np.roll(ar, (-1, -1), axis=(0, 1))) & (ar > np.roll(ar, (-1, 1), axis=(0, 1))) & \ - (ar >= np.roll(ar, (1, -1), axis=(0, 1))) & (ar > np.roll(ar, (1, 1), axis=(0, 1))) + maxima_bool = ( + (ar >= np.roll(ar, (-1, 0), axis=(0, 1))) + & (ar > np.roll(ar, (1, 0), axis=(0, 1))) + & (ar >= np.roll(ar, (0, -1), axis=(0, 1))) + & (ar > np.roll(ar, (0, 1), axis=(0, 1))) + & (ar >= np.roll(ar, (-1, -1), axis=(0, 1))) + & (ar > np.roll(ar, (-1, 1), axis=(0, 1))) + & (ar >= np.roll(ar, (1, -1), axis=(0, 1))) + & (ar > np.roll(ar, (1, 1), axis=(0, 1))) + ) # remove edges assert isinstance(edgeBoundary, (int, np.integer)) - if edgeBoundary < 1: edgeBoundary = 1 + if edgeBoundary < 1: + edgeBoundary = 1 maxima_bool[:edgeBoundary, :] = False maxima_bool[-edgeBoundary:, :] = False maxima_bool[:, :edgeBoundary] = False @@ -190,17 +189,16 @@ def get_maxima_2D( # get indices # sort by intensity maxima_x, maxima_y = np.nonzero(maxima_bool) - dtype = np.dtype([('x', float), ('y', float), ('intensity', float)]) + dtype = np.dtype([("x", float), ("y", float), ("intensity", float)]) maxima = np.zeros(len(maxima_x), dtype=dtype) - maxima['x'] = maxima_x - maxima['y'] = maxima_y - maxima['intensity'] = ar[maxima_x, maxima_y] - maxima = np.sort(maxima, order='intensity')[::-1] + maxima["x"] = maxima_x + maxima["y"] = maxima_y + maxima["intensity"] = ar[maxima_x, maxima_y] + maxima = np.sort(maxima, order="intensity")[::-1] if len(maxima) == 0: return maxima - # filter maxima = filter_2D_maxima( maxima, @@ -212,46 +210,46 @@ def get_maxima_2D( maxNumPeaks=maxNumPeaks, ) - if subpixel == 'pixel': + if subpixel == "pixel": return maxima - # Parabolic subpixel refinement for i in range(len(maxima)): - Ix1_ = ar[int(maxima['x'][i]) - 1, int(maxima['y'][i])].astype(np.float64) - Ix0 = ar[int(maxima['x'][i]), int(maxima['y'][i])].astype(np.float64) - Ix1 = 
ar[int(maxima['x'][i]) + 1, int(maxima['y'][i])].astype(np.float64) - Iy1_ = ar[int(maxima['x'][i]), int(maxima['y'][i]) - 1].astype(np.float64) - Iy0 = ar[int(maxima['x'][i]), int(maxima['y'][i])].astype(np.float64) - Iy1 = ar[int(maxima['x'][i]), int(maxima['y'][i]) + 1].astype(np.float64) + Ix1_ = ar[int(maxima["x"][i]) - 1, int(maxima["y"][i])].astype(np.float64) + Ix0 = ar[int(maxima["x"][i]), int(maxima["y"][i])].astype(np.float64) + Ix1 = ar[int(maxima["x"][i]) + 1, int(maxima["y"][i])].astype(np.float64) + Iy1_ = ar[int(maxima["x"][i]), int(maxima["y"][i]) - 1].astype(np.float64) + Iy0 = ar[int(maxima["x"][i]), int(maxima["y"][i])].astype(np.float64) + Iy1 = ar[int(maxima["x"][i]), int(maxima["y"][i]) + 1].astype(np.float64) deltax = (Ix1 - Ix1_) / (4 * Ix0 - 2 * Ix1 - 2 * Ix1_) deltay = (Iy1 - Iy1_) / (4 * Iy0 - 2 * Iy1 - 2 * Iy1_) - maxima['x'][i] += deltax - maxima['y'][i] += deltay - maxima['intensity'][i] = linear_interpolation_2D(ar, maxima['x'][i], maxima['y'][i]) + maxima["x"][i] += deltax + maxima["y"][i] += deltay + maxima["intensity"][i] = linear_interpolation_2D( + ar, maxima["x"][i], maxima["y"][i] + ) - if subpixel == 'poly': + if subpixel == "poly": return maxima # Fourier upsampling if _ar_FT is None: _ar_FT = np.fft.fft2(ar) - for ipeak in range(len(maxima['x'])): - xyShift = np.array((maxima['x'][ipeak],maxima['y'][ipeak])) + for ipeak in range(len(maxima["x"])): + xyShift = np.array((maxima["x"][ipeak], maxima["y"][ipeak])) # we actually have to lose some precision and go down to half-pixel # accuracy for multicorr xyShift[0] = np.round(xyShift[0] * 2) / 2 xyShift[1] = np.round(xyShift[1] * 2) / 2 - subShift = upsampled_correlation(_ar_FT,upsample_factor,xyShift) - maxima['x'][ipeak]=subShift[0] - maxima['y'][ipeak]=subShift[1] + subShift = upsampled_correlation(_ar_FT, upsample_factor, xyShift) + maxima["x"][ipeak] = subShift[0] + maxima["y"][ipeak] = subShift[1] - maxima = np.sort(maxima, order='intensity')[::-1] + maxima = np.sort(maxima, order="intensity")[::-1] return maxima - def filter_2D_maxima( maxima, minAbsoluteIntensity=0, @@ -260,7 +258,7 @@ def filter_2D_maxima( minSpacing=0, edgeBoundary=1, maxNumPeaks=1, - ): +): """ Args: maxima : a numpy structured array with fields 'x', 'y', 'intensity' @@ -278,14 +276,17 @@ def filter_2D_maxima( """ # Remove maxima which are too dim - if (minAbsoluteIntensity > 0): - deletemask = maxima['intensity'] < minAbsoluteIntensity + if minAbsoluteIntensity > 0: + deletemask = maxima["intensity"] < minAbsoluteIntensity maxima = maxima[~deletemask] # Remove maxima which are too dim, compared to the n-th brightest if (minRelativeIntensity > 0) & (len(maxima) > relativeToPeak): assert isinstance(relativeToPeak, (int, np.integer)) - deletemask = maxima['intensity'] / maxima['intensity'][relativeToPeak] < minRelativeIntensity + deletemask = ( + maxima["intensity"] / maxima["intensity"][relativeToPeak] + < minRelativeIntensity + ) maxima = maxima[~deletemask] # Remove maxima which are too close @@ -293,9 +294,11 @@ def filter_2D_maxima( deletemask = np.zeros(len(maxima), dtype=bool) for i in range(len(maxima)): if deletemask[i] == False: - tooClose = ((maxima['x'] - maxima['x'][i]) ** 2 + \ - (maxima['y'] - maxima['y'][i]) ** 2) < minSpacing ** 2 - tooClose[:i + 1] = False + tooClose = ( + (maxima["x"] - maxima["x"][i]) ** 2 + + (maxima["y"] - maxima["y"][i]) ** 2 + ) < minSpacing**2 + tooClose[: i + 1] = False deletemask[tooClose] = True maxima = maxima[~deletemask] @@ -316,11 +319,9 @@ def linear_interpolation_2D(ar, x, y): 
y0, y1 = int(np.floor(y)), int(np.ceil(y)) dx = x - x0 dy = y - y0 - return (1 - dx) * (1 - dy) * ar[x0, y0] + (1 - dx) * dy * ar[x0, y1] + dx * (1 - dy) * ar[x1, y0] + dx * dy * ar[ - x1, y1] - - - - - - + return ( + (1 - dx) * (1 - dy) * ar[x0, y0] + + (1 - dx) * dy * ar[x0, y1] + + dx * (1 - dy) * ar[x1, y0] + + dx * dy * ar[x1, y1] + ) diff --git a/py4DSTEM/process/__init__.py b/py4DSTEM/process/__init__.py index 73088068b..e711e907d 100644 --- a/py4DSTEM/process/__init__.py +++ b/py4DSTEM/process/__init__.py @@ -9,4 +9,3 @@ from py4DSTEM.process import latticevectors from py4DSTEM.process import diffraction from py4DSTEM.process import wholepatternfit - diff --git a/py4DSTEM/process/calibration/__init__.py b/py4DSTEM/process/calibration/__init__.py index 6fd3312b8..2f8de9c0d 100644 --- a/py4DSTEM/process/calibration/__init__.py +++ b/py4DSTEM/process/calibration/__init__.py @@ -3,4 +3,3 @@ from py4DSTEM.process.calibration.ellipse import * from py4DSTEM.process.calibration.rotation import * from py4DSTEM.process.calibration.probe import * - diff --git a/py4DSTEM/process/calibration/ellipse.py b/py4DSTEM/process/calibration/ellipse.py index c8151122e..8835aa95b 100644 --- a/py4DSTEM/process/calibration/ellipse.py +++ b/py4DSTEM/process/calibration/ellipse.py @@ -24,11 +24,8 @@ ###### Fitting a 1d elliptical curve to a 2d array, e.g. a Bragg vector map ###### -def fit_ellipse_1D( - ar, - center=None, - fitradii=None, - mask=None): + +def fit_ellipse_1D(ar, center=None, fitradii=None, mask=None): """ For a 2d array ar, fits a 1d elliptical curve to the data inside an annulus centered at `center` with inner and outer radii at `fitradii`. The data to fit make optionally @@ -52,34 +49,35 @@ # Default values if center is None: - center = (ar.shape[0]/2,ar.shape[1]/2) + center = (ar.shape[0] / 2, ar.shape[1] / 2) if fitradii is None: - fitradii = (0,np.minimum(ar.shape)/2) + fitradii = (0, np.min(ar.shape) / 2) # Unpack inputs - x0,y0 = center - ri,ro = fitradii + x0, y0 = center + ri, ro = fitradii # Get the datapoints to fit - yy,xx = np.meshgrid(np.arange(ar.shape[1]),np.arange(ar.shape[0])) - rr = np.sqrt((xx-x0)**2 + (yy-y0)**2) - _mask = (rr>ri) * (rr<=ro) + yy, xx = np.meshgrid(np.arange(ar.shape[1]), np.arange(ar.shape[0])) + rr = np.sqrt((xx - x0) ** 2 + (yy - y0) ** 2) + _mask = (rr > ri) * (rr <= ro) if mask is not None: - _mask *= mask==False - xs,ys = np.nonzero(_mask) + _mask *= mask == False + xs, ys = np.nonzero(_mask) vals = ar[_mask] # Get initial parameters guess - p0 = [x0,y0,(2/(ri+ro))**2,0,(2/(ri+ro))**2] + p0 = [x0, y0, (2 / (ri + ro)) ** 2, 0, (2 / (ri + ro)) ** 2] # Fit - x,y,A,B,C = leastsq(ellipse_err, p0, args=(xs,ys,vals))[0] + x, y, A, B, C = leastsq(ellipse_err, p0, args=(xs, ys, vals))[0] # Convert ellipse params - a,b,theta = convert_ellipse_params(A,B,C) + a, b, theta = convert_ellipse_params(A, B, C) + + return x, y, a, b, theta - return x,y,a,b,theta def ellipse_err(p, x, y, val): """ @@ -94,13 +92,14 @@ parameterization used in all the user-facing functions, for reasons of numerical stability. """ - x,y = x-p[0],y-p[1] - return (p[2]*x**2 + p[3]*x*y + p[4]*y**2 - 1)*val + x, y = x - p[0], y - p[1] + return (p[2] * x**2 + p[3] * x * y + p[4] * y**2 - 1) * val ###### Fitting from amorphous diffraction rings ###### -def fit_ellipse_amorphous_ring(data,center,fitradii,p0=None,mask=None): + +def fit_ellipse_amorphous_ring(data, center, fitradii, p0=None, mask=None): """ Fit the amorphous halo of a diffraction pattern, including any elliptical distortion. @@ -172,14 +171,14 @@ if mask is None: mask = np.ones_like(data).astype(bool) assert data.shape == mask.shape, "data and mask must have same shapes." - x0,y0 = center - ri,ro = fitradii + x0, y0 = center + ri, ro = fitradii # Get data mask - Nx,Ny = data.shape - yy,xx = np.meshgrid(np.arange(Ny),np.arange(Nx)) - rr = np.hypot(xx-x0,yy-y0) - _mask = ((rr>ri)*(rr<ro)).astype(bool) + Nx, Ny = data.shape + yy, xx = np.meshgrid(np.arange(Ny), np.arange(Nx)) + rr = np.hypot(xx - x0, yy - y0) + _mask = ((rr > ri) * (rr < ro)).astype(bool) _mask *= mask # Make coordinates, get data values @@ -188,33 +187,34 @@ # Get initial parameter guesses I0 = np.max(data) - I1 = np.max(data*mask) - sigma0 = ri/2. - sigma1 = (ro-ri)/4. - sigma2 = (ro-ri)/4. + I1 = np.max(data * mask) + sigma0 = ri / 2.0 + sigma1 = (ro - ri) / 4.0 + sigma2 = (ro - ri) / 4.0 c_bkgd = np.min(data) # To guess R, we take a radial integral - q,radial_profile = radial_integral(data,x0,y0,1) - R = q[(q>ri)*(q<ro)][np.argmax(radial_profile[(q>ri)*(q<ro)])] + q, radial_profile = radial_integral(data, x0, y0, 1) + R = q[(q > ri) * (q < ro)][np.argmax(radial_profile[(q > ri) * (q < ro)])] # Initial guess at A,B,C - A,B,C = convert_ellipse_params_r(R,R,0) + A, B, C = convert_ellipse_params_r(R, R, 0) # Populate initial parameters - p0_guess = tuple([I0,I1,sigma0,sigma1,sigma2,c_bkgd,x0,y0,A,B,C]) + p0_guess = tuple([I0, I1, sigma0, sigma1, sigma2, c_bkgd, x0, y0, A, B, C]) if p0 is None: _p0 = p0_guess else: - assert len(p0)==11 + assert len(p0) == 11 _p0 = tuple([p0_guess[i] if p0[i] is None else p0[i] for i in range(len(p0))]) # Perform fit p = leastsq(double_sided_gaussian_fiterr, _p0, args=(x_inds, y_inds, vals))[0] # Return - _x0,_y0 = p[6],p[7] - _A,_B,_C = p[8],p[9],p[10] - _a,_b,_theta = convert_ellipse_params(_A,_B,_C) - return (_x0,_y0,_a,_b,_theta),p + _x0, _y0 = p[6], p[7] + _A, _B, _C = p[8], p[9], p[10] + _a, _b, _theta = convert_ellipse_params(_A, _B, _C) + return (_x0, _y0, _a, _b, _theta), p + def double_sided_gaussian_fiterr(p, x, y, val): """ @@ -230,23 +230,27 @@ def double_sided_gaussian(p, x, y): """ # Unpack parameters I0, I1, sigma0, sigma1, sigma2, c_bkgd, x0, y0, A, B, C = p - a,b,theta = convert_ellipse_params(A,B,C) - R = np.mean((a,b)) + a, b, theta = convert_ellipse_params(A, B, C) + R = np.mean((a, b)) R2 = R**2 - A,B,C = A*R2,B*R2,C*R2 - r2 = A*(x - x0)**2 + B*(x - x0)*(y - y0) + C*(y - y0)**2 + A, B, C = A * R2, B * R2, C * R2 + r2 = A * (x - x0) ** 2 + B * (x - x0) * (y - y0) + C * (y - y0) ** 2 r = np.sqrt(r2) - R return ( - I0 * np.exp(-r2 / (2 * sigma0 ** 2)) - + I1 * np.exp(-r ** 2 / (2 * sigma1 ** 2)) * np.heaviside(-r, 0.5) - + I1 * np.exp(-r ** 2 / (2 * sigma2 ** 2)) * np.heaviside(r, 0.5) + I0 * np.exp(-r2 / (2 * sigma0**2)) + + I1 * np.exp(-(r**2) / (2 * sigma1**2)) * np.heaviside(-r, 0.5) + + I1 * np.exp(-(r**2) / (2 * sigma2**2)) * np.heaviside(r, 0.5) + c_bkgd ) + ### Fit an ellipse to crystalline scattering with a known angle between peaks -def constrain_degenerate_ellipse(data, p_ellipse, r_inner, r_outer, phi_known, fitrad=6): + +def constrain_degenerate_ellipse( + data, p_ellipse, r_inner, r_outer, phi_known, fitrad=6 +): """ When fitting an 
ellipse to data containing 4 diffraction spots in a narrow annulus about the central beam, the answer is degenerate: an infinite number of ellipses @@ -275,42 +279,42 @@ * **b_constrained**: *(float)* the second semiaxis of the selected ellipse """ # Unpack ellipse params - x,y,a,b,theta = p_ellipse + x, y, a, b, theta = p_ellipse # Get 4 constraining points - xs,ys = np.zeros(4),np.zeros(4) - yy,xx = np.meshgrid(np.arange(data.shape[1]),np.arange(data.shape[0])) - rr = np.sqrt((xx-x)**2+(yy-y)**2) - annular_mask = (rr>r_inner)*(rr<=r_outer) + xs, ys = np.zeros(4), np.zeros(4) + yy, xx = np.meshgrid(np.arange(data.shape[1]), np.arange(data.shape[0])) + rr = np.sqrt((xx - x) ** 2 + (yy - y) ** 2) + annular_mask = (rr > r_inner) * (rr <= r_outer) data_temp = np.zeros_like(data) data_temp[annular_mask] = data[annular_mask] for i in range(4): - x_constr,y_constr = np.unravel_index(np.argmax(gaussian_filter(data_temp,2)),(data.shape[0],data.shape[1])) - rr = np.sqrt((xx-x_constr)**2+(yy-y_constr)**2) - mask = rr<fitrad diff --git a/py4DSTEM/process/calibration/origin.py b/py4DSTEM/process/calibration/origin.py --- a/py4DSTEM/process/calibration/origin.py +++ b/py4DSTEM/process/calibration/origin.py # >>> @measure_origin # >>> def get_the_origin(...): -# + # will make the function also save those arrays as the measured origin in the # calibration associated with the data used for the measurement. Any existing # measured origin value will be overwritten. -# + # For the wrapper to work, the decorated function's first argument must have # a .calibration property, and its first two return values must be qx0,qy0. # """ @@ -40,8 +40,8 @@ # cali.set_origin_meas((ans[0],ans[1])) # return ans # return wrapper -# -# + # def set_fit_origin(fun): # """ # See docstring for `set_measured_origin` @@ -54,14 +54,12 @@ # cali.set_origin((ans[0],ans[1])) # return ans # return wrapper -# - - - +# # fit the origin + def fit_origin( data, mask=None, @@ -108,16 +106,16 @@ giving fit parameters and covariance matrices with respect to the chosen fitting function. """ - assert isinstance(data,tuple) and len(data)==2 - qx0_meas,qy0_meas = data + assert isinstance(data, tuple) and len(data) == 2 + qx0_meas, qy0_meas = data assert isinstance(qx0_meas, np.ndarray) and len(qx0_meas.shape) == 2 - assert isinstance(qx0_meas, np.ndarray) and len(qy0_meas.shape) == 2 + assert isinstance(qy0_meas, np.ndarray) and len(qy0_meas.shape) == 2 assert qx0_meas.shape == qy0_meas.shape assert mask is None or mask.shape == qx0_meas.shape and mask.dtype == bool assert fitfunction in ("plane", "parabola", "bezier_two", "constant") if fitfunction == "constant": - qx0_fit = np.mean(qx0_meas)*np.ones_like(qx0_meas) - qy0_fit = np.mean(qy0_meas)*np.ones_like(qy0_meas) + qx0_fit = np.mean(qx0_meas) * np.ones_like(qx0_meas) + qy0_fit = np.mean(qy0_meas) * np.ones_like(qy0_meas) else: if fitfunction == "plane": f = plane @@ -174,19 +172,16 @@ # Return ans = (qx0_fit, qy0_fit, qx0_residuals, qy0_residuals) if returnfitp: - return ans,(popt_x, popt_y, pcov_x, pcov_y) + return ans, (popt_x, popt_y, pcov_x, pcov_y) else: return ans - - - - ### Functions for finding the origin # for a diffraction pattern + def get_origin_single_dp(dp, r, rscale=1.2): """ Find the origin for a single diffraction pattern, assuming (a) there is no beam stop, @@ -210,13 +205,15 @@ # for a datacube + def get_origin( datacube, r=None, rscale=1.2, dp_max=None, - mask=None - ): + mask=None, + fast_center=False, +): """ Find the origin for all diffraction patterns in a datacube, assuming (a) there is no beam stop, and (b) the center beam contains the highest intensity. 
Stores the origin @@ -241,6 +238,8 @@ def get_origin( mask (ndarray or None): if not None, should be an (R_Nx,R_Ny) shaped boolean array. Origin is found only where mask==True, and masked arrays are returned for qx0,qy0 + fast_center: (bool) + Skip the center of mass refinement step. Returns: (2-tuple of (R_Nx,R_Ny)-shaped ndarrays): the origin, (x,y) at each scan position @@ -257,7 +256,7 @@ def get_origin( qyy, qxx = np.meshgrid(np.arange(datacube.Q_Ny), np.arange(datacube.Q_Nx)) if mask is None: - for (rx, ry) in tqdmnd( + for rx, ry in tqdmnd( datacube.R_Nx, datacube.R_Ny, desc="Finding origins", @@ -266,10 +265,14 @@ def get_origin( ): dp = datacube.data[rx, ry, :, :] _qx0, _qy0 = np.unravel_index( - np.argmax(gaussian_filter(dp, r)), (datacube.Q_Nx, datacube.Q_Ny) + np.argmax(gaussian_filter(dp, r, mode="nearest")), + (datacube.Q_Nx, datacube.Q_Ny), ) - _mask = np.hypot(qxx - _qx0, qyy - _qy0) < r * rscale - qx0[rx, ry], qy0[rx, ry] = get_CoM(dp * _mask) + if fast_center: + qx0[rx, ry], qy0[rx, ry] = _qx0, _qy0 + else: + _mask = np.hypot(qxx - _qx0, qyy - _qy0) < r * rscale + qx0[rx, ry], qy0[rx, ry] = get_CoM(dp * _mask) else: assert mask.shape == (datacube.R_Nx, datacube.R_Ny) @@ -280,7 +283,7 @@ def get_origin( qy0 = np.ma.array( data=qy0, mask=np.zeros((datacube.R_Nx, datacube.R_Ny), dtype=bool) ) - for (rx, ry) in tqdmnd( + for rx, ry in tqdmnd( datacube.R_Nx, datacube.R_Ny, desc="Finding origins", @@ -290,10 +293,14 @@ def get_origin( if mask[rx, ry]: dp = datacube.data[rx, ry, :, :] _qx0, _qy0 = np.unravel_index( - np.argmax(gaussian_filter(dp, r)), (datacube.Q_Nx, datacube.Q_Ny) + np.argmax(gaussian_filter(dp, r, mode="nearest")), + (datacube.Q_Nx, datacube.Q_Ny), ) - _mask = np.hypot(qxx - _qx0, qyy - _qy0) < r * rscale - qx0.data[rx, ry], qy0.data[rx, ry] = get_CoM(dp * _mask) + if fast_center: + qx0[rx, ry], qy0[rx, ry] = _qx0, _qy0 + else: + _mask = np.hypot(qxx - _qx0, qyy - _qy0) < r * rscale + qx0.data[rx, ry], qy0.data[rx, ry] = get_CoM(dp * _mask) else: qx0.mask, qy0.mask = True, True @@ -302,7 +309,7 @@ def get_origin( return qx0, qy0, mask -def get_origin_single_dp_beamstop(DP: np.ndarray,mask: np.ndarray, **kwargs): +def get_origin_single_dp_beamstop(DP: np.ndarray, mask: np.ndarray, **kwargs): """ Find the origin for a single diffraction pattern, assuming there is a beam stop. @@ -353,9 +360,7 @@ def get_origin_beamstop(datacube: DataCube, mask: np.ndarray, **kwargs): for rx, ry in tqdmnd(datacube.R_Nx, datacube.R_Ny): x, y = get_origin_single_dp_beamstop(datacube.data[rx, ry, :, :], mask) - qx0[rx,ry] = x - qy0[rx,ry] = y + qx0[rx, ry] = x + qy0[rx, ry] = y return qx0, qy0 - - diff --git a/py4DSTEM/process/calibration/probe.py b/py4DSTEM/process/calibration/probe.py index f42a353b6..dc0a38949 100644 --- a/py4DSTEM/process/calibration/probe.py +++ b/py4DSTEM/process/calibration/probe.py @@ -3,7 +3,6 @@ from py4DSTEM.process.utils import get_CoM - def get_probe_size(DP, thresh_lower=0.01, thresh_upper=0.99, N=100): """ Gets the center and radius of the probe in the diffraction plane. 
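For orientation, the per-pattern search in `get_origin` above is two-stage: a coarse origin from the argmax of the Gaussian-smoothed pattern, then a center-of-mass refinement inside a disk of radius `r * rscale`; the new `fast_center` flag simply skips stage two. A self-contained sketch on a synthetic pattern, with an inline center of mass standing in for py4DSTEM's `get_CoM`:

```python
import numpy as np
from scipy.ndimage import gaussian_filter

Q, r, rscale = 64, 4, 1.2
xx, yy = np.meshgrid(np.arange(Q), np.arange(Q), indexing="ij")
dp = np.exp(-((xx - 30.3) ** 2 + (yy - 33.7) ** 2) / (2 * 4.0**2))

# Stage 1: coarse origin = brightest pixel of the smoothed pattern.
qx0, qy0 = np.unravel_index(
    np.argmax(gaussian_filter(dp, r, mode="nearest")), dp.shape
)

# Stage 2 (skipped when fast_center=True): center of mass near the coarse origin.
w = dp * (np.hypot(xx - qx0, yy - qy0) < r * rscale)
qx0_com, qy0_com = (xx * w).sum() / w.sum(), (yy * w).sum() / w.sum()
print((qx0, qy0), (round(qx0_com, 2), round(qy0_com, 2)))  # coarse vs. refined
```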
@@ -37,7 +36,7 @@ def get_probe_size(DP, thresh_lower=0.01, thresh_upper=0.99, N=100): from py4DSTEM.braggvectors import Probe # parse input - if isinstance(DP,Probe): + if isinstance(DP, Probe): DP = DP.probe thresh_vals = np.linspace(thresh_lower, thresh_upper, N) @@ -61,6 +60,3 @@ def get_probe_size(DP, thresh_lower=0.01, thresh_upper=0.99, N=100): x0, y0 = get_CoM(DP * mask) return r, x0, y0 - - - diff --git a/py4DSTEM/process/calibration/qpixelsize.py b/py4DSTEM/process/calibration/qpixelsize.py index 89e748b58..2abefd54c 100644 --- a/py4DSTEM/process/calibration/qpixelsize.py +++ b/py4DSTEM/process/calibration/qpixelsize.py @@ -8,7 +8,7 @@ from py4DSTEM.process.utils import get_CoM -def get_Q_pixel_size(q_meas, q_known, units='A'): +def get_Q_pixel_size(q_meas, q_known, units="A"): """ Computes the size of the Q-space pixels. @@ -20,7 +20,7 @@ def get_Q_pixel_size(q_meas, q_known, units='A'): Returns: (number,str): the detector pixel size, the associated units """ - return 1. / (q_meas * q_known), units+'^-1' + return 1.0 / (q_meas * q_known), units + "^-1" def get_dq_from_indexed_peaks(qs, hkl, a): @@ -48,7 +48,7 @@ def get_dq_from_indexed_peaks(qs, hkl, a): assert len(qs) == len(hkl), "qs and hkl must have same length" # Get spacings - d_inv = np.array([np.sqrt(a ** 2 + b ** 2 + c ** 2) for (a, b, c) in hkl]) + d_inv = np.array([np.sqrt(a**2 + b**2 + c**2) for (a, b, c) in hkl]) mask = d_inv != 0 # Get scaling factor @@ -63,6 +63,3 @@ def get_dq_from_indexed_peaks(qs, hkl, a): hkl_fit = [hkl[i] for i in range(len(hkl)) if mask[i] == True] return dq, qs_fit, hkl_fit - - - diff --git a/py4DSTEM/process/calibration/rotation.py b/py4DSTEM/process/calibration/rotation.py index 401963052..2c3e7bb43 100644 --- a/py4DSTEM/process/calibration/rotation.py +++ b/py4DSTEM/process/calibration/rotation.py @@ -4,7 +4,6 @@ from typing import Optional - def get_Qvector_from_Rvector(vx, vy, QR_rotation): """ For some vector (vx,vy) in real space, and some rotation QR between real and diff --git a/py4DSTEM/process/classification/__init__.py b/py4DSTEM/process/classification/__init__.py index efdb7469c..42a8a6d4a 100644 --- a/py4DSTEM/process/classification/__init__.py +++ b/py4DSTEM/process/classification/__init__.py @@ -1,4 +1,3 @@ from py4DSTEM.process.classification.braggvectorclassification import * from py4DSTEM.process.classification.classutils import * from py4DSTEM.process.classification.featurization import * - diff --git a/py4DSTEM/process/classification/braggvectorclassification.py b/py4DSTEM/process/classification/braggvectorclassification.py index a33fa75a6..d5c2ac0fc 100644 --- a/py4DSTEM/process/classification/braggvectorclassification.py +++ b/py4DSTEM/process/classification/braggvectorclassification.py @@ -5,7 +5,12 @@ from numpy.linalg import lstsq from itertools import permutations from scipy.ndimage import gaussian_filter -from scipy.ndimage import binary_opening, binary_closing, binary_dilation, binary_erosion +from scipy.ndimage import ( + binary_opening, + binary_closing, + binary_dilation, + binary_erosion, +) from skimage.measure import label from sklearn.decomposition import NMF @@ -106,44 +111,58 @@ def __init__(self, braggpeaks, Qx, Qy, X_is_boolean=True, max_dist=None): max_dist (None or number): maximum distance from a given voronoi point a peak can be and still be associated with this label """ - assert isinstance(braggpeaks,PointListArray), "braggpeaks must be a PointListArray" - assert np.all([name in braggpeaks.dtype.names for name in ('qx','qy')]), "braggpeaks must 
contain coords 'qx' and 'qy'" - assert len(Qx)==len(Qy), "Qx and Qy must have same length" + assert isinstance( + braggpeaks, PointListArray + ), "braggpeaks must be a PointListArray" + assert np.all( + [name in braggpeaks.dtype.names for name in ("qx", "qy")] + ), "braggpeaks must contain coords 'qx' and 'qy'" + assert len(Qx) == len(Qy), "Qx and Qy must have same length" self.braggpeaks = braggpeaks self.R_Nx = braggpeaks.shape[0] #: shape of real space (x) self.R_Ny = braggpeaks.shape[1] #: shape of real space (y) self.Qx = Qx #: x-coordinates of the voronoi points - self.Qy = Qy #: y-coordinates of the voronoi points + self.Qy = Qy #: y-coordinates of the voronoi points #: the sets of Bragg peaks present at each scan position - self.braggpeak_labels = get_braggpeak_labels_by_scan_position(braggpeaks, Qx, Qy, max_dist) + self.braggpeak_labels = get_braggpeak_labels_by_scan_position( + braggpeaks, Qx, Qy, max_dist + ) # Construct X matrix #: first dimension of the data matrix; the number of bragg peaks self.N_feat = len(self.Qx) #: second dimension of the data matrix; the number of scan positions - self.N_meas = self.R_Nx*self.R_Ny + self.N_meas = self.R_Nx * self.R_Ny - self.X = np.zeros((self.N_feat,self.N_meas)) #: the data matrix + self.X = np.zeros((self.N_feat, self.N_meas)) #: the data matrix for Rx in range(self.R_Nx): for Ry in range(self.R_Ny): - R = Rx*self.R_Ny + Ry + R = Rx * self.R_Ny + Ry s = self.braggpeak_labels[Rx][Ry] - pointlist = self.braggpeaks.get_pointlist(Rx,Ry) + pointlist = self.braggpeaks.get_pointlist(Rx, Ry) for i in s: if X_is_boolean: - self.X[i,R] = True + self.X[i, R] = True else: - ind = np.argmin(np.hypot(pointlist.data['qx']-Qx[i], - pointlist.data['qy']-Qy[i])) - self.X[i,R] = pointlist.data['intensity'][ind] + ind = np.argmin( + np.hypot( + pointlist.data["qx"] - Qx[i], + pointlist.data["qy"] - Qy[i], + ) + ) + self.X[i, R] = pointlist.data["intensity"][ind] return - def get_initial_classes_by_cooccurrence(self, thresh=0.3, BP_fraction_thresh=0.1, - max_iterations=200, - X_is_boolean=True, - n_corr_init=2): + def get_initial_classes_by_cooccurrence( + self, + thresh=0.3, + BP_fraction_thresh=0.1, + max_iterations=200, + X_is_boolean=True, + n_corr_init=2, + ): """ Populate the initial classes by finding sets of Bragg peaks that tend to co-occur in the @@ -164,28 +183,32 @@ def get_initial_classes_by_cooccurrence(self, thresh=0.3, BP_fraction_thresh=0.1 probability function. Must be 2 or 3. 
""" assert isinstance(X_is_boolean, bool) - assert isinstance(max_iterations, (int,np.integer)) - assert n_corr_init in (2,3) + assert isinstance(max_iterations, (int, np.integer)) + assert n_corr_init in (2, 3) # Get sets of integers representing the initial classes - BP_sets = get_initial_classes(self.braggpeak_labels, N=len(self.Qx), thresh=thresh, - BP_fraction_thresh=BP_fraction_thresh, - max_iterations=max_iterations, - n_corr_init=n_corr_init) + BP_sets = get_initial_classes( + self.braggpeak_labels, + N=len(self.Qx), + thresh=thresh, + BP_fraction_thresh=BP_fraction_thresh, + max_iterations=max_iterations, + n_corr_init=n_corr_init, + ) # Construct W, H matrices self.N_c = len(BP_sets) # W - self.W = np.zeros((self.N_feat,self.N_c)) + self.W = np.zeros((self.N_feat, self.N_c)) for i in range(self.N_c): BP_set = BP_sets[i] for j in BP_set: - self.W[j,i] = 1 + self.W[j, i] = 1 # H - self.H = lstsq(self.W,self.X,rcond=None)[0] - self.H = np.where(self.H<0,0,self.H) + self.H = lstsq(self.W, self.X, rcond=None)[0] + self.H = np.where(self.H < 0, 0, self.H) self.W_next = None self.H_next = None @@ -201,22 +224,22 @@ def get_initial_classes_from_images(self, class_images): class_images (ndarray): must have shape (R_Nx,R_Ny,N_c), where N_c is the number of classes, and class_images[:,:,i] is the image of class i. """ - assert class_images.shape[0]==self.R_Nx - assert class_images.shape[1]==self.R_Ny + assert class_images.shape[0] == self.R_Nx + assert class_images.shape[1] == self.R_Ny # Construct W, H matrices self.N_c = class_images.shape[2] # H - H = np.zeros((self.N_c,self.N_meas)) + H = np.zeros((self.N_c, self.N_meas)) for i in range(self.N_c): - H[i,:] = class_images[:,:,i].ravel() - self.H = np.copy(H, order='C') + H[i, :] = class_images[:, :, i].ravel() + self.H = np.copy(H, order="C") # W - W = lstsq(self.H.T, self.X.T,rcond=None)[0].T - W = np.where(W<0,0,W) - self.W = np.copy(W, order='C') + W = lstsq(self.H.T, self.X.T, rcond=None)[0].T + W = np.where(W < 0, 0, W) + self.W = np.copy(W, order="C") self.W_next = None self.H_next = None @@ -269,7 +292,7 @@ def nmf(self, max_iterations=1): Args: max_iterations (int): the maximum number of NMF steps to take """ - sklearn_nmf = NMF(n_components=self.N_c, init='custom', max_iter=max_iterations) + sklearn_nmf = NMF(n_components=self.N_c, init="custom", max_iter=max_iterations) self.W_next = sklearn_nmf.fit_transform(self.X, W=self.W, H=self.H) self.H_next = sklearn_nmf.components_ self.N_c_next = self.W_next.shape[1] @@ -306,18 +329,18 @@ def split(self, sigma=2, threshold_split=0.25, expand_mask=1, minimum_pixels=1): minimum_pixels (int): if, after splitting, a potential new class contains fewer than this number of pixels, ignore it """ - assert isinstance(expand_mask,(int,np.integer)) - assert isinstance(minimum_pixels,(int,np.integer)) + assert isinstance(expand_mask, (int, np.integer)) + assert isinstance(minimum_pixels, (int, np.integer)) - W_next = np.zeros((self.N_feat,1)) - H_next = np.zeros((1,self.N_meas)) + W_next = np.zeros((self.N_feat, 1)) + H_next = np.zeros((1, self.N_meas)) for i in range(self.N_c): # Get the class in real space class_image = self.get_class_image(i) # Turn into a binary mask - class_image = gaussian_filter(class_image,sigma) - mask = class_image > (np.max(class_image)*threshold_split) + class_image = gaussian_filter(class_image, sigma) + mask = class_image > (np.max(class_image) * threshold_split) mask = binary_opening(mask, iterations=1) mask = binary_closing(mask, iterations=1) mask = 
binary_dilation(mask, iterations=expand_mask) @@ -327,21 +350,20 @@ def split(self, sigma=2, threshold_split=0.25, expand_mask=1, minimum_pixels=1): # Add each region to the new W and H matrices for j in range(nlabels): - mask = (labels == (j+1)) + mask = labels == (j + 1) mask = binary_erosion(mask, iterations=expand_mask) if np.sum(mask) >= minimum_pixels: - # Leave the Bragg peak weightings the same - W_next = np.hstack((W_next,self.W[:,i,np.newaxis])) + W_next = np.hstack((W_next, self.W[:, i, np.newaxis])) # Use the existing real space pixel weightings h_i = np.zeros(self.N_meas) - h_i[mask.ravel()] = self.H[i,:][mask.ravel()] - H_next = np.vstack((H_next,h_i[np.newaxis,:])) + h_i[mask.ravel()] = self.H[i, :][mask.ravel()] + H_next = np.vstack((H_next, h_i[np.newaxis, :])) - self.W_next = W_next[:,1:] - self.H_next = H_next[1:,:] + self.W_next = W_next[:, 1:] + self.H_next = H_next[1:, :] self.N_c_next = self.W_next.shape[1] return @@ -400,25 +422,30 @@ def merge_by_class_index(self, i, j): i (int): index of the first class to merge j (int): index of the second class to merge """ - assert np.all([isinstance(ind,(int,np.integer)) for ind in [i,j]]), "i and j must be ints" + assert np.all( + [isinstance(ind, (int, np.integer)) for ind in [i, j]] + ), "i and j must be ints" # Get merged class - weight_i = np.sum(self.H[i,:]) - weight_j = np.sum(self.H[j,:]) - W_new = (self.W[:,i]*weight_i + self.W[:,j]*weight_j)/(weight_i+weight_j) - H_new = self.H[i,:] + self.H[j,:] + weight_i = np.sum(self.H[i, :]) + weight_j = np.sum(self.H[j, :]) + W_new = (self.W[:, i] * weight_i + self.W[:, j] * weight_j) / ( + weight_i + weight_j + ) + H_new = self.H[i, :] + self.H[j, :] # Remove old classes and add in new class - self.W_next = np.delete(self.W,j,axis=1) - self.H_next = np.delete(self.H,j,axis=0) - self.W_next[:,i] = W_new - self.H_next[i,:] = H_new - self.N_c_next = self.N_c-1 + self.W_next = np.delete(self.W, j, axis=1) + self.H_next = np.delete(self.H, j, axis=0) + self.W_next[:, i] = W_new + self.H_next[i, :] = H_new + self.N_c_next = self.N_c - 1 return - def split_by_class_index(self, i, sigma=2, threshold_split=0.25, expand_mask=1, - minimum_pixels=1): + def split_by_class_index( + self, i, sigma=2, threshold_split=0.25, expand_mask=1, minimum_pixels=1 + ): """ If class i contains multiple non-contiguous segments in real space, divide these regions into distinct classes. 
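The split machinery in the two methods above is a standard connected-components recipe: Gaussian-smooth the class image, threshold at a fraction of its max, clean up with binary open/close, then label the surviving regions. The same scipy/skimage calls in isolation, on a toy class image with two blobs (illustrative sizes and parameters):

```python
import numpy as np
from scipy.ndimage import gaussian_filter, binary_opening, binary_closing
from skimage.measure import label

im = np.zeros((32, 32))
im[6:12, 6:12] = 1.0     # blob 1
im[20:27, 18:26] = 1.0   # blob 2

smoothed = gaussian_filter(im, 2)
mask = smoothed > smoothed.max() * 0.25    # threshold_split = 0.25
mask = binary_opening(mask, iterations=1)  # drop isolated specks
mask = binary_closing(mask, iterations=1)  # fill small holes
labels, nlabels = label(mask, background=0, return_num=True)
print(nlabels)  # 2 -> this class would be split into two new classes
```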
@@ -436,18 +463,18 @@ def split_by_class_index(self, i, sigma=2, threshold_split=0.25, expand_mask=1, minimum_pixels (int): if, after splitting, a potential new class contains fewer than this number of pixels, ignore it """ - assert isinstance(i,(int,np.integer)) - assert isinstance(expand_mask,(int,np.integer)) - assert isinstance(minimum_pixels,(int,np.integer)) - W_next = np.zeros((self.N_feat,1)) - H_next = np.zeros((1,self.N_meas)) + assert isinstance(i, (int, np.integer)) + assert isinstance(expand_mask, (int, np.integer)) + assert isinstance(minimum_pixels, (int, np.integer)) + W_next = np.zeros((self.N_feat, 1)) + H_next = np.zeros((1, self.N_meas)) # Get the class in real space class_image = self.get_class_image(i) # Turn into a binary mask - class_image = gaussian_filter(class_image,sigma) - mask = class_image > (np.max(class_image)*threshold_split) + class_image = gaussian_filter(class_image, sigma) + mask = class_image > (np.max(class_image) * threshold_split) mask = binary_opening(mask, iterations=1) mask = binary_closing(mask, iterations=1) mask = binary_dilation(mask, iterations=expand_mask) @@ -457,23 +484,22 @@ def split_by_class_index(self, i, sigma=2, threshold_split=0.25, expand_mask=1, # Add each region to the new W and H matrices for j in range(nlabels): - mask = (labels == (j+1)) + mask = labels == (j + 1) mask = binary_erosion(mask, iterations=expand_mask) if np.sum(mask) >= minimum_pixels: - # Leave the Bragg peak weightings the same - W_next = np.hstack((W_next,self.W[:,i,np.newaxis])) + W_next = np.hstack((W_next, self.W[:, i, np.newaxis])) # Use the existing real space pixel weightings h_i = np.zeros(self.N_meas) - h_i[mask.ravel()] = self.H[i,:][mask.ravel()] - H_next = np.vstack((H_next,h_i[np.newaxis,:])) + h_i[mask.ravel()] = self.H[i, :][mask.ravel()] + H_next = np.vstack((H_next, h_i[np.newaxis, :])) - W_prev = np.delete(self.W,i,axis=1) - H_prev = np.delete(self.H,i,axis=0) - self.W_next = np.concatenate((W_next[:,1:],W_prev),axis=1) - self.H_next = np.concatenate((H_next[1:,:],H_prev),axis=0) + W_prev = np.delete(self.W, i, axis=1) + H_prev = np.delete(self.H, i, axis=0) + self.W_next = np.concatenate((W_next[:, 1:], W_prev), axis=1) + self.H_next = np.concatenate((H_next[1:, :], H_prev), axis=0) self.N_c_next = self.W_next.shape[1] return @@ -485,10 +511,10 @@ def remove_class(self, i): Args: i (int): index of the class to remove """ - assert isinstance(i,(int,np.integer)) + assert isinstance(i, (int, np.integer)) - self.W_next = np.delete(self.W,i,axis=1) - self.H_next = np.delete(self.H,i,axis=0) + self.W_next = np.delete(self.W, i, axis=1) + self.H_next = np.delete(self.H, i, axis=0) self.N_c_next = self.W_next.shape[1] return @@ -514,7 +540,6 @@ def merge_iterative(self, threshBPs=0.1, threshScanPosition=0.1): Nc_ = W_.shape[1] while proceed: - # Get correlation coefficients W_corr = np.corrcoef(W_.T) H_corr = np.corrcoef(H_) @@ -522,46 +547,58 @@ def merge_iterative(self, threshBPs=0.1, threshScanPosition=0.1): # Get merge candidate pairs mask_BPs = W_corr > threshBPs mask_ScanPosition = H_corr > threshScanPosition - mask_upperright = np.zeros((Nc_,Nc_),dtype=bool) + mask_upperright = np.zeros((Nc_, Nc_), dtype=bool) for i in range(Nc_): - mask_upperright[i,i+1:] = 1 + mask_upperright[i, i + 1 :] = 1 merge_mask = mask_BPs * mask_ScanPosition * mask_upperright - merge_i,merge_j = np.nonzero(merge_mask) + merge_i, merge_j = np.nonzero(merge_mask) # Sort merge candidate pairs - merge_candidates = 
np.zeros(len(merge_i),dtype=[('i',int),('j',int),('cc_w',float), - ('cc_h',float),('score',float)]) - merge_candidates['i'] = merge_i - merge_candidates['j'] = merge_j - merge_candidates['cc_w'] = W_corr[merge_i,merge_j] - merge_candidates['cc_h'] = H_corr[merge_i,merge_j] - merge_candidates['score'] = W_corr[merge_i,merge_j]*H_corr[merge_i,merge_j] - merge_candidates = np.sort(merge_candidates,order='score')[::-1] + merge_candidates = np.zeros( + len(merge_i), + dtype=[ + ("i", int), + ("j", int), + ("cc_w", float), + ("cc_h", float), + ("score", float), + ], + ) + merge_candidates["i"] = merge_i + merge_candidates["j"] = merge_j + merge_candidates["cc_w"] = W_corr[merge_i, merge_j] + merge_candidates["cc_h"] = H_corr[merge_i, merge_j] + merge_candidates["score"] = ( + W_corr[merge_i, merge_j] * H_corr[merge_i, merge_j] + ) + merge_candidates = np.sort(merge_candidates, order="score")[::-1] # Perform merging - merged = np.zeros(Nc_,dtype=bool) - W_merge = np.zeros((self.N_feat,1)) - H_merge = np.zeros((1,self.N_meas)) + merged = np.zeros(Nc_, dtype=bool) + W_merge = np.zeros((self.N_feat, 1)) + H_merge = np.zeros((1, self.N_meas)) for index in range(len(merge_candidates)): - i = merge_candidates['i'][index] - j = merge_candidates['j'][index] + i = merge_candidates["i"][index] + j = merge_candidates["j"][index] if not (merged[i] or merged[j]): - weight_i = np.sum(H_[i,:]) - weight_j = np.sum(H_[j,:]) - W_new = (W_[:,i]*weight_i + W_[:,j]*weight_j)/(weight_i+weight_j) - H_new = H_[i,:] + H_[j,:] - W_merge = np.hstack((W_merge,W_new[:,np.newaxis])) - H_merge = np.vstack((H_merge,H_new[np.newaxis,:])) + weight_i = np.sum(H_[i, :]) + weight_j = np.sum(H_[j, :]) + W_new = (W_[:, i] * weight_i + W_[:, j] * weight_j) / ( + weight_i + weight_j + ) + H_new = H_[i, :] + H_[j, :] + W_merge = np.hstack((W_merge, W_new[:, np.newaxis])) + H_merge = np.vstack((H_merge, H_new[np.newaxis, :])) merged[i] = True merged[j] = True - W_merge = W_merge[:,1:] - H_merge = H_merge[1:,:] + W_merge = W_merge[:, 1:] + H_merge = H_merge[1:, :] - W_ = np.hstack((W_[:,merged==False],W_merge)) - H_ = np.vstack((H_[merged==False,:],H_merge)) + W_ = np.hstack((W_[:, merged == False], W_merge)) + H_ = np.vstack((H_[merged == False, :], H_merge)) Nc_ = W_.shape[1] - if len(merge_candidates)==0: + if len(merge_candidates) == 0: proceed = False self.W_next = W_ @@ -607,8 +644,8 @@ def get_class(self, i): * **class_image**: *(shape (R_Nx,R_Ny) array of floats)* the weights of each scan position in this class """ - class_BPs = self.W[:,i] - class_image = self.H[i,:].reshape((self.R_Nx,self.R_Ny)) + class_BPs = self.W[:, i] + class_image = self.H[i, :].reshape((self.R_Nx, self.R_Ny)) return class_BPs, class_image def get_class_BPs(self, i): @@ -622,7 +659,7 @@ def get_class_BPs(self, i): (length N_feat array of floats): the weights of the N_feat Bragg peaks for this class """ - return self.W[:,i] + return self.W[:, i] def get_class_image(self, i): """ @@ -635,7 +672,7 @@ def get_class_image(self, i): (shape (R_Nx,R_Ny) array of floats): the weights of each scan position in this class """ - return self.H[i,:].reshape((self.R_Nx,self.R_Ny)) + return self.H[i, :].reshape((self.R_Nx, self.R_Ny)) def get_candidate_class(self, i): """ @@ -655,8 +692,8 @@ def get_candidate_class(self, i): assert self.W_next is not None, "W_next is not assigned." assert self.H_next is not None, "H_next is not assigned." 
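`merge_iterative` above ranks its candidate class pairs with a NumPy structured array sorted on a computed `score` field, then merges best-first; the same ranking pattern in isolation, with made-up scores:

```python
import numpy as np

cands = np.zeros(3, dtype=[("i", int), ("j", int), ("score", float)])
cands["i"] = [0, 0, 1]
cands["j"] = [1, 2, 2]
cands["score"] = [0.12, 0.87, 0.45]  # e.g. the cc_w * cc_h products

# Ascending sort on the score field, then reverse for best-first order.
cands = np.sort(cands, order="score")[::-1]
print(cands["i"], cands["j"])  # [0 1 0] [2 2 1] -> pair (0, 2) merges first
```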
- class_BPs = self.W_next[:,i] - class_image = self.H_next[i,:].reshape((self.R_Nx,self.R_Ny)) + class_BPs = self.W_next[:, i] + class_image = self.H_next[i, :].reshape((self.R_Nx, self.R_Ny)) return class_BPs, class_image def get_candidate_class_BPs(self, i): @@ -672,7 +709,7 @@ def get_candidate_class_BPs(self, i): """ assert self.W_next is not None, "W_next is not assigned." - return self.W_next[:,i] + return self.W_next[:, i] def get_candidate_class_image(self, i): """ @@ -687,11 +724,12 @@ def get_candidate_class_image(self, i): """ assert self.H_next is not None, "H_next is not assigned." - return self.H_next[i,:].reshape((self.R_Nx,self.R_Ny)) + return self.H_next[i, :].reshape((self.R_Nx, self.R_Ny)) ### Functions for initial class determination ### + def get_braggpeak_labels_by_scan_position(braggpeaks, Qx, Qy, max_dist=None): """ For each scan position, gets a set of integers, specifying the bragg peaks at this @@ -718,18 +756,30 @@ def get_braggpeak_labels_by_scan_position(braggpeaks, Qx, Qy, max_dist=None): (list of lists of sets) the labels found at each scan position. Scan position (Rx,Ry) is accessed via braggpeak_labels[Rx][Ry] """ - assert isinstance(braggpeaks,PointListArray), "braggpeaks must be a PointListArray" - assert np.all([name in braggpeaks.dtype.names for name in ('qx','qy')]), "braggpeaks must contain coords 'qx' and 'qy'" - - braggpeak_labels = [[set() for i in range(braggpeaks.shape[1])] for j in range(braggpeaks.shape[0])] + assert isinstance(braggpeaks, PointListArray), "braggpeaks must be a PointListArray" + assert np.all( + [name in braggpeaks.dtype.names for name in ("qx", "qy")] + ), "braggpeaks must contain coords 'qx' and 'qy'" + + braggpeak_labels = [ + [set() for i in range(braggpeaks.shape[1])] for j in range(braggpeaks.shape[0]) + ] for Rx in range(braggpeaks.shape[0]): for Ry in range(braggpeaks.shape[1]): s = braggpeak_labels[Rx][Ry] - pointlist = braggpeaks.get_pointlist(Rx,Ry) + pointlist = braggpeaks.get_pointlist(Rx, Ry) for i in range(len(pointlist.data)): - label = np.argmin(np.hypot(Qx-pointlist.data['qx'][i],Qy-pointlist.data['qy'][i])) + label = np.argmin( + np.hypot(Qx - pointlist.data["qx"][i], Qy - pointlist.data["qy"][i]) + ) if max_dist is not None: - if np.hypot(Qx[label]-pointlist.data['qx'][i],Qy[label]-pointlist.data['qy'][i]) < max_dist: + if ( + np.hypot( + Qx[label] - pointlist.data["qx"][i], + Qy[label] - pointlist.data["qy"][i], + ) + < max_dist + ): s.add(label) else: s.add(label) @@ -737,8 +787,14 @@ def get_braggpeak_labels_by_scan_position(braggpeaks, Qx, Qy, max_dist=None): return braggpeak_labels -def get_initial_classes(braggpeak_labels, N, thresh=0.3, BP_fraction_thresh=0.1, - max_iterations=200, n_corr_init=2): +def get_initial_classes( + braggpeak_labels, + N, + thresh=0.3, + BP_fraction_thresh=0.1, + max_iterations=200, + n_corr_init=2, +): """ From the sets of Bragg peaks present at each scan position, get an initial guess classes at which Bragg peaks should be grouped together into classes. 
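The initial-class search continuing below builds an N x N (or N x N x N) co-occurrence function from the per-scan-position label sets produced by `get_braggpeak_labels_by_scan_position`. A toy version of the two-point case, for a hypothetical 2 x 2 scan with N = 4 peaks:

```python
import numpy as np
from itertools import permutations

labels = [[{0, 1}, {0, 1, 2}], [{2, 3}, {0, 1}]]  # label sets per scan position
N = 4

n_point = np.zeros((N, N))
for row in labels:
    for s in row:
        for i, j in permutations(s, 2):  # ordered pairs of co-occurring peaks
            n_point[i, j] += 1
n_point /= 4  # normalize by the number of scan positions

print(n_point[0, 1])  # 0.75 -> peaks 0 and 1 co-occur at 3 of the 4 positions
```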
@@ -774,33 +830,33 @@ def get_initial_classes(braggpeak_labels, N, thresh=0.3, BP_fraction_thresh=0.1, Returns: (list of sets): the sets of Bragg peaks constituting the classes """ - assert isinstance(braggpeak_labels[0][0],set) + assert isinstance(braggpeak_labels[0][0], set) assert thresh >= 0 and thresh <= 1 assert BP_fraction_thresh >= 0 and BP_fraction_thresh <= 1 - assert isinstance(max_iterations,(int,np.integer)) - assert n_corr_init in (2,3) + assert isinstance(max_iterations, (int, np.integer)) + assert n_corr_init in (2, 3) R_Nx = len(braggpeak_labels) R_Ny = len(braggpeak_labels[0]) if n_corr_init == 2: # Get two-point function - n_point_function = np.zeros((N,N)) + n_point_function = np.zeros((N, N)) for Rx in range(R_Nx): for Ry in range(R_Ny): s = braggpeak_labels[Rx][Ry] - perms = permutations(s,2) + perms = permutations(s, 2) for perm in perms: - n_point_function[perm[0],perm[1]] += 1 - n_point_function /= R_Nx*R_Ny + n_point_function[perm[0], perm[1]] += 1 + n_point_function /= R_Nx * R_Ny # Main loop BP_sets = [] iteration = 0 - unused_BPs = np.ones(N,dtype=bool) + unused_BPs = np.ones(N, dtype=bool) seed_new_class = True while seed_new_class: - ind1,ind2 = np.unravel_index(np.argmax(n_point_function),(N,N)) - BP_set = set([ind1,ind2]) + ind1, ind2 = np.unravel_index(np.argmax(n_point_function), (N, N)) + BP_set = set([ind1, ind2]) grow_class = True while grow_class: frequencies = np.zeros(N) @@ -823,8 +879,8 @@ def get_initial_classes(braggpeak_labels, N, thresh=0.3, BP_fraction_thresh=0.1, # Modify 2-point function, add new BP set to list, and decide to continue or stop for i in BP_set: - n_point_function[i,:] = 0 - n_point_function[:,i] = 0 + n_point_function[i, :] = 0 + n_point_function[:, i] = 0 unused_BPs[i] = 0 for s in BP_sets: if len(s) == len(s.union(BP_set)): @@ -833,28 +889,28 @@ def get_initial_classes(braggpeak_labels, N, thresh=0.3, BP_fraction_thresh=0.1, BP_sets.append(BP_set) iteration += 1 N_unused_BPs = np.sum(unused_BPs) - if iteration > max_iterations or N_unused_BPs < N*BP_fraction_thresh: + if iteration > max_iterations or N_unused_BPs < N * BP_fraction_thresh: seed_new_class = False else: # Get three-point function - n_point_function = np.zeros((N,N,N)) + n_point_function = np.zeros((N, N, N)) for Rx in range(R_Nx): for Ry in range(R_Ny): s = braggpeak_labels[Rx][Ry] - perms = permutations(s,3) + perms = permutations(s, 3) for perm in perms: - n_point_function[perm[0],perm[1],perm[2]] += 1 - n_point_function /= R_Nx*R_Ny + n_point_function[perm[0], perm[1], perm[2]] += 1 + n_point_function /= R_Nx * R_Ny # Main loop BP_sets = [] iteration = 0 - unused_BPs = np.ones(N,dtype=bool) + unused_BPs = np.ones(N, dtype=bool) seed_new_class = True while seed_new_class: - ind1,ind2,ind3 = np.unravel_index(np.argmax(n_point_function),(N,N,N)) - BP_set = set([ind1,ind2,ind3]) + ind1, ind2, ind3 = np.unravel_index(np.argmax(n_point_function), (N, N, N)) + BP_set = set([ind1, ind2, ind3]) grow_class = True while grow_class: frequencies = np.zeros(N) @@ -877,9 +933,9 @@ def get_initial_classes(braggpeak_labels, N, thresh=0.3, BP_fraction_thresh=0.1, # Modify 3-point function, add new BP set to list, and decide to continue or stop for i in BP_set: - n_point_function[i,:,:] = 0 - n_point_function[:,i,:] = 0 - n_point_function[:,:,i] = 0 + n_point_function[i, :, :] = 0 + n_point_function[:, i, :] = 0 + n_point_function[:, :, i] = 0 unused_BPs[i] = 0 for s in BP_sets: if len(s) == len(s.union(BP_set)): @@ -888,16 +944,7 @@ def get_initial_classes(braggpeak_labels, 
N, thresh=0.3, BP_fraction_thresh=0.1, BP_sets.append(BP_set) iteration += 1 N_unused_BPs = np.sum(unused_BPs) - if iteration > max_iterations or N_unused_BPs < N*BP_fraction_thresh: + if iteration > max_iterations or N_unused_BPs < N * BP_fraction_thresh: seed_new_class = False return BP_sets - - - - - - - - - diff --git a/py4DSTEM/process/classification/classutils.py b/py4DSTEM/process/classification/classutils.py index bd4d0a053..51762a090 100644 --- a/py4DSTEM/process/classification/classutils.py +++ b/py4DSTEM/process/classification/classutils.py @@ -6,8 +6,16 @@ from py4DSTEM.datacube import DataCube from py4DSTEM.process.utils import get_shifted_ar -def get_class_DP(datacube, class_image, thresh=0.01, xshifts=None, yshifts=None, - darkref=None, intshifts=True): + +def get_class_DP( + datacube, + class_image, + thresh=0.01, + xshifts=None, + yshifts=None, + darkref=None, + intshifts=True, +): """ Get the average diffraction pattern for the class described in real space by class_image. @@ -28,40 +36,57 @@ def get_class_DP(datacube, class_image, thresh=0.01, xshifts=None, yshifts=None, Returns: (2D array): the average diffraction pattern for the class """ - assert isinstance(datacube,DataCube) - assert class_image.shape == (datacube.R_Nx,datacube.R_Ny) + assert isinstance(datacube, DataCube) + assert class_image.shape == (datacube.R_Nx, datacube.R_Ny) if xshifts is not None: - assert xshifts.shape == (datacube.R_Nx,datacube.R_Ny) + assert xshifts.shape == (datacube.R_Nx, datacube.R_Ny) if yshifts is not None: - assert yshifts.shape == (datacube.R_Nx,datacube.R_Ny) + assert yshifts.shape == (datacube.R_Nx, datacube.R_Ny) if darkref is not None: - assert darkref.shape == (datacube.Q_Nx,datacube.Q_Ny) + assert darkref.shape == (datacube.Q_Nx, datacube.Q_Ny) assert isinstance(intshifts, bool) - class_DP = np.zeros((datacube.Q_Nx,datacube.Q_Ny)) - for (Rx,Ry) in tqdmnd(datacube.R_Nx,datacube.R_Ny,desc='Computing class diffraction pattern',unit='DP',unit_scale=True): - if class_image[Rx,Ry] >= thresh: - curr_DP = class_image[Rx,Ry]*datacube.data[Rx,Ry,:,:] + class_DP = np.zeros((datacube.Q_Nx, datacube.Q_Ny)) + for Rx, Ry in tqdmnd( + datacube.R_Nx, + datacube.R_Ny, + desc="Computing class diffraction pattern", + unit="DP", + unit_scale=True, + ): + if class_image[Rx, Ry] >= thresh: + curr_DP = class_image[Rx, Ry] * datacube.data[Rx, Ry, :, :] if xshifts is not None and yshifts is not None: - xshift = xshifts[Rx,Ry] - yshift = yshifts[Rx,Ry] + xshift = xshifts[Rx, Ry] + yshift = yshifts[Rx, Ry] if intshifts is True: xshift = int(np.round(xshift)) yshift = int(np.round(yshift)) - curr_DP = np.roll(curr_DP,-xshift,axis=0) - curr_DP = np.roll(curr_DP,-yshift,axis=1) + curr_DP = np.roll(curr_DP, -xshift, axis=0) + curr_DP = np.roll(curr_DP, -yshift, axis=1) else: - curr_DP = get_shifted_ar(curr_DP,-xshift,-yshift) + curr_DP = get_shifted_ar(curr_DP, -xshift, -yshift) class_DP += curr_DP if darkref is not None: - class_DP -= darkref*class_image[Rx,Ry] - class_DP /= np.sum(class_image[class_image>=thresh]) - class_DP = np.where(class_DP>0,class_DP,0) + class_DP -= darkref * class_image[Rx, Ry] + class_DP /= np.sum(class_image[class_image >= thresh]) + class_DP = np.where(class_DP > 0, class_DP, 0) return class_DP -def get_class_DP_without_Bragg_scattering(datacube,class_image,braggpeaks,radius, - x0,y0,thresh=0.01,xshifts=None,yshifts=None, - darkref=None,intshifts=True): + +def get_class_DP_without_Bragg_scattering( + datacube, + class_image, + braggpeaks, + radius, + x0, + y0, + thresh=0.01, + 
xshifts=None, + yshifts=None, + darkref=None, + intshifts=True, +): """ Get the average diffraction pattern, removing any Bragg scattering, for the class described in real space by class_image. @@ -96,53 +121,66 @@ def get_class_DP_without_Bragg_scattering(datacube,class_image,braggpeaks,radius Returns: class_DP (2D array) the average diffraction pattern for the class """ - assert isinstance(datacube,DataCube) - assert class_image.shape == (datacube.R_Nx,datacube.R_Ny) + assert isinstance(datacube, DataCube) + assert class_image.shape == (datacube.R_Nx, datacube.R_Ny) assert isinstance(braggpeaks, PointListArray) if xshifts is not None: - assert xshifts.shape == (datacube.R_Nx,datacube.R_Ny) + assert xshifts.shape == (datacube.R_Nx, datacube.R_Ny) if yshifts is not None: - assert yshifts.shape == (datacube.R_Nx,datacube.R_Ny) + assert yshifts.shape == (datacube.R_Nx, datacube.R_Ny) if darkref is not None: - assert darkref.shape == (datacube.Q_Nx,datacube.Q_Ny) - assert isinstance(intshifts,bool) + assert darkref.shape == (datacube.Q_Nx, datacube.Q_Ny) + assert isinstance(intshifts, bool) - class_DP = np.zeros((datacube.Q_Nx,datacube.Q_Ny)) - mask_weights = np.zeros((datacube.Q_Nx,datacube.Q_Ny)) - yy,xx = np.meshgrid(np.arange(datacube.Q_Ny),np.arange(datacube.Q_Nx)) - for (Rx,Ry) in tqdmnd(datacube.R_Nx,datacube.R_Ny,desc='Computing class diffraction pattern',unit='DP',unit_scale=True): - weight = class_image[Rx,Ry] + class_DP = np.zeros((datacube.Q_Nx, datacube.Q_Ny)) + mask_weights = np.zeros((datacube.Q_Nx, datacube.Q_Ny)) + yy, xx = np.meshgrid(np.arange(datacube.Q_Ny), np.arange(datacube.Q_Nx)) + for Rx, Ry in tqdmnd( + datacube.R_Nx, + datacube.R_Ny, + desc="Computing class diffraction pattern", + unit="DP", + unit_scale=True, + ): + weight = class_image[Rx, Ry] if weight >= thresh: - braggpeaks_curr = braggpeaks.get_pointlist(Rx,Ry) - mask = np.ones((datacube.Q_Nx,datacube.Q_Ny)) + braggpeaks_curr = braggpeaks.get_pointlist(Rx, Ry) + mask = np.ones((datacube.Q_Nx, datacube.Q_Ny)) if braggpeaks_curr.length > 1: - center_index = np.argmin(np.hypot(braggpeaks_curr.data['qx']-x0, - braggpeaks_curr.data['qy']-y0)) + center_index = np.argmin( + np.hypot( + braggpeaks_curr.data["qx"] - x0, braggpeaks_curr.data["qy"] - y0 + ) + ) for i in range(braggpeaks_curr.length): if i != center_index: - mask_ = ((xx-braggpeaks_curr.data['qx'][i])**2 + \ - (yy-braggpeaks_curr.data['qy'][i])**2) >= radius**2 - mask = np.logical_and(mask,mask_) - curr_DP = datacube.data[Rx,Ry,:,:]*mask*weight + mask_ = ( + (xx - braggpeaks_curr.data["qx"][i]) ** 2 + + (yy - braggpeaks_curr.data["qy"][i]) ** 2 + ) >= radius**2 + mask = np.logical_and(mask, mask_) + curr_DP = datacube.data[Rx, Ry, :, :] * mask * weight if xshifts is not None and yshifts is not None: - xshift = xshifts[Rx,Ry] - yshift = yshifts[Rx,Ry] + xshift = xshifts[Rx, Ry] + yshift = yshifts[Rx, Ry] if intshifts: xshift = int(np.round(xshift)) yshift = int(np.round(yshift)) - curr_DP = np.roll(curr_DP,-xshift,axis=0) - curr_DP = np.roll(curr_DP,-yshift,axis=1) - mask = np.roll(mask,-xshift,axis=0) - mask = np.roll(mask,-yshift,axis=1) + curr_DP = np.roll(curr_DP, -xshift, axis=0) + curr_DP = np.roll(curr_DP, -yshift, axis=1) + mask = np.roll(mask, -xshift, axis=0) + mask = np.roll(mask, -yshift, axis=1) else: - curr_DP = get_shifted_ar(curr_DP,-xshift,-yshift) - mask = get_shifted_ar(mask,-xshift,-yshift) + curr_DP = get_shifted_ar(curr_DP, -xshift, -yshift) + mask = get_shifted_ar(mask, -xshift, -yshift) if darkref is not None: - curr_DP -= 
darkref*weight + curr_DP -= darkref * weight class_DP += curr_DP - mask_weights += mask*weight - class_DP = np.divide(class_DP,mask_weights,where=mask_weights!=0, - out=np.zeros((datacube.Q_Nx,datacube.Q_Ny))) + mask_weights += mask * weight + class_DP = np.divide( + class_DP, + mask_weights, + where=mask_weights != 0, + out=np.zeros((datacube.Q_Nx, datacube.Q_Ny)), + ) return class_DP - - diff --git a/py4DSTEM/process/classification/featurization.py b/py4DSTEM/process/classification/featurization.py index adb528aee..38b4e1412 100644 --- a/py4DSTEM/process/classification/featurization.py +++ b/py4DSTEM/process/classification/featurization.py @@ -86,29 +86,37 @@ def __init__(self, features, R_Nx, R_Ny, name): if isinstance(features, np.ndarray): if len(features.shape) == 3: - self.features = features.reshape(R_Nx*R_Ny, features.shape[-1]) + self.features = features.reshape(R_Nx * R_Ny, features.shape[-1]) elif len(features.shape) == 2: self.features = features else: raise ValueError( - 'feature array must be of dimensions (R_Nx*R_Ny, num_features) or (R_Nx, R_Ny, num_features)' - ) + "feature array must be of dimensions (R_Nx*R_Ny, num_features) or (R_Nx, R_Ny, num_features)" + ) elif isinstance(features, list): if all(isinstance(f, np.ndarray) for f in features): for i in range(len(features)): if features[i].shape == 3: - features[i] = features[i].reshape(R_Nx*R_Ny, features.shape[-1]) + features[i] = features[i].reshape( + R_Nx * R_Ny, features.shape[-1] + ) if len(features[i].shape) != 2: raise ValueError( - 'feature array(s) in list must be of dimensions (R_Nx*R_Ny, num_features) or (R_Nx, R_Ny, num_features)' - ) + "feature array(s) in list must be of dimensions (R_Nx*R_Ny, num_features) or (R_Nx, R_Ny, num_features)" + ) self.features = np.concatenate(features, axis=1) elif all(isinstance(f, Featurization) for f in features): - raise TypeError('List of Featurization instances must be initialized using the concatenate_features method.') + raise TypeError( + "List of Featurization instances must be initialized using the concatenate_features method." + ) else: - raise TypeError('Entries in list must be np.ndarrays for initialization of the Featurization instance.') + raise TypeError( + "Entries in list must be np.ndarrays for initialization of the Featurization instance." + ) else: - raise TypeError('Features must be either a single np.ndarray of shape 2 or 3 or a list of np.ndarrays or featurization instances.') + raise TypeError( + "Features must be either a single np.ndarray of shape 2 or 3 or a list of np.ndarrays or featurization instances." + ) return def from_braggvectors( @@ -145,57 +153,82 @@ def from_braggvectors( try: pointlistarray = braggvectors._v_cal.copy() except AttributeError: - er = 'No calibrated bragg vectors found. Try running .calibrate()!' + er = "No calibrated bragg vectors found. Try running .calibrate()!" raise Exception(er) try: q_pixel_size = braggvectors.calibration.get_Q_pixel_size() except AttributeError: - er = 'No q_pixel_size found. Please set value and recalibrate before continuing.' + er = "No q_pixel_size found. Please set value and recalibrate before continuing." 
raise Exception(er) peak_data = np.zeros((pointlistarray.shape[0], pointlistarray.shape[1], n_bins)) # Create Bragg Disk Features - for (Rx, Ry) in tqdmnd(pointlistarray.shape[0],pointlistarray.shape[1]): - pointlist = pointlistarray.get_pointlist(Rx,Ry) + for Rx, Ry in tqdmnd(pointlistarray.shape[0], pointlistarray.shape[1]): + pointlist = pointlistarray.get_pointlist(Rx, Ry) if pointlist.data.shape[0] == 0: continue if mask is not None: deletemask = np.zeros(pointlist.length, dtype=bool) for i in range(pointlist.length): - deletemask = np.where((mask[ - np.rint((pointlist.data['qx']/q_pixel_size) + Q_Nx/2).astype(int), - np.rint((pointlist.data['qy']/q_pixel_size) + Q_Ny/2).astype(int) - ] == False), True, False) + deletemask = np.where( + ( + mask[ + np.rint( + (pointlist.data["qx"] / q_pixel_size) + Q_Nx / 2 + ).astype(int), + np.rint( + (pointlist.data["qy"] / q_pixel_size) + Q_Ny / 2 + ).astype(int), + ] + == False + ), + True, + False, + ) pointlist.remove(deletemask) for i in range(pointlist.data.shape[0]): - floor_x = np.rint((pointlist.data[i][0]/q_pixel_size +Q_Nx/2)/bins_x) - floor_y = np.rint((pointlist.data[i][1]/q_pixel_size + Q_Ny/2)/bins_y) + floor_x = np.rint( + (pointlist.data[i][0] / q_pixel_size + Q_Nx / 2) / bins_x + ) + floor_y = np.rint( + (pointlist.data[i][1] / q_pixel_size + Q_Ny / 2) / bins_y + ) binval_ff = int((floor_x * ny_bins) + floor_y) binval_cf = int(((floor_x + 1) * ny_bins) + floor_y) - #Distribute Peaks + # Distribute Peaks if intensity_scale == 0: try: - peak_data[Rx,Ry,binval_ff] += 1 - peak_data[Rx,Ry,binval_ff + 1] += 1 - peak_data[Rx,Ry,binval_cf] += 1 - peak_data[Rx,Ry,binval_cf + 1] += 1 + peak_data[Rx, Ry, binval_ff] += 1 + peak_data[Rx, Ry, binval_ff + 1] += 1 + peak_data[Rx, Ry, binval_cf] += 1 + peak_data[Rx, Ry, binval_cf + 1] += 1 except IndexError: continue else: try: - peak_data[Rx,Ry,binval_ff] += pointlist.data[i][2]*intensity_scale - peak_data[Rx,Ry,binval_ff + 1] += pointlist.data[i][2]*intensity_scale - peak_data[Rx,Ry,binval_cf] += pointlist.data[i][2]*intensity_scale - peak_data[Rx,Ry,binval_cf + 1] += pointlist.data[i][2]*intensity_scale + peak_data[Rx, Ry, binval_ff] += ( + pointlist.data[i][2] * intensity_scale + ) + peak_data[Rx, Ry, binval_ff + 1] += ( + pointlist.data[i][2] * intensity_scale + ) + peak_data[Rx, Ry, binval_cf] += ( + pointlist.data[i][2] * intensity_scale + ) + peak_data[Rx, Ry, binval_cf + 1] += ( + pointlist.data[i][2] * intensity_scale + ) except IndexError: continue - peak_data.reshape(pointlistarray.shape[0]*pointlistarray.shape[1], n_bins) - new_instance = Featurization(peak_data,pointlistarray.shape[0],pointlistarray.shape[1], name) + peak_data.reshape(pointlistarray.shape[0] * pointlistarray.shape[1], n_bins) + new_instance = Featurization( + peak_data, pointlistarray.shape[0], pointlistarray.shape[1], name + ) return new_instance def concatenate_features(features, name): @@ -214,13 +247,17 @@ def concatenate_features(features, name): R_Nxs = [features[i].R_Nx for i in range(len(features))] R_Nys = [features[i].R_Ny for i in range(len(features))] if len(np.unique(R_Nxs)) != 1 or len(np.unique(R_Nys)) != 1: - raise ValueError('Can only concatenate Featurization instances with same R_Nx and R_Ny') - new_instance = Featurization( - np.concatenate([features[i].features for i in range(len(features))], axis = 1), - R_Nx = R_Nxs[0], - R_Ny = R_Nys[0], - name = name + raise ValueError( + "Can only concatenate Featurization instances with same R_Nx and R_Ny" ) + new_instance = Featurization( + np.concatenate( 
+                [features[i].features for i in range(len(features))], axis=1
+            ),
+            R_Nx=R_Nxs[0],
+            R_Ny=R_Nys[0],
+            name=name,
+        )
         return new_instance
 
     def add_features(self, feature):
@@ -231,7 +268,7 @@ def add_features(self, feature):
             feature (ndarray): The feature array to concatenate to the existing features
         """
-        self.features = np.concatenate(self.features, feature, axis = 1)
+        self.features = np.concatenate([self.features, feature], axis=1)
         return
 
     def delete_features(self, index):
@@ -241,7 +278,7 @@ def delete_features(self, index):
         Args:
             index (int, list): A key which will be removed
         """
-        self.features = np.delete(self.features, index, axis = 1)
+        self.features = np.delete(self.features, index, axis=1)
         return
 
     def mean_feature(self, index):
@@ -251,7 +288,7 @@ def mean_feature(self, index):
         Args:
             index (list of int): Indices of features to take the mean of. New feature array is placed in self.features.
         """
-        mean_features = np.mean(self.features[:,index], axis = 1)
+        mean_features = np.mean(self.features[:, index], axis=1)
         mean_features = mean_features.reshape(mean_features.shape[0], 1)
         cleaned_features = np.delete(self.features, index, axis=1)
         self.features = np.concatenate([cleaned_features, mean_features], axis=1)
@@ -264,7 +301,7 @@ def median_feature(self, index):
         Args:
             index (list of int): Indices of features to take the median of.
         """
-        median_features = np.median(self.features[:,index], axis = 1)
+        median_features = np.median(self.features[:, index], axis=1)
         median_features = median_features.reshape(median_features.shape[0], 1)
         cleaned_features = np.delete(self.features, index, axis=1)
         self.features = np.concatenate([cleaned_features, median_features], axis=1)
@@ -277,13 +314,13 @@ def max_feature(self, index):
         Args:
             index (list of int): Indices of features to take the max of.
         """
-        max_features = np.max(self.features[:,index], axis = 1)
+        max_features = np.max(self.features[:, index], axis=1)
         max_features = max_features.reshape(max_features.shape[0], 1)
         cleaned_features = np.delete(self.features, index, axis=1)
         self.features = np.concatenate([cleaned_features, max_features], axis=1)
         return
 
-    def MinMaxScaler(self, return_scaled = True):
+    def MinMaxScaler(self, return_scaled=True):
         """
         Uses sklearn MinMaxScaler to scale the input features.
         Replaces the features with the scaled array.
@@ -298,7 +335,7 @@ def MinMaxScaler(self, return_scaled = True):
         else:
             return
 
-    def RobustScaler(self, return_scaled = True):
+    def RobustScaler(self, return_scaled=True):
         """
         Uses sklearn RobustScaler to scale the input features.
         Replaces the features with the scaled array.
@@ -313,7 +350,7 @@ def RobustScaler(self, return_scaled = True):
         else:
             return
 
-    def shift_positive(self, return_scaled = True):
+    def shift_positive(self, return_scaled=True):
         """
         Replaces a feature with the positive shifted array.
@@ -326,27 +363,27 @@ def shift_positive(self, return_scaled = True):
         else:
             return
 
-    def PCA(self, components, return_results = False):
+    def PCA(self, components, return_results=False):
         """
         Performs PCA on features
 
         Args:
             components (list): A list of ints for each key. This will be the output number of features
         """
-        pca = PCA(n_components = components)
+        pca = PCA(n_components=components)
         self.pca = pca.fit_transform(self.features)
         if return_results == True:
             return self.pca
         return
 
-    def ICA(self, components, return_results = True):
+    def ICA(self, components, return_results=True):
         """
         Performs ICA on features
 
         Args:
             components (list): A list of ints for each key. This will be the output number of features
         """
-        ica = FastICA(n_components = components)
+        ica = FastICA(n_components=components)
         self.ica = ica.fit_transform(self.features)
         if return_results == True:
             return self.ica
@@ -356,11 +393,11 @@ def NMF(
         self,
         max_components,
         num_models,
-        merge_thresh = 1,
-        max_iterations = 1,
-        random_seed = None,
-        save_all_models = True,
-        return_results = False
+        merge_thresh=1,
+        max_iterations=1,
+        random_seed=None,
+        save_all_models=True,
+        return_results=False,
     ):
         """
         Performs either traditional Nonnegative Matrix Factorization (NMF) or iterative NMF on input features.
@@ -391,17 +428,17 @@ def NMF(
         self.W = _nmf_single(
             self.features,
             max_components=max_components,
-            merge_thresh = merge_thresh,
-            num_models = num_models,
-            max_iterations = max_iterations,
-            random_seed = random_seed,
-            save_all_models = save_all_models
+            merge_thresh=merge_thresh,
+            num_models=num_models,
+            max_iterations=max_iterations,
+            random_seed=random_seed,
+            save_all_models=save_all_models,
         )
         if return_results == True:
             return self.W
         return
 
-    def GMM(self, cv, components, num_models, random_seed = None, return_results = False):
+    def GMM(self, cv, components, num_models, random_seed=None, return_results=False):
         """
         Performs Gaussian mixture model fitting on input features
 
@@ -414,9 +451,9 @@ def GMM(self, cv, components, num_models, random_seed = None, return_results = F
         self.gmm, self.gmm_labels, self.gmm_proba = _gmm_single(
             self.features,
             cv=cv,
-            components = components,
-            num_models = num_models,
-            random_seed = random_seed
+            components=components,
+            num_models=num_models,
+            random_seed=random_seed,
        )
         if return_results == True:
             return self.gmm
@@ -435,55 +472,107 @@ def get_class_DPs(self, datacube, method, thresh):
         datacube_shape = datacube.data.shape
         if len(datacube.data.shape) != 3:
             try:
-                datacube.data = datacube.data.reshape(self.R_Nx*self.R_Ny, datacube.data.shape[2], datacube.data.shape[3])
+                datacube.data = datacube.data.reshape(
+                    self.R_Nx * self.R_Ny,
+                    datacube.data.shape[2],
+                    datacube.data.shape[3],
+                )
             except:
-                raise ValueError('Datacube must have same R_Nx and R_Ny dimensions as Featurization instance.')
-        if method == 'nmf':
+                raise ValueError(
+                    "Datacube must have same R_Nx and R_Ny dimensions as Featurization instance."
+                )
+        if method == "nmf":
             if self.W == list:
-                return ValueError('Method not implmented for multiple NMF models, either return 1 model or perform spatial separation first.')
+                raise ValueError(
+                    "Method not implemented for multiple NMF models; either return 1 model or perform spatial separation first."
+                )
             for l in range(self.W.shape[1]):
-                class_pattern = np.zeros((datacube.data.shape[1], datacube.data.shape[2]))
-                x_ = np.where(self.W[:,l] > thresh)[0]
+                class_pattern = np.zeros(
+                    (datacube.data.shape[1], datacube.data.shape[2])
+                )
+                x_ = np.where(self.W[:, l] > thresh)[0]
                 for x in range(x_.shape[0]):
-                    class_pattern += datacube.data[x_[x],0] * self.W[x_[x],l]
-                class_patterns.append(class_pattern / np.sum(self.W[x_, l]))
-        elif method == 'gmm':
+                    class_pattern += datacube.data[x_[x], 0] * self.W[x_[x], l]
+                class_patterns.append(class_pattern / np.sum(self.W[x_, l]))
+        elif method == "gmm":
             if self.gmm_labels == list:
-                return ValueError('Method not implmented for multiple GMM models, either return 1 model or perform spatial separation first.')
+                raise ValueError(
+                    "Method not implemented for multiple GMM models; either return 1 model or perform spatial separation first."
+                )
             for l in range(np.max(self.gmm_labels)):
-                class_pattern = np.zeros((datacube.data.shape[1], datacube.data.shape[2]))
-                x_ = np.where(self.gmm_proba[:,l] > thresh)[0]
+                class_pattern = np.zeros(
+                    (datacube.data.shape[1], datacube.data.shape[2])
+                )
+                x_ = np.where(self.gmm_proba[:, l] > thresh)[0]
                 for x in range(x_.shape[0]):
-                    class_pattern += datacube.data[x_[x],0] * self.gmm_proba[x_[x],l]
-                class_patterns.append(class_pattern / np.sum(self.gmm_proba[x_,l]))
-        elif method == 'pca':
+                    class_pattern += datacube.data[x_[x], 0] * self.gmm_proba[x_[x], l]
+                class_patterns.append(class_pattern / np.sum(self.gmm_proba[x_, l]))
+        elif method == "pca":
             for l in range(self.pca.shape[1]):
-                class_pattern = np.zeros((datacube.data.shape[1],datacube.data.shape[2]))
-                x_ = np.where(self.pca[:,l] > thresh)[0]
+                class_pattern = np.zeros(
+                    (datacube.data.shape[1], datacube.data.shape[2])
+                )
+                x_ = np.where(self.pca[:, l] > thresh)[0]
                 for x in range(x_.shape[0]):
-                    class_pattern += datacube.data[x_[x]] * self.pca[x_[x],l]
-                class_patterns.append(class_pattern / np.sum(self.pca[x_,l]))
+                    class_pattern += datacube.data[x_[x]] * self.pca[x_[x], l]
+                class_patterns.append(class_pattern / np.sum(self.pca[x_, l]))
             class_patterns = [class_patterns]
-        elif method == 'spatially_separated_ims':
+        elif method == "spatially_separated_ims":
             for l in range(len(self.spatially_separated_ims)):
                 small_class_patterns = []
                 for j in range(len(self.spatially_separated_ims[l])):
-                    class_pattern = np.zeros((datacube.data.shape[1], datacube.data.shape[2]))
-                    x_ = np.where(self.spatially_separated_ims[l][j].reshape(self.R_Nx*self.R_Ny,1) > thresh)[0]
+                    class_pattern = np.zeros(
+                        (datacube.data.shape[1], datacube.data.shape[2])
+                    )
+                    x_ = np.where(
+                        self.spatially_separated_ims[l][j].reshape(
+                            self.R_Nx * self.R_Ny, 1
+                        )
+                        > thresh
+                    )[0]
                     for x in range(x_.shape[0]):
-                        class_pattern += datacube.data[x_[x]] * self.spatially_separated_ims[l][j].reshape(self.R_Nx*self.R_Ny,1)[x_[x]]
-                    small_class_patterns.append(class_pattern / np.sum(self.spatially_separated_ims[l][j].reshape(self.R_Nx*self.R_Ny,1)[x_]))
+                        class_pattern += (
+                            datacube.data[x_[x]]
+                            * self.spatially_separated_ims[l][j].reshape(
+                                self.R_Nx * self.R_Ny, 1
+                            )[x_[x]]
+                        )
+                    small_class_patterns.append(
+                        class_pattern
+                        / np.sum(
+                            self.spatially_separated_ims[l][j].reshape(
+                                self.R_Nx * self.R_Ny, 1
+                            )[x_]
+                        )
+                    )
                 class_patterns.append(small_class_patterns)
-        elif method == 'consensus_clusters':
+        elif method == "consensus_clusters":
             for j in range(len(self.consensus_clusters)):
-                class_pattern = np.zeros((datacube.data.shape[1], datacube.data.shape[2]))
-                x_ =
np.where(self.consensus_clusters[j].reshape(self.R_Nx*self.R_Ny,1) > thresh)[0] + class_pattern = np.zeros( + (datacube.data.shape[1], datacube.data.shape[2]) + ) + x_ = np.where( + self.consensus_clusters[j].reshape(self.R_Nx * self.R_Ny, 1) + > thresh + )[0] for x in range(x_.shape[0]): - class_pattern += datacube.data[x_[x]] * self.consensus_clusters[j].reshape(self.R_Nx*self.R_Ny,1)[x_[x]] - class_patterns.append(class_pattern / np.sum(self.consensus_clusters[j].reshape(self.R_Nx*self.R_Ny,1)[x_])) + class_pattern += ( + datacube.data[x_[x]] + * self.consensus_clusters[j].reshape(self.R_Nx * self.R_Ny, 1)[ + x_[x] + ] + ) + class_patterns.append( + class_pattern + / np.sum( + self.consensus_clusters[j].reshape(self.R_Nx * self.R_Ny, 1)[x_] + ) + ) class_patterns = [class_patterns] else: - raise ValueError('method not accepted. Try NMF, GMM, PCA, ICA, spatially_separated_ims, or consensus_clustering.') + raise ValueError( + "method not accepted. Try NMF, GMM, PCA, ICA, spatially_separated_ims, or consensus_clustering." + ) datacube.data = datacube.data.reshape(datacube_shape) self.class_DPs = class_patterns return @@ -496,50 +585,57 @@ def get_class_ims(self, classification_method): classification_method (str): Location to retrieve class images from - NMF, GMM, PCA, or ICA """ class_maps = [] - if classification_method == 'NMF': + if classification_method == "NMF": if type(self.W) == list: for l in range(len(self.W)): small_class_maps = [] for k in range(self.W[l].shape[1]): - small_class_maps.append(self.W[l][:,k].reshape(self.R_Nx, self.R_Ny)) + small_class_maps.append( + self.W[l][:, k].reshape(self.R_Nx, self.R_Ny) + ) class_maps.append(small_class_maps) else: for l in range(self.W.shape[1]): - class_maps.append(self.W[:,l].reshape(self.R_Nx,self.R_Ny)) + class_maps.append(self.W[:, l].reshape(self.R_Nx, self.R_Ny)) class_maps = [class_maps] - elif classification_method == 'GMM': + elif classification_method == "GMM": if type(self.gmm_labels) == list: for l in range(len(self.gmm_labels)): small_class_maps = [] for k in range(np.max(self.gmm_labels[l])): - R_vals = np.where(self.gmm_labels[l].reshape(self.R_Nx, self.R_Ny) == k, 1, 0) - small_class_maps.append(R_vals * self.gmm_proba[l][:,k].reshape(self.R_Nx, self.R_Ny)) + R_vals = np.where( + self.gmm_labels[l].reshape(self.R_Nx, self.R_Ny) == k, 1, 0 + ) + small_class_maps.append( + R_vals + * self.gmm_proba[l][:, k].reshape(self.R_Nx, self.R_Ny) + ) class_maps.append(small_class_maps) else: for l in range((np.max(self.gmm_labels))): - R_vals = np.where(self.gmm_labels[l].reshape(self.R_Nx,self.R_Ny) == l, 1, 0) - class_maps.append(R_vals * self.gmm_proba[:,l].reshape(self.R_Nx, self.R_Ny)) + R_vals = np.where( + self.gmm_labels[l].reshape(self.R_Nx, self.R_Ny) == l, 1, 0 + ) + class_maps.append( + R_vals * self.gmm_proba[:, l].reshape(self.R_Nx, self.R_Ny) + ) class_maps = [class_maps] - elif classification_method == 'PCA': + elif classification_method == "PCA": for i in range(self.pca.shape[1]): - class_maps.append(self.pca[:,i].reshape(self.R_Nx, self.R_Ny)) + class_maps.append(self.pca[:, i].reshape(self.R_Nx, self.R_Ny)) class_maps = [class_maps] - elif classification_method == 'ICA': + elif classification_method == "ICA": for i in range(self.ica.shape[1]): - class_maps.append(self.ica[:,i].reshape(self.R_Nx, self.R_Ny)) + class_maps.append(self.ica[:, i].reshape(self.R_Nx, self.R_Ny)) class_maps = [class_maps] else: - raise ValueError('classification_method not accepted. 
Try NMF, GMM, PCA, or ICA.') + raise ValueError( + "classification_method not accepted. Try NMF, GMM, PCA, or ICA." + ) self.class_ims = class_maps return - def spatial_separation( - self, - size, - threshold = 0, - method = None, - clean = True - ): + def spatial_separation(self, size, threshold=0, method=None, clean=True): """ Identify spatially distinct regions from class images and separate based on a threshold and size. @@ -551,58 +647,85 @@ def spatial_separation( clean (bool): Whether or not to 'clean' cluster sets based on overlap, i.e. remove clusters that do not have any unique components """ - #Prepare for separation + # Prepare for separation labelled = [] stacked = [] - #Loop through all models + # Loop through all models for j in range(len(self.class_ims)): separated_temp = [] - #Loop through class images in each model to filtered and separate class images + # Loop through class images in each model to filtered and separate class images for l in range(len(self.class_ims[j])): - image = np.where(self.class_ims[j][l] > threshold, - self.class_ims[j][l], 0) - if method == 'yen': + image = np.where( + self.class_ims[j][l] > threshold, self.class_ims[j][l], 0 + ) + if method == "yen": t = threshold_yen(image) bw = closing(image > t, square(2)) labelled_image = label(bw) if np.sum(labelled_image) > size: - large_labelled_image = remove_small_objects(labelled_image, size) + large_labelled_image = remove_small_objects( + labelled_image, size + ) else: large_labelled_image = labelled_image - elif method == 'otsu': + elif method == "otsu": t = threshold_otsu(image) bw = closing(image > t, square(2)) labelled_image = label(bw) if np.sum(labelled_image) > size: - large_labelled_image = remove_small_objects(labelled_image, size) + large_labelled_image = remove_small_objects( + labelled_image, size + ) else: large_labelled_image = labelled_image elif method == None: labelled_image = label(image) if np.sum(labelled_image) > size: - large_labelled_image = remove_small_objects(labelled_image, size) + large_labelled_image = remove_small_objects( + labelled_image, size + ) else: large_labelled_image = labelled_image else: - raise ValueError(method + ' method is not supported. Please use yen, otsu, or None instead.') + raise ValueError( + method + + " method is not supported. Please use yen, otsu, or None instead." 
+ ) unique_labels = np.unique(large_labelled_image) separated_temp.extend( - [(np.where(large_labelled_image == unique_labels[k+1],image, 0)) - for k in range(len(unique_labels)-1) - ]) + [ + ( + np.where( + large_labelled_image == unique_labels[k + 1], image, 0 + ) + ) + for k in range(len(unique_labels) - 1) + ] + ) if len(separated_temp) > 0: if clean == True: data_ndarray = np.dstack(separated_temp) - data_hard = (data_ndarray.max(axis=2,keepdims=1) == data_ndarray) * data_ndarray - data_list = [data_ndarray[:,:,x] for x in range(data_ndarray.shape[2])] - data_list_hard = [np.where(data_hard[:,:,n] > threshold, 1, 0) - for n in range(data_hard.shape[2])] - labelled.append([data_list[n] for n in range(len(data_list_hard)) - if (np.sum(data_list_hard[n]) > size)]) + data_hard = ( + data_ndarray.max(axis=2, keepdims=1) == data_ndarray + ) * data_ndarray + data_list = [ + data_ndarray[:, :, x] for x in range(data_ndarray.shape[2]) + ] + data_list_hard = [ + np.where(data_hard[:, :, n] > threshold, 1, 0) + for n in range(data_hard.shape[2]) + ] + labelled.append( + [ + data_list[n] + for n in range(len(data_list_hard)) + if (np.sum(data_list_hard[n]) > size) + ] + ) else: labelled.append(separated_temp) else: @@ -611,19 +734,20 @@ def spatial_separation( if len(labelled) > 0: self.spatially_separated_ims = labelled else: - raise ValueError('No distinct regions found in any models. Try modifying threshold, size, or method.') + raise ValueError( + "No distinct regions found in any models. Try modifying threshold, size, or method." + ) return - - def consensus( - self, - threshold = 0, - location = 'spatially_separated_ims', - split = 0, - method = 'mean', - drop_bins= 0, - ): + def consensus( + self, + threshold=0, + location="spatially_separated_ims", + split=0, + method="mean", + drop_bins=0, + ): """ Consensus Clustering takes the outcome of a prepared set of 2D images from each cluster and averages the outcomes. @@ -655,59 +779,71 @@ def consensus( class_dict = {} consensus_clusters = [] - if location != 'spatially_separated_ims': - raise ValueError('Consensus clustering only supported for location = spatially_separated_ims.') + if location != "spatially_separated_ims": + raise ValueError( + "Consensus clustering only supported for location = spatially_separated_ims." 
+ ) - #Find model with largest number of clusters for label correspondence - ncluster = [len(self.spatially_separated_ims[j]) - for j in range(len(self.spatially_separated_ims))] + # Find model with largest number of clusters for label correspondence + ncluster = [ + len(self.spatially_separated_ims[j]) + for j in range(len(self.spatially_separated_ims)) + ] max_cluster_ind = np.where(ncluster == np.max(ncluster))[0][0] # Label Correspondence for k in range(len(self.spatially_separated_ims[max_cluster_ind])): - class_dict['c'+str(k)] = [np.where( - self.spatially_separated_ims[max_cluster_ind][k] > threshold, - self.spatially_separated_ims[max_cluster_ind][k], 0) - ] + class_dict["c" + str(k)] = [ + np.where( + self.spatially_separated_ims[max_cluster_ind][k] > threshold, + self.spatially_separated_ims[max_cluster_ind][k], + 0, + ) + ] for j in range(len(self.spatially_separated_ims)): if j == max_cluster_ind: continue for m in range(len(self.spatially_separated_ims[j])): class_im = np.where( self.spatially_separated_ims[j][m] > threshold, - self.spatially_separated_ims[j][m], 0 + self.spatially_separated_ims[j][m], + 0, ) best_sum = -np.inf for l in range(len(class_dict.keys())): - current_sum = np.sum(np.where( - class_dict['c'+str(l)][0] > threshold, class_im, 0) + current_sum = np.sum( + np.where(class_dict["c" + str(l)][0] > threshold, class_im, 0) ) if current_sum >= best_sum: best_sum = current_sum cvalue = l if best_sum > split: - class_dict['c' + str(cvalue)].append(class_im) + class_dict["c" + str(cvalue)].append(class_im) else: - class_dict['c' + str(len(list(class_dict.keys())))] = [class_im] + class_dict["c" + str(len(list(class_dict.keys())))] = [class_im] key_list = list(class_dict.keys()) - #Consensus clustering - if method == 'mean': + # Consensus clustering + if method == "mean": for n in range(len(key_list)): if drop_bins > 0: if len(class_dict[key_list[n]]) <= drop_bins: continue - consensus_clusters.append(np.mean(np.dstack( - class_dict[key_list[n]]), axis = 2)) - elif method == 'median': + consensus_clusters.append( + np.mean(np.dstack(class_dict[key_list[n]]), axis=2) + ) + elif method == "median": for n in range(len(key_list)): if drop_bins > 0: if len(class_dict[key_list[n]]) <= drop_bins: continue - consensus_clusters.append(np.median(np.dstack( - class_dict[key_list[n]]), axis = 2)) + consensus_clusters.append( + np.median(np.dstack(class_dict[key_list[n]]), axis=2) + ) else: - raise ValueError('Only mean and median consensus methods currently supported.') + raise ValueError( + "Only mean and median consensus methods currently supported." 
+ ) self.consensus_dict = class_dict self.consensus_clusters = consensus_clusters @@ -716,14 +852,14 @@ def consensus( @ignore_warnings(category=ConvergenceWarning) def _nmf_single( - x, - max_components, - merge_thresh, - num_models, - max_iterations, - random_seed=None, - save_all_models = True - ): + x, + max_components, + merge_thresh, + num_models, + max_iterations, + random_seed=None, + save_all_models=True, +): """ Performs NMF on single feature matrix, which is an nd.array @@ -737,16 +873,16 @@ def _nmf_single( save_all_models (bool): Whether or not to return all of the models - default is to save all outputs for consensus clustering """ - #Prepare error, random seed + # Prepare error, random seed err = np.inf if random_seed == None: - rng = np.random.RandomState(seed = 42) + rng = np.random.RandomState(seed=42) else: seed = random_seed if save_all_models == True: W = [] - #Big loop through all models + # Big loop through all models for i in range(num_models): if random_seed == None: seed = rng.randint(5000) @@ -754,18 +890,18 @@ def _nmf_single( recon_error, counter = 0, 0 Hs, Ws = [], [] - #Inner loop for iterative NMF + # Inner loop for iterative NMF for z in range(max_iterations): - nmf = NMF(n_components = n_comps, random_state = seed) + nmf = NMF(n_components=n_comps, random_state=seed) if counter == 0: nmf_temp = nmf.fit_transform(x) else: - with np.errstate(invalid='raise',divide='raise'): + with np.errstate(invalid="raise", divide="raise"): try: nmf_temp_2 = nmf.fit_transform(nmf_temp) except FloatingPointError: - print('Warning encountered in NMF: Returning last result') + print("Warning encountered in NMF: Returning last result") break Ws.append(nmf_temp) Hs.append(np.transpose(nmf.components_)) @@ -774,22 +910,24 @@ def _nmf_single( if counter >= max_iterations: break elif counter > 1: - with np.errstate(invalid='raise',divide='raise'): + with np.errstate(invalid="raise", divide="raise"): try: - tril = np.tril(np.corrcoef(nmf_temp_2, rowvar = False), k = -1) + tril = np.tril(np.corrcoef(nmf_temp_2, rowvar=False), k=-1) nmf_temp = nmf_temp_2 except FloatingPointError: - print('Warning encountered in correlation: Returning last result. Try larger merge_thresh.') + print( + "Warning encountered in correlation: Returning last result. Try larger merge_thresh." 
+ ) break else: - tril = np.tril(np.corrcoef(nmf_temp, rowvar = False), k = -1) + tril = np.tril(np.corrcoef(nmf_temp, rowvar=False), k=-1) - #Merge correlated features + # Merge correlated features if np.nanmax(tril) >= merge_thresh: inds = np.argwhere(tril >= merge_thresh) for n in range(inds.shape[0]): - nmf_temp[:, inds[n,0]] += nmf_temp[:,inds[n,1]] - ys_sorted = np.sort(np.unique(inds[n,1]))[::-1] + nmf_temp[:, inds[n, 0]] += nmf_temp[:, inds[n, 1]] + ys_sorted = np.sort(np.unique(inds[n, 1]))[::-1] for n in range(ys_sorted.shape[0]): nmf_temp = np.delete(nmf_temp, ys_sorted[n], axis=1) else: @@ -802,19 +940,13 @@ def _nmf_single( W.append(nmf_temp) elif (recon_error / counter) < err: - err = (recon_error / counter) + err = recon_error / counter W = nmf_temp return W + @ignore_warnings(category=ConvergenceWarning) -def _gmm_single( - x, - cv, - components, - num_models, - random_seed=None, - return_all=True - ): +def _gmm_single(x, cv, components, num_models, random_seed=None, return_all=True): """ Runs GMM several times and saves value with best BIC score @@ -838,7 +970,7 @@ def _gmm_single( lowest_bic = np.infty bic_temp = 0 if random_seed == None: - rng = np.random.RandomState(seed = 42) + rng = np.random.RandomState(seed=42) else: seed = random_seed for n in range(num_models): @@ -846,8 +978,11 @@ def _gmm_single( seed = rng.randint(5000) for j in range(len(components)): for cv_type in cv: - gmm = GaussianMixture(n_components=components[j], - covariance_type=cv_type, random_state = seed) + gmm = GaussianMixture( + n_components=components[j], + covariance_type=cv_type, + random_state=seed, + ) labels = gmm.fit_predict(x) bic_temp = gmm.bic(x) @@ -857,7 +992,7 @@ def _gmm_single( gmm_proba.append(gmm.predict_proba(x)) elif return_all == False: - if (bic_temp < lowest_bic): + if bic_temp < lowest_bic: lowest_bic = bic_temp best_gmm = gmm best_gmm_labels = labels diff --git a/py4DSTEM/process/diffraction/WK_scattering_factors.py b/py4DSTEM/process/diffraction/WK_scattering_factors.py index 3983c8012..70110a977 100644 --- a/py4DSTEM/process/diffraction/WK_scattering_factors.py +++ b/py4DSTEM/process/diffraction/WK_scattering_factors.py @@ -64,7 +64,7 @@ def compute_WK_factor( if thermal_sigma is not None: UL = thermal_sigma - DWF = np.exp(-0.5 * UL ** 2 * G ** 2) + DWF = np.exp(-0.5 * UL**2 * G**2) else: UL = 0.0 DWF = 1.0 @@ -82,7 +82,7 @@ def compute_WK_factor( # WEKO(A,B,S) WK = np.zeros_like(S) for i in range(4): - argu = B[i] * S ** 2 + argu = B[i] * S**2 sub = argu < 1.0 WK[sub] += A[i] * B[i] * (1.0 - 0.5 * argu[sub]) sub = np.logical_and(argu >= 1.0, argu <= 20.0) @@ -168,7 +168,7 @@ def compute_WK_factor( # calculate phonon contribution, following FPHON(G,UL,A,B) Fphon = 0.0 if include_phonon: - U2 = UL ** 2 + U2 = UL**2 A1 = A * (4.0 * np.pi) ** 2 B1 = B / (4.0 * np.pi) ** 2 @@ -197,7 +197,7 @@ def compute_WK_factor( if verbose: print(f"gamma:{gamma}") - Fscatt = np.complex128((Freal * gamma) + (1.0j * (Fimag * gamma ** 2 / k0))) + Fscatt = np.complex128((Freal * gamma) + (1.0j * (Fimag * gamma**2 / k0))) if verbose: print(f"Fscatt:{Fscatt}") @@ -213,7 +213,7 @@ def compute_WK_factor( def RI1(BI, BJ, G): # "ERSTES INTEGRAL FUER DIE ABSORPTIONSPOTENTIALE" - eps = np.max([BI, BJ]) * G ** 2 + eps = np.max([BI, BJ]) * G**2 ri1 = np.zeros_like(G) @@ -221,10 +221,10 @@ def RI1(BI, BJ, G): ri1[sub] = np.pi * (BI * np.log((BI + BJ) / BI) + BJ * np.log((BI + BJ) / BJ)) sub = np.logical_and(eps <= 0.1, G > 0.0) - temp = 0.5 * BI ** 2 * np.log(BI / (BI + BJ)) + 0.5 * BJ ** 2 * np.log( 
+ temp = 0.5 * BI**2 * np.log(BI / (BI + BJ)) + 0.5 * BJ**2 * np.log( BJ / (BI + BJ) ) - temp += 0.75 * (BI ** 2 + BJ ** 2) - 0.25 * (BI + BJ) ** 2 + temp += 0.75 * (BI**2 + BJ**2) - 0.25 * (BI + BJ) ** 2 temp -= 0.5 * (BI - BJ) ** 2 ri1[sub] += np.pi * G[sub] ** 2 * temp @@ -250,9 +250,9 @@ def RI1(BI, BJ, G): def RI2(BI, BJ, G, U): # "ZWEITES INTEGRAL FUER DIE ABSORPTIONSPOTENTIALE" - U2 = U ** 2 + U2 = U**2 U22 = 0.5 * U2 - G2 = G ** 2 + G2 = G**2 BIUH = BI + 0.5 * U2 BJUH = BJ + 0.5 * U2 BIU = BI + U2 diff --git a/py4DSTEM/process/diffraction/__init__.py b/py4DSTEM/process/diffraction/__init__.py index 3c8be298b..942547749 100644 --- a/py4DSTEM/process/diffraction/__init__.py +++ b/py4DSTEM/process/diffraction/__init__.py @@ -2,4 +2,3 @@ from py4DSTEM.process.diffraction.flowlines import * from py4DSTEM.process.diffraction.tdesign import * from py4DSTEM.process.diffraction.crystal_phase import * - diff --git a/py4DSTEM/process/diffraction/crystal.py b/py4DSTEM/process/diffraction/crystal.py index 4d4d4a248..30bb061f8 100644 --- a/py4DSTEM/process/diffraction/crystal.py +++ b/py4DSTEM/process/diffraction/crystal.py @@ -13,12 +13,14 @@ from py4DSTEM.process.diffraction.crystal_viz import plot_diffraction_pattern from py4DSTEM.process.diffraction.crystal_viz import plot_ring_pattern from py4DSTEM.process.diffraction.utils import Orientation, calc_1D_profile + try: from pymatgen.symmetry.analyzer import SpacegroupAnalyzer from pymatgen.core.structure import Structure except ImportError: pass + class Crystal: """ A class storing a single crystal structure, and associated diffraction data. @@ -34,6 +36,8 @@ class Crystal: orientation_plan, match_orientations, match_single_pattern, + cluster_grains, + cluster_orientation_map, calculate_strain, save_ang_file, symmetry_reduce_directions, @@ -49,11 +53,13 @@ class Crystal: plot_orientation_plan, plot_orientation_maps, plot_fiber_orientation_maps, + plot_clusters, + plot_cluster_size, ) from py4DSTEM.process.diffraction.crystal_calibrate import ( calibrate_pixel_size, - calibrate_unit_cell + calibrate_unit_cell, ) # Dynamical diffraction calculations are implemented in crystal_bloch.py @@ -72,7 +78,7 @@ def __init__( """ Args: positions (np.array): fractional coordinates of each atom in the cell - numbers (np.array): Z number for each atom in the cell + numbers (np.array): Z number for each atom in the cell, if one number passed it is used for all atom positions cell (np.array): specify the unit cell, using a variable number of parameters 1 number: the lattice parameter for a cubic cell 3 numbers: the three lattice parameters for an orthorhombic cell @@ -105,28 +111,44 @@ def __init__( self.cell = cell elif np.shape(cell)[0] == 3 and np.shape(cell)[1] == 3: self.lat_real = np.array(cell) - a = np.linalg.norm(self.lat_real[0,:]) - b = np.linalg.norm(self.lat_real[1,:]) - c = np.linalg.norm(self.lat_real[2,:]) - alpha = np.rad2deg(np.arccos(np.clip(np.sum( - self.lat_real[1,:]*self.lat_real[2,:])/b/c,-1,1))) - beta = np.rad2deg(np.arccos(np.clip(np.sum( - self.lat_real[0,:]*self.lat_real[2,:])/a/c,-1,1))) - gamma = np.rad2deg(np.arccos(np.clip(np.sum( - self.lat_real[0,:]*self.lat_real[1,:])/a/b,-1,1))) - self.cell = (a,b,c,alpha,beta,gamma) + a = np.linalg.norm(self.lat_real[0, :]) + b = np.linalg.norm(self.lat_real[1, :]) + c = np.linalg.norm(self.lat_real[2, :]) + alpha = np.rad2deg( + np.arccos( + np.clip( + np.sum(self.lat_real[1, :] * self.lat_real[2, :]) / b / c, -1, 1 + ) + ) + ) + beta = np.rad2deg( + np.arccos( + np.clip( + 
np.sum(self.lat_real[0, :] * self.lat_real[2, :]) / a / c, -1, 1 + ) + ) + ) + gamma = np.rad2deg( + np.arccos( + np.clip( + np.sum(self.lat_real[0, :] * self.lat_real[1, :]) / a / b, -1, 1 + ) + ) + ) + self.cell = (a, b, c, alpha, beta, gamma) else: raise Exception("Cell cannot contain " + np.size(cell) + " entries") - + # pymatgen flag - self.pymatgen_available = False - + if "pymatgen" in sys.modules: + self.pymatgen_available = True + else: + self.pymatgen_available = False # Calculate lattice parameters self.calculate_lattice() def calculate_lattice(self): - - if not hasattr(self, 'lat_real'): + if not hasattr(self, "lat_real"): # calculate unit cell lattice vectors a = self.cell[0] b = self.cell[1] @@ -135,14 +157,27 @@ def calculate_lattice(self): beta = np.deg2rad(self.cell[4]) gamma = np.deg2rad(self.cell[5]) f = np.cos(beta) * np.cos(gamma) - np.cos(alpha) - vol = a*b*c*np.sqrt(1 \ - + 2*np.cos(alpha)*np.cos(beta)*np.cos(gamma) \ - - np.cos(alpha)**2 - np.cos(beta)**2 - np.cos(gamma)**2) + vol = ( + a + * b + * c + * np.sqrt( + 1 + + 2 * np.cos(alpha) * np.cos(beta) * np.cos(gamma) + - np.cos(alpha) ** 2 + - np.cos(beta) ** 2 + - np.cos(gamma) ** 2 + ) + ) self.lat_real = np.array( [ - [a, 0, 0], - [b*np.cos(gamma), b*np.sin(gamma), 0], - [c*np.cos(beta), -c*f/np.sin(gamma), vol/(a*b*np.sin(gamma))], + [a, 0, 0], + [b * np.cos(gamma), b * np.sin(gamma), 0], + [ + c * np.cos(beta), + -c * f / np.sin(gamma), + vol / (a * b * np.sin(gamma)), + ], ] ) @@ -151,55 +186,84 @@ def calculate_lattice(self): self.metric_inv = np.linalg.inv(self.metric_real) self.lat_inv = self.metric_inv @ self.lat_real - # pymatgen flag - if 'pymatgen' in sys.modules: - self.pymatgen_available = True - else: - self.pymatgen_available = False - def get_strained_crystal( self, - exx = 0.0, - eyy = 0.0, - ezz = 0.0, - exy = 0.0, - exz = 0.0, - eyz = 0.0, - deformation_matrix = None, - return_deformation_matrix = False, - ): + exx=0.0, + eyy=0.0, + ezz=0.0, + exy=0.0, + exz=0.0, + eyz=0.0, + deformation_matrix=None, + return_deformation_matrix=False, + ): """ This method returns new Crystal class with strain applied. The directions of (x,y,z) are with respect to the default Crystal orientation, which can be checked with print(Crystal.lat_real) applied to the original Crystal. Strains are given in fractional values, so exx = 0.01 is 1% strain along the x direction. 
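A minimal usage sketch of the strained-crystal path documented above; the cell, atomic number, and position below are placeholder values chosen for illustration, not taken from this diff:

import numpy as np
from py4DSTEM.process.diffraction import Crystal

# Hypothetical cubic cell (a = 4.05 Angstrom) with a single atom in the basis;
# a single entry in `numbers` is applied to all positions, per the docstring above
crystal = Crystal(
    positions=np.array([[0.0, 0.0, 0.0]]),
    numbers=np.array([13]),
    cell=4.05,
)

# 1% tensile strain along x, also returning the deformation matrix
crystal_strained, deformation_matrix = crystal.get_strained_crystal(
    exx=0.01,
    return_deformation_matrix=True,
)
print(deformation_matrix[0, 0])  # 1.01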
- """ - - # deformation matrix - if deformation_matrix is None: + Deformation matrix should be of the form: deformation_matrix = np.array([ [1.0+exx, 1.0*exy, 1.0*exz], [1.0*exy, 1.0+eyy, 1.0*eyz], [1.0*exz, 1.0*eyz, 1.0+ezz], ]) + Parameters + -------- + + exx (float): + fractional strain along the xx direction + eyy (float): + fractional strain along the yy direction + ezz (float): + fractional strain along the zz direction + exy (float): + fractional strain along the xy direction + exz (float): + fractional strain along the xz direction + eyz (float): + fractional strain along the yz direction + deformation_matrix (np.ndarray): + 3x3 array describing deformation matrix + return_deformation_matrix (bool): + boolean switch to return deformation matrix + + Returns + -------- + return_deformation_matrix == False: + strained_crystal (py4DSTEM.Crystal) + return_deformation_matrix == True: + (strained_crystal, deformation_matrix) + """ + + # deformation matrix + if deformation_matrix is None: + deformation_matrix = np.array( + [ + [1.0 + exx, 1.0 * exy, 1.0 * exz], + [1.0 * exy, 1.0 + eyy, 1.0 * eyz], + [1.0 * exz, 1.0 * eyz, 1.0 + ezz], + ] + ) + # new unit cell lat_new = self.lat_real @ deformation_matrix # make new crystal class from py4DSTEM.process.diffraction import Crystal + crystal_strained = Crystal( - positions = self.positions.copy(), - numbers = self.numbers.copy(), - cell = lat_new, + positions=self.positions.copy(), + numbers=self.numbers.copy(), + cell=lat_new, ) if return_deformation_matrix: return crystal_strained, deformation_matrix else: return crystal_strained - def from_CIF(CIF, conventional_standard_structure=True): """ @@ -263,10 +327,12 @@ def from_pymatgen_structure( """ import pymatgen as mg + if structure is not None: if isinstance(structure, str): from mp_api.client import MPRester - with MPRester(MP_key) as mpr: + + with MPRester(MP_key) as mpr: structure = mpr.get_structure_by_material_id(structure) assert isinstance( @@ -282,7 +348,8 @@ def from_pymatgen_structure( ) else: from mp_api.client import MPRester - with MPRester(MP_key) as mpr: + + with MPRester(MP_key) as mpr: if formula is None: raise Exception( "Atleast a formula needs to be provided to query from MP database!!" @@ -302,7 +369,10 @@ def from_pymatgen_structure( ] selected = query[ np.argmin( - [query[i]["structure"].lattice.volume for i in range(len(query))] + [ + query[i]["structure"].lattice.volume + for i in range(len(query)) + ] ) ] structure = ( @@ -339,7 +409,6 @@ def from_unitcell_parameters( from_cartesian=False, conventional_standard_structure=True, ): - """ Create a Crystal using pymatgen to generate unit cell manually from user inputs @@ -431,9 +500,7 @@ def from_unitcell_parameters( return Crystal.from_pymatgen_structure(structure) - def setup_diffraction( - self, accelerating_voltage: float - ): + def setup_diffraction(self, accelerating_voltage: float): """ Set up attributes used for diffraction calculations without going through the full ACOM pipeline. @@ -446,28 +513,24 @@ def calculate_structure_factors( k_max: float = 2.0, tol_structure_factor: float = 1e-4, return_intensities: bool = False, - ): - - + ): """ Calculate structure factors for all hkl indices up to max scattering vector k_max Parameters -------- - - k_max: float + + k_max: float max scattering vector to include (1/Angstroms) tol_structure_factor: float tolerance for removing low-valued structure factors return_intensities: bool return the intensities and positions of all structure factor peaks. 
- + Returns -------- (q_SF, I_SF) Tuple of the q vectors and intensities of each structure factor. - - """ # Store k_max @@ -499,8 +562,8 @@ def calculate_structure_factors( ) hkl = np.vstack([xa.ravel(), ya.ravel(), za.ravel()]) # g_vec_all = self.lat_inv @ hkl - g_vec_all = (hkl.T @ self.lat_inv).T - + g_vec_all = (hkl.T @ self.lat_inv).T + # Delete lattice vectors outside of k_max keep = np.linalg.norm(g_vec_all, axis=0) <= self.k_max self.hkl = hkl[:, keep] @@ -567,17 +630,17 @@ def generate_diffraction_pattern( tol_excitation_error_mult: float = 3, tol_intensity: float = 1e-4, k_max: Optional[float] = None, - keep_qz = False, + keep_qz=False, return_orientation_matrix=False, ): """ Generate a single diffraction pattern, return all peaks as a pointlist. Args: - orientation (Orientation): an Orientation class object + orientation (Orientation): an Orientation class object ind_orientation If input is an Orientation class object with multiple orientations, this input can be used to select a specific orientation. - + orientation_matrix (array): (3,3) orientation matrix, where columns represent projection directions. zone_axis_lattice (array): (3,) projection direction in lattice indices proj_x_lattice (array): (3,) x-axis direction in lattice indices @@ -600,13 +663,10 @@ def generate_diffraction_pattern( orientation_matrix (array): 3x3 orientation matrix (optional) """ - if not (hasattr(self, "wavelength") and hasattr( - self, "accel_voltage" - )): + if not (hasattr(self, "wavelength") and hasattr(self, "accel_voltage")): print("Accelerating voltage not set. Assuming 300 keV!") self.setup_diffraction(300e3) - # Tolerance for angular tests tol = 1e-6 @@ -618,10 +678,8 @@ def generate_diffraction_pattern( orientation_matrix = orientation.matrix[ind_orientation] elif orientation_matrix is None: orientation_matrix = self.parse_orientation( - zone_axis_lattice, - proj_x_lattice, - zone_axis_cartesian, - proj_x_cartesian) + zone_axis_lattice, proj_x_lattice, zone_axis_cartesian, proj_x_cartesian + ) # Get foil normal direction if foil_normal_lattice is not None: @@ -635,13 +693,14 @@ def generate_diffraction_pattern( # Rotate crystal into desired projection g = orientation_matrix.T @ self.g_vec_all - # Calculate excitation errors if foil_normal is None: sg = self.excitation_errors(g) else: - foil_normal = (orientation_matrix.T \ - @ (-1*foil_normal[:,None]/np.linalg.norm(foil_normal))).ravel() + foil_normal = ( + orientation_matrix.T + @ (-1 * foil_normal[:, None] / np.linalg.norm(foil_normal)) + ).ravel() sg = self.excitation_errors(g, foil_normal) # Threshold for inclusion in diffraction pattern @@ -650,14 +709,14 @@ def generate_diffraction_pattern( # Maximum scattering angle cutoff if k_max is not None: - keep_kmax = np.linalg.norm(g,axis=0) <= k_max + keep_kmax = np.linalg.norm(g, axis=0) <= k_max keep = np.logical_and(keep, keep_kmax) g_diff = g[:, keep] # Diffracted peak intensities and labels g_int = self.struct_factors_int[keep] * np.exp( - (sg[keep] ** 2) / (-2 * sigma_excitation_error ** 2) + (sg[keep] ** 2) / (-2 * sigma_excitation_error**2) ) hkl = self.hkl[:, keep] @@ -665,8 +724,8 @@ def generate_diffraction_pattern( keep_int = g_int > tol_intensity # Output peaks - gx_proj = g_diff[0,keep_int] - gy_proj = g_diff[1,keep_int] + gx_proj = g_diff[0, keep_int] + gy_proj = g_diff[1, keep_int] # Diffracted peak labels h = hkl[0, keep_int] @@ -675,8 +734,9 @@ def generate_diffraction_pattern( # Output as PointList if keep_qz: - gz_proj = g_diff[2,keep_int] - pl_dtype = np.dtype([ + 
gz_proj = g_diff[2, keep_int] + pl_dtype = np.dtype( + [ ("qx", "float64"), ("qy", "float64"), ("qz", "float64"), @@ -684,36 +744,29 @@ def generate_diffraction_pattern( ("h", "int"), ("k", "int"), ("l", "int"), - ]) - bragg_peaks = PointList( - np.array([],dtype=pl_dtype) + ] ) + bragg_peaks = PointList(np.array([], dtype=pl_dtype)) if np.any(keep_int): bragg_peaks.add_data_by_field( - [ - gx_proj, - gy_proj, - gz_proj, - g_int[keep_int], - h,k,l]) + [gx_proj, gy_proj, gz_proj, g_int[keep_int], h, k, l] + ) else: - pl_dtype = np.dtype([ + pl_dtype = np.dtype( + [ ("qx", "float64"), ("qy", "float64"), ("intensity", "float64"), ("h", "int"), ("k", "int"), ("l", "int"), - ]) - bragg_peaks = PointList( - np.array([],dtype=pl_dtype) + ] ) + bragg_peaks = PointList(np.array([], dtype=pl_dtype)) if np.any(keep_int): - bragg_peaks.add_data_by_field([ - gx_proj, - gy_proj, - g_int[keep_int], - h,k,l]) + bragg_peaks.add_data_by_field( + [gx_proj, gy_proj, g_int[keep_int], h, k, l] + ) if return_orientation_matrix: return bragg_peaks, orientation_matrix @@ -721,112 +774,115 @@ def generate_diffraction_pattern( return bragg_peaks def generate_ring_pattern( - self, - k_max = 2.0, - use_bloch = False, - thickness = None, - bloch_params = None, - orientation_plan_params = None, - sigma_excitation_error = 0.02, - tol_intensity = 1e-3, - plot_rings = True, - plot_params = {}, - return_calc = True, + self, + k_max=2.0, + use_bloch=False, + thickness=None, + bloch_params=None, + orientation_plan_params=None, + sigma_excitation_error=0.02, + tol_intensity=1e-3, + plot_rings=True, + plot_params={}, + return_calc=True, ): """ Calculate polycrystalline diffraction pattern from structure - - Args: + + Args: k_max (float): Maximum scattering vector use_bloch (bool): if true, use dynamic instead of kinematic approach - thickness (float): thickness in Ångström to evaluate diffraction patterns, + thickness (float): thickness in Ångström to evaluate diffraction patterns, only needed for dynamical calculations - bloch_params (dict): optional, parameters to calculate dynamical structure factor, + bloch_params (dict): optional, parameters to calculate dynamical structure factor, see calculate_dynamical_structure_factors doc strings - orientation_plan_params (dict): optional, parameters to calculate orientation plan, + orientation_plan_params (dict): optional, parameters to calculate orientation plan, see orientation_plan doc strings - sigma_excitation_error (float): sigma value for envelope applied to s_g (excitation errors) + sigma_excitation_error (float): sigma value for envelope applied to s_g (excitation errors) in units of inverse Angstroms tol_intensity (np float): tolerance in intensity units for inclusion of diffraction spots plot_rings(bool): if true, plot diffraction rings with plot_ring_pattern - return_calc (bool): return radii and intensities + return_calc (bool): return radii and intensities - Returns: + Returns: radii_unique (np array): radii of ring pattern in units of scattering vector k intensity_unique (np array): intensity of rings weighted by frequency of diffraciton spots - """ - - if use_bloch: - assert (thickness is not None), "provide thickness for dynamical diffraction calculation" - assert hasattr(self, "Ug_dict"), "run calculate_dynamical_structure_factors first" - + """ + + if use_bloch: + assert ( + thickness is not None + ), "provide thickness for dynamical diffraction calculation" + assert hasattr( + self, "Ug_dict" + ), "run calculate_dynamical_structure_factors first" + if not 
hasattr(self, "struct_factors"): self.calculate_structure_factors( - k_max = k_max, + k_max=k_max, ) - - #check accelerating voltage - if hasattr(self, "accel_voltage"): + + # check accelerating voltage + if hasattr(self, "accel_voltage"): accelerating_voltage = self.accel_voltage - else: + else: self.accel_voltage = 300e3 print("Accelerating voltage not set. Assuming 300 keV!") - - #check orientation plan + + # check orientation plan if not hasattr(self, "orientation_vecs"): - if orientation_plan_params is None: + if orientation_plan_params is None: orientation_plan_params = { - 'zone_axis_range': 'auto', - 'angle_step_zone_axis': 4, - 'angle_step_in_plane': 4, - } + "zone_axis_range": "auto", + "angle_step_zone_axis": 4, + "angle_step_in_plane": 4, + } self.orientation_plan( **orientation_plan_params, ) - #calculate intensity and radius for rings + # calculate intensity and radius for rings radii = [] intensity = [] for a0 in range(self.orientation_vecs.shape[0]): if use_bloch: beams = self.generate_diffraction_pattern( - zone_axis_lattice = self.orientation_vecs[a0], - sigma_excitation_error = sigma_excitation_error, - tol_intensity = tol_intensity, - k_max = k_max + zone_axis_lattice=self.orientation_vecs[a0], + sigma_excitation_error=sigma_excitation_error, + tol_intensity=tol_intensity, + k_max=k_max, ) pattern = self.generate_dynamical_diffraction_pattern( - beams = beams, - zone_axis_lattice = self.orientation_vecs[a0], - thickness = thickness, + beams=beams, + zone_axis_lattice=self.orientation_vecs[a0], + thickness=thickness, ) - else: + else: pattern = self.generate_diffraction_pattern( - zone_axis_lattice = self.orientation_vecs[a0], - sigma_excitation_error = sigma_excitation_error, - tol_intensity = tol_intensity, - k_max = k_max + zone_axis_lattice=self.orientation_vecs[a0], + sigma_excitation_error=sigma_excitation_error, + tol_intensity=tol_intensity, + k_max=k_max, ) - intensity.append(pattern['intensity']) - radii.append((pattern['qx']**2 + pattern['qy']**2)**0.5) - + intensity.append(pattern["intensity"]) + radii.append((pattern["qx"] ** 2 + pattern["qy"] ** 2) ** 0.5) + intensity = np.concatenate(intensity) radii = np.concatenate(radii) - radii_unique,idx,inv,cts = np.unique(radii, return_counts=True, return_index=True,return_inverse=True) - intensity_unique = np.bincount(inv,weights=intensity) + radii_unique, idx, inv, cts = np.unique( + radii, return_counts=True, return_index=True, return_inverse=True + ) + intensity_unique = np.bincount(inv, weights=intensity) if plot_rings == True: - from py4DSTEM.process.diffraction.crystal_viz import plot_ring_pattern - plot_ring_pattern(radii_unique, - intensity_unique, - **plot_params - ) + from py4DSTEM.process.diffraction.crystal_viz import plot_ring_pattern - if return_calc == True: - return radii_unique, intensity_unique + plot_ring_pattern(radii_unique, intensity_unique, **plot_params) + if return_calc == True: + return radii_unique, intensity_unique # Vector conversions and other utilities for Crystal classes def cartesian_to_lattice(self, vec_cartesian): @@ -838,22 +894,26 @@ def lattice_to_cartesian(self, vec_lattice): return vec_cartesian / np.linalg.norm(vec_cartesian) def hexagonal_to_lattice(self, vec_hexagonal): - return np.array([ - 2.0*vec_hexagonal[0] + vec_hexagonal[1], - 2.0*vec_hexagonal[1] + vec_hexagonal[0] , - vec_hexagonal[3] - ]) + return np.array( + [ + 2.0 * vec_hexagonal[0] + vec_hexagonal[1], + 2.0 * vec_hexagonal[1] + vec_hexagonal[0], + vec_hexagonal[3], + ] + ) def lattice_to_hexagonal(self, 
vec_lattice): - return np.array([ - (2.0*vec_lattice[0] - vec_lattice[1])/3.0, - (2.0*vec_lattice[1] - vec_lattice[0])/3.0, - (-vec_lattice[0] - vec_lattice[1])/3.0, - vec_lattice[2] - ]) + return np.array( + [ + (2.0 * vec_lattice[0] - vec_lattice[1]) / 3.0, + (2.0 * vec_lattice[1] - vec_lattice[0]) / 3.0, + (-vec_lattice[0] - vec_lattice[1]) / 3.0, + vec_lattice[2], + ] + ) def cartesian_to_miller(self, vec_cartesian): - vec_miller = self.lat_real.T @ self.metric_inv @ vec_cartesian + vec_miller = self.lat_real.T @ self.metric_inv @ vec_cartesian return vec_miller / np.linalg.norm(vec_miller) def miller_to_cartesian(self, vec_miller): @@ -861,21 +921,21 @@ def miller_to_cartesian(self, vec_miller): return vec_cartesian / np.linalg.norm(vec_cartesian) def rational_ind( - self, + self, vec, - tol_den = 1000, - ): - # This function rationalizes the indices of a vector, up to + tol_den=1000, + ): + # This function rationalizes the indices of a vector, up to # some tolerance. Returns integers to prevent rounding errors. - vec = np.array(vec,dtype='float64') + vec = np.array(vec, dtype="float64") sub = np.abs(vec) > 0 if np.sum(sub) > 0: for ind in np.argwhere(sub): frac = Fraction(vec[ind[0]]).limit_denominator(tol_den) vec *= np.round(frac.denominator) - vec = np.round(vec \ - / np.gcd.reduce(np.round(np.abs(vec[sub])).astype('int')) - ).astype('int') + vec = np.round( + vec / np.gcd.reduce(np.round(np.abs(vec[sub])).astype("int")) + ).astype("int") return vec @@ -944,11 +1004,11 @@ def excitation_errors( def calculate_bragg_peak_histogram( self, bragg_peaks, - bragg_k_power = 1.0, - bragg_intensity_power = 1.0, - k_min = 0.0, - k_max = None, - k_step = 0.005 + bragg_k_power=1.0, + bragg_intensity_power=1.0, + k_min=0.0, + k_max=None, + k_step=0.005, ): """ Prepare experimental bragg peaks for lattice parameter or unit cell fitting. @@ -985,10 +1045,10 @@ def calculate_bragg_peak_histogram( bragg_peaks.get_vectors( rx, ry, - center = True, - ellipse = ellipse, - pixel = True, - rotate = rotate, + center=True, + ellipse=ellipse, + pixel=True, + rotate=rotate, ).data for rx in range(bragg_peaks.shape[0]) for ry in range(bragg_peaks.shape[1]) @@ -1015,7 +1075,6 @@ def calculate_bragg_peak_histogram( weights=dk[sub] * int_meas[sub], minlength=k_num, ) - int_exp = (int_exp ** bragg_intensity_power) * (k ** bragg_k_power) + int_exp = (int_exp**bragg_intensity_power) * (k**bragg_k_power) int_exp /= np.max(int_exp) return k, int_exp - diff --git a/py4DSTEM/process/diffraction/crystal_ACOM.py b/py4DSTEM/process/diffraction/crystal_ACOM.py index bffa5b620..da553456f 100644 --- a/py4DSTEM/process/diffraction/crystal_ACOM.py +++ b/py4DSTEM/process/diffraction/crystal_ACOM.py @@ -2,6 +2,8 @@ import matplotlib.pyplot as plt import os from typing import Union, Optional +import time, sys +from tqdm import tqdm from emdfile import tqdmnd, PointList, PointListArray from py4DSTEM.data import RealSlice @@ -11,9 +13,10 @@ from warnings import warn from numpy.linalg import lstsq + try: import cupy as cp -except: +except ModuleNotFoundError: cp = None @@ -30,13 +33,12 @@ def orientation_plan( intensity_power: float = 0.25, # New default intensity power scaling tol_peak_delete=None, tol_distance: float = 0.01, - fiber_axis = None, - fiber_angles = None, + fiber_axis=None, + fiber_angles=None, figsize: Union[list, tuple, np.ndarray] = (6, 6), CUDA: bool = False, progress_bar: bool = True, - ): - +): """ Calculate the rotation basis arrays for an SO(3) rotation correlogram. 
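The plan built here is consumed by the correlogram matching further down in this file: for every candidate zone axis, the best in-plane rotation is found as a cross-correlation computed with FFTs along the in-plane (gamma) axis. A minimal sketch of that kernel, assuming refs_fft holds precomputed FFTs of the polar reference patterns with shape (num_zones, num_radial_bins, num_gamma) and im_polar is a single experimental polar-transformed pattern; the name correlate_in_plane is illustrative, not part of the module:

import numpy as np

def correlate_in_plane(refs_fft, im_polar):
    # FFT of the experimental pattern along the in-plane (gamma) axis
    im_fft = np.fft.fft(im_polar)  # shape (num_radial_bins, num_gamma)
    # Multiply in Fourier space, invert, and sum over radial bins; clipping at
    # zero mirrors the np.maximum(..., 0) calls used in match_single_pattern
    corr = np.maximum(
        np.sum(np.real(np.fft.ifft(refs_fft * im_fft[None, :, :])), axis=1),
        0,
    )  # shape (num_zones, num_gamma)
    # Best candidate zone axis and in-plane rotation index
    ind_zone, ind_gamma = np.unravel_index(np.argmax(corr), corr.shape)
    return ind_zone, ind_gamma, corr

The corr_full arrays built from orientation_ref and im_polar_fft below implement this same product, with coarse-to-fine refinement and inversion handling layered on top.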
@@ -51,7 +53,7 @@ def orientation_plan( Setting to 'auto' will use pymatgen to determine the point group symmetry of the structure and choose an appropriate zone_axis_range angle_step_zone_axis (float): Approximate angular step size for zone axis search [degrees] - angle_coarse_zone_axis (float): Coarse step size for zone axis search [degrees]. Setting to + angle_coarse_zone_axis (float): Coarse step size for zone axis search [degrees]. Setting to None uses the same value as angle_step_zone_axis. angle_refine_range (float): Range of angles to use for zone axis refinement. Setting to None uses same value as angle_coarse_zone_axis. @@ -101,7 +103,8 @@ def orientation_plan( if angle_coarse_zone_axis is not None: self.orientation_refine = True self.orientation_refine_ratio = np.round( - angle_coarse_zone_axis/angle_step_zone_axis).astype('int') + angle_coarse_zone_axis / angle_step_zone_axis + ).astype("int") self.orientation_angle_coarse = angle_coarse_zone_axis if angle_refine_range is None: self.orientation_refine_range = angle_coarse_zone_axis @@ -114,12 +117,12 @@ def orientation_plan( if self.pymatgen_available: from pymatgen.symmetry.analyzer import SpacegroupAnalyzer from pymatgen.core.structure import Structure + structure = Structure( self.lat_real, self.numbers, self.positions, coords_are_cartesian=False ) self.pointgroup = SpacegroupAnalyzer(structure) - # Handle the "auto" case first, since it works by overriding zone_axis_range, # fiber_axis, and fiber_angles then using the regular parser: if isinstance(zone_axis_range, str) and zone_axis_range == "auto": @@ -127,8 +130,9 @@ def orientation_plan( self.pointgroup.get_point_group_symbol() in orientation_ranges ), "Unrecognized pointgroup returned by pymatgen!" - zone_axis_range, fiber_axis, fiber_angles = orientation_ranges[ \ - self.pointgroup.get_point_group_symbol()] + zone_axis_range, fiber_axis, fiber_angles = orientation_ranges[ + self.pointgroup.get_point_group_symbol() + ] if isinstance(zone_axis_range, list): zone_axis_range = np.array(zone_axis_range) elif zone_axis_range == "fiber": @@ -140,7 +144,6 @@ def orientation_plan( f" using arguments: zone_axis_range = \n{zone_axis_range}, \n fiber_axis={fiber_axis}, fiber_angles={fiber_angles}." 
) - if isinstance(zone_axis_range, str): if ( zone_axis_range == "fiber" @@ -152,14 +155,16 @@ def orientation_plan( self.orientation_fiber_axis, dtype="float" ) # if self.cartesian_directions: - self.orientation_fiber_axis = ( + self.orientation_fiber_axis = self.orientation_fiber_axis / np.linalg.norm( self.orientation_fiber_axis - / np.linalg.norm(self.orientation_fiber_axis) ) # update fiber axis to be centered on the 1st unit cell vector - v3 = np.cross(self.orientation_fiber_axis, self.lat_real[0,:]) - v2 = np.cross(v3, self.orientation_fiber_axis,) + v3 = np.cross(self.orientation_fiber_axis, self.lat_real[0, :]) + v2 = np.cross( + v3, + self.orientation_fiber_axis, + ) v2 = v2 / np.linalg.norm(v2) v3 = v3 / np.linalg.norm(v3) @@ -168,7 +173,6 @@ def orientation_plan( (self.orientation_fiber_axis, v2, v3) ).astype("float") else: - if self.orientation_fiber_angles[0] == 180: theta = np.pi / 2.0 else: @@ -182,8 +186,9 @@ def orientation_plan( phi = self.orientation_fiber_angles[1] * np.pi / 180.0 # Generate zone axis range - v2output = self.orientation_fiber_axis * np.cos(theta) \ - + v2 * np.sin(theta) + v2output = self.orientation_fiber_axis * np.cos(theta) + v2 * np.sin( + theta + ) v3output = ( self.orientation_fiber_axis * np.cos(theta) + (v2 * np.sin(theta)) * np.cos(phi) @@ -191,17 +196,15 @@ def orientation_plan( ) v2output = ( self.orientation_fiber_axis * np.cos(theta) - + (v2 * np.sin(theta)) * np.cos(phi/2) - - (v3 * np.sin(theta)) * np.sin(phi/2) + + (v2 * np.sin(theta)) * np.cos(phi / 2) + - (v3 * np.sin(theta)) * np.sin(phi / 2) ) v3output = ( self.orientation_fiber_axis * np.cos(theta) - + (v2 * np.sin(theta)) * np.cos(phi/2) - + (v3 * np.sin(theta)) * np.sin(phi/2) + + (v2 * np.sin(theta)) * np.cos(phi / 2) + + (v3 * np.sin(theta)) * np.sin(phi / 2) ) - - self.orientation_zone_axis_range = np.vstack( (self.orientation_fiber_axis, v2output, v3output) ).astype("float") @@ -283,9 +286,9 @@ def orientation_plan( (180 / np.pi) * angle_u_v / angle_step_zone_axis, (180 / np.pi) * angle_u_w / angle_step_zone_axis, ) - self.orientation_zone_axis_steps = (np.round( - step / self.orientation_refine_ratio - ) * self.orientation_refine_ratio).astype(np.integer) + self.orientation_zone_axis_steps = ( + np.round(step / self.orientation_refine_ratio) * self.orientation_refine_ratio + ).astype(np.integer) if self.orientation_fiber and self.orientation_fiber_angles[0] == 0: self.orientation_num_zones = int(1) @@ -550,7 +553,8 @@ def orientation_plan( if self.orientation_refine: self.orientation_sieve = np.logical_and( np.mod(self.orientation_inds[:, 0], self.orientation_refine_ratio) == 0, - np.mod(self.orientation_inds[:, 1], self.orientation_refine_ratio) == 0) + np.mod(self.orientation_inds[:, 1], self.orientation_refine_ratio) == 0, + ) if self.CUDA: self.orientation_sieve_CUDA = cp.asarray(self.orientation_sieve) @@ -562,12 +566,12 @@ def orientation_plan( # azim = np.pi / 2 + np.arctan2( # self.orientation_vecs[:, 1], self.orientation_vecs[:, 0] # ) - azim = np.arctan2( - self.orientation_vecs[:, 0], self.orientation_vecs[:, 1] - ) + azim = np.arctan2(self.orientation_vecs[:, 0], self.orientation_vecs[:, 1]) # Solve for number of angular steps along in-plane rotation direction - self.orientation_in_plane_steps = np.round(360 / angle_step_in_plane).astype(np.integer) + self.orientation_in_plane_steps = np.round(360 / angle_step_in_plane).astype( + np.integer + ) # Calculate -z angles (Euler angle 3) self.orientation_gamma = np.linspace( @@ -617,47 +621,53 @@ def orientation_plan( 
# get operators ops = self.pointgroup.get_point_group_operations() - # Inverse of lattice + # Inverse of lattice zone_axis_range_inv = np.linalg.inv(self.orientation_zone_axis_range) # init num_sym = len(ops) - self.symmetry_operators = np.zeros((num_sym,3,3)) - self.symmetry_reduction = np.zeros((num_sym,3,3)) + self.symmetry_operators = np.zeros((num_sym, 3, 3)) + self.symmetry_reduction = np.zeros((num_sym, 3, 3)) # calculate symmetry and reduction matrices for a0 in range(num_sym): - self.symmetry_operators[a0] = \ + self.symmetry_operators[a0] = ( self.lat_inv.T @ ops[a0].rotation_matrix.T @ self.lat_real - self.symmetry_reduction[a0] = \ - (zone_axis_range_inv.T @ self.symmetry_operators[a0]).T + ) + self.symmetry_reduction[a0] = ( + zone_axis_range_inv.T @ self.symmetry_operators[a0] + ).T # Remove duplicates - keep = np.ones(num_sym,dtype='bool') + keep = np.ones(num_sym, dtype="bool") for a0 in range(num_sym): if keep[a0]: - diff = np.sum(np.abs( - self.symmetry_operators - self.symmetry_operators[a0]), - axis=(1,2)) + diff = np.sum( + np.abs(self.symmetry_operators - self.symmetry_operators[a0]), + axis=(1, 2), + ) sub = diff < 1e-3 - sub[:a0+1] = False + sub[: a0 + 1] = False keep[sub] = False self.symmetry_operators = self.symmetry_operators[keep] self.symmetry_reduction = self.symmetry_reduction[keep] - if self.orientation_fiber_angles is not None \ - and np.abs(self.orientation_fiber_angles[0] - 180.0) < 1e-3: + if ( + self.orientation_fiber_angles is not None + and np.abs(self.orientation_fiber_angles[0] - 180.0) < 1e-3 + ): zone_axis_range_flip = self.orientation_zone_axis_range.copy() - zone_axis_range_flip[0,:] = -1*zone_axis_range_flip[0,:] + zone_axis_range_flip[0, :] = -1 * zone_axis_range_flip[0, :] zone_axis_range_inv = np.linalg.inv(zone_axis_range_flip) num_sym = self.symmetry_operators.shape[0] - self.symmetry_operators = np.tile(self.symmetry_operators,[2,1,1]) - self.symmetry_reduction = np.tile(self.symmetry_reduction,[2,1,1]) + self.symmetry_operators = np.tile(self.symmetry_operators, [2, 1, 1]) + self.symmetry_reduction = np.tile(self.symmetry_reduction, [2, 1, 1]) for a0 in range(num_sym): - self.symmetry_reduction[a0+num_sym] = \ - (zone_axis_range_inv.T @ self.symmetry_operators[a0+num_sym]).T + self.symmetry_reduction[a0 + num_sym] = ( + zone_axis_range_inv.T @ self.symmetry_operators[a0 + num_sym] + ).T # Calculate rotation matrices for zone axes for a0 in np.arange(self.orientation_num_zones): @@ -686,7 +696,7 @@ def orientation_plan( self.orientation_rotation_angles[a0, :] = [azim[a0], elev[a0], -azim[a0]] # Calculate reference arrays for all orientations - k0 = np.array([0.0, 0.0, -1.0/self.wavelength]) + k0 = np.array([0.0, 0.0, -1.0 / self.wavelength]) n = np.array([0.0, 0.0, -1.0]) for a0 in tqdmnd( @@ -735,8 +745,7 @@ def orientation_plan( ) ) - orientation_ref_norm = np.sqrt(np.sum( - self.orientation_ref[a0, :, :] ** 2)) + orientation_ref_norm = np.sqrt(np.sum(self.orientation_ref[a0, :, :] ** 2)) if orientation_ref_norm > 0: self.orientation_ref[a0, :, :] /= orientation_ref_norm @@ -755,30 +764,51 @@ def match_orientations( self, bragg_peaks_array: PointListArray, num_matches_return: int = 1, - min_number_peaks = 3, - inversion_symmetry = True, - multiple_corr_reset = True, - progress_bar: bool = True, + min_angle_between_matches_deg=None, + min_number_peaks: int = 3, + inversion_symmetry: bool = True, + multiple_corr_reset: bool = True, return_orientation: bool = True, + progress_bar: bool = True, ): - ''' - This function computes the 
orientation of any number of PointLists stored in a PointListArray, and returns an OrienationMap. + """ + Parameters + -------- + bragg_peaks_array: PointListArray + PointListArray containing the Bragg peaks and intensities, with calibrations applied + num_matches_return: int + Return this many matches as the 3rd dimension of the orientation matrices + min_angle_between_matches_deg: float + Minimum angle between zone axes of multiple matches, in degrees. + Note that only the zone axis separation is constrained; in-plane rotations are not, since multiple in-plane matches are possible. + min_number_peaks: int + Minimum number of peaks required to perform ACOM matching + inversion_symmetry: bool + Check for inversion symmetry in the matches + multiple_corr_reset: bool + Keep the original correlation score for multiple matches + return_orientation: bool + Return the orientation map from the function for inspection. + The map is always stored in the Crystal object. + progress_bar: bool + Show or hide the progress bar - ''' + """ orientation_map = OrientationMap( num_x=bragg_peaks_array.shape[0], num_y=bragg_peaks_array.shape[1], - num_matches=num_matches_return) - - #check cal state - if bragg_peaks_array.calstate['ellipse'] == False: + num_matches=num_matches_return, + ) + + # check cal state + if bragg_peaks_array.calstate["ellipse"] == False: ellipse = False - warn('Warning: bragg peaks not elliptically calibrated') + warn("Warning: bragg peaks not elliptically calibrated") else: ellipse = True - if bragg_peaks_array.calstate['rotate'] == False: + if bragg_peaks_array.calstate["rotate"] == False: rotate = False - warn('bragg peaks not rotationally calibrated') + warn("bragg peaks not rotationally calibrated") else: rotate = True @@ -794,64 +824,87 @@ def match_orientations( center=True, ellipse=ellipse, pixel=True, - rotate=rotate + rotate=rotate, ) orientation = self.match_single_pattern( bragg_peaks=vectors, num_matches_return=num_matches_return, + min_angle_between_matches_deg=min_angle_between_matches_deg, min_number_peaks=min_number_peaks, inversion_symmetry=inversion_symmetry, multiple_corr_reset=multiple_corr_reset, plot_corr=False, verbose=False, - ) + ) - orientation_map.set_orientation(orientation,rx,ry) + orientation_map.set_orientation(orientation, rx, ry) + + # assign and return self.orientation_map = orientation_map - + if return_orientation: return orientation_map else: return + def match_single_pattern( self, bragg_peaks: PointList, num_matches_return: int = 1, - min_number_peaks = 3, - inversion_symmetry = True, - multiple_corr_reset = True, + min_angle_between_matches_deg=None, + min_number_peaks=3, + inversion_symmetry=True, + multiple_corr_reset=True, plot_polar: bool = False, plot_corr: bool = False, returnfig: bool = False, figsize: Union[list, tuple, np.ndarray] = (12, 4), verbose: bool = False, # plot_corr_3D: bool = False, - ): +): """ Solve for the best fit orientation of a single diffraction pattern.
- Args: - bragg_peaks (PointList): numpy array containing the Bragg positions and intensities ('qx', 'qy', 'intensity') - num_matches_return (int): return these many matches as 3th dim of orient (matrix) - min_number_peaks (int): Minimum number of peaks required to perform ACOM matching - inversion_symmetry (bool): check for inversion symmetry in the matches - multiple_corr_reset (bool): keep original correlation score for multiple matches - subpixel_tilt (bool): set to false for faster matching, returning the nearest corr point - plot_polar (bool): set to true to plot the polar transform of the diffraction pattern - plot_corr (bool): set to true to plot the resulting correlogram - returnfig (bool): Return figure handles - figsize (list): size of figure - verbose (bool): Print the fitted zone axes, correlation scores - CUDA (bool): Enable CUDA for the FFT steps - - Returns: - orientation (Orientation): Orientation class containing all outputs - fig, ax (handles): Figure handles for the plotting output + Parameters + -------- + bragg_peaks: PointList + numpy array containing the Bragg positions and intensities ('qx', 'qy', 'intensity') + num_matches_return: int + Return this many matches as the 3rd dimension of the orientation matrices + min_angle_between_matches_deg: float + Minimum angle between zone axes of multiple matches, in degrees. + Note that only the zone axis separation is constrained; in-plane rotations are not, since multiple in-plane matches are possible. + min_number_peaks: int + Minimum number of peaks required to perform ACOM matching + inversion_symmetry: bool + Check for inversion symmetry in the matches + multiple_corr_reset: bool + Keep the original correlation score for multiple matches + subpixel_tilt: bool + Set to False for faster matching, returning the nearest correlation point + plot_polar: bool + Set to True to plot the polar transform of the diffraction pattern + plot_corr: bool + Set to True to plot the resulting correlogram + returnfig: bool + Return figure handles + figsize: list + Size of figure + verbose: bool + Print the fitted zone axes and correlation scores + CUDA: bool + Enable CUDA for the FFT steps + + Returns + -------- + orientation: Orientation + Orientation class containing all outputs + fig, ax: handles + Figure handles for the plotting output """ - # init orientation output orientation = Orientation(num_matches=num_matches_return) if bragg_peaks.data.shape[0] < min_number_peaks: @@ -872,7 +925,7 @@ def match_single_pattern( # loop over the number of matches to return for match_ind in range(num_matches_return): # Convert Bragg peaks to polar coordinates - qr = np.sqrt(qx ** 2 + qy ** 2) + qr = np.sqrt(qx**2 + qy**2) qphi = np.arctan2(qy, qx) # Calculate polar Bragg peak image @@ -920,13 +973,13 @@ def match_single_pattern( # Determine the RMS signal from im_polar for the first match. - # Note that we use scaling slightly below RMS so that following matches + # Note that we use scaling slightly below RMS so that following matches # don't have higher correlation scores than previous matches.
if multiple_corr_reset is False and num_matches_return > 1: if match_ind == 0: - im_polar_scale_0 = np.mean(im_polar**2)**0.4 + im_polar_scale_0 = np.mean(im_polar**2) ** 0.4 else: - im_polar_scale = np.mean(im_polar**2)**0.4 + im_polar_scale = np.mean(im_polar**2) ** 0.4 if im_polar_scale > 0: im_polar *= im_polar_scale_0 / im_polar_scale # im_polar /= np.sqrt(np.mean(im_polar**2)) @@ -947,7 +1000,7 @@ def match_single_pattern( im_polar_refine = im_polar.copy() # Plot polar space image if needed - if plot_polar is True: # and match_ind==0: + if plot_polar is True: # and match_ind==0: fig, ax = plt.subplots(1, 1, figsize=figsize) ax.imshow(im_polar) plt.show() @@ -963,29 +1016,36 @@ def match_single_pattern( else: im_polar_refine_fft = np.fft.fft(im_polar_refine) - # Calculate full orientation correlogram if self.orientation_refine: - corr_full = np.zeros(( - self.orientation_num_zones, - self.orientation_in_plane_steps, - )) + corr_full = np.zeros( + ( + self.orientation_num_zones, + self.orientation_in_plane_steps, + ) + ) if self.CUDA: - corr_full[self.orientation_sieve,:] = cp.maximum( + corr_full[self.orientation_sieve, :] = cp.maximum( cp.sum( - cp.real(cp.fft.ifft( - self.orientation_ref[self.orientation_sieve_CUDA,:,:] \ - * im_polar_fft[None, :, :])), + cp.real( + cp.fft.ifft( + self.orientation_ref[self.orientation_sieve_CUDA, :, :] + * im_polar_fft[None, :, :] + ) + ), axis=1, ), 0, ).get() else: - corr_full[self.orientation_sieve,:] = np.maximum( + corr_full[self.orientation_sieve, :] = np.maximum( np.sum( - np.real(np.fft.ifft( - self.orientation_ref[self.orientation_sieve,:,:] \ - * im_polar_fft[None, :, :])), + np.real( + np.fft.ifft( + self.orientation_ref[self.orientation_sieve, :, :] + * im_polar_fft[None, :, :] + ) + ), axis=1, ), 0, @@ -995,7 +1055,9 @@ def match_single_pattern( if self.CUDA: corr_full = np.maximum( np.sum( - np.real(cp.fft.ifft(self.orientation_ref * im_polar_fft[None, :, :])), + np.real( + cp.fft.ifft(self.orientation_ref * im_polar_fft[None, :, :]) + ), axis=1, ), 0, @@ -1003,28 +1065,53 @@ def match_single_pattern( else: corr_full = np.maximum( np.sum( - np.real(np.fft.ifft(self.orientation_ref * im_polar_fft[None, :, :])), + np.real( + np.fft.ifft(self.orientation_ref * im_polar_fft[None, :, :]) + ), axis=1, ), 0, ) + # If minimum angle is specified and we're on a match later than the first, + # we zero correlation values within the given range. 
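+ # For example, with min_angle_between_matches_deg=5, every candidate zone axis within 5 degrees of an earlier match is zeroed out below, forcing subsequent matches onto distinct orientations.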
+ if min_angle_between_matches_deg is not None: + if match_ind > 0: + inds_previous = orientation.inds[:match_ind, 0] + for a0 in range(inds_previous.size): + mask_zero = np.arccos( + np.clip( + np.sum( + self.orientation_vecs + * self.orientation_vecs[inds_previous[a0], :], + axis=1, + ), + -1, + 1, + ) + ) < np.deg2rad(min_angle_between_matches_deg) + corr_full[mask_zero, :] = 0.0 + # Get maximum (non inverted) correlation value ind_phi = np.argmax(corr_full, axis=1) # Calculate orientation correlogram for inverse pattern (in-plane mirror) if inversion_symmetry: if self.orientation_refine: - corr_full_inv = np.zeros(( - self.orientation_num_zones, - self.orientation_in_plane_steps, - )) + corr_full_inv = np.zeros( + ( + self.orientation_num_zones, + self.orientation_in_plane_steps, + ) + ) if self.CUDA: - corr_full_inv[self.orientation_sieve,:] = cp.maximum( + corr_full_inv[self.orientation_sieve, :] = cp.maximum( cp.sum( cp.real( cp.fft.ifft( - self.orientation_ref[self.orientation_sieve_CUDA,:,:] \ + self.orientation_ref[ + self.orientation_sieve_CUDA, :, : + ] * cp.conj(im_polar_fft)[None, :, :] ) ), @@ -1033,11 +1120,11 @@ def match_single_pattern( 0, ).get() else: - corr_full_inv[self.orientation_sieve,:] = np.maximum( + corr_full_inv[self.orientation_sieve, :] = np.maximum( np.sum( np.real( np.fft.ifft( - self.orientation_ref[self.orientation_sieve,:,:] \ + self.orientation_ref[self.orientation_sieve, :, :] * np.conj(im_polar_fft)[None, :, :] ) ), @@ -1051,7 +1138,8 @@ def match_single_pattern( np.sum( np.real( cp.fft.ifft( - self.orientation_ref * cp.conj(im_polar_fft)[None, :, :] + self.orientation_ref + * cp.conj(im_polar_fft)[None, :, :] ) ), axis=1, @@ -1063,13 +1151,34 @@ def match_single_pattern( np.sum( np.real( np.fft.ifft( - self.orientation_ref * np.conj(im_polar_fft)[None, :, :] + self.orientation_ref + * np.conj(im_polar_fft)[None, :, :] ) ), axis=1, ), 0, ) + + # If minimum angle is specified and we're on a match later than the first, + # we zero correlation values within the given range. 
+ if min_angle_between_matches_deg is not None: + if match_ind > 0: + inds_previous = orientation.inds[:match_ind, 0] + for a0 in range(inds_previous.size): + mask_zero = np.arccos( + np.clip( + np.sum( + self.orientation_vecs + * self.orientation_vecs[inds_previous[a0], :], + axis=1, + ), + -1, + 1, + ) + ) < np.deg2rad(min_angle_between_matches_deg) + corr_full_inv[mask_zero, :] = 0.0 + ind_phi_inv = np.argmax(corr_full_inv, axis=1) corr_inv = np.zeros(self.orientation_num_zones, dtype="bool") @@ -1077,7 +1186,6 @@ def match_single_pattern( corr_value[:] = 0 for a0 in range(self.orientation_num_zones): if (self.orientation_refine is False) or self.orientation_sieve[a0]: - # Correlation score if inversion_symmetry: if corr_full_inv[a0, ind_phi_inv[a0]] > corr_full[a0, ind_phi[a0]]: @@ -1095,7 +1203,7 @@ def match_single_pattern( ).astype("int") c = corr_full_inv[a0, inds] if np.max(c) > 0: - dc = (c[2] - c[0]) / (4*c[1] - 2*c[0] - 2*c[2]) + dc = (c[2] - c[0]) / (4 * c[1] - 2 * c[0] - 2 * c[2]) corr_in_plane_angle[a0] = ( self.orientation_gamma[ind_phi_inv[a0]] + dc * dphi ) + np.pi @@ -1105,46 +1213,63 @@ def match_single_pattern( ).astype("int") c = corr_full[a0, inds] if np.max(c) > 0: - dc = (c[2] - c[0]) / (4*c[1] - 2*c[0] - 2*c[2]) + dc = (c[2] - c[0]) / (4 * c[1] - 2 * c[0] - 2 * c[2]) corr_in_plane_angle[a0] = ( self.orientation_gamma[ind_phi[a0]] + dc * dphi ) # If needed, keep original polar image to recompute the correlations - if multiple_corr_reset and num_matches_return > 1 and match_ind == 0 and not self.orientation_refine: + if ( + multiple_corr_reset + and num_matches_return > 1 + and match_ind == 0 + and not self.orientation_refine + ): corr_value_keep = corr_value.copy() corr_in_plane_angle_keep = corr_in_plane_angle.copy() # Determine the best fit orientation ind_best_fit = np.unravel_index(np.argmax(corr_value), corr_value.shape)[0] - ############################################################ # If needed, perform fine step refinement of the zone axis # ############################################################ if self.orientation_refine: - mask_refine = np.arccos(np.clip(np.sum(self.orientation_vecs \ - * self.orientation_vecs[ind_best_fit,:],axis=1),-1,1)) \ - < np.deg2rad(self.orientation_refine_range) + mask_refine = np.arccos( + np.clip( + np.sum( + self.orientation_vecs * self.orientation_vecs[ind_best_fit, :], + axis=1, + ), + -1, + 1, + ) + ) < np.deg2rad(self.orientation_refine_range) if self.CUDA: mask_refine_CUDA = cp.asarray(mask_refine) if self.CUDA: - corr_full[mask_refine,:] = cp.maximum( + corr_full[mask_refine, :] = cp.maximum( cp.sum( - cp.real(cp.fft.ifft( - self.orientation_ref[mask_refine_CUDA,:,:] \ - * im_polar_refine_fft[None, :, :])), + cp.real( + cp.fft.ifft( + self.orientation_ref[mask_refine_CUDA, :, :] + * im_polar_refine_fft[None, :, :] + ) + ), axis=1, ), 0, ).get() else: - corr_full[mask_refine,:] = np.maximum( + corr_full[mask_refine, :] = np.maximum( np.sum( - np.real(np.fft.ifft( - self.orientation_ref[mask_refine,:,:] \ - * im_polar_refine_fft[None, :, :])), + np.real( + np.fft.ifft( + self.orientation_ref[mask_refine, :, :] + * im_polar_refine_fft[None, :, :] + ) + ), axis=1, ), 0, @@ -1156,11 +1281,11 @@ def match_single_pattern( # Inversion symmetry if inversion_symmetry: if self.CUDA: - corr_full_inv[mask_refine,:] = cp.maximum( + corr_full_inv[mask_refine, :] = cp.maximum( cp.sum( cp.real( cp.fft.ifft( - self.orientation_ref[mask_refine_CUDA,:,:] \ + self.orientation_ref[mask_refine_CUDA, :, :] * 
cp.conj(im_polar_refine_fft)[None, :, :] ) ), @@ -1169,11 +1294,11 @@ def match_single_pattern( 0, ).get() else: - corr_full_inv[mask_refine,:] = np.maximum( + corr_full_inv[mask_refine, :] = np.maximum( np.sum( np.real( np.fft.ifft( - self.orientation_ref[mask_refine,:,:] \ + self.orientation_ref[mask_refine, :, :] * np.conj(im_polar_refine_fft)[None, :, :] ) ), @@ -1183,7 +1308,6 @@ def match_single_pattern( ) ind_phi_inv = np.argmax(corr_full_inv, axis=1) - # Determine best in-plane correlation for a0 in np.argwhere(mask_refine): # Correlation score @@ -1203,7 +1327,7 @@ def match_single_pattern( ).astype("int") c = corr_full_inv[a0, inds] if np.max(c) > 0: - dc = (c[2] - c[0]) / (4*c[1] - 2*c[0] - 2*c[2]) + dc = (c[2] - c[0]) / (4 * c[1] - 2 * c[0] - 2 * c[2]) corr_in_plane_angle[a0] = ( self.orientation_gamma[ind_phi_inv[a0]] + dc * dphi ) + np.pi @@ -1213,13 +1337,15 @@ def match_single_pattern( ).astype("int") c = corr_full[a0, inds] if np.max(c) > 0: - dc = (c[2] - c[0]) / (4*c[1] - 2*c[0] - 2*c[2]) + dc = (c[2] - c[0]) / (4 * c[1] - 2 * c[0] - 2 * c[2]) corr_in_plane_angle[a0] = ( self.orientation_gamma[ind_phi[a0]] + dc * dphi ) # Determine the new best fit orientation - ind_best_fit = np.unravel_index(np.argmax(corr_value * mask_refine[None,:]), corr_value.shape)[0] + ind_best_fit = np.unravel_index( + np.argmax(corr_value * mask_refine[None, :]), corr_value.shape + )[0] # Verify current match has a correlation > 0 if corr_value[ind_best_fit] > 0: @@ -1229,22 +1355,26 @@ def match_single_pattern( ) # apply in-plane rotation, and inversion if needed - if multiple_corr_reset and match_ind > 0 and self.orientation_refine is False: + if ( + multiple_corr_reset + and match_ind > 0 + and self.orientation_refine is False + ): phi = corr_in_plane_angle_keep[ind_best_fit] else: phi = corr_in_plane_angle[ind_best_fit] m3z = np.array( - [ - [np.cos(phi), np.sin(phi), 0], - [-np.sin(phi), np.cos(phi), 0], - [0, 0, 1], - ] - ) + [ + [np.cos(phi), np.sin(phi), 0], + [-np.sin(phi), np.cos(phi), 0], + [0, 0, 1], + ] + ) orientation_matrix = orientation_matrix @ m3z if inversion_symmetry and corr_inv[ind_best_fit]: # Rotate 180 degrees around x axis for projected x-mirroring operation - orientation_matrix[:,1:] = -orientation_matrix[:,1:] - + orientation_matrix[:, 1:] = -orientation_matrix[:, 1:] + # Output best fit values into Orientation class orientation.matrix[match_ind] = orientation_matrix @@ -1260,14 +1390,16 @@ def match_single_pattern( ind_phi = ind_phi_inv[ind_best_fit] else: ind_phi = ind_phi[ind_best_fit] - orientation.inds[match_ind,0] = ind_best_fit - orientation.inds[match_ind,1] = ind_phi + orientation.inds[match_ind, 0] = ind_best_fit + orientation.inds[match_ind, 1] = ind_phi if inversion_symmetry: orientation.mirror[match_ind] = corr_inv[ind_best_fit] - orientation.angles[match_ind,:] = self.orientation_rotation_angles[ind_best_fit,:] - orientation.angles[match_ind,2] += phi + orientation.angles[match_ind, :] = self.orientation_rotation_angles[ + ind_best_fit, : + ] + orientation.angles[match_ind, 2] += phi # If point group is known, use pymatgen to calculate the symmetry- # reduced orientation matrix, producing the crystal direction family.
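The reduction mentioned in the comment above amounts to testing each symmetry operator and keeping the one that maps a direction into the zone-axis wedge, as symmetry_reduce_directions does near the end of this file. A minimal sketch under the assumption that symmetry_operators and symmetry_reduction are the (num_sym, 3, 3) arrays built in orientation_plan; reduce_direction is an illustrative name only:

import numpy as np

def reduce_direction(direction, symmetry_operators, symmetry_reduction):
    # direction @ R gives the wedge coordinates of the direction under each
    # reduction matrix R; all-non-negative coordinates mean "inside the wedge"
    coords = direction @ symmetry_reduction  # shape (num_sym, 3)
    in_range = np.all(coords >= 0, axis=1)
    # apply the first symmetry operator that brings the direction into range
    return symmetry_operators[np.argmax(in_range)] @ direction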
@@ -1275,25 +1407,35 @@ def match_single_pattern( orientation = self.symmetry_reduce_directions( orientation, match_ind=match_ind, - ) + ) else: # No more matches are detected, so output default orientation matrix and leave corr = 0 - orientation.matrix[match_ind] = np.squeeze(self.orientation_rotation_matrices[0, :, :]) + orientation.matrix[match_ind] = np.squeeze( + self.orientation_rotation_matrices[0, :, :] + ) if verbose: if self.pymatgen_available: - if np.abs(self.cell[5]-120.0) < 1e-6: - x_proj_lattice = self.lattice_to_hexagonal(self.cartesian_to_lattice(orientation.family[match_ind][:, 0])) - x_proj_lattice = np.round(x_proj_lattice,decimals=3) - zone_axis_lattice = self.lattice_to_hexagonal(self.cartesian_to_lattice(orientation.family[match_ind][:, 2])) - zone_axis_lattice = np.round(zone_axis_lattice,decimals=3) + if np.abs(self.cell[5] - 120.0) < 1e-6: + x_proj_lattice = self.lattice_to_hexagonal( + self.cartesian_to_lattice(orientation.family[match_ind][:, 0]) + ) + x_proj_lattice = np.round(x_proj_lattice, decimals=3) + zone_axis_lattice = self.lattice_to_hexagonal( + self.cartesian_to_lattice(orientation.family[match_ind][:, 2]) + ) + zone_axis_lattice = np.round(zone_axis_lattice, decimals=3) else: if np.max(np.abs(orientation.family)) > 0.1: - x_proj_lattice = self.cartesian_to_lattice(orientation.family[match_ind][:, 0]) - x_proj_lattice = np.round(x_proj_lattice,decimals=3) - zone_axis_lattice = self.cartesian_to_lattice(orientation.family[match_ind][:, 2]) - zone_axis_lattice = np.round(zone_axis_lattice,decimals=3) + x_proj_lattice = self.cartesian_to_lattice( + orientation.family[match_ind][:, 0] + ) + x_proj_lattice = np.round(x_proj_lattice, decimals=3) + zone_axis_lattice = self.cartesian_to_lattice( + orientation.family[match_ind][:, 2] + ) + zone_axis_lattice = np.round(zone_axis_lattice, decimals=3) if orientation.corr[match_ind] > 0: print( @@ -1307,12 +1449,12 @@ def match_single_pattern( + str(np.round(orientation.corr[match_ind], decimals=3)) ) else: - print('No good match found for index ' + str(match_ind)) + print("No good match found for index " + str(match_ind)) else: zone_axis_fit = orientation.matrix[match_ind][:, 2] zone_axis_lattice = self.cartesian_to_lattice(zone_axis_fit) - zone_axis_lattice = np.round(zone_axis_lattice,decimals=3) + zone_axis_lattice = np.round(zone_axis_lattice, decimals=3) print( "Best fit zone axis (lattice) = (" + str(zone_axis_lattice) @@ -1323,7 +1465,7 @@ def match_single_pattern( # if needed, delete peaks for next iteration if num_matches_return > 1 and corr_value[ind_best_fit] > 0: - bragg_peaks_fit=self.generate_diffraction_pattern( + bragg_peaks_fit = self.generate_diffraction_pattern( orientation, ind_orientation=match_ind, sigma_excitation_error=self.orientation_kernel_size, @@ -1350,7 +1492,6 @@ def match_single_pattern( qy = qy[~remove] intensity = intensity[~remove] - # plotting correlation image if plot_corr is True: corr_plot = corr_value.copy() @@ -1376,7 +1517,9 @@ def match_single_pattern( self.orientation_inds[sub, 1].astype("int") + self.orientation_zone_axis_steps ) - inds_1D = np.ravel_multi_index([x_inds, y_inds], im_corr_zone_axis.shape) + inds_1D = np.ravel_multi_index( + [x_inds, y_inds], im_corr_zone_axis.shape + ) im_corr_zone_axis.ravel()[inds_1D] = corr_plot[sub] sub = self.orientation_inds[:, 2] == 1 @@ -1386,7 +1529,9 @@ def match_single_pattern( y_inds = self.orientation_zone_axis_steps - self.orientation_inds[ sub, 1 ].astype("int") - inds_1D = np.ravel_multi_index([x_inds, y_inds], 
im_corr_zone_axis.shape) + inds_1D = np.ravel_multi_index( + [x_inds, y_inds], im_corr_zone_axis.shape + ) im_corr_zone_axis.ravel()[inds_1D] = corr_plot[sub] sub = self.orientation_inds[:, 2] == 2 @@ -1397,7 +1542,9 @@ def match_single_pattern( self.orientation_inds[sub, 1].astype("int") + self.orientation_zone_axis_steps ) - inds_1D = np.ravel_multi_index([x_inds, y_inds], im_corr_zone_axis.shape) + inds_1D = np.ravel_multi_index( + [x_inds, y_inds], im_corr_zone_axis.shape + ) im_corr_zone_axis.ravel()[inds_1D] = corr_plot[sub] sub = self.orientation_inds[:, 2] == 3 @@ -1407,7 +1554,9 @@ def match_single_pattern( y_inds = self.orientation_zone_axis_steps - self.orientation_inds[ sub, 1 ].astype("int") - inds_1D = np.ravel_multi_index([x_inds, y_inds], im_corr_zone_axis.shape) + inds_1D = np.ravel_multi_index( + [x_inds, y_inds], im_corr_zone_axis.shape + ) im_corr_zone_axis.ravel()[inds_1D] = corr_plot[sub] im_plot = (im_corr_zone_axis - cmin) / (cmax - cmin) @@ -1433,7 +1582,9 @@ def match_single_pattern( self.orientation_inds[sub, 1].astype("int") + self.orientation_zone_axis_steps ) - inds_1D = np.ravel_multi_index([x_inds, y_inds], im_corr_zone_axis.shape) + inds_1D = np.ravel_multi_index( + [x_inds, y_inds], im_corr_zone_axis.shape + ) im_corr_zone_axis.ravel()[inds_1D] = corr_plot[sub] sub = self.orientation_inds[:, 2] == 1 @@ -1443,7 +1594,9 @@ def match_single_pattern( y_inds = self.orientation_zone_axis_steps - self.orientation_inds[ sub, 1 ].astype("int") - inds_1D = np.ravel_multi_index([x_inds, y_inds], im_corr_zone_axis.shape) + inds_1D = np.ravel_multi_index( + [x_inds, y_inds], im_corr_zone_axis.shape + ) im_corr_zone_axis.ravel()[inds_1D] = corr_plot[sub] im_plot = (im_corr_zone_axis - cmin) / (cmax - cmin) @@ -1469,33 +1622,42 @@ def match_single_pattern( ) # Image indices - x_inds = (self.orientation_inds[:, 0] - self.orientation_inds[:, 1]).astype( - "int" - ) + x_inds = ( + self.orientation_inds[:, 0] - self.orientation_inds[:, 1] + ).astype("int") y_inds = self.orientation_inds[:, 1].astype("int") # Check vertical range of the orientation triangle. - if self.orientation_fiber_angles is not None \ - and np.abs(self.orientation_fiber_angles[0] - 180.0) > 1e-3: + if ( + self.orientation_fiber_angles is not None + and np.abs(self.orientation_fiber_angles[0] - 180.0) > 1e-3 + ): # Orientation covers only top of orientation sphere - inds_1D = np.ravel_multi_index([x_inds, y_inds], im_corr_zone_axis.shape) + inds_1D = np.ravel_multi_index( + [x_inds, y_inds], im_corr_zone_axis.shape + ) im_corr_zone_axis.ravel()[inds_1D] = corr_plot im_mask.ravel()[inds_1D] = False else: # Orientation covers full vertical range of orientation sphere. 
# top half - sub = self.orientation_inds[:,2] == 0 - inds_1D = np.ravel_multi_index([x_inds[sub], y_inds[sub]], im_corr_zone_axis.shape) + sub = self.orientation_inds[:, 2] == 0 + inds_1D = np.ravel_multi_index( + [x_inds[sub], y_inds[sub]], im_corr_zone_axis.shape + ) im_corr_zone_axis.ravel()[inds_1D] = corr_plot[sub] im_mask.ravel()[inds_1D] = False # bottom half - sub = self.orientation_inds[:,2] == 1 - inds_1D = np.ravel_multi_index([ - self.orientation_zone_axis_steps - y_inds[sub], - self.orientation_zone_axis_steps - x_inds[sub] - ],im_corr_zone_axis.shape) + sub = self.orientation_inds[:, 2] == 1 + inds_1D = np.ravel_multi_index( + [ + self.orientation_zone_axis_steps - y_inds[sub], + self.orientation_zone_axis_steps - x_inds[sub], + ], + im_corr_zone_axis.shape, + ) im_corr_zone_axis.ravel()[inds_1D] = corr_plot[sub] im_mask.ravel()[inds_1D] = False @@ -1506,14 +1668,15 @@ def match_single_pattern( else: im_plot = im_corr_zone_axis - ax[0].imshow(im_plot, cmap="viridis", vmin=0.0, vmax=1.0) ax[0].spines["left"].set_color("none") ax[0].spines["right"].set_color("none") ax[0].spines["top"].set_color("none") ax[0].spines["bottom"].set_color("none") - inds_plot = np.unravel_index(np.argmax(im_plot, axis=None), im_plot.shape) + inds_plot = np.unravel_index( + np.argmax(im_plot, axis=None), im_plot.shape + ) ax[0].scatter( inds_plot[1], inds_plot[0], @@ -1523,29 +1686,44 @@ def match_single_pattern( edgecolors="r", ) - if np.abs(self.cell[5]-120.0) < 1e-6: + if np.abs(self.cell[5] - 120.0) < 1e-6: label_0 = self.rational_ind( self.lattice_to_hexagonal( - self.cartesian_to_lattice( - self.orientation_zone_axis_range[0, :]))) + self.cartesian_to_lattice( + self.orientation_zone_axis_range[0, :] + ) + ) + ) label_1 = self.rational_ind( self.lattice_to_hexagonal( - self.cartesian_to_lattice( - self.orientation_zone_axis_range[1, :]))) + self.cartesian_to_lattice( + self.orientation_zone_axis_range[1, :] + ) + ) + ) label_2 = self.rational_ind( self.lattice_to_hexagonal( - self.cartesian_to_lattice( - self.orientation_zone_axis_range[2, :]))) + self.cartesian_to_lattice( + self.orientation_zone_axis_range[2, :] + ) + ) + ) else: label_0 = self.rational_ind( self.cartesian_to_lattice( - self.orientation_zone_axis_range[0, :])) + self.orientation_zone_axis_range[0, :] + ) + ) label_1 = self.rational_ind( self.cartesian_to_lattice( - self.orientation_zone_axis_range[1, :])) + self.orientation_zone_axis_range[1, :] + ) + ) label_2 = self.rational_ind( self.cartesian_to_lattice( - self.orientation_zone_axis_range[2, :])) + self.orientation_zone_axis_range[2, :] + ) + ) ax[0].set_xticks([0, self.orientation_zone_axis_steps]) ax[0].set_xticklabels([str(label_0), str(label_2)], size=14) @@ -1564,17 +1742,17 @@ def match_single_pattern( sig_in_plane, ) - # Add markers for the best fit + # Add markers for the best fit tol = 0.01 sub = sig_in_plane > 1 - tol ax[1].scatter( - self.orientation_gamma[sub] * 180 / np.pi, - sig_in_plane[sub], - s=120, - linewidth=2, - facecolors="none", - edgecolors="r", - ) + self.orientation_gamma[sub] * 180 / np.pi, + sig_in_plane[sub], + s=120, + linewidth=2, + facecolors="none", + edgecolors="r", + ) ax[1].set_xlabel("In-plane rotation angle [deg]", size=16) ax[1].set_ylabel("Corr. 
of Best Fit Zone Axis", size=16) @@ -1588,25 +1766,267 @@ def match_single_pattern( return orientation +def cluster_grains( + self, + threshold_add=1.0, + threshold_grow=0.1, + angle_tolerance_deg=5.0, + progress_bar=True, +): + """ + Cluster grains using rotation criterion, and correlation values. + + Parameters + -------- + threshold_add: float + Minimum signal required for a probe position to initialize a cluster. + threshold_grow: float + Minimum signal required for a probe position to be added to a cluster. + angle_tolerance_deg: float + Rotation rolerance for clustering grains. + progress_bar: bool + Turns on the progress bar for the polar transformation + + """ + + # symmetry operators + sym = self.symmetry_operators + + # Get data + # Correlation data = signal to cluster with + sig = self.orientation_map.corr.copy() + sig_init = sig.copy() + mark = sig >= threshold_grow + sig[np.logical_not(mark)] = 0 + # orientation matrix used for angle tolerance + matrix = self.orientation_map.matrix.copy() + + # init + self.cluster_sizes = np.array((), dtype="int") + self.cluster_sig = np.array(()) + self.cluster_inds = [] + self.cluster_orientation = [] + inds_all = np.zeros_like(sig, dtype="int") + inds_all.ravel()[:] = np.arange(inds_all.size) + + # Tolerance + tol = np.deg2rad(angle_tolerance_deg) + + # Main loop + search = True + comp = 0.0 + mark_total = np.sum(np.max(mark, axis=2)) + pbar = tqdm(total=mark_total, disable=not progress_bar) + while search is True: + inds_grain = np.argmax(sig) + + val = sig.ravel()[inds_grain] + + if val < threshold_add: + search = False + + else: + # Start cluster + x, y, z = np.unravel_index(inds_grain, sig.shape) + mark[x, y, z] = False + sig[x, y, z] = 0 + matrix_cluster = matrix[x, y, z] + orientation_cluster = self.orientation_map.get_orientation_single(x, y, z) + + # Neighbors to search + xr = np.clip(x + np.arange(-1, 2, dtype="int"), 0, sig.shape[0] - 1) + yr = np.clip(y + np.arange(-1, 2, dtype="int"), 0, sig.shape[1] - 1) + inds_cand = inds_all[xr[:, None], yr[None], :].ravel() + inds_cand = np.delete(inds_cand, mark.ravel()[inds_cand] == False) + + if inds_cand.size == 0: + grow = False + else: + grow = True + + # grow the cluster + while grow is True: + inds_new = np.array((), dtype="int") + + keep = np.zeros(inds_cand.size, dtype="bool") + for a0 in range(inds_cand.size): + xc, yc, zc = np.unravel_index(inds_cand[a0], sig.shape) + + # Angle test between orientation matrices + dphi = np.min( + np.arccos( + np.clip( + ( + np.trace( + self.symmetry_operators + @ matrix[xc, yc, zc] + @ np.transpose(matrix_cluster), + axis1=1, + axis2=2, + ) + - 1 + ) + / 2, + -1, + 1, + ) + ) + ) + + if np.abs(dphi) < tol: + keep[a0] = True + + sig[xc, yc, zc] = 0 + mark[xc, yc, zc] = False + + xr = np.clip( + xc + np.arange(-1, 2, dtype="int"), 0, sig.shape[0] - 1 + ) + yr = np.clip( + yc + np.arange(-1, 2, dtype="int"), 0, sig.shape[1] - 1 + ) + inds_add = inds_all[xr[:, None], yr[None], :].ravel() + inds_new = np.append(inds_new, inds_add) + + inds_grain = np.append(inds_grain, inds_cand[keep]) + inds_cand = np.unique( + np.delete(inds_new, mark.ravel()[inds_new] == False) + ) + + if inds_cand.size == 0: + grow = False + + # convert grain to x,y coordinates, add = list + xg, yg, zg = np.unravel_index(inds_grain, sig.shape) + xyg = np.unique(np.vstack((xg, yg)), axis=1) + sig_mean = np.mean(sig_init.ravel()[inds_grain]) + self.cluster_sizes = np.append(self.cluster_sizes, xyg.shape[1]) + self.cluster_sig = np.append(self.cluster_sig, sig_mean) + 
self.cluster_orientation.append(orientation_cluster) + self.cluster_inds.append(xyg) + + # update progressbar + new_marks = mark_total - np.sum(np.max(mark, axis=2)) + pbar.update(new_marks) + mark_total -= new_marks + + pbar.close() + + +def cluster_orientation_map( + self, + stripe_width=(2, 2), + area_min=2, +): + """ + Produce a new orientation map from the clustered grains. + Use a stripe pattern for the overlapping grains. + + Parameters + -------- + stripe_width: (int,int) + Width of stripes for plotting maps with overlapping grains + area_min: (int) + Minimum size of grains to include + + Returns + -------- + + orientation_map + The clustered orientation map + + """ + + # init + orientation_map = OrientationMap( + num_x=self.orientation_map.num_x, + num_y=self.orientation_map.num_y, + num_matches=1, + ) + im_grain = np.zeros( + (self.orientation_map.num_x, self.orientation_map.num_y), dtype="bool" + ) + im_count = np.zeros((self.orientation_map.num_x, self.orientation_map.num_y)) + im_mark = np.zeros((self.orientation_map.num_x, self.orientation_map.num_y)) + + # Loop over grains to determine number in each pixel + for a0 in range(self.cluster_sizes.shape[0]): + if self.cluster_sizes[a0] >= area_min: + im_grain[:] = False + im_grain[ + self.cluster_inds[a0][0, :], + self.cluster_inds[a0][1, :], + ] = True + im_count += im_grain + im_stripe = im_count >= 2 + im_single = np.logical_not(im_stripe) + + # prefactor for stripes + if stripe_width[0] == 0: + dx = 0 + else: + dx = 1 / stripe_width[0] + if stripe_width[1] == 0: + dy = 0 + else: + dy = 1 / stripe_width[1] + + # loop over grains + for a0 in range(self.cluster_sizes.shape[0]): + if self.cluster_sizes[a0] >= area_min: + im_grain[:] = False + im_grain[ + self.cluster_inds[a0][0, :], + self.cluster_inds[a0][1, :], + ] = True + + # non-overlapping grains + sub = np.logical_and(im_grain, im_single) + x, y = np.unravel_index(np.where(sub.ravel()), im_grain.shape) + x = np.atleast_1d(np.squeeze(x)) + y = np.atleast_1d(np.squeeze(y)) + for a1 in range(x.size): + orientation_map.set_orientation( + self.cluster_orientation[a0], x[a1], y[a1] + ) + + # overlapping grains + sub = np.logical_and(im_grain, im_stripe) + x, y = np.unravel_index(np.where(sub.ravel()), im_grain.shape) + x = np.atleast_1d(np.squeeze(x)) + y = np.atleast_1d(np.squeeze(y)) + for a1 in range(x.size): + d = np.mod( + x[a1] * dx + y[a1] * dy + im_mark[x[a1], y[a1]] + +0.5, + im_count[x[a1], y[a1]], + ) + + if d < 1.0: + orientation_map.set_orientation( + self.cluster_orientation[a0], x[a1], y[a1] + ) + im_mark[x[a1], y[a1]] += 1 + + return orientation_map def calculate_strain( self, bragg_peaks_array: PointListArray, orientation_map: OrientationMap, - corr_kernel_size = None, - sigma_excitation_error = 0.02, + corr_kernel_size=None, + sigma_excitation_error=0.02, tol_excitation_error_mult: float = 3, tol_intensity: float = 1e-4, k_max: Optional[float] = None, - min_num_peaks = 5, - rotation_range = None, - mask_from_corr = True, - corr_range = (0, 2), - corr_normalize = True, - progress_bar = True, - ): - ''' + min_num_peaks=5, + rotation_range=None, + mask_from_corr=True, + corr_range=(0, 2), + corr_normalize=True, + progress_bar=True, +): + """ This function takes in both a PointListArray containing Bragg peaks, and a corresponding OrientationMap, and uses least squares to compute the deformation tensor which transforms the simulated diffraction pattern @@ -1633,43 +2053,41 @@ def calculate_strain( Returns: strain_map (RealSlice): strain tensor - ''' + """ # 
Initialize empty strain maps strain_map = RealSlice( - data=np.zeros(( - 5, - bragg_peaks_array.shape[0], - bragg_peaks_array.shape[1] - )), - slicelabels=('e_xx','e_yy','e_xy','theta','mask'), - name='strain_map') + data=np.zeros((5, bragg_peaks_array.shape[0], bragg_peaks_array.shape[1])), + slicelabels=("e_xx", "e_yy", "e_xy", "theta", "mask"), + name="strain_map", + ) if mask_from_corr: corr_range = np.array(corr_range) - corr_mask = orientation_map.corr[:,:,0] + corr_mask = orientation_map.corr[:, :, 0] if corr_normalize: corr_mask /= np.mean(corr_mask) - corr_mask = np.clip((corr_mask - corr_range[0]) \ - / (corr_range[1]-corr_range[0]),0,1) - strain_map.get_slice('mask').data[:] = corr_mask + corr_mask = np.clip( + (corr_mask - corr_range[0]) / (corr_range[1] - corr_range[0]), 0, 1 + ) + strain_map.get_slice("mask").data[:] = corr_mask else: - strain_map.get_slice('mask').data[:] = 1.0 + strain_map.get_slice("mask").data[:] = 1.0 # init values if corr_kernel_size is None: corr_kernel_size = self.orientation_kernel_size radius_max_2 = corr_kernel_size**2 - #check cal state - if bragg_peaks_array.calstate['ellipse'] == False: + # check cal state + if bragg_peaks_array.calstate["ellipse"] == False: ellipse = False - warn('bragg peaks not elliptically calibrated') + warn("bragg peaks not elliptically calibrated") else: ellipse = True - if bragg_peaks_array.calstate['rotate'] == False: + if bragg_peaks_array.calstate["rotate"] == False: rotate = False - warn('bragg peaks not rotationally calibrated') + warn("bragg peaks not rotationally calibrated") else: rotate = True @@ -1679,7 +2097,7 @@ def calculate_strain( desc="Calculating strains", unit=" PointList", disable=not progress_bar, - ): + ): # Get bragg peaks from experiment and reference p = bragg_peaks_array.get_vectors( scan_x=rx, @@ -1687,26 +2105,27 @@ def calculate_strain( center=True, ellipse=ellipse, pixel=True, - rotate=rotate + rotate=rotate, ) if p.data.shape[0] >= min_num_peaks: p_ref = self.generate_diffraction_pattern( - orientation_map.get_orientation(rx,ry), - sigma_excitation_error = sigma_excitation_error, - tol_excitation_error_mult = tol_excitation_error_mult, - tol_intensity = tol_intensity, - k_max = k_max, + orientation_map.get_orientation(rx, ry), + sigma_excitation_error=sigma_excitation_error, + tol_excitation_error_mult=tol_excitation_error_mult, + tol_intensity=tol_intensity, + k_max=k_max, ) # init - keep = np.zeros(p.data.shape[0],dtype='bool') - inds_match = np.zeros(p.data.shape[0],dtype='int') + keep = np.zeros(p.data.shape[0], dtype="bool") + inds_match = np.zeros(p.data.shape[0], dtype="int") # Pair off experimental Bragg peaks with reference peaks for a0 in range(p.data.shape[0]): - dist_2 = (p.data['qx'][a0] - p_ref.data['qx'])**2 \ - + (p.data['qy'][a0] - p_ref.data['qy'])**2 + dist_2 = (p.data["qx"][a0] - p_ref.data["qx"]) ** 2 + ( + p.data["qy"][a0] - p_ref.data["qy"] + ) ** 2 ind_min = np.argmin(dist_2) if dist_2[ind_min] <= radius_max_2: @@ -1714,46 +2133,47 @@ def calculate_strain( keep[a0] = True # Get all paired peaks - qxy = np.vstack(( - p.data['qx'][keep], - p.data['qy'][keep])).T - qxy_ref = np.vstack(( - p_ref.data['qx'][inds_match[keep]], - p_ref.data['qy'][inds_match[keep]])).T + qxy = np.vstack((p.data["qx"][keep], p.data["qy"][keep])).T + qxy_ref = np.vstack( + (p_ref.data["qx"][inds_match[keep]], p_ref.data["qy"][inds_match[keep]]) + ).T # Apply intensity weighting from experimental measurements - qxy *= p.data['intensity'][keep,None] - qxy_ref *= p.data['intensity'][keep,None] + 
qxy *= p.data["intensity"][keep, None] + qxy_ref *= p.data["intensity"][keep, None] # Fit transformation matrix - # Note - not sure about transpose here + # Note - not sure about transpose here # (though it might not matter if rotation isn't included) m = lstsq(qxy_ref, qxy, rcond=None)[0].T # Get the infinitesimal strain matrix - strain_map.get_slice('e_xx').data[rx,ry] = 1 - m[0,0] - strain_map.get_slice('e_yy').data[rx,ry] = 1 - m[1,1] - strain_map.get_slice('e_xy').data[rx,ry] = -(m[0,1]+m[1,0])/2.0 - strain_map.get_slice('theta').data[rx,ry] = (m[0,1]-m[1,0])/2.0 + strain_map.get_slice("e_xx").data[rx, ry] = 1 - m[0, 0] + strain_map.get_slice("e_yy").data[rx, ry] = 1 - m[1, 1] + strain_map.get_slice("e_xy").data[rx, ry] = -(m[0, 1] + m[1, 0]) / 2.0 + strain_map.get_slice("theta").data[rx, ry] = (m[0, 1] - m[1, 0]) / 2.0 # Add finite rotation from ACOM orientation map. # I am not sure about the relative signs here. # Also, I need to add in the mirror operator. - if orientation_map.mirror[rx,ry,0]: - strain_map.get_slice('theta').data[rx,ry] \ - += (orientation_map.angles[rx,ry,0,0] \ - + orientation_map.angles[rx,ry,0,2]) + if orientation_map.mirror[rx, ry, 0]: + strain_map.get_slice("theta").data[rx, ry] += ( + orientation_map.angles[rx, ry, 0, 0] + + orientation_map.angles[rx, ry, 0, 2] + ) else: - strain_map.get_slice('theta').data[rx,ry] \ - -= (orientation_map.angles[rx,ry,0,0] \ - + orientation_map.angles[rx,ry,0,2]) + strain_map.get_slice("theta").data[rx, ry] -= ( + orientation_map.angles[rx, ry, 0, 0] + + orientation_map.angles[rx, ry, 0, 2] + ) else: - strain_map.get_slice('mask').data[rx,ry] = 0.0 + strain_map.get_slice("mask").data[rx, ry] = 0.0 if rotation_range is not None: - strain_map.get_slice('theta').data[:] \ - = np.mod(strain_map.get_slice('theta').data[:], rotation_range) + strain_map.get_slice("theta").data[:] = np.mod( + strain_map.get_slice("theta").data[:], rotation_range + ) return strain_map @@ -1765,8 +2185,8 @@ def save_ang_file( ind_orientation=0, pixel_size=1.0, pixel_units="px", - transpose_xy = True, - flip_x = False, + transpose_xy=True, + flip_x=False, ): """ This function outputs an ascii text file in the .ang format, containing @@ -1795,8 +2215,8 @@ def save_ang_file( pixel_size=pixel_size, pixel_units=pixel_units, return_color_key=False, - transpose_xy = transpose_xy, - flip_x = flip_x, + transpose_xy=transpose_xy, + flip_x=flip_x, ) file_writer(file_name, xmap) @@ -1807,14 +2227,19 @@ def orientation_map_to_orix_CrystalMap( orientation_map, ind_orientation=0, pixel_size=1.0, - pixel_units='px', - transpose_xy = True, - flip_x = False, - return_color_key=False - ): + pixel_units="px", + transpose_xy=True, + flip_x=False, + return_color_key=False, +): try: from orix.quaternion import Rotation, Orientation - from orix.crystal_map import CrystalMap, Phase, PhaseList, create_coordinate_arrays + from orix.crystal_map import ( + CrystalMap, + Phase, + PhaseList, + create_coordinate_arrays, + ) from orix.plot import IPFColorKeyTSL except ImportError: raise Exception("orix failed to import; try pip installing separately") @@ -1831,9 +2256,9 @@ def orientation_map_to_orix_CrystalMap( import warnings # Get orientation matrices - orientation_matrices = orientation_map.matrix[:,:,ind_orientation].copy() + orientation_matrices = orientation_map.matrix[:, :, ind_orientation].copy() if transpose_xy: - orientation_matrices = np.transpose(orientation_matrices, (1,0,2,3)) + orientation_matrices = np.transpose(orientation_matrices, (1, 0, 2, 3)) if flip_x: 
orientation_matrices = np.flip(orientation_matrices, axis=0) @@ -1841,40 +2266,49 @@ def orientation_map_to_orix_CrystalMap( # suppress gimbal lock warnings def fxn(): warnings.warn("deprecated", DeprecationWarning) + with warnings.catch_warnings(): warnings.simplefilter("ignore") angles = np.vstack( [ - R.from_matrix(matrix.T).as_euler('zxz') - for matrix in orientation_matrices.reshape(-1,3,3) + R.from_matrix(matrix.T).as_euler("zxz") + for matrix in orientation_matrices.reshape(-1, 3, 3) ] ) # generate a list of Rotation objects from the Euler angles - rotations = Rotation.from_euler(angles, direction='crystal2lab') + rotations = Rotation.from_euler(angles, direction="crystal2lab") # Generate x,y coordinates since orix uses flat data internally # coords, _ = create_coordinate_arrays((orientation_map.num_x,orientation_map.num_y),(pixel_size,)*2) - coords, _ = create_coordinate_arrays((orientation_matrices.shape[0],orientation_matrices.shape[1]),(pixel_size,)*2) + coords, _ = create_coordinate_arrays( + (orientation_matrices.shape[0], orientation_matrices.shape[1]), + (pixel_size,) * 2, + ) # Generate an orix structure from the Crystal - atoms = [ Atom( element_symbols[Z-1], pos) for Z, pos in zip(self.numbers, self.positions)] + atoms = [ + Atom(element_symbols[Z - 1], pos) + for Z, pos in zip(self.numbers, self.positions) + ] structure = Structure( atoms=atoms, lattice=Lattice(*self.cell), ) - + # Use pymatgen to get the symmetry - pg_structure = pgStructure(self.lat_real, self.numbers, self.positions, coords_are_cartesian=False) + pg_structure = pgStructure( + self.lat_real, self.numbers, self.positions, coords_are_cartesian=False + ) pointgroup = SpacegroupAnalyzer(pg_structure).get_point_group_symbol() - + # If the structure has only one element, name the phase based on the element if np.unique(self.numbers).size == 1: - name = element_symbols[self.numbers[0]-1] + name = element_symbols[self.numbers[0] - 1] else: - name = pg_structure.formula - + name = pg_structure.formula + + # Generate an orix Phase to store symmetry phase = Phase( name=name, @@ -1902,25 +2336,25 @@ def fxn(): def symmetry_reduce_directions( self, orientation, - match_ind = 0, - plot_output = False, - figsize = (15,6), - el_shift = 0.0, - az_shift = -30.0, - ): - ''' + match_ind=0, + plot_output=False, + figsize=(15, 6), + el_shift=0.0, + az_shift=-30.0, +): + """ This function calculates the symmetry-reduced cartesian directions from an orientation matrix stored in orientation.matrix, and outputs them into orientation.family. It optionally plots the 3D output.
- ''' + """ # optional plot if plot_output: - bound = 1.05; - cam_dir = np.mean(self.orientation_zone_axis_range,axis=0) + bound = 1.05 + cam_dir = np.mean(self.orientation_zone_axis_range, axis=0) cam_dir = cam_dir / np.linalg.norm(cam_dir) - az = np.rad2deg(np.arctan2(cam_dir[0],cam_dir[1])) + az_shift + az = np.rad2deg(np.arctan2(cam_dir[0], cam_dir[1])) + az_shift # if np.abs(self.orientation_fiber_angles[0] - 180.0) < 1e-3: # el = 10 # else: @@ -1928,27 +2362,33 @@ def symmetry_reduce_directions( el = 0 fig = plt.figure(figsize=figsize) - num_points = 10; - t = np.linspace(0,1,num=num_points+1,endpoint=True) - d = np.array([[0,1],[0,2],[1,2]]) + num_points = 10 + t = np.linspace(0, 1, num=num_points + 1, endpoint=True) + d = np.array([[0, 1], [0, 2], [1, 2]]) orientation_zone_axis_range_flip = self.orientation_zone_axis_range.copy() - orientation_zone_axis_range_flip[0,:] = -1*orientation_zone_axis_range_flip[0,:] - + orientation_zone_axis_range_flip[0, :] = ( + -1 * orientation_zone_axis_range_flip[0, :] + ) # loop over orientation matrix directions for a0 in range(3): - in_range = np.all(np.sum(self.symmetry_reduction * \ - orientation.matrix[match_ind,:,a0][None,:,None], - axis=1) >= 0, - axis=1) - - orientation.family[match_ind,:,a0] = \ - self.symmetry_operators[np.argmax(in_range)] @ \ - orientation.matrix[match_ind,:,a0] + in_range = np.all( + np.sum( + self.symmetry_reduction + * orientation.matrix[match_ind, :, a0][None, :, None], + axis=1, + ) + >= 0, + axis=1, + ) + orientation.family[match_ind, :, a0] = ( + self.symmetry_operators[np.argmax(in_range)] + @ orientation.matrix[match_ind, :, a0] + ) # in_range = np.all(np.sum(self.symmetry_reduction * \ - # orientation.matrix[match_ind,:,a0][None,:,None], + # orientation.matrix[match_ind,:,a0][None,:,None], # axis=1) >= 0, # axis=1) # if np.any(in_range): @@ -1958,116 +2398,112 @@ def symmetry_reduce_directions( # else: # # Note this is a quick fix for fiber_angles[0] = 180 degrees # in_range = np.all(np.sum(self.symmetry_reduction * \ - # (np.array([1,1,-1])*orientation.matrix[match_ind,:,a0][None,:,None]), + # (np.array([1,1,-1])*orientation.matrix[match_ind,:,a0][None,:,None]), # axis=1) >= 0, # axis=1) # ind = np.argmax(in_range) # orientation.family[match_ind,:,a0] = self.symmetry_operators[ind] \ # @ (np.array([1,1,-1])*orientation.matrix[match_ind,:,a0]) - if plot_output: - ax = fig.add_subplot(1, 3, a0+1, - projection='3d', - elev=el, - azim=az) + ax = fig.add_subplot(1, 3, a0 + 1, projection="3d", elev=el, azim=az) # draw orienation triangle for a1 in range(d.shape[0]): - v = self.orientation_zone_axis_range[d[a1,0],:][None,:]*t[:,None] + \ - self.orientation_zone_axis_range[d[a1,1],:][None,:]*(1-t[:,None]) - v = v / np.linalg.norm(v,axis=1)[:,None] + v = self.orientation_zone_axis_range[d[a1, 0], :][None, :] * t[ + :, None + ] + self.orientation_zone_axis_range[d[a1, 1], :][None, :] * ( + 1 - t[:, None] + ) + v = v / np.linalg.norm(v, axis=1)[:, None] ax.plot( - v[:,1], - v[:,0], - v[:,2], - c='k', - ) - v = self.orientation_zone_axis_range[a1,:][None,:]*t[:,None] + v[:, 1], + v[:, 0], + v[:, 2], + c="k", + ) + v = self.orientation_zone_axis_range[a1, :][None, :] * t[:, None] ax.plot( - v[:,1], - v[:,0], - v[:,2], - c='k', - ) - + v[:, 1], + v[:, 0], + v[:, 2], + c="k", + ) # if needed, draw orientation diamond - if self.orientation_fiber_angles is not None \ - and np.abs(self.orientation_fiber_angles[0] - 180.0) < 1e-3: - for a1 in range(d.shape[0]-1): - v = 
orientation_zone_axis_range_flip[d[a1,0],:][None,:]*t[:,None] + \ - orientation_zone_axis_range_flip[d[a1,1],:][None,:]*(1-t[:,None]) - v = v / np.linalg.norm(v,axis=1)[:,None] + if ( + self.orientation_fiber_angles is not None + and np.abs(self.orientation_fiber_angles[0] - 180.0) < 1e-3 + ): + for a1 in range(d.shape[0] - 1): + v = orientation_zone_axis_range_flip[d[a1, 0], :][None, :] * t[ + :, None + ] + orientation_zone_axis_range_flip[d[a1, 1], :][None, :] * ( + 1 - t[:, None] + ) + v = v / np.linalg.norm(v, axis=1)[:, None] ax.plot( - v[:,1], - v[:,0], - v[:,2], - c='k', - ) - v = orientation_zone_axis_range_flip[0,:][None,:]*t[:,None] - ax.plot( - v[:,1], - v[:,0], - v[:,2], - c='k', + v[:, 1], + v[:, 0], + v[:, 2], + c="k", ) + v = orientation_zone_axis_range_flip[0, :][None, :] * t[:, None] + ax.plot( + v[:, 1], + v[:, 0], + v[:, 2], + c="k", + ) # add points - p = self.symmetry_operators @ \ - orientation.matrix[match_ind,:,a0] + p = self.symmetry_operators @ orientation.matrix[match_ind, :, a0] ax.scatter( - xs=p[:,1], - ys=p[:,0], - zs=p[:,2], + xs=p[:, 1], + ys=p[:, 0], + zs=p[:, 2], s=10, - marker='o', + marker="o", # c='k', ) - v = orientation.family[match_ind,:,a0][None,:]*t[:,None] + v = orientation.family[match_ind, :, a0][None, :] * t[:, None] ax.plot( - v[:,1], - v[:,0], - v[:,2], - c='k', - ) + v[:, 1], + v[:, 0], + v[:, 2], + c="k", + ) ax.scatter( - xs=orientation.family[match_ind,1,a0], - ys=orientation.family[match_ind,0,a0], - zs=orientation.family[match_ind,2,a0], + xs=orientation.family[match_ind, 1, a0], + ys=orientation.family[match_ind, 0, a0], + zs=orientation.family[match_ind, 2, a0], s=160, - marker='o', + marker="o", facecolors="None", - edgecolors='r', + edgecolors="r", ) ax.scatter( - xs=orientation.matrix[match_ind,1,a0], - ys=orientation.matrix[match_ind,0,a0], - zs=orientation.matrix[match_ind,2,a0], + xs=orientation.matrix[match_ind, 1, a0], + ys=orientation.matrix[match_ind, 0, a0], + zs=orientation.matrix[match_ind, 2, a0], s=80, - marker='o', + marker="o", facecolors="None", - edgecolors='c', + edgecolors="c", ) - - ax.invert_yaxis() ax.axes.set_xlim3d(left=-bound, right=bound) ax.axes.set_ylim3d(bottom=-bound, top=bound) ax.axes.set_zlim3d(bottom=-bound, top=bound) axisEqual3D(ax) - - if plot_output: plt.show() - return orientation - # zone axis range arguments for orientation_plan corresponding # to the symmetric wedge for each pointgroup, in the order: # [zone_axis_range, fiber_axis, fiber_angles] @@ -2094,11 +2530,11 @@ def symmetry_reduce_directions( "-3m": ["fiber", [0, 0, 1], [90.0, 60.0]], "6": ["fiber", [0, 0, 1], [180.0, 60.0]], "-6": ["fiber", [0, 0, 1], [180.0, 60.0]], - "6/m": [[[1, 0, 0], [0.5, 0.5*np.sqrt(3), 0]], None, None], + "6/m": [[[1, 0, 0], [0.5, 0.5 * np.sqrt(3), 0]], None, None], "622": ["fiber", [0, 0, 1], [180.0, 30.0]], "6mm": ["fiber", [0, 0, 1], [180.0, 30.0]], "-6m2": ["fiber", [0, 0, 1], [90.0, 60.0]], - "6/mmm": [[[0.5*np.sqrt(3), 0.5, 0.0], [1, 0, 0]], None, None], + "6/mmm": [[[0.5 * np.sqrt(3), 0.5, 0.0], [1, 0, 0]], None, None], "23": [ [[1, 0, 0], [1, 1, 1]], None, @@ -2110,5 +2546,5 @@ def symmetry_reduce_directions( "m-3m": [[[0, 1, 1], [1, 1, 1]], None, None], } - # "-3m": ["fiber", [0, 0, 1], [90.0, 60.0]], - # "-3m": ["fiber", [0, 0, 1], [180.0, 30.0]], \ No newline at end of file +# "-3m": ["fiber", [0, 0, 1], [90.0, 60.0]], +# "-3m": ["fiber", [0, 0, 1], [180.0, 30.0]], diff --git a/py4DSTEM/process/diffraction/crystal_calibrate.py b/py4DSTEM/process/diffraction/crystal_calibrate.py index 
1b65480f5..c068bf79e 100644 --- a/py4DSTEM/process/diffraction/crystal_calibrate.py +++ b/py4DSTEM/process/diffraction/crystal_calibrate.py @@ -1,4 +1,3 @@ - import numpy as np from typing import Union, Optional from scipy.optimize import curve_fit @@ -12,23 +11,22 @@ pass - def calibrate_pixel_size( - self, - bragg_peaks, - scale_pixel_size = 1.0, - bragg_k_power = 1.0, - bragg_intensity_power = 1.0, - k_min = 0.0, - k_max = None, - k_step = 0.002, - k_broadening = 0.002, - fit_all_intensities = True, - set_calibration_in_place = False, - verbose = True, - plot_result = False, - figsize: Union[list, tuple, np.ndarray] = (12, 6), - returnfig = False, + self, + bragg_peaks, + scale_pixel_size=1.0, + bragg_k_power=1.0, + bragg_intensity_power=1.0, + k_min=0.0, + k_max=None, + k_step=0.002, + k_broadening=0.002, + fit_all_intensities=True, + set_calibration_in_place=False, + verbose=True, + plot_result=False, + figsize: Union[list, tuple, np.ndarray] = (12, 6), + returnfig=False, ): """ Use the calculated structure factor scattering lengths to compute 1D @@ -63,23 +61,18 @@ def calibrate_pixel_size( Returns _______ - - fig, ax: handles, optional + + fig, ax: handles, optional Figure and axis handles, if returnfig=True. """ assert hasattr(self, "struct_factors"), "Compute structure factors first..." - #Prepare experimental data + # Prepare experimental data k, int_exp = self.calculate_bragg_peak_histogram( - bragg_peaks, - bragg_k_power, - bragg_intensity_power, - k_min, - k_max, - k_step + bragg_peaks, bragg_k_power, bragg_intensity_power, k_min, k_max, k_step ) # Perform fitting @@ -121,13 +114,13 @@ def fit_profile(k, *coefs): # if requested, apply calibrations in place if set_calibration_in_place: - bragg_peaks.calibration.set_Q_pixel_size( pixel_size_new ) - bragg_peaks.calibration.set_Q_pixel_units('A^-1') + bragg_peaks.calibration.set_Q_pixel_size(pixel_size_new) + bragg_peaks.calibration.set_Q_pixel_units("A^-1") # Output calibrated Bragg peaks bragg_peaks_cali = bragg_peaks.copy() - bragg_peaks_cali.calibration.set_Q_pixel_size( pixel_size_new ) - bragg_peaks_cali.calibration.set_Q_pixel_units('A^-1') + bragg_peaks_cali.calibration.set_Q_pixel_size(pixel_size_new) + bragg_peaks_cali.calibration.set_Q_pixel_units("A^-1") # Output pixel size if verbose: @@ -136,13 +129,12 @@ def fit_profile(k, *coefs): # Plotting if plot_result: if int_scale.shape[0] < self.g_vec_leng.shape[0]: - int_scale = np.hstack(( - int_scale, - np.ones(self.g_vec_leng.shape[0] - int_scale.shape[0]) - )) + int_scale = np.hstack( + (int_scale, np.ones(self.g_vec_leng.shape[0] - int_scale.shape[0])) + ) elif int_scale.shape[0] > self.g_vec_leng.shape[0]: print(int_scale.shape[0]) - int_scale = int_scale[:self.g_vec_leng.shape[0]] + int_scale = int_scale[: self.g_vec_leng.shape[0]] if returnfig: fig, ax = self.plot_scattering_intensity( @@ -172,28 +164,27 @@ def fit_profile(k, *coefs): # return if returnfig and plot_result: - return bragg_peaks_cali, (fig,ax) + return bragg_peaks_cali, (fig, ax) else: return bragg_peaks_cali - def calibrate_unit_cell( - self, - bragg_peaks, - coef_index = None, - coef_update = None, - bragg_k_power = 1.0, - bragg_intensity_power = 1.0, - k_min = 0.0, - k_max = None, - k_step = 0.005, - k_broadening = 0.02, - fit_all_intensities = True, - verbose = True, - plot_result = False, - figsize: Union[list, tuple, np.ndarray] = (12, 6), - returnfig = False, + self, + bragg_peaks, + coef_index=None, + coef_update=None, + bragg_k_power=1.0, + bragg_intensity_power=1.0, + k_min=0.0, + 
k_max=None, + k_step=0.005, + k_broadening=0.02, + fit_all_intensities=True, + verbose=True, + plot_result=False, + figsize: Union[list, tuple, np.ndarray] = (12, 6), + returnfig=False, ): """ Solve for the best fit scaling between the computed structure factors and bragg_peaks. @@ -226,30 +217,30 @@ def calibrate_unit_cell( parameters of the unit cell, in this order. The coef_update argument is a list of bools specifying whether or not the unit cell value will be allowed to change (True) or must maintain the original value (False) upon fitting. The coef_index argument provides a pointer to the index in which the - code will update to. - + code will update from. + For example, to update a, b, c, alpha, beta, gamma all independently of each other, the following arguments should be used: coef_index = [0, 1, 2, 3, 4, 5] coef_update = [True, True, True, True, True, True,] - + The default is set to automatically define what can update in a unit cell based on the - point group constraints. When either 'coef_index' or 'coef_update' are None, these constraints + point group constraints. When either 'coef_index' or 'coef_update' are None, these constraints will be automatically pulled from the pointgroup. - + For example, the default for cubic unit cells is: coef_index = [0, 0, 0, 3, 3, 3] coef_update = [True, True, True, False, False, False] Which allows a, b, and c to update (True in first 3 indices of coef_update) - but b and c update based on the value of a (0 in the 1 and 2 list entries in coef_index) such + but b and c update based on the value of a (0 in the 1 and 2 list entries in coef_index) such that a = b = c. While coef_update is False for alpha, beta, and gamma (entries 3, 4, 5), no updates will be made to the angles. - The user has the option to predefine coef_index or coef_update to override defaults. In the - coef_update list, there must be 6 entries and each are boolean. In the coef_index list, there - must be 6 entries, with the first 3 entries being between 0 - 2 and the last 3 entries between + The user has the option to predefine coef_index or coef_update to override defaults. In the + coef_update list, there must be 6 entries and each is a boolean. In the coef_index list, there + must be 6 entries, with the first 3 entries being between 0 - 2 and the last 3 entries between 3 - 5. These act as pointers to pull the updated parameter from. - + """ # initialize structure if coef_index is None or coef_update is None: @@ -260,29 +251,24 @@ def calibrate_unit_cell( assert ( self.pointgroup.get_point_group_symbol() in parameter_updates ), "Unrecognized pointgroup returned by pymatgen!"
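
Editorial aside (not part of this diff): a minimal sketch of the coef_index / coef_update constraint mapping described in the docstring above, mirroring what FitCrystal.get_coefs applies below. The cell values here are made-up illustrations, using the cubic defaults.

import numpy as np

coefs_init = np.array([4.05, 4.05, 4.05, 90.0, 90.0, 90.0])  # a, b, c, alpha, beta, gamma
coef_index = [0, 0, 0, 3, 3, 3]  # which fitted entry each parameter copies from
coef_update = [True, True, True, False, False, False]  # which parameters may change

def apply_constraints(coefs_fit):
    # Map the optimizer's free parameters onto the constrained cell.
    coefs = np.zeros(6)
    for i in range(6):
        if coef_update[i]:
            coefs[i] = coefs_fit[coef_index[i]]  # pull from the pointed-to entry
        else:
            coefs[i] = coefs_init[i]  # frozen at the initial value
    return coefs

# The optimizer proposes a new 'a'; b and c follow it, and the angles stay at 90.
print(apply_constraints(np.array([4.08, 0.0, 0.0, 0.0, 0.0, 0.0])))
# -> [ 4.08  4.08  4.08 90.   90.   90.  ]
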
- coef_index, coef_update = parameter_updates[ \ - self.pointgroup.get_point_group_symbol()] - - #Prepare experimental data + coef_index, coef_update = parameter_updates[ + self.pointgroup.get_point_group_symbol() + ] + + # Prepare experimental data k, int_exp = self.calculate_bragg_peak_histogram( - bragg_peaks, - bragg_k_power, - bragg_intensity_power, - k_min, - k_max, - k_step + bragg_peaks, bragg_k_power, bragg_intensity_power, k_min, k_max, k_step ) - - #Define Fitting Class + + # Define Fitting Class class FitCrystal: - def __init__( self, crystal, coef_index, coef_update, fit_all_intensities, - ): + ): self.coefs_init = crystal.cell self.hkl = crystal.hkl self.struct_factors_int = crystal.struct_factors_int @@ -300,9 +286,9 @@ def get_coefs( else: coefs[a0] = self.coefs_init[a0] coefs[6:] = coefs_fit[6:] - + return coefs - + def fitfun(self, k, *coefs_fit): coefs = self.get_coefs(coefs_fit=coefs_fit) @@ -312,14 +298,27 @@ def fitfun(self, k, *coefs_fit): beta = np.deg2rad(coefs[4]) gamma = np.deg2rad(coefs[5]) f = np.cos(beta) * np.cos(gamma) - np.cos(alpha) - vol = a*b*c*np.sqrt(1 \ - + 2*np.cos(alpha)*np.cos(beta)*np.cos(gamma) \ - - np.cos(alpha)**2 - np.cos(beta)**2 - np.cos(gamma)**2) + vol = ( + a + * b + * c + * np.sqrt( + 1 + + 2 * np.cos(alpha) * np.cos(beta) * np.cos(gamma) + - np.cos(alpha) ** 2 + - np.cos(beta) ** 2 + - np.cos(gamma) ** 2 + ) + ) lat_real = np.array( [ - [a, 0, 0], - [b*np.cos(gamma), b*np.sin(gamma), 0], - [c*np.cos(beta), -c*f/np.sin(gamma), vol/(a*b*np.sin(gamma))], + [a, 0, 0], + [b * np.cos(gamma), b * np.sin(gamma), 0], + [ + c * np.cos(beta), + -c * f / np.sin(gamma), + vol / (a * b * np.sin(gamma)), + ], ] ) # Inverse lattice, metric tensors @@ -327,8 +326,8 @@ def fitfun(self, k, *coefs_fit): metric_inv = np.linalg.inv(metric_real) lat_inv = metric_inv @ lat_real g_vec_all = (self.hkl.T @ lat_inv).T - g_vec_leng = np.linalg.norm(g_vec_all, axis=0) - + g_vec_leng = np.linalg.norm(g_vec_all, axis=0) + # Calculate fitted intensity profile k_broadening = coefs[6] int_scale = coefs[7:] @@ -339,17 +338,17 @@ def fitfun(self, k, *coefs_fit): k_broadening=k_broadening, int_scale=int_scale, normalize_intensity=False, - ) + ) return int_sf fit_crystal = FitCrystal( - self, - coef_index = coef_index, - coef_update = coef_update, - fit_all_intensities=fit_all_intensities, - ) - + self, + coef_index=coef_index, + coef_update=coef_update, + fit_all_intensities=fit_all_intensities, + ) + if fit_all_intensities: coefs = ( *tuple(self.cell), @@ -358,12 +357,12 @@ def fitfun(self, k, *coefs_fit): ) bounds = (0.0, np.inf) popt, pcov = curve_fit( - fit_crystal.fitfun, - k, - int_exp, - p0 = coefs, - bounds = bounds, - ) + fit_crystal.fitfun, + k, + int_exp, + p0=coefs, + bounds=bounds, + ) else: coefs = ( *tuple(self.cell), @@ -372,38 +371,37 @@ def fitfun(self, k, *coefs_fit): ) bounds = (0.0, np.inf) popt, pcov = curve_fit( - fit_crystal.fitfun, - k, - int_exp, - p0 = coefs, - bounds = bounds, - ) - + fit_crystal.fitfun, + k, + int_exp, + p0=coefs, + bounds=bounds, + ) + if verbose: cell_init = self.cell # Update crystal with new lattice parameters self.cell = fit_crystal.get_coefs(popt[:6]) self.calculate_lattice() self.calculate_structure_factors(self.k_max) - + # Output if verbose: # Print unit cell parameters print("Original unit cell = " + str(cell_init)) print("Calibrated unit cell = " + str(self.cell)) - + # Plotting if plot_result: k_broadening = popt[6] int_scale = popt[7:] if int_scale.shape[0] < self.g_vec_leng.shape[0]: - int_scale = np.hstack(( - 
int_scale, - np.ones(self.g_vec_leng.shape[0] - int_scale.shape[0]) - )) + int_scale = np.hstack( + (int_scale, np.ones(self.g_vec_leng.shape[0] - int_scale.shape[0])) + ) elif int_scale.shape[0] > self.g_vec_leng.shape[0]: print(int_scale.shape[0]) - int_scale = int_scale[:self.g_vec_leng.shape[0]] + int_scale = int_scale[: self.g_vec_leng.shape[0]] if returnfig: fig, ax = self.plot_scattering_intensity( @@ -435,41 +433,53 @@ def fitfun(self, k, *coefs_fit): return fig, ax else: return - - + + # coef_index and coef_update sets for the fit_unit_cell function, in the order: # [coef_index, coef_update] parameter_updates = { - "1": [[0, 1, 2, 3, 4, 5], [True, True, True, True, True, True]], #Triclinic - "-1": [[0, 1, 2, 3, 4, 5], [True, True, True, True, True, True]], #Triclinic - "2": [[0, 1, 2, 3, 4, 3], [True, True, True, False, True, False]], #Monoclinic - "m": [[0, 1, 2, 3, 4, 3], [True, True, True, False, True, False]], #Monoclinic - "2/m": [[0, 1, 2, 3, 4, 3], [True, True, True, False, True, False]], #Monoclinic - "222": [[0, 1, 2, 3, 3, 3], [True, True, True, False, False, False]], #Orthorhombic - "mm2": [[0, 1, 2, 3, 3, 3], [True, True, True, False, False, False]], #Orthorhombic - "mmm": [[0, 1, 2, 3, 3, 3], [True, True, True, False, False, False]], #Orthorhombic - "4": [[0, 0, 2, 3, 3, 3], [True, True, True, False, False, False]], #Tetragonal - "-4": [[0, 0, 2, 3, 3, 3], [True, True, True, False, False, False]], #Tetragonal - "4/m": [[0, 0, 2, 3, 3, 3], [True, True, True, False, False, False]], #Tetragonal - "422": [[0, 0, 2, 3, 3, 3], [True, True, True, False, False, False]], #Tetragonal - "4mm": [[0, 0, 2, 3, 3, 3], [True, True, True, False, False, False]], #Tetragonal - "-42m": [[0, 0, 2, 3, 3, 3], [True, True, True, False, False, False]], #Tetragonal - "4/mmm": [[0, 0, 2, 3, 3, 3], [True, True, True, False, False, False]], #Tetragonal - "3": [[0, 0, 0, 3, 3, 3], [True, True, True, True, True, True]], #Trigonal - "-3": [[0, 0, 0, 3, 3, 3], [True, True, True, True, True, True]], #Trigonal - "32": [[0, 0, 0, 3, 3, 3], [True, True, True, True, True, True]], #Trigonal - "3m": [[0, 0, 0, 3, 3, 3], [True, True, True, True, True, True]], #Trigonal - "-3m": [[0, 0, 0, 3, 3, 3], [True, True, True, True, True, True]], #Trigonal - "6": [[0, 0, 2, 3, 3, 5], [True, True, True, False, False, True]], #Hexagonal - "-6": [[0, 0, 2, 3, 3, 5], [True, True, True, False, False, True]], #Hexagonal - "6/m": [[0, 0, 2, 3, 3, 5], [True, True, True, False, False, True]], #Hexagonal - "622": [[0, 0, 2, 3, 3, 5], [True, True, True, False, False, True]], #Hexagonal - "6mm": [[0, 0, 2, 3, 3, 5], [True, True, True, False, False, True]], #Hexagonal - "-6m2": [[0, 0, 2, 3, 3, 5], [True, True, True, False, False, True]], #Hexagonal - "6/mmm": [[0, 0, 2, 3, 3, 5], [True, True, True, False, False, True]], #Hexagonal - "23": [[0, 0, 0, 3, 3, 3], [True, True, True, False, False, False]], #Cubic - "m-3": [[0, 0, 0, 3, 3, 3], [True, True, True, False, False, False]], #Cubic - "432": [[0, 0, 0, 3, 3, 3], [True, True, True, False, False, False]], #Cubic - "-43m": [[0, 0, 0, 3, 3, 3], [True, True, True, False, False, False]], #Cubic - "m-3m": [[0, 0, 0, 3, 3, 3], [True, True, True, False, False, False]], #Cubic - } \ No newline at end of file + "1": [[0, 1, 2, 3, 4, 5], [True, True, True, True, True, True]], # Triclinic + "-1": [[0, 1, 2, 3, 4, 5], [True, True, True, True, True, True]], # Triclinic + "2": [[0, 1, 2, 3, 4, 3], [True, True, True, False, True, False]], # Monoclinic + "m": [[0, 1, 2, 3, 4, 3], [True, True, 
True, False, True, False]], # Monoclinic + "2/m": [[0, 1, 2, 3, 4, 3], [True, True, True, False, True, False]], # Monoclinic + "222": [ + [0, 1, 2, 3, 3, 3], + [True, True, True, False, False, False], + ], # Orthorhombic + "mm2": [ + [0, 1, 2, 3, 3, 3], + [True, True, True, False, False, False], + ], # Orthorhombic + "mmm": [ + [0, 1, 2, 3, 3, 3], + [True, True, True, False, False, False], + ], # Orthorhombic + "4": [[0, 0, 2, 3, 3, 3], [True, True, True, False, False, False]], # Tetragonal + "-4": [[0, 0, 2, 3, 3, 3], [True, True, True, False, False, False]], # Tetragonal + "4/m": [[0, 0, 2, 3, 3, 3], [True, True, True, False, False, False]], # Tetragonal + "422": [[0, 0, 2, 3, 3, 3], [True, True, True, False, False, False]], # Tetragonal + "4mm": [[0, 0, 2, 3, 3, 3], [True, True, True, False, False, False]], # Tetragonal + "-42m": [[0, 0, 2, 3, 3, 3], [True, True, True, False, False, False]], # Tetragonal + "4/mmm": [ + [0, 0, 2, 3, 3, 3], + [True, True, True, False, False, False], + ], # Tetragonal + "3": [[0, 0, 0, 3, 3, 3], [True, True, True, True, True, True]], # Trigonal + "-3": [[0, 0, 0, 3, 3, 3], [True, True, True, True, True, True]], # Trigonal + "32": [[0, 0, 0, 3, 3, 3], [True, True, True, True, True, True]], # Trigonal + "3m": [[0, 0, 0, 3, 3, 3], [True, True, True, True, True, True]], # Trigonal + "-3m": [[0, 0, 0, 3, 3, 3], [True, True, True, True, True, True]], # Trigonal + "6": [[0, 0, 2, 3, 3, 5], [True, True, True, False, False, True]], # Hexagonal + "-6": [[0, 0, 2, 3, 3, 5], [True, True, True, False, False, True]], # Hexagonal + "6/m": [[0, 0, 2, 3, 3, 5], [True, True, True, False, False, True]], # Hexagonal + "622": [[0, 0, 2, 3, 3, 5], [True, True, True, False, False, True]], # Hexagonal + "6mm": [[0, 0, 2, 3, 3, 5], [True, True, True, False, False, True]], # Hexagonal + "-6m2": [[0, 0, 2, 3, 3, 5], [True, True, True, False, False, True]], # Hexagonal + "6/mmm": [[0, 0, 2, 3, 3, 5], [True, True, True, False, False, True]], # Hexagonal + "23": [[0, 0, 0, 3, 3, 3], [True, True, True, False, False, False]], # Cubic + "m-3": [[0, 0, 0, 3, 3, 3], [True, True, True, False, False, False]], # Cubic + "432": [[0, 0, 0, 3, 3, 3], [True, True, True, False, False, False]], # Cubic + "-43m": [[0, 0, 0, 3, 3, 3], [True, True, True, False, False, False]], # Cubic + "m-3m": [[0, 0, 0, 3, 3, 3], [True, True, True, False, False, False]], # Cubic +} diff --git a/py4DSTEM/process/diffraction/crystal_phase.py b/py4DSTEM/process/diffraction/crystal_phase.py index 679f326a6..84824fe63 100644 --- a/py4DSTEM/process/diffraction/crystal_phase.py +++ b/py4DSTEM/process/diffraction/crystal_phase.py @@ -8,12 +8,14 @@ from py4DSTEM.visualize import show, show_image_grid from py4DSTEM.process.diffraction.crystal_viz import plot_diffraction_pattern + class Crystal_Phase: """ A class storing multiple crystal structures, and associated diffraction data. Must be initialized after matching orientations to a pointlistarray??? """ + def __init__( self, crystals, @@ -30,71 +32,81 @@ def __init__( self.crystals = crystals self.num_crystals = len(crystals) else: - raise TypeError('crystals must be a list of crystal instances.') + raise TypeError("crystals must be a list of crystal instances.") if isinstance(orientation_maps, list): if len(self.crystals) != len(orientation_maps): - raise ValueError('Orientation maps must have the same number of entries as crystals.') + raise ValueError( + "Orientation maps must have the same number of entries as crystals." 
+ ) self.orientation_maps = orientation_maps else: - raise TypeError('orientation_maps must be a list of orientation maps.') + raise TypeError("orientation_maps must be a list of orientation maps.") self.name = name return - def plot_all_phase_maps( - self, - map_scale_values = None, - index = 0 - ): + def plot_all_phase_maps(self, map_scale_values=None, index=0): """ Visualize phase maps of dataset. - Args: + Args: map_scale_values (float): Value to scale correlations by """ phase_maps = [] if map_scale_values == None: map_scale_values = [1] * len(self.orientation_maps) - corr_sum = np.sum([(self.orientation_maps[m].corr[:,:,index] * map_scale_values[m]) for m in range(len(self.orientation_maps))]) + corr_sum = np.sum( + [ + (self.orientation_maps[m].corr[:, :, index] * map_scale_values[m]) + for m in range(len(self.orientation_maps)) + ] + ) for m in range(len(self.orientation_maps)): - phase_maps.append(self.orientation_maps[m].corr[:,:,index] / corr_sum) - show_image_grid(lambda i:phase_maps[i], 1, len(phase_maps), cmap = 'inferno') + phase_maps.append(self.orientation_maps[m].corr[:, :, index] / corr_sum) + show_image_grid(lambda i: phase_maps[i], 1, len(phase_maps), cmap="inferno") return - - def plot_phase_map( - self, - index = 0, - cmap = None - - ): - corr_array = np.dstack([maps.corr[:,:,index] for maps in self.orientation_maps]) - best_corr_score = np.max(corr_array,axis=2) - best_match_phase = [np.where(corr_array[:,:,p] == best_corr_score, True,False) - for p in range(len(self.orientation_maps)) - ] + + def plot_phase_map(self, index=0, cmap=None): + corr_array = np.dstack( + [maps.corr[:, :, index] for maps in self.orientation_maps] + ) + best_corr_score = np.max(corr_array, axis=2) + best_match_phase = [ + np.where(corr_array[:, :, p] == best_corr_score, True, False) + for p in range(len(self.orientation_maps)) + ] if cmap == None: - cm = plt.get_cmap('rainbow') - cmap = [cm(1.*i/len(self.orientation_maps)) for i in range(len(self.orientation_maps))] - - fig, (ax) = plt.subplots(figsize = (6,6)) - ax.matshow(np.zeros((self.orientation_maps[0].num_x, self.orientation_maps[0].num_y)), cmap = 'gray') - ax.axis('off') - + cm = plt.get_cmap("rainbow") + cmap = [ + cm(1.0 * i / len(self.orientation_maps)) + for i in range(len(self.orientation_maps)) + ] + + fig, (ax) = plt.subplots(figsize=(6, 6)) + ax.matshow( + np.zeros((self.orientation_maps[0].num_x, self.orientation_maps[0].num_y)), + cmap="gray", + ) + ax.axis("off") + for m in range(len(self.orientation_maps)): - c0, c1 = (cmap[m][0]*0.35,cmap[m][1]*0.35,cmap[m][2]*0.35,1), cmap[m] - cm = mpl.colors.LinearSegmentedColormap.from_list('cmap', [c0,c1], N = 10) + c0, c1 = (cmap[m][0] * 0.35, cmap[m][1] * 0.35, cmap[m][2] * 0.35, 1), cmap[ + m + ] + cm = mpl.colors.LinearSegmentedColormap.from_list("cmap", [c0, c1], N=10) ax.matshow( np.ma.array( - self.orientation_maps[m].corr[:,:,index], - mask = best_match_phase[m]), - cmap = cm) + self.orientation_maps[m].corr[:, :, index], mask=best_match_phase[m] + ), + cmap=cm, + ) plt.show() - + return - + # Potentially introduce a way to check best match out of all orientations in phase plan and plug into model # to quantify phase - + # def phase_plan( # self, # method, @@ -111,14 +123,14 @@ def plot_phase_map( # fiber_angles = None, # ): # return - + def quantify_phase( self, pointlistarray, - tolerance_distance = 0.08, - method = 'nnls', - intensity_power = 0, - mask_peaks = None + tolerance_distance=0.08, + method="nnls", + intensity_power=0, + mask_peaks=None, ): """ Quantification 
of the phase of a crystal based on the crystal instances and the pointlistarray. @@ -129,44 +141,50 @@ def quantify_phase( method (str): Numerical method used to quantify phase intensity_power (float): ... mask_peaks (list, optional): A list indicating which peak positions to mask - + Details: """ if isinstance(pointlistarray, PointListArray): - - phase_weights = np.zeros(( - pointlistarray.shape[0], - pointlistarray.shape[1], - np.sum([map.num_matches for map in self.orientation_maps]) - )) + phase_weights = np.zeros( + ( + pointlistarray.shape[0], + pointlistarray.shape[1], + np.sum([map.num_matches for map in self.orientation_maps]), + ) + ) phase_residuals = np.zeros(pointlistarray.shape) for Rx, Ry in tqdmnd(pointlistarray.shape[0], pointlistarray.shape[1]): - _, phase_weight, phase_residual, crystal_identity = self.quantify_phase_pointlist( + ( + _, + phase_weight, + phase_residual, + crystal_identity, + ) = self.quantify_phase_pointlist( pointlistarray, - position = [Rx, Ry], + position=[Rx, Ry], tolerance_distance=tolerance_distance, - method = method, - intensity_power = intensity_power, - mask_peaks = mask_peaks + method=method, + intensity_power=intensity_power, + mask_peaks=mask_peaks, ) - phase_weights[Rx,Ry,:] = phase_weight - phase_residuals[Rx,Ry] = phase_residual + phase_weights[Rx, Ry, :] = phase_weight + phase_residuals[Rx, Ry] = phase_residual self.phase_weights = phase_weights self.phase_residuals = phase_residuals self.crystal_identity = crystal_identity return else: - return TypeError('pointlistarray must be of type pointlistarray.') + raise TypeError("pointlistarray must be of type PointListArray.") return - + def quantify_phase_pointlist( self, pointlistarray, position, - method = 'nnls', - tolerance_distance = 0.08, - intensity_power = 0, - mask_peaks = None + method="nnls", + tolerance_distance=0.08, + intensity_power=0, + mask_peaks=None, ): """ Args: @@ -191,97 +209,115 @@ def quantify_phase_pointlist( # Things to add: # 1. Better cost for distance from peaks in pointlists # 2. Iterate through multiple tolerance_distance values to find best value. Cost function residuals, or something else?
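
Editorial aside (not part of this diff): a minimal sketch of the linear model this method builds. Each column of the match matrix holds one candidate crystal/orientation's matched simulated peak intensities; scipy's non-negative least squares then solves for the phase weights. All numbers below are made up for illustration.

import numpy as np
from scipy.optimize import nnls

A = np.array(
    [
        [1.0, 0.0],  # peak 1 matches phase 0 only
        [0.8, 0.7],  # peak 2 matches both candidate phases
        [0.0, 1.2],  # peak 3 matches phase 1 only
    ]
)
b = np.array([0.9, 1.5, 1.1])  # experimental peak intensities

phase_weights, residual = nnls(A, b)  # weights constrained to be >= 0
print(phase_weights, residual)
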
- - pointlist = pointlistarray.get_pointlist(position[0], position[1]) - pl_mask = np.where((pointlist['qx'] == 0) & (pointlist['qy'] == 0), 1, 0) + + pointlist = pointlistarray.get_pointlist(position[0], position[1]) + pl_mask = np.where((pointlist["qx"] == 0) & (pointlist["qy"] == 0), 1, 0) pointlist.remove(pl_mask) # False Negatives (exp peak with no match in crystal instances) will appear here, already coded in - + if intensity_power == 0: - pl_intensities = np.ones(pointlist['intensity'].shape) + pl_intensities = np.ones(pointlist["intensity"].shape) else: - pl_intensities = pointlist['intensity']**intensity_power - #Prepare matches for modeling + pl_intensities = pointlist["intensity"] ** intensity_power + # Prepare matches for modeling pointlist_peak_matches = [] crystal_identity = [] - + for c in range(len(self.crystals)): for m in range(self.orientation_maps[c].num_matches): - crystal_identity.append([c,m]) - phase_peak_match_intensities = np.zeros((pointlist['intensity'].shape)) + crystal_identity.append([c, m]) + phase_peak_match_intensities = np.zeros((pointlist["intensity"].shape)) bragg_peaks_fit = self.crystals[c].generate_diffraction_pattern( self.orientation_maps[c].get_orientation(position[0], position[1]), - ind_orientation = m + ind_orientation=m, ) - #Find the best match peak within tolerance_distance and add value in the right position - for d in range(pointlist['qx'].shape[0]): + # Find the best match peak within tolerance_distance and add value in the right position + for d in range(pointlist["qx"].shape[0]): distances = [] - for p in range(bragg_peaks_fit['qx'].shape[0]): + for p in range(bragg_peaks_fit["qx"].shape[0]): distances.append( - np.sqrt((pointlist['qx'][d] - bragg_peaks_fit['qx'][p])**2 + - (pointlist['qy'][d]-bragg_peaks_fit['qy'][p])**2) + np.sqrt( + (pointlist["qx"][d] - bragg_peaks_fit["qx"][p]) ** 2 + + (pointlist["qy"][d] - bragg_peaks_fit["qy"][p]) ** 2 + ) ) ind = np.where(distances == np.min(distances))[0][0] - - #Potentially for-loop over multiple values for 'tolerance_distance' to find best tolerance_distance value + + # Potentially for-loop over multiple values for 'tolerance_distance' to find best tolerance_distance value if distances[ind] <= tolerance_distance: ## Somewhere in this if statement is probably where better distances from the peak should be coded in - if intensity_power == 0: #This could potentially be a different intensity_power arg - phase_peak_match_intensities[d] = 1**((tolerance_distance-distances[ind])/tolerance_distance) + if ( + intensity_power == 0 + ): # This could potentially be a different intensity_power arg + phase_peak_match_intensities[d] = 1 ** ( + (tolerance_distance - distances[ind]) + / tolerance_distance + ) else: - phase_peak_match_intensities[d] = bragg_peaks_fit['intensity'][ind]**((tolerance_distance-distances[ind])/tolerance_distance) + phase_peak_match_intensities[d] = bragg_peaks_fit[ + "intensity" + ][ind] ** ( + (tolerance_distance - distances[ind]) + / tolerance_distance + ) else: ## This is probably where the false positives (peaks in crystal but not in experiment) should be handled - continue - + continue + pointlist_peak_matches.append(phase_peak_match_intensities) pointlist_peak_intensity_matches = np.dstack(pointlist_peak_matches) - pointlist_peak_intensity_matches = pointlist_peak_intensity_matches.reshape( - pl_intensities.shape[0], - pointlist_peak_intensity_matches.shape[-1] + pointlist_peak_intensity_matches = ( + pointlist_peak_intensity_matches.reshape( + pl_intensities.shape[0], + 
pointlist_peak_intensity_matches.shape[-1], ) - - if len(pointlist['qx']) > 0: + ) + + if len(pointlist["qx"]) > 0: if mask_peaks is not None: for i in range(len(mask_peaks)): if mask_peaks[i] == None: continue - inds_mask = np.where(pointlist_peak_intensity_matches[:,mask_peaks[i]] != 0)[0] + inds_mask = np.where( + pointlist_peak_intensity_matches[:, mask_peaks[i]] != 0 + )[0] for mask in range(len(inds_mask)): - pointlist_peak_intensity_matches[inds_mask[mask],i] = 0 + pointlist_peak_intensity_matches[inds_mask[mask], i] = 0 - if method == 'nnls': + if method == "nnls": phase_weights, phase_residuals = nnls( - pointlist_peak_intensity_matches, - pl_intensities + pointlist_peak_intensity_matches, pl_intensities ) - - elif method == 'lstsq': + + elif method == "lstsq": phase_weights, phase_residuals, rank, singular_vals = lstsq( - pointlist_peak_intensity_matches, - pl_intensities, - rcond = -1 + pointlist_peak_intensity_matches, pl_intensities, rcond=-1 ) phase_residuals = np.sum(phase_residuals) else: - raise ValueError(method + ' Not yet implemented. Try nnls or lstsq.') + raise ValueError(method + " Not yet implemented. Try nnls or lstsq.") else: phase_weights = np.zeros((pointlist_peak_intensity_matches.shape[1],)) phase_residuals = np.NaN - return pointlist_peak_intensity_matches, phase_weights, phase_residuals, crystal_identity - + return ( + pointlist_peak_intensity_matches, + phase_weights, + phase_residuals, + crystal_identity, + ) + # def plot_peak_matches( # self, # pointlistarray, # position, - # tolerance_distance, + # tolerance_distance, # ind_orientation, # pointlist_peak_intensity_matches, # ): # """ # A method to view how the tolerance distance impacts the peak matches associated with # the quantify_phase_pointlist method. - + # Args: # pointlistarray, # position, @@ -289,15 +325,14 @@ def quantify_phase_pointlist( # pointlist_peak_intensity_matches # """ # pointlist = pointlistarray.get_pointlist(position[0],position[1]) - + # for m in range(pointlist_peak_intensity_matches.shape[1]): # bragg_peaks_fit = self.crystals[m].generate_diffraction_pattern( # self.orientation_maps[m].get_orientation(position[0], position[1]), # ind_orientation = ind_orientation # ) # peak_inds = np.where(bragg_peaks_fit.data['intensity'] == pointlist_peak_intensity_matches[:,m]) - + # fig, (ax1, ax2) = plt.subplots(2,1,figsize = figsize) # ax1 = plot_diffraction_pattern(pointlist,) # return - diff --git a/py4DSTEM/process/diffraction/crystal_viz.py b/py4DSTEM/process/diffraction/crystal_viz.py index a9420fee4..e17e87b93 100644 --- a/py4DSTEM/process/diffraction/crystal_viz.py +++ b/py4DSTEM/process/diffraction/crystal_viz.py @@ -5,6 +5,8 @@ from mpl_toolkits.mplot3d import Axes3D, art3d from scipy.signal import medfilt from scipy.ndimage import gaussian_filter +from scipy.ndimage.morphology import distance_transform_edt +from skimage.morphology import dilation, erosion import warnings import numpy as np @@ -305,38 +307,38 @@ def plot_scattering_intensity( Parameters -------- - k_min: float + k_min: float min k value for profile range. - k_max: float + k_max: float max k value for profile range. - k_step: float + k_step: float Step size of k in profile range. - k_broadening: float + k_broadening: float Broadening of simulated pattern. - k_power_scale: float + k_power_scale: float Scale SF intensities by k**k_power_scale. - int_power_scale: float + int_power_scale: float Scale SF intensities**int_power_scale. - int_scale: float + int_scale: float Scale output profile by this value. 
- remove_origin: bool + remove_origin: bool Remove origin from plot. - bragg_peaks: BraggVectors + bragg_peaks: BraggVectors Passed in bragg_peaks for comparison with simulated pattern. - bragg_k_power: float + bragg_k_power: float bragg_peaks scaled by k**bragg_k_power. - bragg_intensity_power: float + bragg_intensity_power: float bragg_peaks scaled by intensities**bragg_intensity_power. - bragg_k_broadening: float + bragg_k_broadening: float Broadening applied to bragg_peaks. figsize: list, tuple, np.ndarray Figure size for plot. - returnfig (bool): + returnfig (bool): Return figure and axes handles if this is True. Returns -------- - fig, ax (optional) + fig, ax (optional) figure and axes handles """ @@ -350,8 +352,8 @@ def plot_scattering_intensity( int_sf_plot = calc_1D_profile( k, self.g_vec_leng, - (self.struct_factors_int ** int_power_scale) - * (self.g_vec_leng ** k_power_scale), + (self.struct_factors_int**int_power_scale) + * (self.g_vec_leng**k_power_scale), remove_origin=True, k_broadening=k_broadening, int_scale=int_scale, @@ -359,7 +361,6 @@ def plot_scattering_intensity( # If Bragg peaks are passed in, compute 1D integral if bragg_peaks is not None: - # set rotate and ellipse based on their availability rotate = bragg_peaks.calibration.get_QR_rotation_degrees() ellipse = bragg_peaks.calibration.get_ellipse() @@ -372,10 +373,10 @@ def plot_scattering_intensity( bragg_peaks.get_vectors( rx, ry, - center = True, - ellipse = ellipse, - pixel = True, - rotate = rotate, + center=True, + ellipse=ellipse, + pixel=True, + rotate=rotate, ).data for rx in range(bragg_peaks.shape[0]) for ry in range(bragg_peaks.shape[1]) @@ -410,7 +411,7 @@ def plot_scattering_intensity( int_exp, bragg_k_broadening / k_step, mode="constant" ) - int_exp_plot = (int_exp ** bragg_intensity_power) * (k ** bragg_k_power) + int_exp_plot = (int_exp**bragg_intensity_power) * (k**bragg_k_power) int_exp_plot /= np.max(int_exp_plot) # Plotting @@ -936,7 +937,7 @@ def plot_diffraction_pattern( if plot_range_kx_ky is not None: plot_range_kx_ky = np.array(plot_range_kx_ky) if plot_range_kx_ky.ndim == 0: - plot_range_kx_ky = np.array((plot_range_kx_ky,plot_range_kx_ky)) + plot_range_kx_ky = np.array((plot_range_kx_ky, plot_range_kx_ky)) ax.set_xlim((-plot_range_kx_ky[0], plot_range_kx_ky[0])) ax.set_ylim((-plot_range_kx_ky[1], plot_range_kx_ky[1])) else: @@ -990,7 +991,7 @@ def overline(x): def plot_orientation_maps( self, - orientation_map, + orientation_map=None, orientation_ind: int = 0, dir_in_plane_degrees: float = 0.0, corr_range: np.ndarray = np.array([0, 5]), @@ -999,10 +1000,10 @@ def plot_orientation_maps( figsize: Union[list, tuple, np.ndarray] = (16, 5), figbound: Union[list, tuple, np.ndarray] = (0.01, 0.005), show_axes: bool = True, - camera_dist = None, - plot_limit = None, - plot_layout = 0, - swap_axes_xy_limits = False, + camera_dist=None, + plot_limit=None, + plot_layout=0, + swap_axes_xy_limits=False, returnfig: bool = False, progress_bar=False, ): @@ -1011,6 +1012,7 @@ def plot_orientation_maps( Args: orientation_map (OrientationMap): Class containing orientation matrices, correlation values, etc. + Optional - can reference internally stored OrientationMap. orientation_ind (int): Which orientation match to plot if num_matches > 1 dir_in_plane_degrees (float): In-plane angle to plot in degrees. Default is 0 / x-axis / vertical down. 
corr_range (np.ndarray): Correlation intensity range for the plot @@ -1038,6 +1040,9 @@ def plot_orientation_maps( """ # Inputs + if orientation_map is None: + orientation_map = self.orientation_map + # Legend size leg_size = np.array([300, 300], dtype="int") @@ -1109,51 +1114,62 @@ def plot_orientation_maps( desc="Generating orientation maps", unit=" PointList", disable=not progress_bar, - ): - + ): if self.pymatgen_available: - basis_x[rx,ry,:] = A @ orientation_map.family[rx,ry,orientation_ind,:,0] - basis_y[rx,ry,:] = A @ orientation_map.family[rx,ry,orientation_ind,:,1] - basis_x[rx,ry,:] = basis_x[rx,ry,:]*ct + basis_y[rx,ry,:]*st + basis_x[rx, ry, :] = ( + A @ orientation_map.family[rx, ry, orientation_ind, :, 0] + ) + basis_y[rx, ry, :] = ( + A @ orientation_map.family[rx, ry, orientation_ind, :, 1] + ) + basis_x[rx, ry, :] = basis_x[rx, ry, :] * ct + basis_y[rx, ry, :] * st - basis_z[rx,ry,:] = A @ orientation_map.family[rx,ry,orientation_ind,:,2] + basis_z[rx, ry, :] = ( + A @ orientation_map.family[rx, ry, orientation_ind, :, 2] + ) else: - basis_z[rx,ry,:] = A @ orientation_map.matrix[rx,ry,orientation_ind,:,2] - basis_x = np.clip(basis_x,0,1) - basis_z = np.clip(basis_z,0,1) + basis_z[rx, ry, :] = ( + A @ orientation_map.matrix[rx, ry, orientation_ind, :, 2] + ) + basis_x = np.clip(basis_x, 0, 1) + basis_z = np.clip(basis_z, 0, 1) # Convert to RGB images - basis_x_max = np.max(basis_x,axis=2) + basis_x_max = np.max(basis_x, axis=2) sub = basis_x_max > 0 - basis_x_scale = basis_x * mask[:,:,None] + basis_x_scale = basis_x * mask[:, :, None] for a0 in range(3): - basis_x_scale[:,:,a0][sub] /= basis_x_max[sub] - basis_x_scale[:,:,a0][np.logical_not(sub)] = 0 - rgb_x = basis_x_scale[:,:,0][:,:,None]*color_basis[0,:][None,None,:] \ - + basis_x_scale[:,:,1][:,:,None]*color_basis[1,:][None,None,:] \ - + basis_x_scale[:,:,2][:,:,None]*color_basis[2,:][None,None,:] + basis_x_scale[:, :, a0][sub] /= basis_x_max[sub] + basis_x_scale[:, :, a0][np.logical_not(sub)] = 0 + rgb_x = ( + basis_x_scale[:, :, 0][:, :, None] * color_basis[0, :][None, None, :] + + basis_x_scale[:, :, 1][:, :, None] * color_basis[1, :][None, None, :] + + basis_x_scale[:, :, 2][:, :, None] * color_basis[2, :][None, None, :] + ) - basis_z_max = np.max(basis_z,axis=2) + basis_z_max = np.max(basis_z, axis=2) sub = basis_z_max > 0 - basis_z_scale = basis_z * mask[:,:,None] + basis_z_scale = basis_z * mask[:, :, None] for a0 in range(3): - basis_z_scale[:,:,a0][sub] /= basis_z_max[sub] - basis_z_scale[:,:,a0][np.logical_not(sub)] = 0 - rgb_z = basis_z_scale[:,:,0][:,:,None]*color_basis[0,:][None,None,:] \ - + basis_z_scale[:,:,1][:,:,None]*color_basis[1,:][None,None,:] \ - + basis_z_scale[:,:,2][:,:,None]*color_basis[2,:][None,None,:] + basis_z_scale[:, :, a0][sub] /= basis_z_max[sub] + basis_z_scale[:, :, a0][np.logical_not(sub)] = 0 + rgb_z = ( + basis_z_scale[:, :, 0][:, :, None] * color_basis[0, :][None, None, :] + + basis_z_scale[:, :, 1][:, :, None] * color_basis[1, :][None, None, :] + + basis_z_scale[:, :, 2][:, :, None] * color_basis[2, :][None, None, :] + ) # Legend init # projection vector - cam_dir = np.mean(self.orientation_zone_axis_range,axis=0) + cam_dir = np.mean(self.orientation_zone_axis_range, axis=0) cam_dir = cam_dir / np.linalg.norm(cam_dir) - az = np.rad2deg(np.arctan2(cam_dir[0],cam_dir[1])) + az = np.rad2deg(np.arctan2(cam_dir[0], cam_dir[1])) # el = np.rad2deg(np.arccos(cam_dir[2])) el = np.rad2deg(np.arcsin(cam_dir[2])) # coloring - wx = self.orientation_inds[:,0] / 
self.orientation_zone_axis_steps - wy = self.orientation_inds[:,1] / self.orientation_zone_axis_steps - w0 = 1 - wx - 0.5*wy + wx = self.orientation_inds[:, 0] / self.orientation_zone_axis_steps + wy = self.orientation_inds[:, 1] / self.orientation_zone_axis_steps + w0 = 1 - wx - 0.5 * wy w1 = wx - wy w2 = wy # w0 = 1 - w1/2 - w2/2 @@ -1162,35 +1178,40 @@ def plot_orientation_maps( w0 = w0 / w_scale w1 = w1 / w_scale w2 = w2 / w_scale - rgb_legend = np.clip( - w0[:,None]*color_basis[0,:] + \ - w1[:,None]*color_basis[1,:] + \ - w2[:,None]*color_basis[2,:], - 0,1) + rgb_legend = np.clip( + w0[:, None] * color_basis[0, :] + + w1[:, None] * color_basis[1, :] + + w2[:, None] * color_basis[2, :], + 0, + 1, + ) - if np.abs(self.cell[5]-120.0) < 1e-6: + if np.abs(self.cell[5] - 120.0) < 1e-6: label_0 = self.rational_ind( self.lattice_to_hexagonal( - self.cartesian_to_lattice( - self.orientation_zone_axis_range[0, :]))) + self.cartesian_to_lattice(self.orientation_zone_axis_range[0, :]) + ) + ) label_1 = self.rational_ind( self.lattice_to_hexagonal( - self.cartesian_to_lattice( - self.orientation_zone_axis_range[1, :]))) + self.cartesian_to_lattice(self.orientation_zone_axis_range[1, :]) + ) + ) label_2 = self.rational_ind( self.lattice_to_hexagonal( - self.cartesian_to_lattice( - self.orientation_zone_axis_range[2, :]))) + self.cartesian_to_lattice(self.orientation_zone_axis_range[2, :]) + ) + ) else: label_0 = self.rational_ind( - self.cartesian_to_lattice( - self.orientation_zone_axis_range[0, :])) + self.cartesian_to_lattice(self.orientation_zone_axis_range[0, :]) + ) label_1 = self.rational_ind( - self.cartesian_to_lattice( - self.orientation_zone_axis_range[1, :])) + self.cartesian_to_lattice(self.orientation_zone_axis_range[1, :]) + ) label_2 = self.rational_ind( - self.cartesian_to_lattice( - self.orientation_zone_axis_range[2, :])) + self.cartesian_to_lattice(self.orientation_zone_axis_range[2, :]) + ) inds_legend = np.array( [ @@ -1205,38 +1226,35 @@ def plot_orientation_maps( v0 = self.orientation_vecs[inds_legend[0], :] v1 = self.orientation_vecs[inds_legend[1], :] v2 = self.orientation_vecs[inds_legend[2], :] - n = np.cross(v0,cam_dir) + n = np.cross(v0, cam_dir) if np.sum(v1 * n) < np.sum(v2 * n): - ha_1 = 'left' - ha_2 = 'right' + ha_1 = "left" + ha_2 = "right" else: - ha_1 = 'right' - ha_2 = 'left' - + ha_1 = "right" + ha_2 = "left" # plotting frame # fig, ax = plt.subplots(1, 3, figsize=figsize) fig = plt.figure(figsize=figsize) if plot_layout == 0: - ax_x = fig.add_axes( - [0.0+figbound[0], 0.0, 0.4-2*+figbound[0], 1.0]) - ax_z = fig.add_axes( - [0.4+figbound[0], 0.0, 0.4-2*+figbound[0], 1.0]) + ax_x = fig.add_axes([0.0 + figbound[0], 0.0, 0.4 - 2 * +figbound[0], 1.0]) + ax_z = fig.add_axes([0.4 + figbound[0], 0.0, 0.4 - 2 * +figbound[0], 1.0]) ax_l = fig.add_axes( - [0.8+figbound[0], 0.0, 0.2-2*+figbound[0], 1.0], - projection='3d', + [0.8 + figbound[0], 0.0, 0.2 - 2 * +figbound[0], 1.0], + projection="3d", elev=el, - azim=az) + azim=az, + ) elif plot_layout == 1: - ax_x = fig.add_axes( - [0.0, 0.0+figbound[0], 1.0, 0.4-2*+figbound[0]]) - ax_z = fig.add_axes( - [0.0, 0.4+figbound[0], 1.0, 0.4-2*+figbound[0]]) + ax_x = fig.add_axes([0.0, 0.0 + figbound[0], 1.0, 0.4 - 2 * +figbound[0]]) + ax_z = fig.add_axes([0.0, 0.4 + figbound[0], 1.0, 0.4 - 2 * +figbound[0]]) ax_l = fig.add_axes( - [0.0, 0.8+figbound[0], 1.0, 0.2-2*+figbound[0]], - projection='3d', + [0.0, 0.8 + figbound[0], 1.0, 0.2 - 2 * +figbound[0]], + projection="3d", elev=el, - azim=az) + azim=az, + ) # orientation images 
if self.pymatgen_available: @@ -1244,23 +1262,26 @@ def plot_orientation_maps( else: ax_x.imshow(np.ones_like(rgb_z)) ax_x.text( - rgb_z.shape[1]/2, - rgb_z.shape[0]/2-10, - 'in-plane orientation', + rgb_z.shape[1] / 2, + rgb_z.shape[0] / 2 - 10, + "in-plane orientation", fontsize=14, - horizontalalignment='center') + horizontalalignment="center", + ) ax_x.text( - rgb_z.shape[1]/2, - rgb_z.shape[0]/2+0, - 'for this crystal system', + rgb_z.shape[1] / 2, + rgb_z.shape[0] / 2 + 0, + "for this crystal system", fontsize=14, - horizontalalignment='center') + horizontalalignment="center", + ) ax_x.text( - rgb_z.shape[1]/2, - rgb_z.shape[0]/2+10, - 'requires pymatgen', + rgb_z.shape[1] / 2, + rgb_z.shape[0] / 2 + 10, + "requires pymatgen", fontsize=14, - horizontalalignment='center') + horizontalalignment="center", + ) ax_z.imshow(rgb_z) # Labels for orientation images @@ -1270,18 +1291,18 @@ def plot_orientation_maps( ax_x.axis("off") ax_z.axis("off") - # Triangulate faces - p = self.orientation_vecs[:,(1,0,2)] + p = self.orientation_vecs[:, (1, 0, 2)] tri = mtri.Triangulation( - self.orientation_inds[:,1]-self.orientation_inds[:,0]*1e-3, - self.orientation_inds[:,0]-self.orientation_inds[:,1]*1e-3) + self.orientation_inds[:, 1] - self.orientation_inds[:, 0] * 1e-3, + self.orientation_inds[:, 0] - self.orientation_inds[:, 1] * 1e-3, + ) # convert rgb values of pixels to faces rgb_faces = ( - rgb_legend[tri.triangles[:,0],:] + \ - rgb_legend[tri.triangles[:,1],:] + \ - rgb_legend[tri.triangles[:,2],:] \ - ) / 3 + rgb_legend[tri.triangles[:, 0], :] + + rgb_legend[tri.triangles[:, 1], :] + + rgb_legend[tri.triangles[:, 2], :] + ) / 3 # Add triangulated surface plot to axes pc = art3d.Poly3DCollection( p[tri.triangles], @@ -1301,12 +1322,15 @@ def plot_orientation_maps( # plot_limit = (plot_limit - np.mean(plot_limit, axis=0)) * 1.5 + np.mean( # plot_limit, axis=0 # ) - plot_limit[:,0] = (plot_limit[:,0] - np.mean(plot_limit[:,0]))*1.5 \ - + np.mean(plot_limit[:,0]) - plot_limit[:,1] = (plot_limit[:,2] - np.mean(plot_limit[:,1]))*1.5 \ - + np.mean(plot_limit[:,1]) - plot_limit[:,2] = (plot_limit[:,1] - np.mean(plot_limit[:,2]))*1.1 \ - + np.mean(plot_limit[:,2]) + plot_limit[:, 0] = ( + plot_limit[:, 0] - np.mean(plot_limit[:, 0]) + ) * 1.5 + np.mean(plot_limit[:, 0]) + plot_limit[:, 1] = ( + plot_limit[:, 2] - np.mean(plot_limit[:, 1]) + ) * 1.5 + np.mean(plot_limit[:, 1]) + plot_limit[:, 2] = ( + plot_limit[:, 1] - np.mean(plot_limit[:, 2]) + ) * 1.1 + np.mean(plot_limit[:, 2]) # ax_l.view_init(elev=el, azim=az) # Appearance @@ -1334,16 +1358,20 @@ def plot_orientation_maps( "size": 14, } format_labels = "{0:.2g}" - vec = self.orientation_vecs[inds_legend[0], :] - cam_dir + vec = self.orientation_vecs[inds_legend[0], :] - cam_dir vec = vec / np.linalg.norm(vec) - if np.abs(self.cell[5]-120.0) > 1e-6: + if np.abs(self.cell[5] - 120.0) > 1e-6: ax_l.text( self.orientation_vecs[inds_legend[0], 1] + vec[1] * text_scale_pos, self.orientation_vecs[inds_legend[0], 0] + vec[0] * text_scale_pos, self.orientation_vecs[inds_legend[0], 2] + vec[2] * text_scale_pos, - '[' + format_labels.format(label_0[0]) - + ' ' + format_labels.format(label_0[1]) - + ' ' + format_labels.format(label_0[2]) + ']', + "[" + + format_labels.format(label_0[0]) + + " " + + format_labels.format(label_0[1]) + + " " + + format_labels.format(label_0[2]) + + "]", None, zorder=11, ha="center", @@ -1351,28 +1379,37 @@ def plot_orientation_maps( ) else: ax_l.text( - self.orientation_vecs[inds_legend[0], 1] + vec[1] * text_scale_pos, - 
self.orientation_vecs[inds_legend[0], 0] + vec[0] * text_scale_pos, - self.orientation_vecs[inds_legend[0], 2] + vec[2] * text_scale_pos, - '[' + format_labels.format(label_0[0]) - + ' ' + format_labels.format(label_0[1]) - + ' ' + format_labels.format(label_0[2]) - + ' ' + format_labels.format(label_0[3]) + ']', - None, - zorder=11, - ha="center", - **text_params, + self.orientation_vecs[inds_legend[0], 1] + vec[1] * text_scale_pos, + self.orientation_vecs[inds_legend[0], 0] + vec[0] * text_scale_pos, + self.orientation_vecs[inds_legend[0], 2] + vec[2] * text_scale_pos, + "[" + + format_labels.format(label_0[0]) + + " " + + format_labels.format(label_0[1]) + + " " + + format_labels.format(label_0[2]) + + " " + + format_labels.format(label_0[3]) + + "]", + None, + zorder=11, + ha="center", + **text_params, ) - vec = self.orientation_vecs[inds_legend[1], :] - cam_dir + vec = self.orientation_vecs[inds_legend[1], :] - cam_dir vec = vec / np.linalg.norm(vec) - if np.abs(self.cell[5]-120.0) > 1e-6: + if np.abs(self.cell[5] - 120.0) > 1e-6: ax_l.text( self.orientation_vecs[inds_legend[1], 1] + vec[1] * text_scale_pos, self.orientation_vecs[inds_legend[1], 0] + vec[0] * text_scale_pos, self.orientation_vecs[inds_legend[1], 2] + vec[2] * text_scale_pos, - '[' + format_labels.format(label_1[0]) - + ' ' + format_labels.format(label_1[1]) - + ' ' + format_labels.format(label_1[2]) + ']', + "[" + + format_labels.format(label_1[0]) + + " " + + format_labels.format(label_1[1]) + + " " + + format_labels.format(label_1[2]) + + "]", None, zorder=12, ha=ha_1, @@ -1383,25 +1420,34 @@ def plot_orientation_maps( self.orientation_vecs[inds_legend[1], 1] + vec[1] * text_scale_pos, self.orientation_vecs[inds_legend[1], 0] + vec[0] * text_scale_pos, self.orientation_vecs[inds_legend[1], 2] + vec[2] * text_scale_pos, - '[' + format_labels.format(label_1[0]) - + ' ' + format_labels.format(label_1[1]) - + ' ' + format_labels.format(label_1[2]) - + ' ' + format_labels.format(label_1[3]) + ']', + "[" + + format_labels.format(label_1[0]) + + " " + + format_labels.format(label_1[1]) + + " " + + format_labels.format(label_1[2]) + + " " + + format_labels.format(label_1[3]) + + "]", None, zorder=12, ha=ha_1, **text_params, ) - vec = self.orientation_vecs[inds_legend[2], :] - cam_dir + vec = self.orientation_vecs[inds_legend[2], :] - cam_dir vec = vec / np.linalg.norm(vec) - if np.abs(self.cell[5]-120.0) > 1e-6: + if np.abs(self.cell[5] - 120.0) > 1e-6: ax_l.text( self.orientation_vecs[inds_legend[2], 1] + vec[1] * text_scale_pos, self.orientation_vecs[inds_legend[2], 0] + vec[0] * text_scale_pos, self.orientation_vecs[inds_legend[2], 2] + vec[2] * text_scale_pos, - '[' + format_labels.format(label_2[0]) - + ' ' + format_labels.format(label_2[1]) - + ' ' + format_labels.format(label_2[2]) + ']', + "[" + + format_labels.format(label_2[0]) + + " " + + format_labels.format(label_2[1]) + + " " + + format_labels.format(label_2[2]) + + "]", None, zorder=13, ha=ha_2, @@ -1412,10 +1458,15 @@ def plot_orientation_maps( self.orientation_vecs[inds_legend[2], 1] + vec[1] * text_scale_pos, self.orientation_vecs[inds_legend[2], 0] + vec[0] * text_scale_pos, self.orientation_vecs[inds_legend[2], 2] + vec[2] * text_scale_pos, - '[' + format_labels.format(label_2[0]) - + ' ' + format_labels.format(label_2[1]) - + ' ' + format_labels.format(label_2[2]) - + ' ' + format_labels.format(label_2[3]) + ']', + "[" + + format_labels.format(label_2[0]) + + " " + + format_labels.format(label_2[1]) + + " " + + format_labels.format(label_2[2]) + + " " + 
+ format_labels.format(label_2[3]) + + "]", None, zorder=13, ha=ha_2, @@ -1424,22 +1475,18 @@ plt.show() - images_orientation = np.zeros(( - orientation_map.num_x, - orientation_map.num_y, - 3,2)) + images_orientation = np.zeros((orientation_map.num_x, orientation_map.num_y, 3, 2)) if self.pymatgen_available: - images_orientation[:,:,:,0] = rgb_x - images_orientation[:,:,:,1] = rgb_z + images_orientation[:, :, :, 0] = rgb_x + images_orientation[:, :, :, 1] = rgb_z if returnfig: - ax = [ax_x,ax_z,ax_l] + ax = [ax_x, ax_z, ax_l] return images_orientation, fig, ax else: return images_orientation - def plot_fiber_orientation_maps( self, orientation_map, @@ -1553,7 +1600,7 @@ def plot_fiber_orientation_maps( # draw in-plane legends r = np.arange(leg_size) - leg_size / 2 + 0.5 ya, xa = np.meshgrid(r, r) - ra = np.sqrt(xa ** 2 + ya ** 2) + ra = np.sqrt(xa**2 + ya**2) ta = np.arctan2(ya, xa) sig_leg = np.mod((symmetry_order / (2 * np.pi)) * ta, 1.0) if symmetry_mirror: @@ -1586,14 +1633,38 @@ def plot_fiber_orientation_maps( fig = plt.figure(figsize=figsize) ax_ip = fig.add_axes( - [0.0+figbound[0], 0.25+figbound[1], 0.5-2*+figbound[0], 0.75-figbound[1]]) + [ + 0.0 + figbound[0], + 0.25 + figbound[1], + 0.5 - 2 * +figbound[0], + 0.75 - figbound[1], + ] + ) ax_op = fig.add_axes( - [0.5+figbound[0], 0.25+figbound[1], 0.5-2*+figbound[0], 0.75-figbound[1]]) + [ + 0.5 + figbound[0], + 0.25 + figbound[1], + 0.5 - 2 * +figbound[0], + 0.75 - figbound[1], + ] + ) ax_ip_l = fig.add_axes( - [0.1+figbound[0], 0.0+figbound[1], 0.3-2*+figbound[0], 0.25-figbound[1]]) + [ + 0.1 + figbound[0], + 0.0 + figbound[1], + 0.3 - 2 * +figbound[0], + 0.25 - figbound[1], + ] + ) ax_op_l = fig.add_axes( - [0.6+figbound[0], 0.0+figbound[1], 0.3-2*+figbound[0], 0.25-figbound[1]]) + [ + 0.6 + figbound[0], + 0.0 + figbound[1], + 0.3 - 2 * +figbound[0], + 0.25 - figbound[1], + ] + ) # in-plane ax_ip.imshow(im_ip) @@ -1655,6 +1726,205 @@ return images_orientation +def plot_clusters( + self, + area_min=2, + outline_grains=True, + outline_thickness=1, + fill_grains=0.25, + smooth_grains=1.0, + cmap="viridis", + figsize=(8, 8), + returnfig=False, +): + """ + Plot the clusters as an image. + + Parameters + -------- + area_min: int (optional) + Min cluster size to include, in units of probe positions. + outline_grains: bool (optional) + Set to True to draw grains with outlines. + outline_thickness: int (optional) + Thickness of the grain outline + fill_grains: float (optional) + Outlined grains are filled with this value in pixels. + smooth_grains: float (optional) + Grain boundaries are smoothed by this value in pixels. 
+ figsize: tuple + Size of the figure panel + returnfig: bool + Setting this to True returns the figure and axis handles + + Returns + -------- + fig, ax (optional) + Figure and axes handles + + """ + + # init + im_plot = np.zeros( + ( + self.orientation_map.num_x, + self.orientation_map.num_y, + ) + ) + im_grain = np.zeros( + ( + self.orientation_map.num_x, + self.orientation_map.num_y, + ), + dtype="bool", + ) + + # make plotting image + + for a0 in range(self.cluster_sizes.shape[0]): + if self.cluster_sizes[a0] >= area_min: + if outline_grains: + im_grain[:] = False + im_grain[ + self.cluster_inds[a0][0, :], + self.cluster_inds[a0][1, :], + ] = True + + im_dist = distance_transform_edt( + erosion( + np.invert(im_grain), footprint=np.ones((3, 3), dtype="bool") + ) + ) - distance_transform_edt(im_grain) + im_dist = gaussian_filter(im_dist, sigma=smooth_grains, mode="nearest") + im_add = np.exp(im_dist**2 / (-0.5 * outline_thickness**2)) + + if fill_grains > 0: + im_dist = distance_transform_edt( + erosion( + np.invert(im_grain), footprint=np.ones((3, 3), dtype="bool") + ) + ) + im_dist = gaussian_filter( + im_dist, sigma=smooth_grains, mode="nearest" + ) + im_add += fill_grains * np.exp( + im_dist**2 / (-0.5 * outline_thickness**2) + ) + + # im_add = 1 - np.exp( + # distance_transform_edt(im_grain)**2 \ + # / (-2*outline_thickness**2)) + im_plot += im_add + # im_plot = np.minimum(im_plot, im_add) + else: + # xg,yg = np.unravel_index(self.cluster_inds[a0], im_plot.shape) + im_grain[:] = False + im_grain[ + self.cluster_inds[a0][0, :], + self.cluster_inds[a0][1, :], + ] = True + im_plot += gaussian_filter( + im_grain.astype("float"), sigma=smooth_grains, mode="nearest" + ) + + # im_plot[ + # self.cluster_inds[a0][0,:], + # self.cluster_inds[a0][1,:], + # ] += 1 + + if outline_grains: + im_plot = np.clip(im_plot, 0, 2) + + # plotting + fig, ax = plt.subplots(figsize=figsize) + ax.imshow( + im_plot, + # vmin = -3, + # vmax = 3, + cmap=cmap, + ) + + if returnfig: + return fig, ax + + +def plot_cluster_size( + self, + area_min=None, + area_max=None, + area_step=1, + weight_intensity=False, + pixel_area=1.0, + pixel_area_units="px^2", + figsize=(8, 6), + returnfig=False, +): + """ + Plot the cluster sizes + + Parameters + -------- + area_min: int (optional) + Min area to include in pixels^2 + area_max: int (optional) + Max area bin in pixels^2 + area_step: int (optional) + Step size of the histogram bin in pixels^2 + weight_intensity: bool + Weight histogram by the peak intensity. 
+ pixel_area: float + Size of pixel area unit square + pixel_area_units: string + Units of the pixel area + figsize: tuple + Size of the figure panel + returnfig: bool + Setting this to True returns the figure and axis handles + + Returns + -------- + fig, ax (optional) + Figure and axes handles + + """ + + if area_max is None: + area_max = np.max(self.cluster_sizes) + area = np.arange(0, area_max, area_step) + if area_min is None: + sub = self.cluster_sizes.astype("int") < area_max + else: + sub = np.logical_and( + self.cluster_sizes.astype("int") >= area_min, + self.cluster_sizes.astype("int") < area_max, + ) + if weight_intensity: + hist = np.bincount( + self.cluster_sizes[sub] // area_step, + weights=self.cluster_sig[sub], + minlength=area.shape[0], + ) + else: + hist = np.bincount( + self.cluster_sizes[sub] // area_step, + minlength=area.shape[0], + ) + + # plotting + fig, ax = plt.subplots(figsize=figsize) + ax.bar( + area * pixel_area, + hist, + width=0.8 * pixel_area * area_step, + ) + ax.set_xlim((0, area_max * pixel_area)) + ax.set_xlabel("Grain Area [" + pixel_area_units + "]") + if weight_intensity: + ax.set_ylabel("Total Signal [arb. units]") + else: + ax.set_ylabel("Number of Grains") + + if returnfig: + return fig, ax + + def axisEqual3D(ax): extents = np.array([getattr(ax, "get_{}lim".format(dim))() for dim in "xyz"]) sz = extents[:, 1] - extents[:, 0] @@ -1880,4 +2150,4 @@ def plot_ring_pattern( plt.show() if returnfig: - return fig, ax \ No newline at end of file + return fig, ax diff --git a/py4DSTEM/process/diffraction/flowlines.py b/py4DSTEM/process/diffraction/flowlines.py index cf84f69f5..66904d4f8 100644 --- a/py4DSTEM/process/diffraction/flowlines.py +++ b/py4DSTEM/process/diffraction/flowlines.py @@ -17,20 +17,20 @@ def make_orientation_histogram( bragg_peaks: PointListArray = None, radial_ranges: np.ndarray = None, - orientation_map = None, + orientation_map=None, orientation_ind: int = 0, orientation_growth_angles: np.array = 0.0, orientation_separate_bins: bool = False, orientation_flip_sign: bool = False, upsample_factor=4.0, theta_step_deg=1.0, - sigma_x = 1.0, - sigma_y = 1.0, - sigma_theta = 3.0, + sigma_x=1.0, + sigma_y=1.0, + sigma_theta=3.0, normalize_intensity_image: bool = False, normalize_intensity_stack: bool = True, progress_bar: bool = True, - ): +): """ Create a 3D or 4D orientation histogram from a braggpeaks PointListArray from user-specified radial ranges, or from the Euler angles from a fiber texture Orientation_map generated by the py4DSTEM crystal class. Args: bragg_peaks (PointListArray): 2D array of pointlists containing centered peak locations. radial_ranges (np array): Size (N x 2) array for N radial bins, or (2,) for a single bin. - orientation_map (OrientationMap): Class containing the Euler angles to generate a flowline map. + orientation_map (OrientationMap): Class containing the Euler angles to generate a flowline map. orientation_ind (int): Index of the orientation map (default 0) orientation_growth_angles (array): Angles to place into histogram, relative to orientation. orientation_separate_bins (bool): whether to place multiple angles into multiple radial bins. 
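
Editorial aside (not part of this diff): a minimal sketch of the per-probe-position selection that the histogram loop below performs. Peaks are kept when their radius |q| falls inside a radial bin, and their angles are expressed in units of the theta bin width dtheta. The peak data here are made up for illustration.

import numpy as np

qx = np.array([0.1, 0.5, 1.2])
qy = np.array([0.4, 0.0, 0.3])
intensity = np.array([1.0, 2.0, 0.5])
radial_range = (0.3, 1.0)  # keep peaks with 0.3 <= |q| < 1.0

r2 = qx**2 + qy**2
sub = (r2 >= radial_range[0] ** 2) & (r2 < radial_range[1] ** 2)

theta_step_deg = 1.0
dtheta = np.deg2rad(theta_step_deg)
t = np.arctan2(qy[sub], qx[sub]) / dtheta  # fractional theta-bin coordinates
print(t, intensity[sub])
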
@@ -53,13 +53,12 @@ def make_orientation_histogram( progress_bar (bool): Enable progress bar Returns: - orient_hist (array): 4D array containing Bragg peak intensity histogram + orient_hist (array): 4D array containing Bragg peak intensity histogram [radial_bin x_probe y_probe theta] """ - # coordinates - theta = np.arange(0,180,theta_step_deg) * np.pi / 180.0 + theta = np.arange(0, 180, theta_step_deg) * np.pi / 180.0 dtheta = theta[1] - theta[0] dtheta_deg = dtheta * 180 / np.pi num_theta_bins = np.size(theta) @@ -68,7 +67,7 @@ def make_orientation_histogram( # Input bins radial_ranges = np.array(radial_ranges) if radial_ranges.ndim == 1: - radial_ranges = radial_ranges[None,:] + radial_ranges = radial_ranges[None, :] radial_ranges_2 = radial_ranges**2 num_radii = radial_ranges.shape[0] size_input = bragg_peaks.shape @@ -80,15 +79,13 @@ def make_orientation_histogram( num_radii = 1 else: num_radii = num_angles - - size_output = np.round(np.array(size_input).astype('float') * upsample_factor).astype('int') + + size_output = np.round( + np.array(size_input).astype("float") * upsample_factor + ).astype("int") # output init - orient_hist = np.zeros([ - num_radii, - size_output[0], - size_output[1], - num_theta_bins]) + orient_hist = np.zeros([num_radii, size_output[0], size_output[1], num_theta_bins]) # Loop over all probe positions for a0 in range(num_radii): @@ -97,145 +94,233 @@ def make_orientation_histogram( # *bragg_peaks.shape, desc=t,unit=" probe positions", disable=not progress_bar # ): for rx, ry in tqdmnd( - *size_input, desc=t,unit=" probe positions", disable=not progress_bar - ): - x = (rx + 0.5)*upsample_factor - 0.5 - y = (ry + 0.5)*upsample_factor - 0.5 - x = np.clip(x,0,size_output[0]-2) - y = np.clip(y,0,size_output[1]-2) - - xF = np.floor(x).astype('int') - yF = np.floor(y).astype('int') + *size_input, desc=t, unit=" probe positions", disable=not progress_bar + ): + x = (rx + 0.5) * upsample_factor - 0.5 + y = (ry + 0.5) * upsample_factor - 0.5 + x = np.clip(x, 0, size_output[0] - 2) + y = np.clip(y, 0, size_output[1] - 2) + + xF = np.floor(x).astype("int") + yF = np.floor(y).astype("int") dx = x - xF dy = y - yF add_data = False if orientation_map is None: - p = bragg_peaks.get_pointlist(rx,ry) - r2 = p.data['qx']**2 + p.data['qy']**2 - sub = np.logical_and(r2 >= radial_ranges_2[a0,0], r2 < radial_ranges_2[a0,1]) + p = bragg_peaks.get_pointlist(rx, ry) + r2 = p.data["qx"] ** 2 + p.data["qy"] ** 2 + sub = np.logical_and( + r2 >= radial_ranges_2[a0, 0], r2 < radial_ranges_2[a0, 1] + ) if np.any(sub): add_data = True - intensity = p.data['intensity'][sub] - t = np.arctan2(p.data['qy'][sub],p.data['qx'][sub]) / dtheta + intensity = p.data["intensity"][sub] + t = np.arctan2(p.data["qy"][sub], p.data["qx"][sub]) / dtheta else: - if orientation_map.corr[rx,ry,orientation_ind] > 0: + if orientation_map.corr[rx, ry, orientation_ind] > 0: if orientation_separate_bins is False: if orientation_flip_sign: - t = np.array([(-orientation_map.angles[rx,ry,orientation_ind,0] \ - - orientation_map.angles[rx,ry,orientation_ind,2]) \ - / dtheta]) + orientation_growth_angles + t = ( + np.array( + [ + ( + -orientation_map.angles[ + rx, ry, orientation_ind, 0 + ] + - orientation_map.angles[ + rx, ry, orientation_ind, 2 + ] + ) + / dtheta + ] + ) + + orientation_growth_angles + ) else: - t = np.array([(orientation_map.angles[rx,ry,orientation_ind,0] \ - + orientation_map.angles[rx,ry,orientation_ind,2]) \ - / dtheta]) + orientation_growth_angles - intensity = np.ones(num_angles) * 
orientation_map.corr[rx,ry,orientation_ind] + t = ( + np.array( + [ + ( + orientation_map.angles[ + rx, ry, orientation_ind, 0 + ] + + orientation_map.angles[ + rx, ry, orientation_ind, 2 + ] + ) + / dtheta + ] + ) + + orientation_growth_angles + ) + intensity = ( + np.ones(num_angles) + * orientation_map.corr[rx, ry, orientation_ind] + ) add_data = True else: if orientation_flip_sign: - t = np.array([(-orientation_map.angles[rx,ry,orientation_ind,0] \ - - orientation_map.angles[rx,ry,orientation_ind,2]) \ - / dtheta]) + orientation_growth_angles[a0] + t = ( + np.array( + [ + ( + -orientation_map.angles[ + rx, ry, orientation_ind, 0 + ] + - orientation_map.angles[ + rx, ry, orientation_ind, 2 + ] + ) + / dtheta + ] + ) + + orientation_growth_angles[a0] + ) else: - t = np.array([(orientation_map.angles[rx,ry,orientation_ind,0] \ - + orientation_map.angles[rx,ry,orientation_ind,2]) \ - / dtheta]) + orientation_growth_angles[a0] - intensity = orientation_map.corr[rx,ry,orientation_ind] + t = ( + np.array( + [ + ( + orientation_map.angles[ + rx, ry, orientation_ind, 0 + ] + + orientation_map.angles[ + rx, ry, orientation_ind, 2 + ] + ) + / dtheta + ] + ) + + orientation_growth_angles[a0] + ) + intensity = orientation_map.corr[rx, ry, orientation_ind] add_data = True if add_data: - tF = np.floor(t).astype('int') + tF = np.floor(t).astype("int") dt = t - tF - orient_hist[a0,xF ,yF ,:] = orient_hist[a0,xF ,yF ,:] + \ - np.bincount(np.mod(tF ,num_theta_bins), - weights=(1-dx)*(1-dy)*(1-dt)*intensity,minlength=num_theta_bins) - orient_hist[a0,xF ,yF ,:] = orient_hist[a0,xF ,yF ,:] + \ - np.bincount(np.mod(tF+1,num_theta_bins), - weights=(1-dx)*(1-dy)*( dt)*intensity,minlength=num_theta_bins) - - orient_hist[a0,xF+1,yF ,:] = orient_hist[a0,xF+1,yF ,:] + \ - np.bincount(np.mod(tF ,num_theta_bins), - weights=( dx)*(1-dy)*(1-dt)*intensity,minlength=num_theta_bins) - orient_hist[a0,xF+1,yF ,:] = orient_hist[a0,xF+1,yF ,:] + \ - np.bincount(np.mod(tF+1,num_theta_bins), - weights=( dx)*(1-dy)*( dt)*intensity,minlength=num_theta_bins) - - orient_hist[a0,xF ,yF+1,:] = orient_hist[a0,xF ,yF+1,:] + \ - np.bincount(np.mod(tF ,num_theta_bins), - weights=(1-dx)*( dy)*(1-dt)*intensity,minlength=num_theta_bins) - orient_hist[a0,xF ,yF+1,:] = orient_hist[a0,xF ,yF+1,:] + \ - np.bincount(np.mod(tF+1,num_theta_bins), - weights=(1-dx)*( dy)*( dt)*intensity,minlength=num_theta_bins) - - orient_hist[a0,xF+1,yF+1,:] = orient_hist[a0,xF+1,yF+1,:] + \ - np.bincount(np.mod(tF ,num_theta_bins), - weights=( dx)*( dy)*(1-dt)*intensity,minlength=num_theta_bins) - orient_hist[a0,xF+1,yF+1,:] = orient_hist[a0,xF+1,yF+1,:] + \ - np.bincount(np.mod(tF+1,num_theta_bins), - weights=( dx)*( dy)*( dt)*intensity,minlength=num_theta_bins) + orient_hist[a0, xF, yF, :] = orient_hist[a0, xF, yF, :] + np.bincount( + np.mod(tF, num_theta_bins), + weights=(1 - dx) * (1 - dy) * (1 - dt) * intensity, + minlength=num_theta_bins, + ) + orient_hist[a0, xF, yF, :] = orient_hist[a0, xF, yF, :] + np.bincount( + np.mod(tF + 1, num_theta_bins), + weights=(1 - dx) * (1 - dy) * (dt) * intensity, + minlength=num_theta_bins, + ) + + orient_hist[a0, xF + 1, yF, :] = orient_hist[ + a0, xF + 1, yF, : + ] + np.bincount( + np.mod(tF, num_theta_bins), + weights=(dx) * (1 - dy) * (1 - dt) * intensity, + minlength=num_theta_bins, + ) + orient_hist[a0, xF + 1, yF, :] = orient_hist[ + a0, xF + 1, yF, : + ] + np.bincount( + np.mod(tF + 1, num_theta_bins), + weights=(dx) * (1 - dy) * (dt) * intensity, + minlength=num_theta_bins, + ) + + orient_hist[a0, xF, yF + 1, :] = 
orient_hist[ + a0, xF, yF + 1, : + ] + np.bincount( + np.mod(tF, num_theta_bins), + weights=(1 - dx) * (dy) * (1 - dt) * intensity, + minlength=num_theta_bins, + ) + orient_hist[a0, xF, yF + 1, :] = orient_hist[ + a0, xF, yF + 1, : + ] + np.bincount( + np.mod(tF + 1, num_theta_bins), + weights=(1 - dx) * (dy) * (dt) * intensity, + minlength=num_theta_bins, + ) + + orient_hist[a0, xF + 1, yF + 1, :] = orient_hist[ + a0, xF + 1, yF + 1, : + ] + np.bincount( + np.mod(tF, num_theta_bins), + weights=(dx) * (dy) * (1 - dt) * intensity, + minlength=num_theta_bins, + ) + orient_hist[a0, xF + 1, yF + 1, :] = orient_hist[ + a0, xF + 1, yF + 1, : + ] + np.bincount( + np.mod(tF + 1, num_theta_bins), + weights=(dx) * (dy) * (dt) * intensity, + minlength=num_theta_bins, + ) # smoothing / interpolation if (sigma_x is not None) or (sigma_y is not None) or (sigma_theta is not None): if num_radii > 1: - print('Interpolating orientation matrices ...', end='') + print("Interpolating orientation matrices ...", end="") else: - print('Interpolating orientation matrix ...', end='') + print("Interpolating orientation matrix ...", end="") if sigma_x is not None and sigma_x > 0: orient_hist = gaussian_filter1d( - orient_hist,sigma_x*upsample_factor, - mode='nearest', + orient_hist, + sigma_x * upsample_factor, + mode="nearest", axis=1, - truncate=3.0) + truncate=3.0, + ) if sigma_y is not None and sigma_y > 0: orient_hist = gaussian_filter1d( - orient_hist,sigma_y*upsample_factor, - mode='nearest', + orient_hist, + sigma_y * upsample_factor, + mode="nearest", axis=2, - truncate=3.0) + truncate=3.0, + ) if sigma_theta is not None and sigma_theta > 0: orient_hist = gaussian_filter1d( - orient_hist,sigma_theta/dtheta_deg, - mode='wrap', - axis=3, - truncate=2.0) - print(' done.') + orient_hist, sigma_theta / dtheta_deg, mode="wrap", axis=3, truncate=2.0 + ) + print(" done.") # normalization if normalize_intensity_stack is True: - orient_hist = orient_hist / np.max(orient_hist) + orient_hist = orient_hist / np.max(orient_hist) elif normalize_intensity_image is True: for a0 in range(num_radii): - orient_hist[a0,:,:,:] = orient_hist[a0,:,:,:] / np.max(orient_hist[a0,:,:,:]) + orient_hist[a0, :, :, :] = orient_hist[a0, :, :, :] / np.max( + orient_hist[a0, :, :, :] + ) return orient_hist - def make_flowline_map( orient_hist, - thresh_seed = 0.2, - thresh_grow = 0.05, - thresh_collision = 0.001, - sep_seeds = None, - sep_xy = 6.0, - sep_theta = 5.0, - sort_seeds = 'intensity', - linewidth = 2.0, - step_size = 0.5, - min_steps = 4, - max_steps = 1000, - sigma_x = 1.0, - sigma_y = 1.0, - sigma_theta = 2.0, + thresh_seed=0.2, + thresh_grow=0.05, + thresh_collision=0.001, + sep_seeds=None, + sep_xy=6.0, + sep_theta=5.0, + sort_seeds="intensity", + linewidth=2.0, + step_size=0.5, + min_steps=4, + max_steps=1000, + sigma_x=1.0, + sigma_y=1.0, + sigma_theta=2.0, progress_bar: bool = True, - ): +): """ Create an 3D or 4D orientation flowline map - essentially a pixelated "stream map" which represents diffraction data. - + Args: - orient_hist (array): Histogram of all orientations with coordinates + orient_hist (array): Histogram of all orientations with coordinates [radial_bin x_probe y_probe theta] We assume theta bin ranges from 0 to 180 degrees and is periodic. thresh_seed (float): Threshold for seed generation in histogram. 
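The deposition step reformatted above is a trilinear soft-binning: each peak's intensity is split across the eight neighbouring (x, y, theta) histogram cells according to its fractional offsets (dx, dy, dt), with np.mod wrapping the theta index because the 0-180 degree axis is periodic. A self-contained sketch of the same scheme for a single probe position, with toy sizes, compressing the eight explicit bincount statements above into loops (names mirror the code above):

import numpy as np

num_theta_bins = 180
orient_hist = np.zeros((8, 8, num_theta_bins))   # one radial bin, toy size

# One peak: fractional (x, y) position and angle t in units of theta bins
x, y = 3.4, 5.7
t = np.array([179.3])
intensity = np.array([1.0])

xF, yF = int(np.floor(x)), int(np.floor(y))
tF = np.floor(t).astype("int")
dx, dy, dt = x - xF, y - yF, t - tF

# Deposit into the 8 neighbouring (x, y, theta) cells; theta wraps periodically
for ox, wx in ((0, 1 - dx), (1, dx)):
    for oy, wy in ((0, 1 - dy), (1, dy)):
        for ot, wt in ((0, 1 - dt), (1, dt)):
            orient_hist[xF + ox, yF + oy, :] += np.bincount(
                np.mod(tF + ot, num_theta_bins),
                weights=wx * wy * wt * intensity,
                minlength=num_theta_bins,
            )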
@@ -269,113 +354,130 @@ def make_flowline_map( # number of radial bins num_radii = orient_hist.shape[0] - if num_radii > 1 and len(sep_xy)==1: - sep_xy = np.ones(num_radii)*sep_xy - if num_radii > 1 and len(sep_theta)==1: - sep_theta = np.ones(num_radii)*sep_theta + if num_radii > 1 and len(sep_xy) == 1: + sep_xy = np.ones(num_radii) * sep_xy + if num_radii > 1 and len(sep_theta) == 1: + sep_theta = np.ones(num_radii) * sep_theta # Default seed separation if sep_seeds is None: - sep_seeds = np.round(np.min(sep_xy) / 2 + 0.5).astype('int') + sep_seeds = np.round(np.min(sep_xy) / 2 + 0.5).astype("int") else: - sep_seeds = np.atleast_1d(sep_seeds).astype('int') - if num_radii > 1 and len(sep_seeds)==1: - sep_seeds = (np.ones(num_radii)*sep_seeds).astype('int') + sep_seeds = np.atleast_1d(sep_seeds).astype("int") + if num_radii > 1 and len(sep_seeds) == 1: + sep_seeds = (np.ones(num_radii) * sep_seeds).astype("int") - # coordinates - theta = np.linspace(0,np.pi,orient_hist.shape[3],endpoint=False) + theta = np.linspace(0, np.pi, orient_hist.shape[3], endpoint=False) dtheta = theta[1] - theta[0] - size_3D = np.array([ - orient_hist.shape[1], - orient_hist.shape[2], - orient_hist.shape[3], - ]) + size_3D = np.array( + [ + orient_hist.shape[1], + orient_hist.shape[2], + orient_hist.shape[3], + ] + ) # initialize weighting array - vx = np.arange(-np.ceil(2*sigma_x),np.ceil(2*sigma_x)+1) - vy = np.arange(-np.ceil(2*sigma_y),np.ceil(2*sigma_y)+1) - vt = np.arange(-np.ceil(2*sigma_theta),np.ceil(2*sigma_theta)+1) - ay,ax,at = np.meshgrid(vy,vx,vt) - k = np.exp(ax**2/(-2*sigma_x**2)) * \ - np.exp(ay**2/(-2*sigma_y**2)) * \ - np.exp(at**2/(-2*sigma_theta**2)) - k = k / np.sum(k) - vx = vx[:,None,None].astype('int') - vy = vy[None,:,None].astype('int') - vt = vt[None,None,:].astype('int') - - + vx = np.arange(-np.ceil(2 * sigma_x), np.ceil(2 * sigma_x) + 1) + vy = np.arange(-np.ceil(2 * sigma_y), np.ceil(2 * sigma_y) + 1) + vt = np.arange(-np.ceil(2 * sigma_theta), np.ceil(2 * sigma_theta) + 1) + ay, ax, at = np.meshgrid(vy, vx, vt) + k = ( + np.exp(ax**2 / (-2 * sigma_x**2)) + * np.exp(ay**2 / (-2 * sigma_y**2)) + * np.exp(at**2 / (-2 * sigma_theta**2)) + ) + k = k / np.sum(k) + vx = vx[:, None, None].astype("int") + vy = vy[None, :, None].astype("int") + vt = vt[None, None, :].astype("int") + # initalize flowline array orient_flowlines = np.zeros_like(orient_hist) # initialize output - xy_t_int = np.zeros((max_steps+1,4)) - xy_t_int_rev = np.zeros((max_steps+1,4)) + xy_t_int = np.zeros((max_steps + 1, 4)) + xy_t_int_rev = np.zeros((max_steps + 1, 4)) # Loop over radial bins for a0 in range(num_radii): # initialize collision check array - cr = np.arange(-np.ceil(sep_xy[a0]),np.ceil(sep_xy[a0])+1) - ct = np.arange(-np.ceil(sep_theta[a0]),np.ceil(sep_theta[a0])+1) - ay,ax,at = np.meshgrid(cr,cr,ct) - c_mask = ((ax**2 + ay**2)/sep_xy[a0]**2 + \ - at**2/sep_theta[a0]**2 <= (1 + 1/sep_xy[a0])**2)[None,:,:,:] - cx = cr[None,:,None,None].astype('int') - cy = cr[None,None,:,None].astype('int') - ct = ct[None,None,None,:].astype('int') - - + cr = np.arange(-np.ceil(sep_xy[a0]), np.ceil(sep_xy[a0]) + 1) + ct = np.arange(-np.ceil(sep_theta[a0]), np.ceil(sep_theta[a0]) + 1) + ay, ax, at = np.meshgrid(cr, cr, ct) + c_mask = ( + (ax**2 + ay**2) / sep_xy[a0] ** 2 + at**2 / sep_theta[a0] ** 2 + <= (1 + 1 / sep_xy[a0]) ** 2 + )[None, :, :, :] + cx = cr[None, :, None, None].astype("int") + cy = cr[None, None, :, None].astype("int") + ct = ct[None, None, None, :].astype("int") # Find all seed locations - orient = 
orient_hist[a0,:,:,:] - sub_seeds = np.logical_and(np.logical_and( - orient >= np.roll(orient,1,axis=2), - orient >= np.roll(orient,-1,axis=2)), - orient >= thresh_seed) + orient = orient_hist[a0, :, :, :] + sub_seeds = np.logical_and( + np.logical_and( + orient >= np.roll(orient, 1, axis=2), + orient >= np.roll(orient, -1, axis=2), + ), + orient >= thresh_seed, + ) # Separate seeds if sep_seeds > 0: - for a1 in range(sep_seeds-1): - sub_seeds[a1::sep_seeds,:,:] = False - sub_seeds[:,a1::sep_seeds,:] = False + for a1 in range(sep_seeds - 1): + sub_seeds[a1::sep_seeds, :, :] = False + sub_seeds[:, a1::sep_seeds, :] = False # Index seeds - x_inds,y_inds,t_inds = np.where(sub_seeds) + x_inds, y_inds, t_inds = np.where(sub_seeds) if sort_seeds is not None: - if sort_seeds == 'intensity': - inds_sort = np.argsort(orient[sub_seeds])[::-1] - elif sort_seeds == 'random': + if sort_seeds == "intensity": + inds_sort = np.argsort(orient[sub_seeds])[::-1] + elif sort_seeds == "random": inds_sort = np.random.permutation(np.count_nonzero(sub_seeds)) x_inds = x_inds[inds_sort] y_inds = y_inds[inds_sort] - t_inds = t_inds[inds_sort] + t_inds = t_inds[inds_sort] # for a1 in tqdmnd(range(0,40), desc="Drawing flowlines",unit=" seeds", disable=not progress_bar): t = "Drawing flowlines " + str(a0) - for a1 in tqdmnd(range(0,x_inds.shape[0]), desc=t, unit=" seeds", disable=not progress_bar): + for a1 in tqdmnd( + range(0, x_inds.shape[0]), desc=t, unit=" seeds", disable=not progress_bar + ): # initial coordinate and intensity - xy0 = np.array((x_inds[a1],y_inds[a1])) + xy0 = np.array((x_inds[a1], y_inds[a1])) t0 = theta[t_inds[a1]] # init theta - inds_theta = np.mod(np.round(t0/dtheta).astype('int')+vt,orient.shape[2]) - orient_crop = k * orient[ - np.clip(np.round(xy0[0]).astype('int')+vx,0,orient.shape[0]-1), - np.clip(np.round(xy0[1]).astype('int')+vy,0,orient.shape[1]-1), - inds_theta] + inds_theta = np.mod( + np.round(t0 / dtheta).astype("int") + vt, orient.shape[2] + ) + orient_crop = ( + k + * orient[ + np.clip( + np.round(xy0[0]).astype("int") + vx, 0, orient.shape[0] - 1 + ), + np.clip( + np.round(xy0[1]).astype("int") + vy, 0, orient.shape[1] - 1 + ), + inds_theta, + ] + ) theta_crop = theta[inds_theta] t0 = np.sum(orient_crop * theta_crop) / np.sum(orient_crop) # forward direction t = t0 - v0 = np.array((-np.sin(t),np.cos(t))) + v0 = np.array((-np.sin(t), np.cos(t))) v = v0 * step_size xy = xy0 - int_val = get_intensity(orient,xy0[0],xy0[1],t0/dtheta) - xy_t_int[0,0:2] = xy0 - xy_t_int[0,2] = t/dtheta - xy_t_int[0,3] = int_val + int_val = get_intensity(orient, xy0[0], xy0[1], t0 / dtheta) + xy_t_int[0, 0:2] = xy0 + xy_t_int[0, 2] = t / dtheta + xy_t_int[0, 3] = int_val # main loop grow = True count = 0 @@ -383,52 +485,68 @@ def make_flowline_map( count += 1 # update position and intensity - xy = xy + v - int_val = get_intensity(orient,xy[0],xy[1],t/dtheta) + xy = xy + v + int_val = get_intensity(orient, xy[0], xy[1], t / dtheta) # check for collision flow_crop = orient_flowlines[ a0, - np.clip(np.round(xy[0]).astype('int')+cx,0,orient.shape[0]-1), - np.clip(np.round(xy[1]).astype('int')+cy,0,orient.shape[1]-1), - np.mod(np.round(t/dtheta).astype('int')+ct,orient.shape[2]) + np.clip(np.round(xy[0]).astype("int") + cx, 0, orient.shape[0] - 1), + np.clip(np.round(xy[1]).astype("int") + cy, 0, orient.shape[1] - 1), + np.mod(np.round(t / dtheta).astype("int") + ct, orient.shape[2]), ] int_flow = np.max(flow_crop[c_mask]) - if xy[0] < 0 or \ - xy[1] < 0 or \ - xy[0] > orient.shape[0] or \ - xy[1] > 
orient.shape[1] or \ - int_val < thresh_grow or \ - int_flow > thresh_collision: + if ( + xy[0] < 0 + or xy[1] < 0 + or xy[0] > orient.shape[0] + or xy[1] > orient.shape[1] + or int_val < thresh_grow + or int_flow > thresh_collision + ): grow = False else: # update direction - inds_theta = np.mod(np.round(t/dtheta).astype('int')+vt,orient.shape[2]) - orient_crop = k * orient[ - np.clip(np.round(xy[0]).astype('int')+vx,0,orient.shape[0]-1), - np.clip(np.round(xy[1]).astype('int')+vy,0,orient.shape[1]-1), - inds_theta] + inds_theta = np.mod( + np.round(t / dtheta).astype("int") + vt, orient.shape[2] + ) + orient_crop = ( + k + * orient[ + np.clip( + np.round(xy[0]).astype("int") + vx, + 0, + orient.shape[0] - 1, + ), + np.clip( + np.round(xy[1]).astype("int") + vy, + 0, + orient.shape[1] - 1, + ), + inds_theta, + ] + ) theta_crop = theta[inds_theta] t = np.sum(orient_crop * theta_crop) / np.sum(orient_crop) - v = np.array((-np.sin(t),np.cos(t))) * step_size + v = np.array((-np.sin(t), np.cos(t))) * step_size - xy_t_int[count,0:2] = xy - xy_t_int[count,2] = t/dtheta - xy_t_int[count,3] = int_val + xy_t_int[count, 0:2] = xy + xy_t_int[count, 2] = t / dtheta + xy_t_int[count, 3] = int_val - if count > max_steps-1: - grow=False + if count > max_steps - 1: + grow = False # reverse direction t = t0 + np.pi - v0 = np.array((-np.sin(t),np.cos(t))) + v0 = np.array((-np.sin(t), np.cos(t))) v = v0 * step_size xy = xy0 - int_val = get_intensity(orient,xy0[0],xy0[1],t0/dtheta) - xy_t_int_rev[0,0:2] = xy0 - xy_t_int_rev[0,2] = t/dtheta - xy_t_int_rev[0,3] = int_val + int_val = get_intensity(orient, xy0[0], xy0[1], t0 / dtheta) + xy_t_int_rev[0, 0:2] = xy0 + xy_t_int_rev[0, 2] = t / dtheta + xy_t_int_rev[0, 3] = int_val # main loop grow = True count_rev = 0 @@ -436,53 +554,69 @@ def make_flowline_map( count_rev += 1 # update position and intensity - xy = xy + v - int_val = get_intensity(orient,xy[0],xy[1],t/dtheta) + xy = xy + v + int_val = get_intensity(orient, xy[0], xy[1], t / dtheta) # check for collision flow_crop = orient_flowlines[ a0, - np.clip(np.round(xy[0]).astype('int')+cx,0,orient.shape[0]-1), - np.clip(np.round(xy[1]).astype('int')+cy,0,orient.shape[1]-1), - np.mod(np.round(t/dtheta).astype('int')+ct,orient.shape[2]) + np.clip(np.round(xy[0]).astype("int") + cx, 0, orient.shape[0] - 1), + np.clip(np.round(xy[1]).astype("int") + cy, 0, orient.shape[1] - 1), + np.mod(np.round(t / dtheta).astype("int") + ct, orient.shape[2]), ] int_flow = np.max(flow_crop[c_mask]) - if xy[0] < 0 or \ - xy[1] < 0 or \ - xy[0] > orient.shape[0] or \ - xy[1] > orient.shape[1] or \ - int_val < thresh_grow or \ - int_flow > thresh_collision: + if ( + xy[0] < 0 + or xy[1] < 0 + or xy[0] > orient.shape[0] + or xy[1] > orient.shape[1] + or int_val < thresh_grow + or int_flow > thresh_collision + ): grow = False else: # update direction - inds_theta = np.mod(np.round(t/dtheta).astype('int')+vt,orient.shape[2]) - orient_crop = k * orient[ - np.clip(np.round(xy[0]).astype('int')+vx,0,orient.shape[0]-1), - np.clip(np.round(xy[1]).astype('int')+vy,0,orient.shape[1]-1), - inds_theta] + inds_theta = np.mod( + np.round(t / dtheta).astype("int") + vt, orient.shape[2] + ) + orient_crop = ( + k + * orient[ + np.clip( + np.round(xy[0]).astype("int") + vx, + 0, + orient.shape[0] - 1, + ), + np.clip( + np.round(xy[1]).astype("int") + vy, + 0, + orient.shape[1] - 1, + ), + inds_theta, + ] + ) theta_crop = theta[inds_theta] t = np.sum(orient_crop * theta_crop) / np.sum(orient_crop) + np.pi - v = np.array((-np.sin(t),np.cos(t))) * 
step_size + v = np.array((-np.sin(t), np.cos(t))) * step_size - xy_t_int_rev[count_rev,0:2] = xy - xy_t_int_rev[count_rev,2] = t/dtheta - xy_t_int_rev[count_rev,3] = int_val + xy_t_int_rev[count_rev, 0:2] = xy + xy_t_int_rev[count_rev, 2] = t / dtheta + xy_t_int_rev[count_rev, 3] = int_val - if count_rev > max_steps-1: - grow=False + if count_rev > max_steps - 1: + grow = False # write into output array if count + count_rev > min_steps: if count > 0: - orient_flowlines[a0,:,:,:] = set_intensity( - orient_flowlines[a0,:,:,:], - xy_t_int[1:count,:]) + orient_flowlines[a0, :, :, :] = set_intensity( + orient_flowlines[a0, :, :, :], xy_t_int[1:count, :] + ) if count_rev > 1: - orient_flowlines[a0,:,:,:] = set_intensity( - orient_flowlines[a0,:,:,:], - xy_t_int_rev[1:count_rev,:]) + orient_flowlines[a0, :, :, :] = set_intensity( + orient_flowlines[a0, :, :, :], xy_t_int_rev[1:count_rev, :] + ) # normalize to step size orient_flowlines = orient_flowlines * step_size @@ -490,40 +624,30 @@ def make_flowline_map( # linewidth if linewidth > 1.0: s = linewidth - 1.0 - - orient_flowlines = gaussian_filter1d( - orient_flowlines, - s, - axis=1, - truncate=3.0) - orient_flowlines = gaussian_filter1d( - orient_flowlines, - s, - axis=2, - truncate=3.0) + + orient_flowlines = gaussian_filter1d(orient_flowlines, s, axis=1, truncate=3.0) + orient_flowlines = gaussian_filter1d(orient_flowlines, s, axis=2, truncate=3.0) orient_flowlines = orient_flowlines * (s**2) return orient_flowlines - - def make_flowline_rainbow_image( orient_flowlines, - int_range = [0,0.2], - sym_rotation_order = 2, - theta_offset = 0.0, - greyscale = False, - greyscale_max = True, - white_background = False, - power_scaling = 1.0, - sum_radial_bins = False, - plot_images = True, - figsize = None, - ): + int_range=[0, 0.2], + sym_rotation_order=2, + theta_offset=0.0, + greyscale=False, + greyscale_max=True, + white_background=False, + power_scaling=1.0, + sum_radial_bins=False, + plot_images=True, + figsize=None, +): """ Generate RGB output images from the flowline arrays. - + Args: orient_flowline (array): Histogram of all orientations with coordinates [x y radial_bin theta] We assume theta bin ranges from 0 to 180 degrees and is periodic. 
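Flowline seeds in the hunks above are local maxima along the periodic theta axis: a histogram cell qualifies when it is at least as large as both of its circular neighbours (compared via np.roll) and exceeds thresh_seed, after which every sep_seeds-th row and column is cleared so seeds do not crowd each other. A minimal sketch of that detection on a random toy histogram (names follow the code above):

import numpy as np

rng = np.random.default_rng(0)
orient = rng.random((16, 16, 90))   # toy [x, y, theta] histogram slice
thresh_seed = 0.95
sep_seeds = 4

# Local maxima along the (periodic) theta axis, above the seed threshold
sub_seeds = np.logical_and(
    np.logical_and(
        orient >= np.roll(orient, 1, axis=2),
        orient >= np.roll(orient, -1, axis=2),
    ),
    orient >= thresh_seed,
)

# Thin the seeds: clear all but every sep_seeds-th probe row and column
for a1 in range(sep_seeds - 1):
    sub_seeds[a1::sep_seeds, :, :] = False
    sub_seeds[:, a1::sep_seeds, :] = False

x_inds, y_inds, t_inds = np.where(sub_seeds)
# Strongest seeds first, matching sort_seeds="intensity"
order = np.argsort(orient[sub_seeds])[::-1]
x_inds, y_inds, t_inds = x_inds[order], y_inds[order], t_inds[order]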
@@ -544,27 +668,26 @@ def make_flowline_rainbow_image( # init array size_input = orient_flowlines.shape - size_output = np.array([size_input[0],size_input[1],size_input[2],3]) + size_output = np.array([size_input[0], size_input[1], size_input[2], 3]) im_flowline = np.zeros(size_output) theta_offset = np.atleast_1d(theta_offset) if greyscale is True: for a0 in range(size_input[0]): if greyscale_max is True: - im = np.max(orient_flowlines[a0,:,:,:],axis=2) + im = np.max(orient_flowlines[a0, :, :, :], axis=2) else: - im = np.mean(orient_flowlines[a0,:,:,:],axis=2) + im = np.mean(orient_flowlines[a0, :, :, :], axis=2) - sig = np.clip((im - int_range[0]) \ - / (int_range[1] - int_range[0]),0,1) + sig = np.clip((im - int_range[0]) / (int_range[1] - int_range[0]), 0, 1) if power_scaling != 1: - sig = sig ** power_scaling + sig = sig**power_scaling if white_background is False: - im_flowline[a0,:,:,:] = sig[:,:,None] + im_flowline[a0, :, :, :] = sig[:, :, None] else: - im_flowline[a0,:,:,:] = 1-sig[:,:,None] + im_flowline[a0, :, :, :] = 1 - sig[:, :, None] else: # Color basis @@ -573,56 +696,92 @@ def make_flowline_rainbow_image( c2 = np.array([0.0, 0.3, 1.0]) # angles - theta = np.linspace(0,np.pi,size_input[3],endpoint=False) + theta = np.linspace(0, np.pi, size_input[3], endpoint=False) theta_color = theta * sym_rotation_order if size_input[0] > 1 and len(theta_offset) == 1: - theta_offset = np.ones(size_input[0])*theta_offset + theta_offset = np.ones(size_input[0]) * theta_offset for a0 in range(size_input[0]): # color projections - b0 = np.maximum(1 - np.abs(np.mod(theta_offset[a0] + theta_color + np.pi, 2*np.pi) - np.pi)**2 / (np.pi*2/3)**2, 0) - b1 = np.maximum(1 - np.abs(np.mod(theta_offset[a0] + theta_color - np.pi*2/3 + np.pi, 2*np.pi) - np.pi)**2 / (np.pi*2/3)**2, 0) - b2 = np.maximum(1 - np.abs(np.mod(theta_offset[a0] + theta_color - np.pi*4/3 + np.pi, 2*np.pi) - np.pi)**2 / (np.pi*2/3)**2, 0) + b0 = np.maximum( + 1 + - np.abs( + np.mod(theta_offset[a0] + theta_color + np.pi, 2 * np.pi) - np.pi + ) + ** 2 + / (np.pi * 2 / 3) ** 2, + 0, + ) + b1 = np.maximum( + 1 + - np.abs( + np.mod( + theta_offset[a0] + theta_color - np.pi * 2 / 3 + np.pi, + 2 * np.pi, + ) + - np.pi + ) + ** 2 + / (np.pi * 2 / 3) ** 2, + 0, + ) + b2 = np.maximum( + 1 + - np.abs( + np.mod( + theta_offset[a0] + theta_color - np.pi * 4 / 3 + np.pi, + 2 * np.pi, + ) + - np.pi + ) + ** 2 + / (np.pi * 2 / 3) ** 2, + 0, + ) sig = np.clip( - (orient_flowlines[a0,:,:,:] - int_range[0]) \ - / (int_range[1] - int_range[0]),0,1) + (orient_flowlines[a0, :, :, :] - int_range[0]) + / (int_range[1] - int_range[0]), + 0, + 1, + ) if power_scaling != 1: - sig = sig ** power_scaling + sig = sig**power_scaling + + im_flowline[a0, :, :, :] = ( + np.sum(sig * b0[None, None, :], axis=2)[:, :, None] * c0[None, None, :] + + np.sum(sig * b1[None, None, :], axis=2)[:, :, None] + * c1[None, None, :] + + np.sum(sig * b2[None, None, :], axis=2)[:, :, None] + * c2[None, None, :] + ) - im_flowline[a0,:,:,:] = \ - np.sum(sig * b0[None,None,:], axis=2)[:,:,None]*c0[None,None,:] + \ - np.sum(sig * b1[None,None,:], axis=2)[:,:,None]*c1[None,None,:] + \ - np.sum(sig * b2[None,None,:], axis=2)[:,:,None]*c2[None,None,:] - # clip limits - im_flowline[a0,:,:,:] = np.clip(im_flowline[a0,:,:,:],0,1) + im_flowline[a0, :, :, :] = np.clip(im_flowline[a0, :, :, :], 0, 1) # contrast flip if white_background is True: im = rgb_to_hsv(im_flowline[a0]) - im_v = im[:,:,2] - im[:,:,1] = im_v - im[:,:,2] = 1 + im_v = im[:, :, 2] + im[:, :, 1] = im_v + im[:, :, 2] = 1 
im_flowline[a0] = hsv_to_rgb(im) if sum_radial_bins is True: if white_background is False: - im_flowline = np.clip(np.sum(im_flowline,axis=0),0,1)[None,:,:,:] + im_flowline = np.clip(np.sum(im_flowline, axis=0), 0, 1)[None, :, :, :] else: # im_flowline = np.clip(np.sum(im_flowline,axis=0)+1-im_flowline.shape[0],0,1)[None,:,:,:] - im_flowline = np.min(im_flowline,axis=0)[None,:,:,:] + im_flowline = np.min(im_flowline, axis=0)[None, :, :, :] if plot_images is True: if figsize is None: - fig,ax = plt.subplots( - im_flowline.shape[0],1, - figsize=(10,im_flowline.shape[0]*10)) + fig, ax = plt.subplots( + im_flowline.shape[0], 1, figsize=(10, im_flowline.shape[0] * 10) + ) else: - fig,ax = plt.subplots( - im_flowline.shape[0],1, - figsize=figsize) + fig, ax = plt.subplots(im_flowline.shape[0], 1, figsize=figsize) if im_flowline.shape[0] > 1: for a0 in range(im_flowline.shape[0]): @@ -637,20 +796,19 @@ def make_flowline_rainbow_image( return im_flowline - def make_flowline_rainbow_legend( - im_size=np.array([256,256]), - sym_rotation_order = 2, - theta_offset = 0.0, - white_background = False, + im_size=np.array([256, 256]), + sym_rotation_order=2, + theta_offset=0.0, + white_background=False, return_image=False, - radial_range=np.array([0.45,0.9]), + radial_range=np.array([0.45, 0.9]), plot_legend=True, - figsize=(4,4), - ): + figsize=(4, 4), +): """ This function generates a legend for the rainbow colored flowline maps, and returns it as an RGB image. - + Args: im_size (np.array): Size of legend image in pixels. sym_rotation_order (int): rotational symmetry for colouring @@ -659,58 +817,77 @@ def make_flowline_rainbow_legend( return_image (bool): Return the image array. radial_range (np.array): Inner and outer radius for the legend ring. plot_legend (bool): Plot the generated legend. - figsize (tuple or list): Size of the plotted legend. + figsize (tuple or list): Size of the plotted legend. Returns: im_legend (array): Image array for the legend.
""" - - # Color basis c0 = np.array([1.0, 0.0, 0.0]) c1 = np.array([0.0, 0.7, 0.0]) c2 = np.array([0.0, 0.3, 1.0]) # Coordinates - x = np.linspace(-1,1,im_size[0]) - y = np.linspace(-1,1,im_size[1]) - ya,xa = np.meshgrid(-y,x) + x = np.linspace(-1, 1, im_size[0]) + y = np.linspace(-1, 1, im_size[1]) + ya, xa = np.meshgrid(-y, x) ra = np.sqrt(xa**2 + ya**2) - ta = np.arctan2(ya,xa) - ta_sym = ta*sym_rotation_order + ta = np.arctan2(ya, xa) + ta_sym = ta * sym_rotation_order # mask - dr = xa[1,0] - xa[0,0] - mask = np.clip((radial_range[1] - ra)/dr + 0.5,0,1) \ - * np.clip((ra - radial_range[0])/dr + 0.5,0,1) + dr = xa[1, 0] - xa[0, 0] + mask = np.clip((radial_range[1] - ra) / dr + 0.5, 0, 1) * np.clip( + (ra - radial_range[0]) / dr + 0.5, 0, 1 + ) # rgb image - b0 = np.maximum(1 - np.abs(np.mod(theta_offset + ta_sym + np.pi, 2*np.pi) - np.pi)**2 / (np.pi*2/3)**2, 0) - b1 = np.maximum(1 - np.abs(np.mod(theta_offset + ta_sym - np.pi*2/3 + np.pi, 2*np.pi) - np.pi)**2 / (np.pi*2/3)**2, 0) - b2 = np.maximum(1 - np.abs(np.mod(theta_offset + ta_sym - np.pi*4/3 + np.pi, 2*np.pi) - np.pi)**2 / (np.pi*2/3)**2, 0) - im_legend = \ - b0[:,:,None]*c0[None,None,:] + \ - b1[:,:,None]*c1[None,None,:] + \ - b2[:,:,None]*c2[None,None,:] - im_legend = im_legend * mask[:,:,None] + b0 = np.maximum( + 1 + - np.abs(np.mod(theta_offset + ta_sym + np.pi, 2 * np.pi) - np.pi) ** 2 + / (np.pi * 2 / 3) ** 2, + 0, + ) + b1 = np.maximum( + 1 + - np.abs( + np.mod(theta_offset + ta_sym - np.pi * 2 / 3 + np.pi, 2 * np.pi) - np.pi + ) + ** 2 + / (np.pi * 2 / 3) ** 2, + 0, + ) + b2 = np.maximum( + 1 + - np.abs( + np.mod(theta_offset + ta_sym - np.pi * 4 / 3 + np.pi, 2 * np.pi) - np.pi + ) + ** 2 + / (np.pi * 2 / 3) ** 2, + 0, + ) + im_legend = ( + b0[:, :, None] * c0[None, None, :] + + b1[:, :, None] * c1[None, None, :] + + b2[:, :, None] * c2[None, None, :] + ) + im_legend = im_legend * mask[:, :, None] if white_background is True: im_legend = rgb_to_hsv(im_legend) - im_v = im_legend[:,:,2] - im_legend[:,:,1] = im_v - im_legend[:,:,2] = 1 + im_v = im_legend[:, :, 2] + im_legend[:, :, 1] = im_v + im_legend[:, :, 2] = 1 im_legend = hsv_to_rgb(im_legend) # plotting if plot_legend: - fig,ax = plt.subplots(1,1,figsize=figsize) + fig, ax = plt.subplots(1, 1, figsize=figsize) ax.imshow(im_legend) ax.invert_yaxis() # ax.set_axis_off() - ax.axis('off') - - + ax.axis("off") # # angles # theta = np.linspace(0,np.pi,num_angle_bins,endpoint=False) @@ -728,18 +905,20 @@ def make_flowline_rainbow_legend( def make_flowline_combined_image( orient_flowlines, - int_range = [0,0.2], - cvals = np.array([ - [0.0,0.7,0.0], - [1.0,0.0,0.0], - [0.0,0.7,1.0], - ]), - white_background = False, - power_scaling = 1.0, - sum_radial_bins = True, - plot_images = True, - figsize = None, - ): + int_range=[0, 0.2], + cvals=np.array( + [ + [0.0, 0.7, 0.0], + [1.0, 0.0, 0.0], + [0.0, 0.7, 1.0], + ] + ), + white_background=False, + power_scaling=1.0, + sum_radial_bins=True, + plot_images=True, + figsize=None, +): """ Generate RGB output images from the flowline arrays. 
@@ -760,22 +939,27 @@ def make_flowline_combined_image( # init array size_input = orient_flowlines.shape - size_output = np.array([size_input[0],size_input[1],size_input[2],3]) + size_output = np.array([size_input[0], size_input[1], size_input[2], 3]) im_flowline = np.zeros(size_output) cvals = np.array(cvals) # Generate all color images for a0 in range(size_input[0]): sig = np.clip( - (np.sum(orient_flowlines[a0,:,:,:],axis=2) - int_range[0]) \ - / (int_range[1] - int_range[0]),0,1) + (np.sum(orient_flowlines[a0, :, :, :], axis=2) - int_range[0]) + / (int_range[1] - int_range[0]), + 0, + 1, + ) if power_scaling != 1: - sig = sig ** power_scaling + sig = sig**power_scaling if white_background: - im_flowline[a0,:,:,:] = 1 - sig[:,:,None]*(1-cvals[a0,:][None,None,:]) + im_flowline[a0, :, :, :] = 1 - sig[:, :, None] * ( + 1 - cvals[a0, :][None, None, :] + ) else: - im_flowline[a0,:,:,:] = sig[:,:,None]*cvals[a0,:][None,None,:] + im_flowline[a0, :, :, :] = sig[:, :, None] * cvals[a0, :][None, None, :] # # contrast flip # if white_background is True: @@ -791,46 +975,42 @@ def make_flowline_combined_image( if sum_radial_bins is True: if white_background is False: - im_flowline = np.clip(np.sum(im_flowline,axis=0),0,1)[None,:,:,:] + im_flowline = np.clip(np.sum(im_flowline, axis=0), 0, 1)[None, :, :, :] else: # im_flowline = np.clip(np.sum(im_flowline,axis=0)+1-im_flowline.shape[0],0,1)[None,:,:,:] - im_flowline = np.min(im_flowline,axis=0)[None,:,:,:] - + im_flowline = np.min(im_flowline, axis=0)[None, :, :, :] if plot_images is True: if figsize is None: - fig,ax = plt.subplots( - im_flowline.shape[0],1, - figsize=(10,im_flowline.shape[0]*10)) + fig, ax = plt.subplots( + im_flowline.shape[0], 1, figsize=(10, im_flowline.shape[0] * 10) + ) else: - fig,ax = plt.subplots( - im_flowline.shape[0],1, - figsize=figsize) + fig, ax = plt.subplots(im_flowline.shape[0], 1, figsize=figsize) if im_flowline.shape[0] > 1: for a0 in range(im_flowline.shape[0]): ax[a0].imshow(im_flowline[a0]) - ax[a0].axis('off') + ax[a0].axis("off") plt.subplots_adjust(wspace=0, hspace=0.02) else: ax.imshow(im_flowline[0]) - ax.axis('off') + ax.axis("off") plt.show() return im_flowline - def orientation_correlation( orient_hist, - radius_max=None, - ): + radius_max=None, +): """ Take in the 4D orientation histogram, and compute the distance-angle (auto)correlations - + Args: orient_hist (array): 3D or 4D histogram of all orientations with coordinates [x y radial_bin theta] - radius_max (float): Maximum radial distance for correlogram calculation. If set to None, the maximum + radius_max (float): Maximum radial distance for correlogram calculation. If set to None, the maximum radius will be set to min(orient_hist.shape[0],orient_hist.shape[1])/2. 
Returns: @@ -840,56 +1020,74 @@ def orientation_correlation( # Array sizes size_input = np.array(orient_hist.shape) if radius_max is None: - radius_max = np.ceil(np.min(orient_hist.shape[1:3])/2).astype('int') - size_corr = np.array([ - np.maximum(2*size_input[1],2*radius_max), - np.maximum(2*size_input[2],2*radius_max)]) + radius_max = np.ceil(np.min(orient_hist.shape[1:3]) / 2).astype("int") + size_corr = np.array( + [ + np.maximum(2 * size_input[1], 2 * radius_max), + np.maximum(2 * size_input[2], 2 * radius_max), + ] + ) # Initialize orientation histogram - orient_hist_pad = np.zeros(( - size_input[0], - size_corr[0], - size_corr[1], - size_input[3], - ),dtype='complex') - orient_norm_pad = np.zeros(( - size_input[0], - size_corr[0], - size_corr[1], - ),dtype='complex') - + orient_hist_pad = np.zeros( + ( + size_input[0], + size_corr[0], + size_corr[1], + size_input[3], + ), + dtype="complex", + ) + orient_norm_pad = np.zeros( + ( + size_input[0], + size_corr[0], + size_corr[1], + ), + dtype="complex", + ) + # Pad the histogram in real space x_inds = np.arange(size_input[1]) y_inds = np.arange(size_input[2]) - orient_hist_pad[:,x_inds[:,None],y_inds[None,:],:] = orient_hist - orient_norm_pad[:,x_inds[:,None],y_inds[None,:]] = \ - np.sum(orient_hist, axis=3) / np.sqrt(size_input[3]) - orient_hist_pad = np.fft.fftn(orient_hist_pad,axes=(1,2,3)) - orient_norm_pad = np.fft.fftn(orient_norm_pad,axes=(1,2)) + orient_hist_pad[:, x_inds[:, None], y_inds[None, :], :] = orient_hist + orient_norm_pad[:, x_inds[:, None], y_inds[None, :]] = np.sum( + orient_hist, axis=3 + ) / np.sqrt(size_input[3]) + orient_hist_pad = np.fft.fftn(orient_hist_pad, axes=(1, 2, 3)) + orient_norm_pad = np.fft.fftn(orient_norm_pad, axes=(1, 2)) # Radial coordinates for integration - x = np.mod(np.arange(size_corr[0])+size_corr[0]/2,size_corr[0])-size_corr[0]/2 - y = np.mod(np.arange(size_corr[1])+size_corr[1]/2,size_corr[1])-size_corr[1]/2 - ya,xa = np.meshgrid(y,x) + x = ( + np.mod(np.arange(size_corr[0]) + size_corr[0] / 2, size_corr[0]) + - size_corr[0] / 2 + ) + y = ( + np.mod(np.arange(size_corr[1]) + size_corr[1] / 2, size_corr[1]) + - size_corr[1] / 2 + ) + ya, xa = np.meshgrid(y, x) ra = np.sqrt(xa**2 + ya**2) # coordinate subset sub0 = ra <= radius_max - sub1 = ra <= radius_max-1 - rF0 = np.floor(ra[sub0]).astype('int') - rF1 = np.floor(ra[sub1]).astype('int') + sub1 = ra <= radius_max - 1 + rF0 = np.floor(ra[sub0]).astype("int") + rF1 = np.floor(ra[sub1]).astype("int") dr0 = ra[sub0] - rF0 dr1 = ra[sub1] - rF1 - inds = np.concatenate((rF0,rF1+1)) - weights = np.concatenate((1-dr0,dr1)) + inds = np.concatenate((rF0, rF1 + 1)) + weights = np.concatenate((1 - dr0, dr1)) # init output - num_corr = (0.5*size_input[0]*(size_input[0]+1)).astype('int') - orient_corr = np.zeros(( - num_corr, - (size_input[3]/2+1).astype('int'), - radius_max+1, - )) + num_corr = (0.5 * size_input[0] * (size_input[0] + 1)).astype("int") + orient_corr = np.zeros( + ( + num_corr, + (size_input[3] / 2 + 1).astype("int"), + radius_max + 1, + ) + ) # Main correlation calculation ind_output = 0 @@ -897,31 +1095,36 @@ def orientation_correlation( for a1 in range(size_input[0]): if a0 <= a1: # Correlation - c = np.real(np.fft.ifftn( - orient_hist_pad[a0,:,:,:] * \ - np.conj(orient_hist_pad[a1,:,:,:]), - axes=(0,1,2))) + c = np.real( + np.fft.ifftn( + orient_hist_pad[a0, :, :, :] + * np.conj(orient_hist_pad[a1, :, :, :]), + axes=(0, 1, 2), + ) + ) # Loop over all angles from 0 to pi/2 (half of indices) - for a2 in 
range((size_input[3]/2+1).astype('int')): - orient_corr[ind_output,a2,:] = \ - np.bincount( - inds, - weights=weights*np.concatenate((c[:,:,a2][sub0],c[:,:,a2][sub1])), - minlength=radius_max, - ) - + for a2 in range((size_input[3] / 2 + 1).astype("int")): + orient_corr[ind_output, a2, :] = np.bincount( + inds, + weights=weights + * np.concatenate((c[:, :, a2][sub0], c[:, :, a2][sub1])), + minlength=radius_max, + ) + # normalize - c_norm = np.real(np.fft.ifftn( - orient_norm_pad[a0,:,:] * \ - np.conj(orient_norm_pad[a1,:,:]), - axes=(0,1))) + c_norm = np.real( + np.fft.ifftn( + orient_norm_pad[a0, :, :] * np.conj(orient_norm_pad[a1, :, :]), + axes=(0, 1), + ) + ) sig_norm = np.bincount( inds, - weights=weights*np.concatenate((c_norm[sub0],c_norm[sub1])), + weights=weights * np.concatenate((c_norm[sub0], c_norm[sub1])), minlength=radius_max, - ) - orient_corr[ind_output,:,:] /= sig_norm[None,:] + ) + orient_corr[ind_output, :, :] /= sig_norm[None, :] # increment output index ind_output += 1 @@ -935,10 +1138,9 @@ def plot_orientation_correlation( inds_plot=None, pixel_size=None, pixel_units=None, - size_fig=[8,6], + size_fig=[8, 6], return_fig=False, - ): - +): """ Plot the distance-angle (auto)correlations in orient_corr. @@ -960,15 +1162,15 @@ def plot_orientation_correlation( prob_range = np.array(prob_range) if pixel_units is None: - pixel_units = 'pixels' + pixel_units = "pixels" # Get the pair indices size_input = orient_corr.shape - num_corr = (np.sqrt(8*size_input[0]+1)/2-1/2).astype('int') - ya,xa = np.meshgrid(np.arange(num_corr),np.arange(num_corr)) + num_corr = (np.sqrt(8 * size_input[0] + 1) / 2 - 1 / 2).astype("int") + ya, xa = np.meshgrid(np.arange(num_corr), np.arange(num_corr)) keep = ya >= xa # row 0 is the first diff ring, row 1 is the second diff ring - pair_inds = np.vstack((xa[keep],ya[keep])) + pair_inds = np.vstack((xa[keep], ya[keep])) if inds_plot is None: inds_plot = np.arange(size_input[0]) @@ -985,49 +1187,46 @@ def plot_orientation_correlation( # dark red N = 256 cvals = np.zeros((N, 4)) - cvals[:,3] = 1 - c = np.linspace(0.0,1.0,int(N/4)) + cvals[:, 3] = 1 + c = np.linspace(0.0, 1.0, int(N / 4)) - cvals[0:int(N/4),1] = c*0.4+0.3 - cvals[0:int(N/4),2] = 1 + cvals[0 : int(N / 4), 1] = c * 0.4 + 0.3 + cvals[0 : int(N / 4), 2] = 1 - cvals[int(N/4):int(N/2),0] = c - cvals[int(N/4):int(N/2),1] = c*0.3+0.7 - cvals[int(N/4):int(N/2),2] = 1 + cvals[int(N / 4) : int(N / 2), 0] = c + cvals[int(N / 4) : int(N / 2), 1] = c * 0.3 + 0.7 + cvals[int(N / 4) : int(N / 2), 2] = 1 - cvals[int(N/2):int(N*3/4),0] = 1 - cvals[int(N/2):int(N*3/4),1] = 1-c - cvals[int(N/2):int(N*3/4),2] = 1-c + cvals[int(N / 2) : int(N * 3 / 4), 0] = 1 + cvals[int(N / 2) : int(N * 3 / 4), 1] = 1 - c + cvals[int(N / 2) : int(N * 3 / 4), 2] = 1 - c - cvals[int(N*3/4):N,0] = 1-0.5*c + cvals[int(N * 3 / 4) : N, 0] = 1 - 0.5 * c new_cmap = ListedColormap(cvals) # plotting num_plot = inds_plot.shape[0] - fig,ax = plt.subplots( - num_plot, - 1, - figsize=(size_fig[0],num_plot*size_fig[1])) + fig, ax = plt.subplots(num_plot, 1, figsize=(size_fig[0], num_plot * size_fig[1])) # loop over indices - for count,ind in enumerate(inds_plot): + for count, ind in enumerate(inds_plot): if num_plot > 1: p = ax[count].imshow( - np.log10(orient_corr[ind,:,:]), + np.log10(orient_corr[ind, :, :]), vmin=np.log10(prob_range[0]), vmax=np.log10(prob_range[1]), - aspect='auto', - cmap=new_cmap - ) + aspect="auto", + cmap=new_cmap, + ) ax_handle = ax[count] else: p = ax.imshow( - np.log10(orient_corr[ind,:,:]), + 
np.log10(orient_corr[ind, :, :]), vmin=np.log10(prob_range[0]), vmax=np.log10(prob_range[1]), - aspect='auto', - cmap=new_cmap - ) + aspect="auto", + cmap=new_cmap, + ) ax_handle = ax cbar = fig.colorbar(p, ax=ax_handle) @@ -1038,22 +1237,17 @@ def plot_orientation_correlation( cbar.set_ticks(t) cbar.ax.set_yticklabels(t_lab) - cbar.ax.set_ylabel( - 'Probability [mult. of rand. dist.]', - fontsize=12) - + cbar.ax.set_ylabel("Probability [mult. of rand. dist.]", fontsize=12) - ind_0 = pair_inds[0,ind] - ind_1 = pair_inds[1,ind] + ind_0 = pair_inds[0, ind] + ind_1 = pair_inds[1, ind] if ind_0 != ind_1: ax_handle.set_title( - 'Correlation of Rings ' + str(ind_0) + ' and ' + str(ind_1), - fontsize=16) + "Correlation of Rings " + str(ind_0) + " and " + str(ind_1), fontsize=16 + ) else: - ax_handle.set_title( - 'Autocorrelation of Ring ' + str(ind_0), - fontsize=16) + ax_handle.set_title("Autocorrelation of Ring " + str(ind_0), fontsize=16) # x axis labels if pixel_size is not None: @@ -1062,103 +1256,105 @@ def plot_orientation_correlation( x_t_new = np.delete(x_t, sub) ax_handle.set_xticks(x_t_new) ax_handle.set_xticklabels(x_t_new * pixel_size) - ax_handle.set_xlabel( - 'Radial Distance [' + pixel_units + ']', - fontsize=12) + ax_handle.set_xlabel("Radial Distance [" + pixel_units + "]", fontsize=12) # y axis labels ax_handle.invert_yaxis() - ax_handle.set_ylabel( - 'Relative Grain Orientation [degrees]', - fontsize=12) - ax_handle.set_yticks( - [0,10,20,30,40,50,60,70,80,90]) - ax_handle.set_yticklabels( - ['0','','','30','','','60','','','90']) + ax_handle.set_ylabel("Relative Grain Orientation [degrees]", fontsize=12) + ax_handle.set_yticks([0, 10, 20, 30, 40, 50, 60, 70, 80, 90]) + ax_handle.set_yticklabels(["0", "", "", "30", "", "", "60", "", "", "90"]) if return_fig is True: return fig, ax plt.show() - -def get_intensity(orient,x,y,t): +def get_intensity(orient, x, y, t): # utility function to get histogram intensites - x = np.clip(x,0,orient.shape[0]-2) - y = np.clip(y,0,orient.shape[1]-2) + x = np.clip(x, 0, orient.shape[0] - 2) + y = np.clip(y, 0, orient.shape[1] - 2) - xF = np.floor(x).astype('int') - yF = np.floor(y).astype('int') - tF = np.floor(t).astype('int') + xF = np.floor(x).astype("int") + yF = np.floor(y).astype("int") + tF = np.floor(t).astype("int") dx = x - xF dy = y - yF dt = t - tF - t1 = np.mod(tF ,orient.shape[2]) - t2 = np.mod(tF+1,orient.shape[2]) - - int_vals = \ - orient[xF ,yF ,t1]*((1-dx)*(1-dy)*(1-dt)) + \ - orient[xF ,yF ,t2]*((1-dx)*(1-dy)*( dt)) + \ - orient[xF ,yF+1,t1]*((1-dx)*( dy)*(1-dt)) + \ - orient[xF ,yF+1,t2]*((1-dx)*( dy)*( dt)) + \ - orient[xF+1,yF ,t1]*(( dx)*(1-dy)*(1-dt)) + \ - orient[xF+1,yF ,t2]*(( dx)*(1-dy)*( dt)) + \ - orient[xF+1,yF+1,t1]*(( dx)*( dy)*(1-dt)) + \ - orient[xF+1,yF+1,t2]*(( dx)*( dy)*( dt)) + t1 = np.mod(tF, orient.shape[2]) + t2 = np.mod(tF + 1, orient.shape[2]) + + int_vals = ( + orient[xF, yF, t1] * ((1 - dx) * (1 - dy) * (1 - dt)) + + orient[xF, yF, t2] * ((1 - dx) * (1 - dy) * (dt)) + + orient[xF, yF + 1, t1] * ((1 - dx) * (dy) * (1 - dt)) + + orient[xF, yF + 1, t2] * ((1 - dx) * (dy) * (dt)) + + orient[xF + 1, yF, t1] * ((dx) * (1 - dy) * (1 - dt)) + + orient[xF + 1, yF, t2] * ((dx) * (1 - dy) * (dt)) + + orient[xF + 1, yF + 1, t1] * ((dx) * (dy) * (1 - dt)) + + orient[xF + 1, yF + 1, t2] * ((dx) * (dy) * (dt)) + ) return int_vals -def set_intensity(orient,xy_t_int): +def set_intensity(orient, xy_t_int): # utility function to set flowline intensites - xF = np.floor(xy_t_int[:,0]).astype('int') - yF = 
np.floor(xy_t_int[:,1]).astype('int') - tF = np.floor(xy_t_int[:,2]).astype('int') - dx = xy_t_int[:,0] - xF - dy = xy_t_int[:,1] - yF - dt = xy_t_int[:,2] - tF + xF = np.floor(xy_t_int[:, 0]).astype("int") + yF = np.floor(xy_t_int[:, 1]).astype("int") + tF = np.floor(xy_t_int[:, 2]).astype("int") + dx = xy_t_int[:, 0] - xF + dy = xy_t_int[:, 1] - yF + dt = xy_t_int[:, 2] - tF inds_1D = np.ravel_multi_index( - [xF ,yF ,tF ], - orient.shape[0:3], - mode=['clip','clip','wrap']) - orient.ravel()[inds_1D] = orient.ravel()[inds_1D] + xy_t_int[:,3]*(1-dx)*(1-dy)*(1-dt) + [xF, yF, tF], orient.shape[0:3], mode=["clip", "clip", "wrap"] + ) + orient.ravel()[inds_1D] = orient.ravel()[inds_1D] + xy_t_int[:, 3] * (1 - dx) * ( + 1 - dy + ) * (1 - dt) inds_1D = np.ravel_multi_index( - [xF ,yF ,tF+1], - orient.shape[0:3], - mode=['clip','clip','wrap']) - orient.ravel()[inds_1D] = orient.ravel()[inds_1D] + xy_t_int[:,3]*(1-dx)*(1-dy)*( dt) + [xF, yF, tF + 1], orient.shape[0:3], mode=["clip", "clip", "wrap"] + ) + orient.ravel()[inds_1D] = orient.ravel()[inds_1D] + xy_t_int[:, 3] * (1 - dx) * ( + 1 - dy + ) * (dt) inds_1D = np.ravel_multi_index( - [xF ,yF+1,tF ], - orient.shape[0:3], - mode=['clip','clip','wrap']) - orient.ravel()[inds_1D] = orient.ravel()[inds_1D] + xy_t_int[:,3]*(1-dx)*( dy)*(1-dt) + [xF, yF + 1, tF], orient.shape[0:3], mode=["clip", "clip", "wrap"] + ) + orient.ravel()[inds_1D] = orient.ravel()[inds_1D] + xy_t_int[:, 3] * (1 - dx) * ( + dy + ) * (1 - dt) inds_1D = np.ravel_multi_index( - [xF ,yF+1,tF+1], - orient.shape[0:3], - mode=['clip','clip','wrap']) - orient.ravel()[inds_1D] = orient.ravel()[inds_1D] + xy_t_int[:,3]*(1-dx)*( dy)*( dt) + [xF, yF + 1, tF + 1], orient.shape[0:3], mode=["clip", "clip", "wrap"] + ) + orient.ravel()[inds_1D] = orient.ravel()[inds_1D] + xy_t_int[:, 3] * (1 - dx) * ( + dy + ) * (dt) inds_1D = np.ravel_multi_index( - [xF+1,yF ,tF ], - orient.shape[0:3], - mode=['clip','clip','wrap']) - orient.ravel()[inds_1D] = orient.ravel()[inds_1D] + xy_t_int[:,3]*( dx)*(1-dy)*(1-dt) + [xF + 1, yF, tF], orient.shape[0:3], mode=["clip", "clip", "wrap"] + ) + orient.ravel()[inds_1D] = orient.ravel()[inds_1D] + xy_t_int[:, 3] * (dx) * ( + 1 - dy + ) * (1 - dt) inds_1D = np.ravel_multi_index( - [xF+1,yF ,tF+1], - orient.shape[0:3], - mode=['clip','clip','wrap']) - orient.ravel()[inds_1D] = orient.ravel()[inds_1D] + xy_t_int[:,3]*( dx)*(1-dy)*( dt) + [xF + 1, yF, tF + 1], orient.shape[0:3], mode=["clip", "clip", "wrap"] + ) + orient.ravel()[inds_1D] = orient.ravel()[inds_1D] + xy_t_int[:, 3] * (dx) * ( + 1 - dy + ) * (dt) inds_1D = np.ravel_multi_index( - [xF+1,yF+1,tF ], - orient.shape[0:3], - mode=['clip','clip','wrap']) - orient.ravel()[inds_1D] = orient.ravel()[inds_1D] + xy_t_int[:,3]*( dx)*( dy)*(1-dt) + [xF + 1, yF + 1, tF], orient.shape[0:3], mode=["clip", "clip", "wrap"] + ) + orient.ravel()[inds_1D] = orient.ravel()[inds_1D] + xy_t_int[:, 3] * (dx) * (dy) * ( + 1 - dt + ) inds_1D = np.ravel_multi_index( - [xF+1,yF+1,tF+1], - orient.shape[0:3], - mode=['clip','clip','wrap']) - orient.ravel()[inds_1D] = orient.ravel()[inds_1D] + xy_t_int[:,3]*( dx)*( dy)*( dt) + [xF + 1, yF + 1, tF + 1], orient.shape[0:3], mode=["clip", "clip", "wrap"] + ) + orient.ravel()[inds_1D] = orient.ravel()[inds_1D] + xy_t_int[:, 3] * (dx) * (dy) * ( + dt + ) - return orient \ No newline at end of file + return orient diff --git a/py4DSTEM/process/diffraction/tdesign.py b/py4DSTEM/process/diffraction/tdesign.py index 584b14dc2..1a0a81fb6 100644 --- a/py4DSTEM/process/diffraction/tdesign.py +++ 
b/py4DSTEM/process/diffraction/tdesign.py @@ -2,6 +2,7 @@ import numpy as np + def tdesign(degree): """ Returns the spherical coordinates of minimal T-designs. @@ -36,1921 +37,1964 @@ def tdesign(degree): assert degree >= 1, "Degree should be at least 1." assert type(degree) is int, "Degree should be an integer." - vecs = _tdesigns[degree-1] + vecs = _tdesigns[degree - 1] - azim = np.arctan2(vecs[:,1],vecs[:,0]) - elev = np.arctan2(np.hypot(vecs[:,1], vecs[:,0]), vecs[:,2]) + azim = np.arctan2(vecs[:, 1], vecs[:, 0]) + elev = np.arctan2(np.hypot(vecs[:, 1], vecs[:, 0]), vecs[:, 2]) # elev = np.arctan2(vecs[:,2], np.hypot(vecs[:,1], vecs[:,0])) return azim, elev, vecs + _tdesigns = [ - #degree 1 - np.array([ - [ 1, 0, 0 ], - [ -1, 0, 0 ], - ]), - #degree 2 - np.array([ - [ 0.577350269189626, 0.577350269189626, 0.577350269189626 ], - [ 0.577350269189626, -0.577350269189626, -0.577350269189626 ], - [ -0.577350269189626, 0.577350269189626, -0.577350269189626 ], - [ -0.577350269189626, -0.577350269189626, 0.577350269189626 ], - ]), - #degree 3 - np.array([ - [ 1, 0, 0 ], - [ -1, 0, 0 ], - [ 0, 1, 0 ], - [ 0, -1, 0 ], - [ 0, 0, 1 ], - [ 0, 0, -1 ], - ]), - #degree 4 - np.array([ - [ 0.850650808352, 0, -0.525731112119 ], - [ 0.525731112119, -0.850650808352, 0 ], - [ 0, -0.525731112119, 0.850650808352 ], - [ 0.850650808352, 0, 0.525731112119 ], - [ -0.525731112119, -0.850650808352, 0 ], - [ 0, 0.525731112119, -0.850650808352 ], - [ -0.850650808352, 0, -0.525731112119 ], - [ -0.525731112119, 0.850650808352, 0 ], - [ 0, 0.525731112119, 0.850650808352 ], - [ -0.850650808352, 0, 0.525731112119 ], - [ 0.525731112119, 0.850650808352, 0 ], - [ 0, -0.525731112119, -0.850650808352 ], - ]), - #degree 5 - np.array([ - [ 0.850650808352, 0, -0.525731112119 ], - [ 0.525731112119, -0.850650808352, 0 ], - [ 0, -0.525731112119, 0.850650808352 ], - [ 0.850650808352, 0, 0.525731112119 ], - [ -0.525731112119, -0.850650808352, 0 ], - [ 0, 0.525731112119, -0.850650808352 ], - [ -0.850650808352, 0, -0.525731112119 ], - [ -0.525731112119, 0.850650808352, 0 ], - [ 0, 0.525731112119, 0.850650808352 ], - [ -0.850650808352, 0, 0.525731112119 ], - [ 0.525731112119, 0.850650808352, 0 ], - [ 0, -0.525731112119, -0.850650808352 ], - ]), - #degree 6 - np.array([ - [ 0.866246818107821, 0.422518653761112, 0.266635401516705 ], - [ 0.866246818107821, -0.422518653761112, -0.266635401516705 ], - [ 0.866246818107821, 0.266635401516705, -0.422518653761112 ], - [ 0.866246818107821, -0.266635401516705, 0.422518653761112 ], - [ -0.866246818107821, 0.422518653761112, -0.266635401516705 ], - [ -0.866246818107821, -0.422518653761112, 0.266635401516705 ], - [ -0.866246818107821, 0.266635401516705, 0.422518653761112 ], - [ -0.866246818107821, -0.266635401516705, -0.422518653761112 ], - [ 0.266635401516705, 0.866246818107821, 0.422518653761112 ], - [ -0.266635401516705, 0.866246818107821, -0.422518653761112 ], - [ -0.422518653761112, 0.866246818107821, 0.266635401516705 ], - [ 0.422518653761112, 0.866246818107821, -0.266635401516705 ], - [ -0.266635401516705, -0.866246818107821, 0.422518653761112 ], - [ 0.266635401516705, -0.866246818107821, -0.422518653761112 ], - [ 0.422518653761112, -0.866246818107821, 0.266635401516705 ], - [ -0.422518653761112, -0.866246818107821, -0.266635401516705 ], - [ 0.422518653761112, 0.266635401516705, 0.866246818107821 ], - [ -0.422518653761112, -0.266635401516705, 0.866246818107821 ], - [ 0.266635401516705, -0.422518653761112, 0.866246818107821 ], - [ -0.266635401516705, 0.422518653761112, 0.866246818107821 ], - [ 
0.422518653761112, -0.266635401516705, -0.866246818107821 ], - [ -0.422518653761112, 0.266635401516705, -0.866246818107821 ], - [ 0.266635401516705, 0.422518653761112, -0.866246818107821 ], - [ -0.266635401516705, -0.422518653761112, -0.866246818107821 ], - ]), - #degree 7 - np.array([ - [ 0.866246818107821, 0.422518653761112, 0.266635401516705 ], - [ 0.866246818107821, -0.422518653761112, -0.266635401516705 ], - [ 0.866246818107821, 0.266635401516705, -0.422518653761112 ], - [ 0.866246818107821, -0.266635401516705, 0.422518653761112 ], - [ -0.866246818107821, 0.422518653761112, -0.266635401516705 ], - [ -0.866246818107821, -0.422518653761112, 0.266635401516705 ], - [ -0.866246818107821, 0.266635401516705, 0.422518653761112 ], - [ -0.866246818107821, -0.266635401516705, -0.422518653761112 ], - [ 0.266635401516705, 0.866246818107821, 0.422518653761112 ], - [ -0.266635401516705, 0.866246818107821, -0.422518653761112 ], - [ -0.422518653761112, 0.866246818107821, 0.266635401516705 ], - [ 0.422518653761112, 0.866246818107821, -0.266635401516705 ], - [ -0.266635401516705, -0.866246818107821, 0.422518653761112 ], - [ 0.266635401516705, -0.866246818107821, -0.422518653761112 ], - [ 0.422518653761112, -0.866246818107821, 0.266635401516705 ], - [ -0.422518653761112, -0.866246818107821, -0.266635401516705 ], - [ 0.422518653761112, 0.266635401516705, 0.866246818107821 ], - [ -0.422518653761112, -0.266635401516705, 0.866246818107821 ], - [ 0.266635401516705, -0.422518653761112, 0.866246818107821 ], - [ -0.266635401516705, 0.422518653761112, 0.866246818107821 ], - [ 0.422518653761112, -0.266635401516705, -0.866246818107821 ], - [ -0.422518653761112, 0.266635401516705, -0.866246818107821 ], - [ 0.266635401516705, 0.422518653761112, -0.866246818107821 ], - [ -0.266635401516705, -0.422518653761112, -0.866246818107821 ], - ]), - #degree 8 - np.array([ - [ 0.507475446410817, -0.306200013239571, 0.805425492011663 ], - [ -0.306200013239569, 0.805425492011663, 0.507475446410817 ], - [ -0.507475446410817, 0.30620001323957, 0.805425492011663 ], - [ 0.805425492011663, 0.507475446410817, -0.306200013239569 ], - [ 0.306200013239569, 0.805425492011664, -0.507475446410817 ], - [ 0.805425492011663, -0.507475446410817, 0.306200013239569 ], - [ 0.306200013239569, -0.805425492011663, 0.507475446410816 ], - [ -0.805425492011663, -0.507475446410817, -0.306200013239569 ], - [ -0.30620001323957, -0.805425492011664, -0.507475446410816 ], - [ -0.805425492011663, 0.507475446410818, 0.306200013239569 ], - [ 0.507475446410817, 0.30620001323957, -0.805425492011663 ], - [ -0.507475446410817, -0.30620001323957, -0.805425492011663 ], - [ 0.626363670265271, -0.243527775409194, -0.74051520928072 ], - [ -0.243527775409195, -0.74051520928072, 0.626363670265271 ], - [ -0.626363670265271, 0.243527775409194, -0.74051520928072 ], - [ -0.74051520928072, 0.62636367026527, -0.243527775409195 ], - [ 0.243527775409195, -0.740515209280719, -0.626363670265271 ], - [ -0.74051520928072, -0.62636367026527, 0.243527775409195 ], - [ 0.243527775409195, 0.740515209280719, 0.626363670265271 ], - [ 0.74051520928072, -0.62636367026527, -0.243527775409195 ], - [ -0.243527775409195, 0.74051520928072, -0.626363670265271 ], - [ 0.74051520928072, 0.62636367026527, 0.243527775409195 ], - [ 0.626363670265271, 0.243527775409194, 0.74051520928072 ], - [ -0.626363670265271, -0.243527775409194, 0.74051520928072 ], - [ -0.286248723426035, 0.957120327092458, -0.044523564585421 ], - [ 0.957120327092458, -0.04452356458542, -0.286248723426035 ], - [ 0.286248723426035, 
-0.957120327092458, -0.044523564585421 ], - [ -0.04452356458542, -0.286248723426035, 0.957120327092458 ], - [ -0.957120327092458, -0.044523564585419, 0.286248723426035 ], - [ -0.044523564585421, 0.286248723426034, -0.957120327092458 ], - [ -0.957120327092458, 0.04452356458542, -0.286248723426034 ], - [ 0.044523564585421, 0.286248723426034, 0.957120327092458 ], - [ 0.957120327092458, 0.04452356458542, 0.286248723426034 ], - [ 0.044523564585421, -0.286248723426034, -0.957120327092458 ], - [ -0.286248723426034, -0.957120327092458, 0.044523564585421 ], - [ 0.286248723426035, 0.957120327092458, 0.044523564585421 ], - ]), - #degree 9 - np.array([ - [ 0.93336469319931, 0.353542188921472, -0.0619537742318597 ], - [ 0.93336469319931, -0.353542188921472, 0.0619537742318597 ], - [ 0.93336469319931, -0.0619537742318597, -0.353542188921472 ], - [ 0.93336469319931, 0.0619537742318597, 0.353542188921472 ], - [ -0.93336469319931, 0.353542188921472, 0.0619537742318597 ], - [ -0.93336469319931, -0.353542188921472, -0.0619537742318597 ], - [ -0.93336469319931, -0.0619537742318597, 0.353542188921472 ], - [ -0.93336469319931, 0.0619537742318597, -0.353542188921472 ], - [ -0.0619537742318597, 0.93336469319931, 0.353542188921472 ], - [ 0.0619537742318597, 0.93336469319931, -0.353542188921472 ], - [ -0.353542188921472, 0.93336469319931, -0.0619537742318597 ], - [ 0.353542188921472, 0.93336469319931, 0.0619537742318597 ], - [ 0.0619537742318597, -0.93336469319931, 0.353542188921472 ], - [ -0.0619537742318597, -0.93336469319931, -0.353542188921472 ], - [ 0.353542188921472, -0.93336469319931, -0.0619537742318597 ], - [ -0.353542188921472, -0.93336469319931, 0.0619537742318597 ], - [ 0.353542188921472, -0.0619537742318597, 0.93336469319931 ], - [ -0.353542188921472, 0.0619537742318597, 0.93336469319931 ], - [ -0.0619537742318597, -0.353542188921472, 0.93336469319931 ], - [ 0.0619537742318597, 0.353542188921472, 0.93336469319931 ], - [ 0.353542188921472, 0.0619537742318597, -0.93336469319931 ], - [ -0.353542188921472, -0.0619537742318597, -0.93336469319931 ], - [ -0.0619537742318597, 0.353542188921472, -0.93336469319931 ], - [ 0.0619537742318597, -0.353542188921472, -0.93336469319931 ], - [ 0.70684169771255, 0.639740098619792, 0.301840057965769 ], - [ 0.70684169771255, -0.639740098619792, -0.301840057965769 ], - [ 0.70684169771255, 0.301840057965769, -0.639740098619792 ], - [ 0.70684169771255, -0.301840057965769, 0.639740098619792 ], - [ -0.70684169771255, 0.639740098619792, -0.301840057965769 ], - [ -0.70684169771255, -0.639740098619792, 0.301840057965769 ], - [ -0.70684169771255, 0.301840057965769, 0.639740098619792 ], - [ -0.70684169771255, -0.301840057965769, -0.639740098619792 ], - [ 0.301840057965769, 0.70684169771255, 0.639740098619792 ], - [ -0.301840057965769, 0.70684169771255, -0.639740098619792 ], - [ -0.639740098619792, 0.70684169771255, 0.301840057965769 ], - [ 0.639740098619792, 0.70684169771255, -0.301840057965769 ], - [ -0.301840057965769, -0.70684169771255, 0.639740098619792 ], - [ 0.301840057965769, -0.70684169771255, -0.639740098619792 ], - [ 0.639740098619792, -0.70684169771255, 0.301840057965769 ], - [ -0.639740098619792, -0.70684169771255, -0.301840057965769 ], - [ 0.639740098619792, 0.301840057965769, 0.70684169771255 ], - [ -0.639740098619792, -0.301840057965769, 0.70684169771255 ], - [ 0.301840057965769, -0.639740098619792, 0.70684169771255 ], - [ -0.301840057965769, 0.639740098619792, 0.70684169771255 ], - [ 0.639740098619792, -0.301840057965769, -0.70684169771255 ], - [ -0.639740098619792, 
0.301840057965769, -0.70684169771255 ], - [ 0.301840057965769, 0.639740098619792, -0.70684169771255 ], - [ -0.301840057965769, -0.639740098619792, -0.70684169771255 ], - ]), - #degree 10 - np.array([ - [ -0.753828667197017, 0.54595190806126, -0.365621190026287 ], - [ 0.545951908061258, -0.36562119002629, -0.753828667197017 ], - [ 0.753828667197016, -0.545951908061261, -0.365621190026288 ], - [ -0.365621190026289, -0.753828667197017, 0.545951908061259 ], - [ -0.545951908061258, -0.365621190026288, 0.753828667197018 ], - [ -0.365621190026289, 0.753828667197017, -0.545951908061259 ], - [ -0.545951908061258, 0.365621190026289, -0.753828667197017 ], - [ 0.365621190026287, 0.753828667197017, 0.54595190806126 ], - [ 0.545951908061259, 0.365621190026289, 0.753828667197017 ], - [ 0.365621190026287, -0.753828667197018, -0.545951908061259 ], - [ -0.753828667197017, -0.545951908061261, 0.365621190026288 ], - [ 0.753828667197016, 0.545951908061261, 0.365621190026287 ], - [ 0.70018101936373, -0.713151065847793, 0.034089549761256 ], - [ -0.713151065847794, 0.034089549761254, 0.700181019363729 ], - [ -0.70018101936373, 0.713151065847793, 0.034089549761256 ], - [ 0.034089549761255, 0.70018101936373, -0.713151065847793 ], - [ 0.713151065847793, 0.034089549761254, -0.70018101936373 ], - [ 0.034089549761257, -0.700181019363729, 0.713151065847794 ], - [ 0.713151065847794, -0.034089549761255, 0.700181019363728 ], - [ -0.034089549761256, -0.700181019363729, -0.713151065847794 ], - [ -0.713151065847794, -0.034089549761254, -0.700181019363729 ], - [ -0.034089549761257, 0.700181019363729, 0.713151065847794 ], - [ 0.70018101936373, 0.713151065847793, -0.034089549761257 ], - [ -0.700181019363729, -0.713151065847794, -0.034089549761257 ], - [ 0.276230218261792, 0.077050720725736, -0.957997939953259 ], - [ 0.077050720725735, -0.957997939953258, 0.276230218261793 ], - [ -0.276230218261792, -0.077050720725734, -0.957997939953259 ], - [ -0.957997939953259, 0.276230218261791, 0.077050720725738 ], - [ -0.077050720725735, -0.957997939953259, -0.276230218261792 ], - [ -0.957997939953258, -0.276230218261793, -0.077050720725736 ], - [ -0.077050720725736, 0.957997939953258, 0.276230218261794 ], - [ 0.957997939953259, -0.27623021826179, 0.077050720725737 ], - [ 0.077050720725734, 0.957997939953259, -0.276230218261792 ], - [ 0.957997939953258, 0.276230218261793, -0.077050720725738 ], - [ 0.276230218261793, -0.077050720725736, 0.957997939953258 ], - [ -0.276230218261791, 0.077050720725735, 0.957997939953259 ], - [ 0.451819102555243, -0.783355937521819, 0.42686411621907 ], - [ -0.783355937521818, 0.426864116219071, 0.451819102555243 ], - [ -0.451819102555243, 0.783355937521819, 0.42686411621907 ], - [ 0.426864116219071, 0.451819102555242, -0.783355937521819 ], - [ 0.783355937521818, 0.42686411621907, -0.451819102555244 ], - [ 0.426864116219072, -0.451819102555242, 0.783355937521818 ], - [ 0.783355937521819, -0.42686411621907, 0.451819102555242 ], - [ -0.426864116219072, -0.451819102555241, -0.783355937521819 ], - [ -0.783355937521818, -0.42686411621907, -0.451819102555243 ], - [ -0.426864116219072, 0.451819102555241, 0.783355937521819 ], - [ 0.451819102555243, 0.783355937521818, -0.426864116219071 ], - [ -0.451819102555242, -0.783355937521819, -0.426864116219071 ], - [ -0.33858435995926, -0.933210037239527, 0.120331448866784 ], - [ -0.933210037239526, 0.120331448866787, -0.33858435995926 ], - [ 0.338584359959261, 0.933210037239526, 0.120331448866786 ], - [ 0.120331448866785, -0.338584359959261, -0.933210037239526 ], - [ 
0.933210037239526, 0.120331448866789, 0.33858435995926 ], - [ 0.120331448866785, 0.338584359959261, 0.933210037239526 ], - [ 0.933210037239526, -0.120331448866787, -0.338584359959262 ], - [ -0.120331448866784, 0.338584359959262, -0.933210037239526 ], - [ -0.933210037239526, -0.120331448866787, 0.338584359959261 ], - [ -0.120331448866784, -0.338584359959262, 0.933210037239526 ], - [ -0.338584359959262, 0.933210037239526, -0.120331448866784 ], - [ 0.338584359959261, -0.933210037239527, -0.120331448866783 ], - ]), - #degree 11 - np.array([ - [ -0.674940520480437, 0.725629052064501, 0.133857284499464 ], - [ 0.09672433446143, -0.910327382989987, -0.402428203412229 ], - [ 0.906960315916358, 0.135127022135053, 0.398953221871704 ], - [ -0.132758704758026, -0.307658524060733, 0.942189661842955 ], - [ -0.226055801127587, -0.958831174708704, -0.171876563798827 ], - [ 0.275738264019853, -0.180692733507538, -0.944096682449892 ], - [ 0.830881650513589, 0.333278644528177, -0.445601871563928 ], - [ -0.616471328612787, -0.2675443371664, 0.740528951931372 ], - [ 0.430277293287436, -0.892644471615357, -0.13434023290057 ], - [ -0.690987198523076, 0.175109339053207, 0.701336874015319 ], - [ 0.810517041535507, -0.381449337547215, 0.444475565431127 ], - [ -0.086734443854626, -0.706008517835924, -0.702872043114784 ], - [ 0.871320852056737, 0.46045780600396, 0.169642511361809 ], - [ -0.600735266749549, 0.303266118552509, -0.739693720820614 ], - [ -0.899100947083419, -0.418081246639828, 0.12971336924846 ], - [ 0.896927087079571, -0.188066327344843, -0.400191025613991 ], - [ 0.150494960966991, 0.903072153139254, 0.402258564791324 ], - [ 0.248601716402621, -0.224283612281953, 0.94228129975259 ], - [ 0.842584674708423, -0.510756382085546, -0.1708185707275 ], - [ 0.260034500418337, 0.209356489957684, -0.942630319215749 ], - [ -0.058802461572434, 0.894595213188746, -0.442991732488095 ], - [ 0.061611769180132, -0.671290108790159, 0.738629528071408 ], - [ 0.982337536097614, 0.133784014710179, -0.130823555148513 ], - [ -0.382277582532576, -0.605243847900137, 0.698243320392029 ], - [ 0.611839278216357, 0.651571608497249, 0.448449703569971 ], - [ 0.646865348569582, -0.298464129297652, -0.701772316597447 ], - [ -0.169201016881282, 0.970430912746818, 0.172147783812972 ], - [ -0.471725450862325, -0.47529570366279, -0.742676977621112 ], - [ 0.119369755955723, -0.984692604411347, 0.127009197228668 ], - [ 0.457289212231729, 0.796155990558714, -0.396260287026038 ], - [ -0.813631436350979, 0.420942272793499, 0.40101307803722 ], - [ 0.287154555386871, 0.16417332397066, 0.943710432821951 ], - [ 0.746667577045155, 0.644035989066398, -0.166448713352744 ], - [ -0.115779644740906, 0.314952464646105, -0.942019118105898 ], - [ -0.867579212111466, 0.221916315040665, -0.445038717226738 ], - [ 0.655140022433912, -0.151162631680508, 0.740230646345257 ], - [ 0.176736512358047, 0.976002671721061, -0.12721238144483 ], - [ 0.455284607701078, -0.55278635410423, 0.697956426080188 ], - [ -0.432023930219742, 0.781838026058859, 0.449538234998843 ], - [ 0.485961267092557, 0.525163287076294, -0.698602296584415 ], - [ -0.975758639968897, 0.138431863354196, 0.1695042646494 ], - [ 0.308602378401872, -0.593188152818631, -0.743567338847214 ], - [ 0.972979693579006, -0.191167383224118, 0.129481842256537 ], - [ -0.614624689780931, 0.68217777986423, -0.39606813475866 ], - [ -0.653028964396532, -0.644975511259979, 0.396937981974668 ], - [ -0.070378900922493, 0.320878001965403, 0.944502047726543 ], - [ -0.381252925250545, 0.909662131759037, -0.164805986030565 ], - 
[ -0.332341796304234, -0.009834857390798, -0.943107738283054 ], - [ -0.477746621168896, -0.755138676192789, -0.448913962446598 ], - [ 0.343877558432071, 0.574039599276676, 0.743119615720828 ], - [ -0.873212544495548, 0.47009394139203, -0.128497231106812 ], - [ 0.664216892966437, 0.259987346974329, 0.700872669256879 ], - [ -0.878489109322641, -0.170673846340671, 0.446236846278739 ], - [ -0.347082716608212, 0.626648635925969, -0.697742842975825 ], - [ -0.433716795977713, -0.885744934523588, 0.165365207503367 ], - [ 0.661861683362982, 0.112512128799614, -0.741134355544863 ], - [ 0.482068945127674, 0.865869532174741, 0.133714192945202 ], - [ -0.8374660393934, -0.372486946227971, -0.399880116725617 ], - [ 0.410355219266256, -0.82161905066793, 0.39566492086167 ], - [ -0.329899568015879, 0.02926988290883, 0.943562159572669 ], - [ -0.982429034616553, -0.080964254903198, -0.168160582094488 ], - [ -0.090370421487683, -0.316160436207578, -0.944391743662116 ], - [ 0.571959920493404, -0.686312971502271, -0.449262010965652 ], - [ -0.442021476996821, 0.502111749619808, 0.743303978710785 ], - [ -0.716515724344093, -0.684793506171761, -0.13290248557738 ], - [ -0.044218043628816, 0.709851625611568, 0.702961900983442 ], - [ -0.110556556362806, -0.889624975730714, 0.443107944412334 ], - [ -0.701028131184281, -0.134257385503649, -0.700381691455451 ], - [ 0.707841110014082, -0.686721956709281, 0.165450648676302 ], - [ 0.099860111408803, 0.666551337869757, -0.738740327945793 ], - ]), - #degree 12 - np.array([ - [ -0.893804977761136, -0.426862191124497, 0.137482113446834 ], - [ -0.426862191241092, 0.137482113445288, -0.893804977705691 ], - [ 0.893804977770157, 0.426862191128157, 0.137482113376823 ], - [ 0.1374821132964, -0.893804977739491, -0.426862191218271 ], - [ 0.426862191272731, 0.137482113377345, 0.893804977701032 ], - [ 0.137482113529033, 0.893804977707775, 0.426862191209756 ], - [ 0.426862191185983, -0.137482113474993, -0.893804977727441 ], - [ -0.137482113324291, 0.893804977725279, -0.426862191239047 ], - [ -0.426862191217414, -0.137482113347288, 0.893804977732073 ], - [ -0.137482113501071, -0.893804977693655, 0.426862191248328 ], - [ -0.893804977672548, 0.426862191328071, -0.137482113390703 ], - [ 0.893804977663553, -0.42686219133326, -0.137482113433065 ], - [ 0.983086600385574, 0.022300380107522, -0.181778516853323 ], - [ 0.022300380232394, -0.181778516808726, 0.983086600390988 ], - [ -0.983086600396613, -0.022300380113323, -0.181778516792915 ], - [ -0.181778516710471, 0.983086600409631, 0.022300380211455 ], - [ -0.022300380272854, -0.181778516836686, -0.9830866003849 ], - [ -0.18177851693601, -0.983086600368179, -0.022300380200376 ], - [ -0.0223003801708, 0.181778516841875, 0.983086600386256 ], - [ 0.181778516710979, -0.983086600409044, 0.022300380233212 ], - [ 0.022300380212558, 0.181778516804081, -0.983086600392297 ], - [ 0.181778516934384, 0.983086600367503, -0.022300380243431 ], - [ 0.983086600391629, -0.022300380332372, 0.181778516792996 ], - [ -0.98308660038057, 0.022300380337865, 0.181778516852128 ], - [ -0.897951986971875, 0.376695603035365, 0.227558018419664 ], - [ 0.376695602927528, 0.227558018339206, -0.897951987037503 ], - [ 0.897951986986053, -0.376695603028569, 0.227558018374966 ], - [ 0.227558018305554, -0.897951987041904, 0.376695602937366 ], - [ -0.376695602875261, 0.227558018455254, 0.89795198703002 ], - [ 0.227558018486567, 0.89795198699048, -0.3766956029506 ], - [ -0.376695602982511, -0.22755801836891, -0.89795198700691 ], - [ -0.227558018280939, 0.897951987054767, 
0.376695602921573 ], - [ 0.376695602931437, -0.22755801842558, 0.897951987013974 ], - [ -0.227558018511349, -0.897951987002348, -0.376695602907339 ], - [ -0.897951987072194, -0.376695602830637, -0.227558018362707 ], - [ 0.897951987057819, 0.376695602823051, -0.227558018431989 ], - [ -0.171330151245221, 0.459786194953055, -0.871345301361568 ], - [ 0.459786194843117, -0.871345301414649, -0.171330151270292 ], - [ 0.171330151191219, -0.459786194982334, -0.871345301356736 ], - [ -0.871345301364754, -0.171330151162981, 0.459786194977662 ], - [ -0.459786195042432, -0.871345301303738, 0.171330151299472 ], - [ -0.871345301353407, 0.171330151362727, -0.459786194924734 ], - [ -0.459786194855202, 0.87134530140841, -0.171330151269592 ], - [ 0.871345301392835, 0.171330151178183, 0.45978619491878 ], - [ 0.459786195054412, 0.871345301309038, 0.171330151240368 ], - [ 0.871345301325486, -0.171330151377355, -0.459786194972196 ], - [ -0.17133015129661, -0.459786194913003, 0.871345301372597 ], - [ 0.171330151350736, 0.459786194942983, 0.871345301346135 ], - [ -0.397191702297223, -0.548095590649226, -0.736091010091219 ], - [ -0.548095590778902, -0.736091010056557, -0.397191702182515 ], - [ 0.397191702250221, 0.548095590625205, -0.736091010134467 ], - [ -0.736091010174764, -0.397191702137083, -0.548095590653075 ], - [ 0.548095590610212, -0.736091010169131, 0.397191702206669 ], - [ -0.736091010049194, 0.397191702305889, 0.548095590699385 ], - [ 0.548095590752529, 0.736091010044117, -0.397191702241962 ], - [ 0.736091010139925, 0.397191702119602, -0.548095590712531 ], - [ -0.548095590584386, 0.736091010182625, 0.3971917022173 ], - [ 0.736091010083782, -0.39719170228798, 0.548095590665912 ], - [ -0.39719170212526, 0.548095590740419, 0.736091010116106 ], - [ 0.397191702171386, -0.548095590716295, 0.736091010109179 ], - [ 0.379474725534956, 0.69627727809449, 0.609259291836815 ], - [ 0.696277278210441, 0.609259291787114, 0.379474725402001 ], - [ -0.379474725495576, -0.696277278074161, 0.609259291884576 ], - [ 0.609259291925953, 0.379474725376213, 0.696277278103008 ], - [ -0.696277278071056, 0.609259291933888, -0.379474725422102 ], - [ 0.60925929179591, -0.379474725515542, -0.696277278140864 ], - [ -0.696277278185906, -0.609259291774849, 0.379474725466713 ], - [ -0.609259291882878, -0.379474725353089, 0.696277278153303 ], - [ 0.696277278046548, -0.609259291946589, -0.379474725446676 ], - [ -0.609259291838737, 0.379474725493095, -0.696277278115623 ], - [ 0.37947472533629, -0.696277278181595, -0.609259291861008 ], - [ -0.379474725375237, 0.696277278161216, -0.609259291860039 ], - [ -0.678701446470328, 0.729764213479081, 0.082513873284097 ], - [ 0.729764213389772, 0.082513873179234, -0.678701446579104 ], - [ 0.678701446474772, -0.72976421347722, 0.082513873263995 ], - [ 0.082513873217671, -0.678701446552547, 0.729764213410125 ], - [ -0.729764213370974, 0.082513873368402, 0.678701446576318 ], - [ 0.082513873326892, 0.678701446534692, -0.729764213414381 ], - [ -0.729764213431284, -0.082513873201736, -0.678701446531733 ], - [ -0.082513873171694, 0.678701446577399, 0.72976421339221 ], - [ 0.729764213412797, -0.08251387334668, 0.67870144653399 ], - [ -0.082513873373655, -0.678701446558336, -0.729764213387104 ], - [ -0.678701446641541, -0.729764213324827, -0.082513873240061 ], - [ 0.678701446637016, 0.729764213321344, -0.082513873308075 ], - ]), - #degree 13 - np.array([ - [ 0.276790129286922, -0.235256466916603, 0.931687511509759 ], - [ 0.198886780634501, 0.360548603139528, 0.911289609983006 ], - [ -0.258871339062373, 
0.204230077441409, 0.944073993540935 ], - [ -0.20028291392731, -0.228346161950354, 0.952756414153864 ], - [ -0.883545166667525, -0.414277696639041, -0.218453492821483 ], - [ 0.397750057908559, -0.901619535998689, -0.16993264471327 ], - [ 0.876539487069282, 0.434392104192327, -0.207321073274483 ], - [ -0.411742357517625, 0.88489597883979, -0.217778184534166 ], - [ 0.501114093867204, 0.377868932752059, 0.778524074507957 ], - [ -0.394238847790386, 0.473687133880952, 0.787525383774109 ], - [ -0.495364292002136, -0.406429808740612, 0.767742814213388 ], - [ 0.370186583802172, -0.559306270968252, 0.741713144300723 ], - [ 0.411742357517961, -0.884895978839253, 0.217778184535715 ], - [ 0.883545166668397, 0.414277696639157, 0.218453492817737 ], - [ -0.39775005791059, 0.901619535997218, 0.169932644716324 ], - [ -0.876539487069878, -0.434392104191278, 0.20732107327416 ], - [ -0.69101430131565, -0.702815226887987, -0.168967429499392 ], - [ 0.684400344460127, -0.714441044251654, -0.145499700314004 ], - [ 0.660710489482765, 0.731357715035063, -0.169048932993191 ], - [ -0.773611287956309, 0.615222357857778, -0.151746583284428 ], - [ 0.683629784686022, -0.21996733132084, -0.695891292258878 ], - [ 0.256574099503526, 0.681472791071418, -0.685393730999406 ], - [ -0.644474509637892, 0.354062227985534, -0.677711254990588 ], - [ -0.220535080416141, -0.731547754140859, -0.645137320046912 ], - [ 0.394238847792041, -0.473687133882522, -0.787525383772336 ], - [ 0.495364292000968, 0.406429808741285, -0.767742814213785 ], - [ -0.370186583802439, 0.559306270970003, -0.74171314429927 ], - [ -0.50111409386464, -0.377868932752239, -0.77852407450952 ], - [ -0.488574873968534, -0.006884557978444, -0.872494811095214 ], - [ 0.055542048727444, -0.584131720249991, -0.809756268404849 ], - [ 0.526812107464791, 0.049707819617671, -0.848527039107984 ], - [ 0.004245864108125, 0.4886546223943, -0.872466980836902 ], - [ -0.710317361514613, -0.479530914625401, 0.515266288291253 ], - [ 0.521404384476562, -0.728039165451723, 0.445080264016476 ], - [ 0.738099355388852, 0.407803273205931, 0.53749961110413 ], - [ -0.496057991262554, 0.699670113703365, 0.514186932248264 ], - [ -0.973220809307327, 0.194260751789571, -0.122898399685852 ], - [ -0.376203572666605, -0.908865964003535, -0.180093118660339 ], - [ 0.914477900370762, -0.368657988534049, -0.166797653531193 ], - [ 0.28746218785413, 0.946817914340553, -0.144572914606861 ], - [ -0.098900669929334, 0.99509705928004, 0.000707177308311 ], - [ -0.986068201425202, -0.161328561779779, 0.04052896855503 ], - [ 0.098900669927371, -0.995097059280236, -0.000707177307522 ], - [ 0.98606820142538, 0.161328561777872, -0.040528968558297 ], - [ 0.815232440848265, 0.131832381174928, -0.563929331266187 ], - [ -0.113644567080339, 0.787251605615581, -0.606069155978764 ], - [ -0.76050444170531, -0.010569874890521, -0.649246695710724 ], - [ 0.179848227912241, -0.83248278540524, -0.52404868754798 ], - [ -0.92768989180951, -0.047241289482188, -0.370350813692261 ], - [ -0.062273759773745, -0.944434685686409, -0.322746190242513 ], - [ 0.939840260740896, -0.044569841802216, -0.33869427732427 ], - [ -0.00824273878155, 0.93408705946015, -0.356950168239866 ], - [ -0.287462187854123, -0.94681791433958, 0.144572914613243 ], - [ 0.973220809307003, -0.194260751794934, 0.122898399679932 ], - [ 0.376203572664097, 0.908865964003331, 0.180093118666611 ], - [ -0.914477900372906, 0.368657988531957, 0.16679765352406 ], - [ -0.198886780630987, -0.360548603140065, -0.91128960998356 ], - [ 0.258871339064112, -0.204230077444254, 
-0.944073993539843 ], - [ 0.200282913924527, 0.228346161951352, -0.952756414154209 ], - [ -0.276790129284401, 0.235256466920766, -0.931687511509456 ], - [ 0.496057991258595, -0.69967011370777, -0.514186932246089 ], - [ 0.710317361512836, 0.479530914628761, -0.515266288290576 ], - [ -0.521404384476695, 0.728039165452453, -0.445080264015126 ], - [ -0.738099355384499, -0.407803273209131, -0.537499611107679 ], - [ -0.815232440849446, -0.131832381170712, 0.563929331265466 ], - [ 0.113644567080183, -0.787251605613717, 0.606069155981215 ], - [ 0.760504441709935, 0.010569874889864, 0.649246695705317 ], - [ -0.179848227916839, 0.832482785402468, 0.524048687550806 ], - [ 0.644474509638734, -0.354062227985804, 0.677711254989647 ], - [ 0.220535080413518, 0.73154775414237, 0.645137320046095 ], - [ -0.683629784685343, 0.219967331325312, 0.695891292258132 ], - [ -0.256574099500943, -0.681472791069379, 0.6853937310024 ], - [ 0.00824273878347, -0.934087059459458, 0.356950168241634 ], - [ 0.927689891812602, 0.047241289479133, 0.370350813684907 ], - [ 0.062273759768788, 0.944434685686016, 0.322746190244617 ], - [ -0.939840260741931, 0.04456984180196, 0.338694277321433 ], - [ -0.684400344460716, 0.714441044251305, 0.145499700312953 ], - [ -0.660710489482671, -0.731357715034246, 0.169048932997096 ], - [ 0.773611287955743, -0.615222357858877, 0.151746583282855 ], - [ 0.691014301313431, 0.702815226889319, 0.168967429502926 ], - [ 0.823586023578098, -0.394634588904438, 0.407393670798948 ], - [ 0.494068620358303, 0.708839608416629, 0.503430837272612 ], - [ -0.75887513050105, 0.450605887274021, 0.470173234734822 ], - [ -0.431499072601357, -0.787935048711447, 0.439279989706176 ], - [ 0.488574873974618, 0.006884557979146, 0.872494811091801 ], - [ -0.055542048725634, 0.584131720247444, 0.80975626840681 ], - [ -0.52681210746758, -0.049707819615904, 0.848527039106356 ], - [ -0.004245864106237, -0.488654622389235, 0.872466980839748 ], - [ -0.49406862035774, -0.708839608420214, -0.503430837268118 ], - [ 0.758875130496518, -0.450605887275878, -0.470173234740358 ], - [ 0.431499072601226, 0.787935048714215, -0.43927998970134 ], - [ -0.823586023577754, 0.394634588903444, -0.407393670800605 ], - [ -0.05223814787874, -0.056184830047506, -0.997052877624217 ], - [ 0.052238147881538, 0.05618483004769, 0.99705287762406 ], - ]), - #degree 14 - np.array([ - [ -0.625520988160254, -0.7673610045544, 0.14099851793647 ], - [ -0.76724274137005, 0.141111638293461, -0.625640536852518 ], - [ 0.625492928633992, 0.767336602497947, 0.141255565185161 ], - [ 0.141259978285753, -0.625497336538309, -0.767332196977417 ], - [ 0.767217177597722, 0.141142445065633, 0.625664936367606 ], - [ 0.140994104732436, 0.625522897885037, 0.76736025871308 ], - [ 0.767367121351956, -0.141003470846495, -0.625512367805189 ], - [ -0.141107421738627, 0.625630007825311, -0.767252102531351 ], - [ -0.767341557579575, -0.14125061251247, 0.625487968290521 ], - [ -0.141146661279494, -0.625655569171042, 0.767224040795719 ], - [ -0.625631916042134, 0.767247698300479, -0.141122907715456 ], - [ 0.625659975568208, -0.76722329624468, -0.141131175405856 ], - [ 0.557188048071509, -0.044753456478336, -0.829179478291342 ], - [ -0.044855878114542, -0.829283214592494, 0.55702540354432 ], - [ -0.557023176560509, 0.04489683751235, -0.829282493940292 ], - [ -0.82927698135379, 0.557030331904205, -0.044909882380585 ], - [ 0.04500608513203, -0.829178759033645, -0.557168769645708 ], - [ -0.829184990273806, -0.557180524667655, 0.044744997884746 ], - [ 0.044745112627007, 0.829186887034329, 
0.557177692721376 ], - [ 0.829285903726696, -0.557022572534015, -0.044841315410966 ], - [ -0.044895319644503, 0.829275086591665, -0.557034326619468 ], - [ 0.829176067900875, 0.557172765298515, 0.045006199906779 ], - [ 0.55701504778817, 0.04485436035495, 0.829290252501915 ], - [ -0.55717991929916, -0.044997741388974, 0.829171719729799 ], - [ -0.256065565410913, 0.860770382492113, 0.439891776275906 ], - [ 0.860817193749452, 0.439942893453515, -0.255820267854343 ], - [ 0.255978113844099, -0.860846435024418, 0.439793838677361 ], - [ 0.439780410927513, -0.2559784640674, 0.860853190792787 ], - [ -0.86089686693789, 0.439742722239698, 0.255896312466096 ], - [ 0.439905203705212, 0.256058129693811, -0.860765732339981 ], - [ -0.860766305814323, -0.439898638597135, -0.256067480432698 ], - [ -0.439951565414546, 0.255829619022141, 0.860809982586329 ], - [ 0.860845979002674, -0.439786977094207, 0.255991435820162 ], - [ -0.439734049218326, -0.255909284650278, -0.860897441039197 ], - [ -0.255822182689152, -0.860816739802146, -0.439942668220034 ], - [ 0.255909634255987, 0.860892792334745, -0.43974294673258 ], - [ -0.214847470746312, -0.032398468989078, 0.976110087808274 ], - [ -0.032361689068532, 0.976149872834738, -0.214672184610297 ], - [ 0.214653391258715, 0.032229687143749, 0.976158372851326 ], - [ 0.976156890684175, -0.214662164023714, -0.032216146673044 ], - [ 0.032184871976157, 0.976118589466139, 0.214840948877335 ], - [ 0.976111569264229, 0.214838964338531, 0.032410241444204 ], - [ 0.032404387317597, -0.976112740167589, -0.214834527404447 ], - [ -0.97615046971745, 0.214667748038907, -0.032373112644706 ], - [ -0.032227570225248, -0.976155722133346, 0.214665763138194 ], - [ -0.976117990230955, -0.214844548351237, 0.032179017872419 ], - [ -0.214659241112501, 0.032359572226461, -0.976152789418913 ], - [ 0.21485332060012, -0.032190790383644, -0.976115671240647 ], - [ -0.531657953418075, -0.827333953094149, -0.181268724894593 ], - [ -0.827232187812406, -0.181173291587112, -0.531848799813059 ], - [ 0.531693969367769, 0.827365274479422, -0.181019958909332 ], - [ -0.181013937184585, -0.531695771783126, -0.827365433658478 ], - [ 0.82726500032424, -0.181115392520565, 0.53181748168959 ], - [ -0.181274746488222, 0.531662962384716, 0.827329414872902 ], - [ 0.827337912964399, 0.181265235803751, -0.531652980863198 ], - [ 0.181178425057189, 0.531838819184424, -0.827237480233043 ], - [ -0.827370725476244, 0.181023448305968, 0.53168429898609 ], - [ 0.181110258615433, -0.531806009787378, 0.82727349901848 ], - [ -0.531843826870477, 0.827237640803175, 0.181162991414264 ], - [ 0.531807810920692, -0.827268962188108, 0.181125692390543 ], - [ -0.660052978431453, -0.64107030142389, -0.391610692264717 ], - [ -0.640943278162024, -0.391490626360013, -0.660247532105318 ], - [ 0.660130816198753, 0.641137993287446, -0.391368597447617 ], - [ -0.391366127194665, -0.660129990434437, -0.641140351415881 ], - [ 0.64101419265168, -0.391488664009925, 0.660179847292265 ], - [ -0.391613162232706, 0.660059082674287, 0.641062507518011 ], - [ 0.641074532205542, 0.391604771858404, -0.660052381872206 ], - [ 0.391493582576226, 0.660240832413506, -0.64094837390819 ], - [ -0.641145446695277, 0.391374518513393, 0.660120066685087 ], - [ 0.391485706851, -0.660169924653076, 0.641026217806203 ], - [ -0.660246935065265, 0.640950733115446, 0.391479427883123 ], - [ 0.660169097297863, -0.641018424977353, 0.391499861829449 ], - [ -0.887809544786451, -0.296234001309576, 0.352192601646022 ], - [ -0.296066792023988, 0.352356402512996, -0.887800326801429 ], - [ 
0.88773949759831, 0.296173084548554, 0.352420329142482 ], - [ 0.352416311777685, -0.887743835667625, -0.296164861904896 ], - [ 0.296002975181616, 0.352256528867817, 0.887861237217634 ], - [ 0.352196618754376, 0.887807646454613, 0.296234914611202 ], - [ 0.29624330876316, -0.35220289417087, -0.887802356017778 ], - [ -0.352357435214616, 0.887795037857324, -0.296081422255585 ], - [ -0.296179491920463, -0.352410037210515, 0.887741445601714 ], - [ -0.352255495317536, -0.887858848644387, 0.296011369549315 ], - [ -0.887793137385775, 0.296073200109863, -0.352369132285204 ], - [ 0.887863184573985, -0.29601228334985, -0.352243798503465 ], - [ -0.26223504413332, -0.963196832316083, -0.059030871962556 ], - [ -0.963146753177879, -0.05898607663367, -0.262428989645347 ], - [ 0.262246759182379, 0.963207020345643, -0.058812186281487 ], - [ -0.058802018270752, -0.262250202293796, -0.963206703695603 ], - [ 0.963157426232092, -0.058856981708733, 0.262418802676392 ], - [ -0.059041039930677, 0.26223953025181, 0.9631949877243 ], - [ 0.963198910461706, 0.05903143538266, -0.262227284091577 ], - [ 0.058993521775316, 0.262416743924601, -0.963149633700058 ], - [ -0.963209583515922, 0.058811622961071, 0.262237471059664 ], - [ 0.058849536426177, -0.26240607188442, 0.963161349671286 ], - [ -0.262421229412009, 0.963149318669471, 0.058978710569357 ], - [ 0.262409514362814, -0.96315950669899, 0.058864347675232 ], - [ -0.715507563967586, -0.551203770138786, 0.429212452859839 ], - [ -0.551069607492995, 0.429343180584727, -0.715532473744488 ], - [ 0.715422202362423, 0.551129535145429, 0.429450006237378 ], - [ 0.429450819917231, -0.715428271411138, -0.551121022769127 ], - [ 0.5509918383738, 0.429319279234025, 0.715606701005125 ], - [ 0.429211638866897, 0.7155060331503, 0.551206391097486 ], - [ 0.551211869430264, -0.429219462039715, -0.715497119774448 ], - [ -0.429341247317559, 0.715523561571074, -0.551082685436994 ], - [ -0.551134100310698, -0.42944299777979, 0.715422892513669 ], - [ -0.429321211466422, -0.715601323311163, 0.550997317108093 ], - [ -0.715522029029489, 0.551074173985694, -0.429354726001132 ], - [ 0.71560739063481, -0.550999938995098, -0.429307733096245 ], - ]), - #degree 15 - np.array([ - [ 0.854403279867469, -0.505354134007206, 0.120881076242474 ], - [ -0.50543491755569, 0.120816219805996, 0.85436466754382 ], - [ -0.854386776665562, 0.505324765203946, 0.121120260611542 ], - [ 0.120833358636621, 0.854397789834015, -0.505374827397788 ], - [ 0.505397909754575, 0.121184507524897, -0.854334400543285 ], - [ 0.121167891781777, -0.854359592169892, 0.505359307095908 ], - [ 0.505550243990606, -0.121029099883223, 0.85426629793203 ], - [ -0.120901230058257, -0.8542712308899, -0.505572503845152 ], - [ -0.505512743080475, -0.120971801893292, -0.854296605243135 ], - [ -0.121100221709937, 0.854233310670545, 0.505588950870808 ], - [ 0.854228086018077, 0.50562286374044, -0.120995440909188 ], - [ -0.854244251915001, -0.505593339178042, -0.121004683582819 ], - [ -0.264987898778375, 0.883813698575362, -0.385557725524417 ], - [ 0.883849543661418, -0.385514772323787, -0.264930829632268 ], - [ 0.26493557758679, -0.883717531264112, -0.385814028600849 ], - [ -0.385585969828214, -0.26499188213212, 0.883800182323873 ], - [ -0.883729220902574, -0.385857204128221, 0.264833687708496 ], - [ -0.385785570894326, 0.264874341279828, -0.883748310675226 ], - [ -0.88388533425828, 0.385579930632135, -0.264716514363662 ], - [ 0.385705565188833, 0.2647713629504, 0.883814088111154 ], - [ 0.883764779677347, 0.385791993289806, 0.26481003023928 ], - [ 
0.385667159435638, -0.264653128949093, -0.883866258814251 ], - [ -0.26465693400637, -0.883897033692944, 0.385594010730408 ], - [ 0.264708980625706, 0.88380076582133, 0.385778902883153 ], - [ -0.973352164893031, 0.228026239253951, 0.024281624940474 ], - [ 0.228112345886926, 0.024344549147457, -0.973330416960638 ], - [ 0.973355442809202, -0.228032180463189, 0.024093705130315 ], - [ 0.02435678809852, -0.973342833359315, 0.228058053182924 ], - [ -0.228119646411613, 0.024031592823588, 0.973336483168797 ], - [ 0.024019836271027, 0.973350319061891, -0.228061842156086 ], - [ -0.228243300844141, -0.024101340294447, -0.97330576953791 ], - [ -0.024325527828938, 0.973285859331229, 0.228304412400888 ], - [ 0.228251100231509, -0.024273911824481, 0.973299651930403 ], - [ -0.024050630999231, -0.973293331950969, -0.228301680082122 ], - [ -0.973284068418876, -0.228330589256741, -0.024150862752081 ], - [ 0.973280766897442, 0.228336792285628, -0.024225153791815 ], - [ 0.176494164360597, -0.643915961757687, 0.744460908403072 ], - [ -0.643955474734765, 0.744429879896195, 0.176480878502065 ], - [ -0.176394545795758, 0.643730246363376, 0.744645106161623 ], - [ 0.744482176984942, 0.176540768465681, -0.643878595094843 ], - [ 0.643722173544997, 0.744675565408302, -0.176295392935637 ], - [ 0.7446232602985, -0.176308754645389, 0.643779017410342 ], - [ 0.643979646991902, -0.744473128172097, 0.176210032886433 ], - [ -0.744568815199564, -0.17638194509082, -0.643821938798528 ], - [ -0.643745300418696, -0.744632317149074, -0.176393595252333 ], - [ -0.744536607308663, 0.176149174255325, 0.643922905933989 ], - [ 0.17619570702358, 0.643971118562494, -0.744483895919737 ], - [ -0.176295790346183, -0.643784799068587, -0.744621331143847 ], - [ -0.538268091669357, -0.714443097207928, 0.447033021534844 ], - [ -0.714408571342234, 0.447040121797618, -0.538308018420605 ], - [ 0.538328211219438, 0.714331668550684, 0.447138685768607 ], - [ 0.447120197809599, -0.538252620284606, -0.714400199795228 ], - [ 0.714269338389924, 0.44713116789972, 0.538417153263762 ], - [ 0.447050760030329, 0.538390645652783, 0.714339646547694 ], - [ 0.714335378036911, -0.44690804426239, -0.538514779424324 ], - [ -0.44721697306067, 0.538430741435079, -0.714205373603506 ], - [ -0.714197054202002, -0.447264479166707, 0.53840231560137 ], - [ -0.446955147518652, -0.538568616287532, 0.714265316011294 ], - [ -0.53855313587161, 0.7142734853459, -0.446960745451628 ], - [ 0.538492423492547, -0.714162749879013, -0.44721077416177 ], - [ -0.854262559171519, -0.121196786481334, 0.505516388403308 ], - [ -0.121135225917774, 0.505562840819898, -0.854243800693903 ], - [ 0.854330941426722, 0.121071937454801, 0.505430735592792 ], - [ 0.50560092757097, -0.854220500665318, -0.12114057240441 ], - [ 0.120980483865727, 0.505385380088754, 0.854370727562784 ], - [ 0.505347417325379, 0.854380006035553, 0.121073502837149 ], - [ 0.121020187472529, -0.505348640639146, -0.854386836057462 ], - [ -0.505616684568734, 0.854250931602746, -0.120859894677939 ], - [ -0.120865731027902, -0.505598482368904, 0.854260879175297 ], - [ -0.50533076030876, -0.85441039802093, 0.120928468275617 ], - [ -0.854368086396689, 0.120928425096259, -0.505402304061426 ], - [ 0.854300186149896, -0.120804448918389, -0.50554671106217 ], - [ 0.744463297304691, 0.643945879165515, 0.176374895243003 ], - [ 0.643874092586096, 0.176352654639277, 0.744530653565125 ], - [ -0.744439438351756, -0.643989673955645, 0.176315689786883 ], - [ 0.176272538481235, 0.744499406704124, 0.643932159155441 ], - [ -0.643927227994187, 
0.176337348613599, -0.744488323972679 ], - [ 0.176417156092134, -0.744445359352701, -0.643955040336351 ], - [ -0.643773276436742, -0.176537581494637, 0.74457400630557 ], - [ -0.176186289075531, -0.744659935538757, 0.643770123526409 ], - [ 0.643827610309512, -0.176153056179147, -0.744618096074686 ], - [ -0.176503857743247, 0.744606426388638, -0.643745025598251 ], - [ 0.744642563797528, -0.643711305756388, -0.176474380640507 ], - [ -0.744666089545738, 0.643755051780831, -0.176215346628265 ], - [ -0.228336071531986, 0.973285242484899, -0.024051511354805 ], - [ 0.97330645157172, -0.02400727784605, -0.228250305452786 ], - [ 0.228333198106367, -0.973279105669999, -0.024325564920999 ], - [ -0.024087022235214, -0.228306334617004, 0.973291340212985 ], - [ -0.973298936142401, -0.024368394387641, 0.228244084828147 ], - [ -0.024288551979359, 0.228299553239789, -0.97328792257649 ], - [ -0.973337174231582, 0.024064219708824, -0.228113258248362 ], - [ 0.024218299052141, 0.228063963157171, 0.973344904286279 ], - [ 0.973329707418057, 0.024311563193311, 0.228118891266323 ], - [ 0.024157446706033, -0.228056100974521, -0.97334825862943 ], - [ -0.228026586903388, -0.973357047279715, 0.0240818225246 ], - [ 0.228030283046768, 0.973350913461235, 0.024293811512186 ], - [ 0.714188221641478, 0.538577770578359, -0.447067298186109 ], - [ 0.538527478643698, -0.447091804432198, 0.714210804423472 ], - [ -0.714248768779612, -0.538467220657919, -0.447103733571689 ], - [ -0.447161335031565, 0.714165750281327, 0.538529499264337 ], - [ -0.538389584312319, -0.447080411787951, -0.714321888856505 ], - [ -0.447010969457209, -0.714305113088417, -0.538469496444014 ], - [ -0.538432015415055, 0.446913681352591, 0.714394237235966 ], - [ 0.447232748299163, -0.714300498031757, 0.538291433482237 ], - [ 0.538293039926151, 0.447257878415203, -0.714283552493402 ], - [ 0.446938450981038, 0.714439434305571, -0.538351479745163 ], - [ 0.714416613789271, -0.538354159540915, 0.44697170027516 ], - [ -0.714357130519037, 0.538243224338143, 0.447200314770336 ], - [ -0.88382262479637, 0.264636203965444, 0.385778754532718 ], - [ 0.264703577640278, 0.38583706665855, -0.88377699335113 ], - [ 0.883874390523727, -0.264731681104579, 0.385594604209984 ], - [ 0.385842662030848, -0.883779863837697, 0.264685837233447 ], - [ -0.26482277877838, 0.385535295796556, 0.883872972510847 ], - [ 0.385530295763018, 0.883900556837248, -0.264737977388365 ], - [ -0.264823043649479, -0.385616295049092, -0.883837557781314 ], - [ -0.385806659371906, 0.883713819704025, 0.264958688191976 ], - [ 0.264941401779006, -0.385755452567231, 0.883741356075422 ], - [ -0.385565266715756, -0.883834666939721, -0.264906977291953 ], - [ -0.883791950673863, -0.264913750483099, -0.38565851828926 ], - [ 0.883739852971877, 0.26500888802554, -0.385712537437807 ], - ]), - #degree 16 - np.array([ - [ 0.938311825813856, -0.17507925577492, -0.298191501782276 ], - [ -0.175109632245629, -0.298282531121024, 0.938277223598034 ], - [ -0.938311652301346, 0.175147761450008, -0.298151815044902 ], - [ -0.298182757815715, 0.938327057553728, -0.175012502421904 ], - [ 0.175097712410131, -0.298058347845738, -0.938350687316958 ], - [ -0.298185477757762, -0.938323612741539, 0.175026336949732 ], - [ 0.175121225661409, 0.298070999742225, 0.938342280532811 ], - [ 0.298159022282375, -0.938297484887434, -0.175211378870018 ], - [ -0.175136638135111, 0.298288500226525, -0.938270285480331 ], - [ 0.298175056505462, 0.938292628074833, 0.175210101816042 ], - [ 0.938309721676758, 0.175091137054814, 0.298191146635404 ], - [ 
-0.938307020714082, -0.175144295988174, 0.298168426332282 ], - [ 0.318319389865683, -0.189552295411868, 0.928839433561922 ], - [ -0.189466106261457, 0.928833946336168, 0.318386706242113 ], - [ -0.318293314473071, 0.18936285961738, 0.928887007853633 ], - [ 0.928852943553566, 0.318350700348959, -0.189433473386317 ], - [ 0.189441607397533, 0.928892798895752, -0.318229548512164 ], - [ 0.928866264406345, -0.318313837307129, 0.189430102746667 ], - [ 0.18945182591494, -0.928887156552102, 0.318239934719146 ], - [ -0.928865750332054, -0.318289122686796, -0.189474146625178 ], - [ -0.189481041982253, -0.928834132900175, -0.318377273511944 ], - [ -0.928863874908086, 0.318277395441538, 0.189503038080361 ], - [ 0.318275484124591, 0.18957282380822, -0.92885028970154 ], - [ -0.318345902583112, -0.189353418017315, -0.928870911049379 ], - [ 0.415270907116288, 0.626546860524453, 0.659537038588256 ], - [ 0.626612654947257, 0.659451415891007, 0.415307609777736 ], - [ -0.415241828112963, -0.626676394380167, 0.659432271664102 ], - [ 0.659494217922308, 0.41519684716212, 0.626641009377521 ], - [ -0.626618996427069, 0.659521812332477, -0.415186238180433 ], - [ 0.659478785687794, -0.415192215022902, -0.626660319321504 ], - [ -0.626602233185435, -0.65952877581014, 0.415200475969626 ], - [ -0.659472693683341, -0.415326178073293, 0.626577953724091 ], - [ 0.626606052873236, -0.65944383659479, -0.415329605108723 ], - [ -0.659498633823103, 0.415315781516604, -0.626557542136963 ], - [ 0.415250963158486, -0.626542854390271, -0.659553401331872 ], - [ -0.415267233073285, 0.626674158557439, -0.659418398387537 ], - [ 0.081476869754028, 0.884767493032223, 0.458855100188022 ], - [ 0.88480215017059, 0.458780629597686, 0.081519868495058 ], - [ -0.08148097265168, -0.88484396510395, 0.458706887363658 ], - [ 0.458778051156021, 0.08139667888042, 0.884814828336823 ], - [ -0.884809515892886, 0.45878451702782, -0.081417980329578 ], - [ 0.458732327572868, -0.081386172952098, -0.884839500978449 ], - [ -0.884806469025575, -0.458784799888689, 0.081449492089205 ], - [ -0.458770768146743, -0.081567624478124, 0.884802862185155 ], - [ 0.884821176813587, -0.458741101923224, -0.081535798692882 ], - [ -0.458810899116744, 0.081573887356361, -0.884781475706435 ], - [ 0.081470600041761, -0.884777903494754, -0.458836139396478 ], - [ -0.081497545017818, 0.88485010959699, -0.458692090298344 ], - [ -0.722581612146772, 0.69116944690793, -0.012673178305347 ], - [ 0.691146231887784, -0.012722477090735, -0.722602950951623 ], - [ 0.722589739174094, -0.691157232223568, -0.012874361552029 ], - [ -0.012719991090033, -0.722649829139429, 0.691097262526357 ], - [ -0.6911640879369, -0.012832809701898, 0.722583920760425 ], - [ -0.012740894622282, 0.722658126679523, -0.691088200990487 ], - [ -0.691184825451665, 0.012806932405418, -0.722564543516851 ], - [ 0.01278690708865, 0.722509435119358, 0.69124280189425 ], - [ 0.691166758903022, 0.012679269543203, 0.722584076430794 ], - [ 0.012798402734516, -0.722517774658893, -0.691233872281593 ], - [ -0.722587198973115, -0.691163495604889, 0.01267920436853 ], - [ 0.722578352800658, 0.691170335944389, 0.012809792129789 ], - [ 0.560117573995459, 0.806868022890413, 0.187702682288658 ], - [ 0.806883478716379, 0.18757144265397, 0.560139273462648 ], - [ -0.560134093540899, -0.806891631206385, 0.18755184014617 ], - [ 0.187652131237362, 0.560025149416763, 0.806943932168034 ], - [ -0.806885441512999, 0.18768574188158, -0.560098157976558 ], - [ 0.187630222901067, -0.560004720839195, -0.806963203679022 ], - [ -0.806874677594158, 
-0.187697516958668, 0.560109718523856 ], - [ -0.187614808802038, -0.560215760321792, 0.806820293129301 ], - [ 0.806892320702248, -0.18757613331143, -0.560124965524367 ], - [ -0.187636487681617, 0.56022497423671, -0.806808853900342 ], - [ 0.56009182108872, -0.806880865199227, -0.18772432267788 ], - [ -0.560129384097476, 0.806896245083186, -0.187546054987136 ], - [ -0.099485634221032, -0.358895129517995, -0.928060824834181 ], - [ -0.359050794288811, -0.927994608087772, -0.099541621850345 ], - [ 0.099434389660615, 0.359143761945999, -0.927970129049474 ], - [ -0.928019026720099, -0.09942019096838, -0.359021324816913 ], - [ 0.358990815531993, -0.928035748444477, 0.099374262124424 ], - [ -0.928007207203491, 0.099420259668564, 0.359051856067911 ], - [ 0.359002982562248, 0.928031348467288, -0.099371398165657 ], - [ 0.928017938922059, 0.099510949379702, -0.358998991631458 ], - [ -0.359042863742385, 0.92799619207621, 0.099555459356689 ], - [ 0.928013665632084, -0.099489549105096, 0.359015969030581 ], - [ -0.099451875312545, 0.358926751348054, 0.928052213867059 ], - [ 0.099465503317397, -0.359120063291987, 0.927975966170987 ], - [ 0.787833199437607, 0.557450082325166, -0.261855409681697 ], - [ 0.557405388687852, -0.261977292048617, 0.787824302184578 ], - [ -0.787861477876718, -0.557364111687839, -0.26195331393273 ], - [ -0.261861028070608, 0.787802657602316, 0.557490604990374 ], - [ -0.557427204478003, -0.261835304855293, -0.787856068605919 ], - [ -0.261850091868655, -0.787804146924511, -0.557493637162722 ], - [ -0.557398047481063, 0.261806624190095, 0.787886227950765 ], - [ 0.26192814680606, -0.787893374500188, 0.557330849971047 ], - [ 0.557399834363592, 0.261935778537884, -0.787842035292097 ], - [ 0.261909535328364, 0.787908371337394, -0.55731839524686 ], - [ 0.787858967733566, -0.557444321449493, 0.261790136264747 ], - [ -0.787856023927293, 0.557369329488324, 0.261958615256708 ], - [ -0.507282732168614, -0.717049946047353, -0.478020506377115 ], - [ -0.71706431400176, -0.477906271006066, -0.507370048109131 ], - [ 0.507331753192767, 0.71711626280308, -0.477868975583995 ], - [ -0.477891616916408, -0.50725750267016, -0.717153699332196 ], - [ 0.717108744361459, -0.47798851765986, 0.507229756368514 ], - [ -0.477913676926975, 0.507235340412842, 0.717154674280526 ], - [ 0.717103637758922, 0.477942092943937, -0.507280719627002 ], - [ 0.477949791330649, 0.507362311781387, -0.7170407809538 ], - [ -0.717073605236621, 0.477889924387354, 0.507372313830785 ], - [ 0.477966885504482, -0.507396895429057, 0.717004914118516 ], - [ -0.507289494490155, 0.717039874321013, 0.478028437871252 ], - [ 0.507342973335893, -0.717147616692481, 0.47781000751239 ], - [ -0.469705390085658, -0.33624876406351, 0.816280353304085 ], - [ -0.336180458859188, 0.816354017519737, -0.4696262526314 ], - [ 0.469729267279509, 0.336087427571651, 0.816333054879763 ], - [ 0.816299320102214, -0.469688480988201, -0.336226338688182 ], - [ 0.336166188592078, 0.816261044646191, 0.469798042397566 ], - [ 0.816308187841148, 0.469684487990511, 0.336210386818421 ], - [ 0.336161196424763, -0.816254520485116, -0.469812949806501 ], - [ -0.81631474754769, 0.469749196906201, -0.336104038866138 ], - [ -0.336166711539314, -0.816355082377068, 0.469634242288587 ], - [ -0.816302029136435, -0.469752338316787, 0.33613053695499 ], - [ -0.469725914764869, 0.336254309274991, -0.816266258332602 ], - [ 0.469715709020586, -0.336082571137018, -0.816342855715183 ], - [ 0.220975783117544, 0.56198189964132, -0.797085972622227 ], - [ 0.56189854338611, -0.797188442616427, 
0.220818056099052 ], - [ -0.22090980871236, -0.561819935638318, -0.79721842448229 ], - [ -0.7971433029262, 0.220906560624346, 0.561927794358875 ], - [ -0.561911046458035, -0.797113560704263, -0.221056434445611 ], - [ -0.797166608166814, -0.22092145416411, -0.561888876837612 ], - [ -0.561903189214556, 0.797117195899141, 0.221063298519679 ], - [ 0.797149071206196, -0.221019917708182, 0.56187503437274 ], - [ 0.56187154518738, 0.797190222992272, -0.220880318440273 ], - [ 0.797151311779493, 0.220966501329483, -0.561892864715723 ], - [ 0.220989674227739, -0.56195922843892, 0.79709810529009 ], - [ -0.220934514736207, 0.561821479644177, 0.797210489901321 ], - [ -0.025586321091663, 0.991400659992677, -0.128335776535923 ], - [ 0.991391023154192, -0.128410509654448, -0.025584765380375 ], - [ 0.0255553186148, -0.991378867053065, -0.128510185009118 ], - [ -0.128427355734578, -0.025687167640031, 0.991386193023514 ], - [ -0.99138841235829, -0.128432289728374, 0.02557660643704 ], - [ -0.128471046150121, 0.025696657527584, -0.991380286314492 ], - [ -0.991388770492313, 0.128433611757029, -0.025556077805202 ], - [ 0.128434643068809, 0.02546732329508, 0.991390920829907 ], - [ 0.991386054149539, 0.128448345934336, 0.025587380962899 ], - [ 0.128392989158359, -0.02544975216483, -0.991396767419448 ], - [ -0.025589705051665, -0.991398220731893, 0.128353943940207 ], - [ 0.025571746935955, 0.991376476866512, 0.128525354986419 ], - ]), - #degree 17 - np.array([ - [ -0.053895316433783, -0.14060350667641, -0.988597971258691 ], - [ -0.140602010826056, -0.988598302765153, -0.053893137981829 ], - [ 0.05389273741214, 0.140602992486377, -0.988598184986247 ], - [ -0.988598098647216, -0.053895299659618, -0.140602617421263 ], - [ 0.140604478516356, -0.988597884351918, 0.053894375180116 ], - [ -0.98859806586327, 0.053892813420617, 0.140603800919506 ], - [ 0.14060181241573, 0.988598276619905, -0.053894135205652 ], - [ 0.988598002509875, 0.053895652551635, -0.140603158106489 ], - [ -0.140604846635692, 0.988597850035353, 0.053894044272357 ], - [ 0.988598167360928, -0.0538924507278, 0.140603226297146 ], - [ -0.053892835910884, 0.140602751584219, 0.988598213878838 ], - [ 0.053896097450443, -0.140603793852246, 0.988597887836084 ], - [ -0.712137820619482, 0.484725955627139, 0.50783902211694 ], - [ 0.484727142303201, 0.507838589157962, -0.71213732164283 ], - [ 0.712137878427749, -0.484726895412166, 0.507838044038163 ], - [ 0.507839760738435, -0.712137969376798, 0.484724963236905 ], - [ -0.484727642870466, 0.507838390853387, 0.712137122338588 ], - [ 0.507839607814555, 0.712136191976364, -0.48472773472555 ], - [ -0.484726768067281, -0.507839236872381, -0.712137114474401 ], - [ -0.507840112632748, 0.712136257912531, 0.48472710896699 ], - [ 0.48472854912246, -0.507840095411427, 0.712135289926112 ], - [ -0.507838861015403, -0.712137466171353, -0.484726645149226 ], - [ -0.712136671868904, -0.484727632936327, -0.507839032024349 ], - [ 0.712137765857364, 0.484726401555971, -0.507838673275561 ], - [ -0.703005448039525, 0.261790111709517, 0.66124827216248 ], - [ 0.26179085361446, 0.661247136036676, -0.7030062404041 ], - [ 0.703006433944545, -0.261790569085573, 0.661247042919986 ], - [ 0.661249487589413, -0.70300443378585, 0.261789765346499 ], - [ -0.261791711051733, 0.661247232073399, 0.703005830772316 ], - [ 0.661247423359215, 0.703005908959219, -0.261791017930756 ], - [ -0.261791042135151, -0.661248438610085, -0.703004944999334 ], - [ -0.661249044674904, 0.703004424763402, 0.261790908321135 ], - [ 0.26179224626734, -0.661248916662998, 
0.703004046934519 ], - [ -0.661247254530942, -0.703006260525483, -0.261790500280794 ], - [ -0.70300546948428, -0.261791326803081, -0.66124776830313 ], - [ 0.703005158463527, 0.261791025358218, -0.661248218308045 ], - [ 0.062800447246381, 0.786218819998244, -0.614748786827777 ], - [ 0.786220043108977, -0.614747449388309, 0.062798226760693 ], - [ -0.062799502252198, -0.786219239565021, -0.614748346768559 ], - [ -0.61474770709614, 0.062799571514381, 0.786219734194995 ], - [ -0.786218534519124, -0.614749234835089, -0.062799635733612 ], - [ -0.61474933069654, -0.062799956628617, -0.786218433932708 ], - [ -0.786219538571286, 0.614747943528109, 0.062799706183351 ], - [ 0.614749395150454, -0.062799770458141, 0.786218398406292 ], - [ 0.786217798458002, 0.614750179051967, -0.062799607828601 ], - [ 0.614747800214019, 0.062800058129605, -0.786219622517107 ], - [ 0.062800526363459, -0.786218909415802, 0.614748664386918 ], - [ -0.062801412397757, 0.786218712810953, 0.614748825315458 ], - [ 0.829543607739232, 0.321465368220585, 0.456637076783941 ], - [ 0.321463595502047, 0.456637380632479, 0.829544127443504 ], - [ -0.82954344503853, -0.321464459743646, 0.456638011903666 ], - [ 0.456635000556537, 0.829545159774775, 0.321464312422039 ], - [ -0.32146420025779, 0.456637459573867, -0.829543849634571 ], - [ 0.456637068558535, -0.829544000114897, -0.321464367374743 ], - [ -0.321462954195433, -0.456636421102337, 0.829544904150941 ], - [ -0.456636713034899, -0.829544190971737, 0.321464379883261 ], - [ 0.321462955106589, -0.456636688799517, -0.82954475643955 ], - [ -0.456637112396323, 0.829544098578271, -0.321464051017078 ], - [ 0.829544701976578, -0.321462298506758, -0.45663724997129 ], - [ -0.829544861446795, 0.321463589390512, -0.456636051515194 ], - [ -0.249500423448462, 0.954025094362385, -0.166089307379737 ], - [ 0.954025470855406, -0.166087567010738, -0.249500142371853 ], - [ 0.249500943484422, -0.954025029612664, -0.166088898102612 ], - [ -0.166086877408662, -0.249500137449683, 0.954025592196158 ], - [ -0.954024855383494, -0.166089937003122, 0.249500918108135 ], - [ -0.166090151998567, 0.249500107118379, -0.954025030047436 ], - [ -0.954025593688894, 0.166087862658579, -0.249499475879328 ], - [ 0.166089692874499, 0.249499687822531, 0.954025219633797 ], - [ 0.954024931419817, 0.166090647913648, 0.249500154118264 ], - [ 0.166087956122076, -0.249500002352048, -0.954025439732882 ], - [ -0.249499759982538, -0.954025225930409, 0.166089548307795 ], - [ 0.249498374708179, 0.954025720257113, 0.166088789826025 ], - [ 0.860787215766444, 0.418630333044569, -0.289471956203095 ], - [ 0.418631425510959, -0.289473932939102, 0.860786019707239 ], - [ -0.860786771736426, -0.418630687137034, -0.289472764506019 ], - [ -0.289474446503673, 0.860786651964262, 0.418629770347917 ], - [ -0.418629889262302, -0.289472838226515, -0.860787134978979 ], - [ -0.289472399693171, -0.860787556030986, -0.418629326729602 ], - [ -0.418629257388446, 0.289472594933548, 0.86078752409688 ], - [ 0.289473185156189, -0.860787817544824, 0.418628245872098 ], - [ 0.418628942424652, 0.289472756316209, -0.860787623002977 ], - [ 0.289473542762772, 0.860786603163078, -0.418630495610795 ], - [ 0.860788012261758, -0.418628676927689, 0.289471982755196 ], - [ -0.860787839361109, 0.418628479474405, 0.289472782452827 ], - [ -0.16910412959425, -0.878917391692094, 0.445991044680649 ], - [ -0.878918175066698, 0.445989574417742, -0.169103935637546 ], - [ 0.169102333488308, 0.878918206143462, 0.445990120651083 ], - [ 0.445989469034572, -0.169103982598388, 
-0.878918219506016 ], - [ 0.878916540171611, 0.445992443170219, 0.169104867014003 ], - [ 0.445990224108014, 0.169104198005711, 0.87891779491425 ], - [ 0.878918285318947, -0.445988968934561, -0.169104959479873 ], - [ -0.445991404870479, 0.169104651147508, -0.87891710857278 ], - [ -0.878917501870672, -0.445990557689425, 0.169104841318319 ], - [ -0.4459886381463, -0.169104239878744, 0.878918591622365 ], - [ -0.16910368279187, 0.878918081785917, -0.445989854117772 ], - [ 0.169104218306708, -0.878917936541782, -0.445989937303537 ], - [ 0.699159749436449, 0.682605593469953, 0.2126622875159 ], - [ 0.682603600110598, 0.212662432840561, 0.699161651389995 ], - [ -0.699161274801242, -0.682604056820824, 0.21266220498729 ], - [ 0.212660843412531, 0.699162101347243, 0.68260363441662 ], - [ -0.682604762820295, 0.212661985195386, -0.699160652373835 ], - [ 0.212662594223091, -0.699161699678606, -0.682603500372528 ], - [ -0.682602562402764, -0.212662368159073, 0.69916268419457 ], - [ -0.212661230546804, -0.699160950060591, 0.682604693019826 ], - [ 0.682603395227417, -0.21266176997579, -0.699162053042617 ], - [ -0.212661876797938, 0.699162212981133, -0.682603198129121 ], - [ 0.699162077611852, -0.682602648532129, -0.212664085934605 ], - [ -0.69916198280867, 0.682603270588311, -0.212662400948523 ], - [ -0.893254372981228, -0.172342415041176, -0.415204428116666 ], - [ -0.17234169138316, -0.415205105182347, -0.893254197886418 ], - [ 0.893254479512939, 0.172341865725213, -0.415204426937409 ], - [ -0.415203760144359, -0.893254961141014, -0.172340975855865 ], - [ 0.172343621116966, -0.415205895670259, 0.893253458129858 ], - [ -0.41520633444977, 0.89325370222595, 0.172341298859036 ], - [ 0.172340599563611, 0.415204277853847, -0.893254793098767 ], - [ 0.415204013461881, 0.893254652987798, -0.172341962739188 ], - [ -0.17234119462921, 0.415204328749048, 0.893254654632054 ], - [ 0.415206142771325, -0.893254015914866, 0.172340134782712 ], - [ -0.89325441464337, 0.172340274858685, 0.415205226823752 ], - [ 0.893254705389659, -0.172340550786628, 0.415204486793911 ], - [ -0.030119107290242, 0.538031004327585, -0.842386774444073 ], - [ 0.538032715913301, -0.84238566918646, -0.030119444819523 ], - [ 0.030118087641353, -0.538031590262412, -0.842386436664628 ], - [ -0.842386183209587, -0.030119347292783, 0.538031916577671 ], - [ -0.538030304105545, -0.842387233636645, 0.030118772718924 ], - [ -0.842387312723823, 0.030117901022641, -0.538030229076328 ], - [ -0.538031723308682, 0.842386324999934, -0.030118834084264 ], - [ 0.842387103098144, 0.030119789658303, 0.53803045155907 ], - [ 0.538029173032331, 0.842387968746045, 0.030118417588877 ], - [ 0.842386330532407, -0.030117441179441, -0.538031792619125 ], - [ -0.030117059116644, -0.538030739137179, 0.842387017049566 ], - [ 0.030118346812524, 0.538030710181824, 0.84238698950454 ], - [ 0.951905881051384, -0.301774121097739, 0.052986540323701 ], - [ -0.301774405343499, 0.052986798530194, 0.951905776566724 ], - [ -0.951905698855431, 0.3017745999, 0.05298708655653 ], - [ 0.052987612958423, 0.951905238066977, -0.301775960960479 ], - [ 0.301774562398047, 0.052986834212903, -0.951905724790833 ], - [ 0.052986766829206, -0.951905252173644, 0.301776065030379 ], - [ 0.301777293645336, -0.052987994727859, 0.951904794322844 ], - [ -0.052986574701187, -0.951905591847301, -0.301775027315507 ], - [ -0.301776941841401, -0.052986526316734, -0.951904987591586 ], - [ -0.052988794896112, 0.951905240176443, 0.301775746772478 ], - [ 0.95190486181224, 0.301777462059556, -0.052985823114433 ], - [ 
-0.951905018594348, -0.301776824324304, -0.052986638651951 ], - [ 0.553606146300219, 0.45440115669048, 0.697882385203248 ], - [ 0.454399619298559, 0.697882738116233, 0.55360696330584 ], - [ -0.553605814018882, -0.454401128197097, 0.697882667342941 ], - [ 0.697880969772289, 0.55360789125326, 0.454401204632875 ], - [ -0.454400796454347, 0.697882268834231, -0.553606588678677 ], - [ 0.697882520653254, -0.553605103541103, -0.454402219074583 ], - [ -0.454400180900896, -0.697882141032518, 0.553607255032936 ], - [ -0.697881635390884, -0.553607048847703, 0.454401208680482 ], - [ 0.454401271818775, -0.69788172578063, -0.553606883077631 ], - [ -0.69788232191633, 0.553606343141444, -0.454401014072625 ], - [ 0.553606029480292, -0.454400306845994, -0.697883031217504 ], - [ -0.553606960810672, 0.454400842716203, -0.697881943512493 ], - ]), - #degree 18 - np.array([ - [ -0.866376343641697, 0.223696804580225, 0.446488265017841 ], - [ 0.223696806212017, 0.446488265347841, -0.866376343050305 ], - [ 0.866376343115579, -0.223696806225293, 0.44648826521453 ], - [ 0.44648826367979, -0.866376344067145, 0.223696805603153 ], - [ -0.223696804286002, 0.446488265023544, 0.866376343714725 ], - [ 0.446488262849567, 0.866376344947941, -0.22369680384892 ], - [ -0.2236968055886, -0.446488263582537, -0.866376344121022 ], - [ -0.446488264810465, 0.866376343829741, 0.223696804265844 ], - [ 0.223696803801399, -0.446488262808774, 0.866376344981234 ], - [ -0.446488265014924, -0.866376343219064, -0.223696806222901 ], - [ -0.8663763449212, -0.223696804074408, -0.446488262788483 ], - [ 0.866376344172558, 0.223696805482214, -0.446488263535836 ], - [ -0.806844783933568, -0.461758079243128, -0.368484695601989 ], - [ -0.461758081774945, -0.368484698390835, -0.806844781210945 ], - [ 0.80684478133506, 0.461758081613586, -0.368484698321273 ], - [ -0.368484697968357, -0.806844781706494, -0.461758081246195 ], - [ 0.461758078945765, -0.368484695716793, 0.806844784051319 ], - [ -0.368484697702554, 0.806844783105505, 0.461758079013772 ], - [ 0.461758081217295, 0.368484698443883, -0.806844781505862 ], - [ 0.368484695481328, 0.806844784182969, -0.461758078903629 ], - [ -0.461758078967836, 0.36848469793606, 0.806844783025151 ], - [ 0.36848469846061, -0.806844781213308, 0.461758081715136 ], - [ -0.806844782774103, 0.461758079314706, 0.368484698051091 ], - [ 0.806844781709987, -0.461758081098712, 0.368484698145526 ], - [ -0.134842418858112, -0.040021669507572, 0.990058477084218 ], - [ -0.040021669975618, 0.990058477016276, -0.134842419218046 ], - [ 0.134842418981357, 0.040021669788942, 0.990058477056058 ], - [ 0.990058476924117, -0.134842420436143, -0.040021668151402 ], - [ 0.040021669677461, 0.990058477116921, 0.13484241856757 ], - [ 0.990058477286021, 0.134842417855397, 0.040021667893743 ], - [ 0.040021668037836, -0.990058476927568, -0.134842420444507 ], - [ -0.990058477115239, 0.134842418635191, -0.040021669491235 ], - [ -0.040021667837798, -0.990058477270892, 0.134842417983082 ], - [ -0.990058477042031, -0.134842419087429, 0.040021669778575 ], - [ -0.134842418122745, 0.040021667670891, -0.990058477258617 ], - [ 0.134842420378113, -0.040021667867212, -0.990058476943508 ], - [ 0.049794077313207, -0.279738156561879, -0.958784185115654 ], - [ -0.279738157129975, -0.958784185068512, 0.049794075029415 ], - [ -0.049794075085005, 0.279738157111834, -0.958784185070918 ], - [ -0.958784184460955, 0.049794077429761, -0.279738158785068 ], - [ 0.279738156684233, -0.958784185083851, -0.049794077238191 ], - [ -0.958784184306963, -0.049794076856858, 
0.279738159414846 ], - [ 0.279738159012938, 0.958784184390379, 0.049794077508567 ], - [ 0.958784185034086, -0.049794077337113, -0.279738156837192 ], - [ -0.279738159575992, 0.95878418424315, -0.049794077180261 ], - [ 0.958784185016722, 0.049794074909289, 0.279738157328865 ], - [ 0.049794077031905, 0.279738159517178, 0.958784184268015 ], - [ -0.049794077785621, -0.279738158888691, 0.958784184412241 ], - [ 0.205470768670777, -0.192901743072287, 0.959463746444603 ], - [ -0.192901744385898, 0.959463746331714, 0.205470767964668 ], - [ -0.205470768086678, 0.19290174463045, 0.959463746256418 ], - [ 0.959463745738735, 0.205470770340502, -0.192901744804646 ], - [ 0.19290174290288, 0.959463746447685, -0.205470768815433 ], - [ 0.95946374536634, -0.205470771694041, 0.192901745215149 ], - [ 0.192901744892626, -0.959463745685675, 0.205470770505673 ], - [ -0.959463746372533, -0.205470769064203, -0.192901743011692 ], - [ -0.192901745122065, -0.959463745348892, -0.205470771862908 ], - [ -0.959463746220563, 0.205470768260652, 0.192901744623478 ], - [ 0.205470771726444, 0.192901745460598, -0.959463745310053 ], - [ -0.205470770652743, -0.192901744949698, -0.959463745642705 ], - [ -0.278905392074019, 0.772004854137857, -0.571156972696319 ], - [ 0.772004854268172, -0.571156972466399, -0.278905392184152 ], - [ 0.278905392160238, -0.772004854249339, -0.571156972503532 ], - [ -0.571156971675365, -0.278905392078835, 0.772004854891456 ], - [ -0.772004854266013, -0.571156972533567, 0.278905392052577 ], - [ -0.571156970567139, 0.278905391582234, -0.77200485589077 ], - [ -0.772004855078365, 0.571156971421921, -0.27890539208049 ], - [ 0.57115697255647, 0.278905391952095, 0.77200485428537 ], - [ 0.772004855995114, 0.571156970376128, 0.278905391684575 ], - [ 0.571156972406906, -0.278905392034109, -0.772004854366394 ], - [ -0.278905391724262, -0.772004855939801, 0.571156970431511 ], - [ 0.278905392185009, 0.772004854970249, 0.571156971517017 ], - [ 0.912363859945553, -0.393198149041577, -0.113962286110494 ], - [ -0.393198146993911, -0.113962287143254, 0.912363860699027 ], - [ -0.912363860783175, 0.393198146824102, -0.113962287055462 ], - [ -0.113962288369946, 0.912363860162702, -0.393198147882843 ], - [ 0.393198149035756, -0.113962285681562, -0.912363860001638 ], - [ -0.113962285259825, -0.912363861029922, 0.393198146771995 ], - [ 0.393198147677495, 0.113962288066404, 0.912363860289116 ], - [ 0.113962286031302, -0.912363859929225, -0.393198149102416 ], - [ -0.39319814691232, 0.113962285090844, -0.912363860990554 ], - [ 0.113962287370876, 0.912363860762456, 0.393198146780759 ], - [ 0.9123638610767, 0.393198146641003, 0.113962285337288 ], - [ -0.912363860199082, -0.39319814784926, 0.113962288194565 ], - [ 0.848662336981788, -0.012909984472825, -0.528777429633226 ], - [ -0.01290998542466, -0.528777432035493, 0.848662335470524 ], - [ -0.848662335552813, 0.012909985214183, -0.528777431908562 ], - [ -0.52877743352546, 0.848662334565693, -0.012909983878239 ], - [ 0.012909984639682, -0.528777429281808, -0.848662337198209 ], - [ -0.528777430386149, -0.848662336521067, 0.012909983920382 ], - [ 0.012909983944448, 0.52877743344648, 0.848662334613896 ], - [ 0.528777429496827, -0.848662337067122, -0.012909984449961 ], - [ -0.012909983871647, 0.528777430419671, -0.848662336500922 ], - [ 0.528777432240356, 0.848662335344326, 0.012909985329594 ], - [ 0.848662336343559, 0.012909983743557, 0.528777430675359 ], - [ -0.848662334668199, -0.012909983655303, 0.528777433366386 ], - [ -0.69585113208617, 0.211164782101034, 0.686440555892948 ], - [ 
0.211164781099711, 0.68644055554441, -0.695851132733858 ], - [ 0.695851132741401, -0.211164781335084, 0.686440555464357 ], - [ 0.686440553889191, -0.695851134384757, 0.211164781040182 ], - [ -0.211164781930503, 0.686440555960218, 0.695851132071559 ], - [ 0.686440553598939, 0.695851134525998, -0.21116478151828 ], - [ -0.21116478087036, -0.686440553737906, -0.69585113458553 ], - [ -0.686440555776475, 0.695851132224679, 0.211164782023223 ], - [ 0.211164781498505, -0.686440553499572, 0.695851134630023 ], - [ -0.686440555292332, -0.695851132981083, -0.211164781104467 ], - [ -0.695851134744882, -0.211164781531153, -0.686440553373094 ], - [ 0.695851134495813, 0.211164781101486, -0.686440553757753 ], - [ -0.261718169263029, -0.581630098396244, 0.770201290908541 ], - [ -0.581630098290833, 0.770201291506502, -0.261718167737572 ], - [ 0.261718167857864, 0.581630098126426, 0.770201291589781 ], - [ 0.770201292726794, -0.261718168321791, -0.581630096412025 ], - [ 0.581630098450626, 0.770201290888376, 0.261718169201518 ], - [ 0.770201293263127, 0.261718168077775, 0.581630095811608 ], - [ 0.581630096213803, -0.770201292881278, -0.261718168307686 ], - [ -0.770201291051568, 0.26171816913029, -0.581630098266577 ], - [ -0.581630095716607, -0.770201293304276, 0.261718168167806 ], - [ -0.770201291705965, -0.261718167641045, 0.581630098070137 ], - [ -0.261718168076348, 0.581630095637746, -0.770201293394907 ], - [ 0.261718168494542, -0.581630096129926, -0.770201292881124 ], - [ 0.506136437086844, 0.700992881596967, 0.502428987025446 ], - [ 0.700992883568509, 0.502428985302136, 0.506136436066968 ], - [ -0.506136436123196, -0.700992883503112, 0.502428985336736 ], - [ 0.502428986281426, 0.506136435764488, 0.700992883085013 ], - [ -0.700992881635171, 0.502428986938925, -0.50613643711982 ], - [ 0.502428986199081, -0.506136436342322, -0.700992882726821 ], - [ -0.700992883178434, -0.502428986124795, 0.506136435790584 ], - [ -0.502428987099143, -0.506136437034413, 0.700992881582003 ], - [ 0.700992882671006, -0.502428986197914, -0.506136436420782 ], - [ -0.502428985277898, 0.506136435955935, -0.700992883666051 ], - [ 0.506136436300189, -0.700992882789867, -0.502428986153563 ], - [ -0.506136435852246, 0.700992882991532, -0.502428986323445 ], - [ -0.440748149182578, 0.602242024157979, 0.665616716534547 ], - [ 0.602242022260099, 0.66561671834234, -0.440748149045733 ], - [ 0.440748149100016, -0.602242022337998, 0.665616718235914 ], - [ 0.665616715634027, -0.440748149390786, 0.602242025000887 ], - [ -0.602242023804998, 0.665616716814167, 0.440748149242614 ], - [ 0.665616716586012, 0.440748149783209, -0.602242023661529 ], - [ -0.602242024940208, -0.665616715760932, -0.440748149282046 ], - [ -0.665616716462371, 0.440748149424008, 0.602242024061062 ], - [ 0.602242023852026, -0.665616716460744, 0.440748149712092 ], - [ -0.665616718266293, -0.44074814917988, -0.602242022245974 ], - [ -0.440748149655782, -0.602242023883194, -0.66561671646983 ], - [ 0.44074814928254, 0.602242025306933, -0.665616715428797 ], - [ -0.89025783677553, -0.293518547758229, 0.348264046639405 ], - [ -0.293518546899673, 0.348264043649922, -0.890257838228066 ], - [ 0.890257838178446, 0.293518546762444, 0.348264043892422 ], - [ 0.34826404446276, -0.890257837322353, -0.293518548682307 ], - [ 0.293518547908785, 0.348264046686625, 0.89025783670742 ], - [ 0.348264047178787, 0.890257836270502, 0.293518548650024 ], - [ 0.293518548932545, -0.348264044336184, -0.890257837289365 ], - [ -0.348264046901224, 0.890257836626627, -0.29351854789921 ], - [ -0.2935185489462, 
-0.348264047080228, 0.890257836211408 ], - [ -0.348264043786589, -0.890257838192766, 0.293518546844585 ], - [ -0.890257836357219, 0.293518548692058, -0.348264046921688 ], - [ 0.890257837186443, -0.293518548811097, -0.348264044701638 ], - [ 0.661971946522154, 0.031389655564508, 0.748871037990662 ], - [ 0.03138965429721, 0.748871040172752, 0.661971944113708 ], - [ -0.661971944196008, -0.031389654112142, 0.748871040107759 ], - [ 0.748871039164329, 0.661971945218693, 0.031389655052549 ], - [ -0.031389655768972, 0.748871037783183, -0.661971946747175 ], - [ 0.748871037422933, -0.661971947171443, -0.031389655416215 ], - [ -0.031389655026768, -0.748871039044161, 0.661971945355858 ], - [ -0.748871037767735, -0.661971946761125, 0.031389655843332 ], - [ 0.03138965553856, -0.748871037222178, -0.661971947392751 ], - [ -0.748871040238931, 0.661971944045087, -0.031389654165497 ], - [ 0.66197194707148, -0.031389655223358, -0.748871037519379 ], - [ -0.661971945551351, 0.031389654961479, -0.74887103887409 ], - [ -0.125732546862956, -0.877697090664539, -0.462427446956124 ], - [ -0.877697091831705, -0.462427445382079, -0.125732544504481 ], - [ 0.125732544403638, 0.877697091976424, -0.46242744513482 ], - [ -0.462427446167756, -0.125732547895101, -0.877697090932044 ], - [ 0.877697090790478, -0.462427446687307, 0.125732546972493 ], - [ -0.462427443232932, 0.125732547131528, 0.877697092587683 ], - [ 0.87769709111192, 0.462427445862905, -0.12573254776065 ], - [ 0.462427446678181, 0.125732547366796, -0.877697090738801 ], - [ -0.87769709250851, 0.462427443357225, 0.125732547227075 ], - [ 0.462427444949274, -0.125732544734265, 0.877697092026818 ], - [ -0.125732546895942, 0.877697092616795, 0.462427443241732 ], - [ 0.125732547889573, -0.877697091021935, 0.462427445998644 ], - ]), - #degree 19 - np.array([ - [ 0.553035945587524, -0.472050222255944, 0.686527370580538 ], - [ -0.472050227459673, 0.686527365766638, 0.553035947121696 ], - [ -0.55303594558747, 0.472050222505474, 0.686527370409006 ], - [ 0.686527372366403, 0.553035941501725, -0.472050224445432 ], - [ 0.472050228567412, 0.686527364805305, -0.553035947369552 ], - [ 0.68652737203169, -0.553035941518164, 0.472050224912964 ], - [ 0.472050228340927, -0.686527365268236, 0.553035946988198 ], - [ -0.686527371732145, -0.553035942965273, -0.47205022365323 ], - [ -0.472050227580466, -0.686527365608527, -0.553035947214868 ], - [ -0.686527371021655, 0.553035942983048, 0.472050224665708 ], - [ 0.553035946644886, 0.472050221691609, -0.686527370116806 ], - [ -0.553035947212832, -0.472050222465287, -0.68652736912732 ], - [ 0.534151654424436, 0.792082393152326, 0.29544456761586 ], - [ 0.792082397489039, 0.295444568376044, 0.534151647573148 ], - [ -0.53415165460592, -0.792082392760173, 0.295444568339099 ], - [ 0.295444567949351, 0.534151645341887, 0.792082399152876 ], - [ -0.792082397600766, 0.29544456929757, -0.534151646897765 ], - [ 0.295444567829592, -0.534151645364488, -0.792082399182305 ], - [ -0.792082397865911, -0.295444567933543, 0.534151647259045 ], - [ -0.295444567962042, -0.53415164560035, 0.792082398973845 ], - [ 0.792082397128777, -0.29544456908432, -0.534151647715618 ], - [ -0.295444567489444, 0.53415164476261, -0.792082399715064 ], - [ 0.534151654464793, -0.792082393125927, -0.29544456761367 ], - [ -0.534151654460867, 0.792082392713663, -0.295444568726043 ], - [ -0.987783901989363, -0.008366313346394, -0.155605166275604 ], - [ -0.008366316491905, -0.155605166254194, -0.987783901966094 ], - [ 0.987783902042018, 0.008366312354305, -0.155605165994688 ], - [ 
-0.155605167507252, -0.98778390181532, -0.008366310987655 ], - [ 0.008366315777747, -0.155605166766477, 0.987783901891443 ], - [ -0.155605168424393, 0.987783901667492, 0.008366311383278 ], - [ 0.008366317026602, 0.155605166706053, -0.987783901890384 ], - [ 0.155605166835858, 0.987783901919836, -0.008366311135093 ], - [ -0.008366315838957, 0.155605165685948, 0.98778390206114 ], - [ 0.155605167761508, -0.987783901773982, 0.008366311139443 ], - [ -0.98778390211314, 0.008366313179595, 0.155605165498836 ], - [ 0.987783902165643, -0.008366312162939, 0.155605165220208 ], - [ 0.950764981387945, 0.202727494112491, -0.234408859255043 ], - [ 0.202727496789996, -0.234408860732757, 0.950764980452705 ], - [ -0.950764980986237, -0.202727494847485, -0.234408860248721 ], - [ -0.23440885021567, 0.950764983233011, 0.202727495911382 ], - [ -0.20272749729896, -0.234408861592541, -0.950764980132203 ], - [ -0.23440885011577, -0.950764983322899, -0.20272749560533 ], - [ -0.202727496759051, 0.234408860491485, 0.950764980518789 ], - [ 0.234408850569327, -0.950764983253747, 0.20272749540521 ], - [ 0.202727497565203, 0.234408861679376, -0.950764980054025 ], - [ 0.234408850341461, 0.950764983305224, -0.202727495427267 ], - [ 0.950764981380539, -0.202727493695606, 0.234408859645621 ], - [ -0.950764980970666, 0.202727494426432, 0.234408860676023 ], - [ 0.512072989115983, -0.124051607170076, -0.849936734455185 ], - [ -0.12405160965336, -0.849936734716267, 0.512072988081055 ], - [ -0.51207298893537, 0.124051606674421, -0.849936734636344 ], - [ -0.849936734725645, 0.512072989351902, -0.124051604343177 ], - [ 0.124051609706947, -0.849936734419284, -0.512072988561004 ], - [ -0.849936734185619, -0.512072990133951, 0.124051604814925 ], - [ 0.124051609905272, 0.849936734159209, 0.512072988944631 ], - [ 0.849936734486865, -0.512072989718667, -0.124051604465195 ], - [ -0.124051609776913, 0.849936734911909, -0.512072987726399 ], - [ 0.849936733865973, 0.512072990727649, 0.124051604554246 ], - [ 0.512072989657044, 0.124051606837459, 0.849936734177751 ], - [ -0.512072989396574, -0.124051606970032, 0.84993673431533 ], - [ 0.391883697914976, 0.850423194793585, -0.351009340424947 ], - [ 0.850423195330397, -0.351009335923244, 0.39188370078221 ], - [ -0.391883697466306, -0.850423195007668, -0.351009340407185 ], - [ -0.351009335872243, 0.391883705326061, 0.850423193257595 ], - [ -0.850423194593128, -0.351009337444654, -0.391883701019427 ], - [ -0.35100933799945, -0.391883705264188, -0.850423192408108 ], - [ -0.850423194468673, 0.351009337760337, 0.391883701006749 ], - [ 0.351009335527498, -0.391883705866852, 0.850423193150685 ], - [ 0.850423195361416, 0.351009336170377, -0.391883700493539 ], - [ 0.351009336873483, 0.39188370616407, -0.850423192458173 ], - [ 0.391883698323181, -0.850423194786778, 0.3510093399857 ], - [ -0.391883698167036, 0.850423194811902, 0.351009340099156 ], - [ -0.637143378120116, -0.628499374133282, 0.446135464216598 ], - [ -0.628499375204954, 0.446135468086576, -0.637143374353178 ], - [ 0.63714337757707, 0.628499374826823, 0.446135464015109 ], - [ 0.446135466991108, -0.63714337381196, -0.628499376531226 ], - [ 0.628499375911897, 0.446135468292267, 0.637143373511799 ], - [ 0.446135467311664, 0.637143373480954, 0.628499376639239 ], - [ 0.62849937527089, -0.446135468666392, -0.637143373882143 ], - [ -0.446135467006437, 0.637143373286424, -0.628499377053108 ], - [ -0.628499376195147, -0.446135467887251, 0.637143373515989 ], - [ -0.446135467633094, -0.637143373382135, 0.628499376511253 ], - [ -0.637143377816856, 
0.628499373935058, -0.446135464928946 ], - [ 0.637143377542956, -0.62849937478419, -0.446135464123887 ], - [ -0.420378708184596, 0.903565957647232, -0.082766550526719 ], - [ 0.903565960547129, -0.082766548074817, -0.420378702434272 ], - [ 0.42037870768752, -0.903565957904322, -0.082766550244743 ], - [ -0.08276654593922, -0.420378701283585, 0.9035659612781 ], - [ -0.903565960760554, -0.082766547146253, 0.420378702158358 ], - [ -0.082766545039106, 0.420378701254078, -0.903565961374279 ], - [ -0.903565960509685, 0.082766547722836, -0.420378702584056 ], - [ 0.082766546052241, 0.420378700439882, 0.903565961660275 ], - [ 0.90356596090935, 0.082766547862683, 0.420378701697478 ], - [ 0.082766545679396, -0.420378701270528, -0.903565961307975 ], - [ -0.420378707505505, -0.903565957945495, 0.082766550719722 ], - [ 0.420378706956033, 0.903565958233438, 0.082766550367062 ], - [ 0.491848298473796, 0.355367007972287, 0.794858189196825 ], - [ 0.355367012626596, 0.794858187708896, 0.491848297515583 ], - [ -0.491848298344631, -0.355367008156744, 0.794858189194284 ], - [ 0.794858192062911, 0.491848294626225, 0.355367006886901 ], - [ -0.355367012548889, 0.794858187634159, -0.491848297692508 ], - [ 0.794858192091183, -0.491848294618182, -0.355367006834796 ], - [ -0.355367012605403, -0.79485818761909, 0.491848297676028 ], - [ -0.79485819260841, -0.491848293926967, 0.355367006634583 ], - [ 0.35536701250799, -0.794858187986535, -0.491848297152596 ], - [ -0.794858192358054, 0.491848294578517, -0.355367006292778 ], - [ 0.491848297979809, -0.355367007868558, -0.794858189548874 ], - [ -0.491848297571808, 0.355367007417215, -0.794858190003127 ], - [ 0.060667255805915, 0.97798263888706, 0.199673338501868 ], - [ 0.977982638810576, 0.199673341482313, 0.060667247229371 ], - [ -0.06066725576913, -0.977982638936182, 0.199673338272451 ], - [ 0.19967333790937, 0.060667250976018, 0.977982639307643 ], - [ -0.977982639072168, 0.199673340242081, -0.060667247094362 ], - [ 0.199673337373138, -0.060667251086811, -0.977982639410252 ], - [ -0.977982638978921, -0.199673340702871, 0.060667247080943 ], - [ -0.199673337990036, -0.060667251306886, 0.977982639270649 ], - [ 0.977982638897865, -0.199673341052594, -0.060667247236562 ], - [ -0.199673337084201, 0.060667250789575, -0.977982639487682 ], - [ 0.06066725570001, -0.977982638898456, -0.199673338478232 ], - [ -0.060667256074209, 0.977982638961939, -0.199673338053604 ], - [ -0.708312961873346, 0.702414591990534, 0.070046334671986 ], - [ 0.702414584158394, 0.070046328146925, -0.70831297028554 ], - [ 0.70831296180624, -0.702414591950002, 0.070046335757007 ], - [ 0.070046325730793, -0.7083129711293, 0.702414583548491 ], - [ -0.702414584241602, 0.070046328819927, 0.70831297013647 ], - [ 0.070046325138075, 0.708312971393231, -0.70241458334145 ], - [ -0.702414584340882, -0.070046327329757, -0.708312970185382 ], - [ -0.070046326094986, 0.708312970542407, 0.702414584103993 ], - [ 0.702414584126282, -0.070046328999645, 0.708312970233058 ], - [ -0.07004632593766, -0.708312970292399, -0.70241458437179 ], - [ -0.70831296129047, -0.702414592488956, -0.070046335567964 ], - [ 0.708312961059513, 0.702414592640383, -0.070046336384914 ], - [ -0.608778246497891, -0.729529462544733, -0.311730348009535 ], - [ -0.729529461162802, -0.311730341531525, -0.608778251471052 ], - [ 0.608778246679673, 0.729529462023489, -0.31173034887438 ], - [ -0.311730343069402, -0.608778253416134, -0.729529458882528 ], - [ 0.729529460955067, -0.311730341992774, 0.608778251483804 ], - [ -0.311730342453046, 0.608778253837742, 
0.729529458794075 ], - [ 0.729529461285603, 0.311730341286902, -0.608778251449154 ], - [ 0.311730342676067, 0.608778254584565, -0.729529458075568 ], - [ -0.729529460737167, 0.311730342625706, 0.608778251420826 ], - [ 0.311730342500045, -0.608778254449614, 0.729529458263397 ], - [ -0.608778247292532, 0.72952946202083, 0.31173034768375 ], - [ 0.608778247330452, -0.729529461617846, 0.311730348552781 ], - [ 0.230102774190651, -0.807756554170623, 0.542754145543051 ], - [ -0.807756552084345, 0.542754149424728, 0.230102772358463 ], - [ -0.230102773683601, 0.807756554197333, 0.542754145718266 ], - [ 0.542754144206019, 0.230102772513564, -0.807756555546758 ], - [ 0.807756552132751, 0.542754149180793, -0.230102772763921 ], - [ 0.54275414387689, -0.230102773432955, 0.807756555506005 ], - [ 0.807756552229309, -0.542754148882616, 0.230102773128283 ], - [ -0.542754145084005, -0.230102772500065, -0.80775655496066 ], - [ -0.807756552237738, -0.542754149346909, -0.230102772003543 ], - [ -0.542754144288786, 0.230102773227955, 0.807756555287639 ], - [ 0.230102774097675, 0.807756554025896, -0.542754145797859 ], - [ -0.230102773562357, -0.807756553761761, -0.542754146417909 ], - [ -0.496383809474105, -0.862518230775131, -0.098312843883766 ], - [ -0.862518224287333, -0.098312838975785, -0.49638382171939 ], - [ 0.496383809596231, 0.862518230686221, -0.098312844047173 ], - [ -0.098312839350041, -0.496383823019562, -0.862518223496418 ], - [ 0.862518224300261, -0.098312838333147, 0.496383821824206 ], - [ -0.098312838299782, 0.496383823078636, 0.862518223582133 ], - [ 0.862518224470515, 0.098312838524506, -0.496383821490472 ], - [ 0.09831283917121, 0.496383824314041, -0.862518222771822 ], - [ -0.862518224078588, 0.098312839378387, 0.496383822002367 ], - [ 0.098312838470056, -0.49638382381015, 0.862518223141735 ], - [ -0.496383810069422, 0.862518230414379, 0.098312844042943 ], - [ 0.496383810403814, -0.862518230215463, 0.098312844099726 ], - [ 0.278692551327958, 0.919313188465131, 0.277837584477674 ], - [ 0.919313191744972, 0.277837581526559, 0.278692543450923 ], - [ -0.278692551566547, -0.91931318841363, 0.277837584408758 ], - [ 0.277837583051005, 0.278692544908351, 0.919313190842426 ], - [ -0.919313192180326, 0.277837580345951, -0.278692543191822 ], - [ 0.277837582008532, -0.278692545046071, -0.919313191115735 ], - [ -0.919313192196504, -0.277837580255645, 0.278692543228489 ], - [ -0.277837582825575, -0.278692545265575, 0.919313190802263 ], - [ 0.919313191814052, -0.277837581086655, -0.278692543661607 ], - [ -0.277837581528602, 0.278692544535811, -0.919313191415468 ], - [ 0.278692551299389, -0.91931318860489, -0.277837584043894 ], - [ -0.278692551719555, 0.919313188501633, -0.277837583964092 ], - [ 0.711723818982073, -0.147355178359107, -0.686830151423428 ], - [ -0.14735518004562, -0.686830151696651, 0.711723818369232 ], - [ -0.711723818994987, 0.147355179083635, -0.686830151254603 ], - [ -0.686830156031755, 0.711723816221312, -0.147355170213896 ], - [ 0.147355179878181, -0.68683015150873, -0.711723818585246 ], - [ -0.686830155899656, -0.711723816480405, 0.147355169578202 ], - [ 0.147355179832049, 0.686830151151262, 0.711723818939762 ], - [ 0.686830156307707, -0.711723816117428, -0.147355169429431 ], - [ -0.147355180410728, 0.686830151596083, -0.711723818390689 ], - [ 0.686830155813336, 0.711723816667769, 0.147355169075579 ], - [ 0.711723819167954, 0.147355177853636, 0.686830151339256 ], - [ -0.711723818958232, -0.147355177932743, 0.686830151539607 ], - [ 0.910866815770901, -0.407547474081887, 0.065013077890936 ], 
- [ -0.407547470055014, 0.06501307469253, 0.910866817800923 ], - [ -0.91086681602966, 0.40754747351676, 0.065013077808199 ], - [ 0.065013071417123, 0.910866817243773, -0.407547471822745 ], - [ 0.407547469547224, 0.065013074424327, -0.910866818047266 ], - [ 0.065013071503944, -0.910866817193855, 0.407547471920462 ], - [ 0.407547469994702, -0.065013074730498, 0.910866817825199 ], - [ -0.065013071237167, -0.910866817002829, -0.407547472389962 ], - [ -0.407547469492954, -0.065013074760909, -0.910866818047525 ], - [ -0.065013070894069, 0.910866817046896, 0.407547472346204 ], - [ 0.910866815571027, 0.407547474607393, -0.065013077397032 ], - [ -0.910866815826998, -0.407547474069762, -0.065013077180997 ], - ]), - #degree 20 - np.array([ - [ -0.251581299355938, 0.965702462813156, -0.064230858090044 ], - [ 0.965702462812973, -0.064230858090163, -0.251581299356609 ], - [ 0.25158129935621, -0.965702462813076, -0.064230858090184 ], - [ -0.064230858090037, -0.251581299356469, 0.965702462813018 ], - [ -0.965702462812988, -0.064230858090212, 0.25158129935654 ], - [ -0.064230858090283, 0.251581299356213, -0.965702462813068 ], - [ -0.965702462813129, 0.06423085809035, -0.251581299355962 ], - [ 0.064230858090209, 0.251581299356322, 0.965702462813045 ], - [ 0.96570246281309, 0.064230858089911, 0.251581299356226 ], - [ 0.0642308580902, -0.2515812993563, -0.965702462813051 ], - [ -0.2515812993566, -0.965702462812992, 0.064230858089919 ], - [ 0.251581299356516, 0.965702462812981, 0.064230858090402 ], - [ -0.774265533845772, 0.381515182343397, -0.504934697500583 ], - [ 0.381515182343197, -0.504934697500657, -0.774265533845823 ], - [ 0.774265533845583, -0.381515182343386, -0.504934697500883 ], - [ -0.504934697500797, -0.774265533845681, 0.3815151823433 ], - [ -0.381515182343153, -0.504934697500805, 0.774265533845748 ], - [ -0.504934697500622, 0.774265533845887, -0.381515182343114 ], - [ -0.381515182343272, 0.504934697500883, -0.774265533845639 ], - [ 0.504934697500808, 0.774265533845615, 0.381515182343419 ], - [ 0.38151518234349, 0.504934697500621, 0.774265533845703 ], - [ 0.50493469750058, -0.774265533845806, -0.381515182343333 ], - [ -0.774265533845719, -0.381515182343321, 0.504934697500723 ], - [ 0.774265533845894, 0.38151518234298, 0.504934697500711 ], - [ 0.621892089865857, 0.451716799694261, -0.639689113113747 ], - [ 0.451716799694191, -0.639689113113918, 0.621892089865731 ], - [ -0.621892089865648, -0.451716799694225, -0.639689113113976 ], - [ -0.639689113113901, 0.621892089865499, 0.451716799694535 ], - [ -0.451716799694008, -0.6396891131138, -0.621892089865986 ], - [ -0.639689113113879, -0.621892089865655, -0.451716799694351 ], - [ -0.451716799694347, 0.639689113113675, 0.621892089865869 ], - [ 0.639689113113788, -0.621892089865995, 0.451716799694013 ], - [ 0.451716799694587, 0.639689113113955, -0.621892089865406 ], - [ 0.639689113114061, 0.6218920898659, -0.451716799693757 ], - [ 0.621892089865889, -0.451716799694281, 0.639689113113701 ], - [ -0.621892089865898, 0.451716799693713, 0.639689113114094 ], - [ 0.281811042675091, 0.858047847696197, -0.429344182783814 ], - [ 0.858047847696408, -0.429344182783659, 0.281811042674688 ], - [ -0.281811042675114, -0.858047847696306, -0.429344182783581 ], - [ -0.429344182783315, 0.281811042674947, 0.858047847696495 ], - [ -0.858047847696386, -0.429344182783329, -0.281811042675257 ], - [ -0.429344182783979, -0.281811042674793, -0.858047847696213 ], - [ -0.858047847696136, 0.429344182783948, 0.281811042675075 ], - [ 0.429344182783574, -0.281811042675002, 
0.858047847696347 ], - [ 0.85804784769643, 0.429344182783432, -0.281811042674964 ], - [ 0.429344182783407, 0.2818110426754, -0.8580478476963 ], - [ 0.28181104267478, -0.858047847696515, 0.429344182783383 ], - [ -0.281811042675193, 0.858047847696227, 0.429344182783688 ], - [ -0.649612004107369, -0.615311084069471, 0.44653836782617 ], - [ -0.615311084069575, 0.446538367826544, -0.649612004107014 ], - [ 0.649612004107338, 0.615311084069274, 0.446538367826487 ], - [ 0.44653836782629, -0.649612004107234, -0.615311084069526 ], - [ 0.615311084069631, 0.446538367826189, 0.649612004107205 ], - [ 0.4465383678263, 0.649612004107223, 0.615311084069531 ], - [ 0.615311084069337, -0.44653836782627, -0.649612004107428 ], - [ -0.446538367826248, 0.649612004107346, -0.615311084069439 ], - [ -0.615311084069373, -0.446538367826536, 0.649612004107211 ], - [ -0.446538367826286, -0.649612004107303, 0.615311084069457 ], - [ -0.649612004107121, 0.615311084069723, -0.446538367826183 ], - [ 0.649612004107125, -0.615311084069551, -0.446538367826415 ], - [ 0.993363116319503, -0.113468728148246, -0.018829946054775 ], - [ -0.113468728148035, -0.018829946054639, 0.993363116319529 ], - [ -0.993363116319523, 0.113468728148204, -0.018829946053964 ], - [ -0.018829946053903, 0.993363116319554, -0.113468728147943 ], - [ 0.113468728148066, -0.018829946054323, -0.993363116319532 ], - [ -0.018829946054743, -0.993363116319533, 0.113468728147986 ], - [ 0.113468728148219, 0.018829946054485, 0.993363116319511 ], - [ 0.018829946054344, -0.99336311631951, -0.113468728148254 ], - [ -0.113468728148178, 0.018829946054246, -0.993363116319521 ], - [ 0.018829946054485, 0.993363116319503, 0.113468728148287 ], - [ 0.99336311631954, 0.113468728147985, 0.018829946054382 ], - [ -0.993363116319531, -0.113468728148037, 0.018829946054542 ], - [ 0.246398885891569, -0.720801569649804, 0.647867799957501 ], - [ -0.720801569649392, 0.647867799957886, 0.246398885891762 ], - [ -0.246398885891682, 0.720801569649632, 0.647867799957649 ], - [ 0.647867799957437, 0.246398885891663, -0.720801569649829 ], - [ 0.720801569649864, 0.647867799957577, -0.246398885891192 ], - [ 0.647867799957658, -0.246398885891679, 0.720801569649625 ], - [ 0.720801569649656, -0.647867799957734, 0.246398885891389 ], - [ -0.647867799957904, -0.246398885891433, -0.720801569649489 ], - [ -0.720801569649865, -0.647867799957373, -0.246398885891727 ], - [ -0.647867799957474, 0.246398885891166, 0.720801569649966 ], - [ 0.246398885891794, 0.720801569649507, -0.647867799957745 ], - [ -0.246398885891456, -0.720801569649666, -0.647867799957697 ], - [ -0.793544204802179, -0.387628773401269, -0.469075184865183 ], - [ -0.387628773401353, -0.469075184864794, -0.793544204802368 ], - [ 0.793544204802171, 0.387628773401536, -0.469075184864975 ], - [ -0.469075184865097, -0.793544204802034, -0.387628773401668 ], - [ 0.38762877340168, -0.469075184864988, 0.793544204802093 ], - [ -0.46907518486511, 0.793544204802104, 0.387628773401512 ], - [ 0.387628773401425, 0.469075184865298, -0.793544204802035 ], - [ 0.469075184865068, 0.793544204802337, -0.387628773401084 ], - [ -0.387628773401491, 0.469075184864931, 0.793544204802219 ], - [ 0.469075184864784, -0.793544204802296, 0.387628773401512 ], - [ -0.793544204802265, 0.387628773401224, 0.469075184865075 ], - [ 0.793544204802185, -0.387628773401823, 0.469075184864715 ], - [ 0.164945057653003, -0.958376909717154, 0.233038251960587 ], - [ -0.958376909716935, 0.233038251961126, 0.164945057653512 ], - [ -0.164945057653238, 0.958376909717001, 0.233038251961048 ], - [ 
0.233038251960668, 0.164945057653504, -0.958376909717048 ], - [ 0.958376909717102, 0.233038251960514, -0.164945057653409 ], - [ 0.233038251960742, -0.164945057653288, 0.958376909717067 ], - [ 0.958376909717099, -0.233038251960827, 0.164945057652982 ], - [ -0.233038251961122, -0.164945057653226, -0.958376909716986 ], - [ -0.958376909717093, -0.233038251960632, -0.164945057653293 ], - [ -0.233038251960434, 0.164945057653261, 0.958376909717147 ], - [ 0.164945057653494, 0.958376909716965, -0.233038251961015 ], - [ -0.164945057653458, -0.958376909717031, -0.233038251960769 ], - [ 0.560484250466976, 0.813252649483695, -0.156452974040834 ], - [ 0.81325264948369, -0.156452974041446, 0.560484250466813 ], - [ -0.56048425046724, -0.813252649483431, -0.156452974041263 ], - [ -0.15645297404103, 0.560484250467047, 0.813252649483609 ], - [ -0.81325264948382, -0.156452974040726, -0.560484250466826 ], - [ -0.156452974041097, -0.560484250466778, -0.813252649483781 ], - [ -0.81325264948363, 0.156452974040967, 0.560484250467035 ], - [ 0.156452974041285, -0.560484250467053, 0.813252649483555 ], - [ 0.813252649483481, 0.156452974041151, -0.560484250467199 ], - [ 0.156452974040881, 0.560484250466996, -0.813252649483672 ], - [ 0.560484250466836, -0.813252649483737, 0.156452974041122 ], - [ -0.56048425046674, 0.813252649483823, 0.156452974041018 ], - [ 0.366630058651312, 0.922018832550933, -0.124353015704282 ], - [ 0.92201883255088, -0.124353015704762, 0.366630058651284 ], - [ -0.366630058651761, -0.922018832550708, -0.124353015704629 ], - [ -0.124353015704377, 0.366630058651577, 0.922018832550815 ], - [ -0.922018832550933, -0.124353015704203, -0.366630058651341 ], - [ -0.124353015704534, -0.366630058651111, -0.922018832550979 ], - [ -0.922018832550883, 0.124353015704478, 0.366630058651372 ], - [ 0.12435301570463, -0.366630058651537, 0.922018832550797 ], - [ 0.922018832550745, 0.124353015704463, -0.366630058651723 ], - [ 0.124353015704299, 0.366630058651563, -0.922018832550831 ], - [ 0.366630058651286, -0.922018832550923, 0.124353015704438 ], - [ -0.366630058651229, 0.922018832550938, 0.124353015704492 ], - [ -0.804671953651735, -0.070836250755727, 0.589478814365005 ], - [ -0.070836250756058, 0.589478814365003, -0.804671953651707 ], - [ 0.804671953651921, 0.070836250755383, 0.589478814364792 ], - [ 0.589478814364726, -0.804671953651941, -0.070836250755714 ], - [ 0.070836250755939, 0.589478814364776, 0.804671953651884 ], - [ 0.589478814365018, 0.804671953651715, 0.070836250755846 ], - [ 0.070836250755601, -0.589478814364811, -0.804671953651888 ], - [ -0.589478814364784, 0.804671953651884, -0.070836250755875 ], - [ -0.070836250755551, -0.589478814364944, 0.804671953651795 ], - [ -0.589478814364978, -0.804671953651759, 0.07083625075567 ], - [ -0.804671953651836, 0.070836250756193, -0.589478814364811 ], - [ 0.804671953651731, -0.070836250755764, -0.589478814365006 ], - [ -0.830597137771463, -0.481356221636722, 0.280008183125909 ], - [ -0.481356221636763, 0.280008183126324, -0.830597137771299 ], - [ 0.830597137771467, 0.481356221636628, 0.280008183126056 ], - [ 0.280008183125864, -0.830597137771343, -0.481356221636956 ], - [ 0.481356221637075, 0.280008183125899, 0.830597137771262 ], - [ 0.280008183126004, 0.830597137771351, 0.481356221636859 ], - [ 0.481356221636653, -0.280008183125859, -0.83059713777152 ], - [ -0.280008183126012, 0.83059713777152, -0.481356221636564 ], - [ -0.481356221636741, -0.280008183126112, 0.830597137771384 ], - [ -0.280008183126053, -0.830597137771314, 0.481356221636894 ], - [ 
-0.830597137771366, 0.48135622163684, -0.280008183125994 ], - [ 0.830597137771194, -0.481356221637029, -0.280008183126178 ], - [ 0.622576105404642, 0.027441908430236, -0.782077959439399 ], - [ 0.027441908430276, -0.782077959439431, 0.622576105404601 ], - [ -0.622576105404963, -0.027441908430045, -0.78207795943915 ], - [ -0.782077959439118, 0.622576105404988, 0.027441908430397 ], - [ -0.027441908430201, -0.782077959439296, -0.622576105404774 ], - [ -0.782077959439408, -0.622576105404628, -0.027441908430289 ], - [ -0.027441908430238, 0.782077959439221, 0.622576105404866 ], - [ 0.782077959439263, -0.62257610540482, 0.027441908430083 ], - [ 0.027441908430419, 0.782077959439269, -0.622576105404798 ], - [ 0.782077959439451, 0.622576105404591, -0.027441908429928 ], - [ 0.622576105404788, -0.02744190843038, 0.782077959439278 ], - [ -0.622576105404572, 0.027441908429868, 0.782077959439468 ], - [ -0.93186959347387, 0.318712863282032, -0.173323891998229 ], - [ 0.318712863281944, -0.173323891998258, -0.931869593473894 ], - [ 0.931869593473744, -0.318712863282051, -0.173323891998871 ], - [ -0.173323891998841, -0.931869593473836, 0.318712863281799 ], - [ -0.318712863281924, -0.173323891998617, 0.931869593473834 ], - [ -0.173323891998245, 0.931869593473975, -0.318712863281714 ], - [ -0.318712863281997, 0.173323891998515, -0.931869593473828 ], - [ 0.173323891998501, 0.931869593473801, 0.318712863282084 ], - [ 0.318712863282089, 0.173323891998539, 0.931869593473793 ], - [ 0.173323891998443, -0.931869593473824, -0.31871286328205 ], - [ -0.931869593473865, -0.318712863281928, 0.173323891998448 ], - [ 0.931869593473897, 0.318712863281802, 0.173323891998503 ], - [ 0.883848176852703, 0.201423804475213, 0.422185801827685 ], - [ 0.201423804475703, 0.422185801827661, 0.883848176852602 ], - [ -0.883848176852534, -0.201423804475554, 0.422185801827875 ], - [ 0.42218580182791, 0.883848176852484, 0.201423804475701 ], - [ -0.201423804475472, 0.422185801827744, -0.883848176852615 ], - [ 0.422185801827623, -0.883848176852647, -0.201423804475586 ], - [ -0.201423804475397, -0.422185801827833, 0.88384817685259 ], - [ -0.42218580182793, -0.883848176852523, 0.201423804475489 ], - [ 0.201423804475479, -0.422185801827682, -0.883848176852643 ], - [ -0.422185801827514, 0.883848176852769, -0.20142380447528 ], - [ 0.883848176852476, -0.201423804475614, -0.422185801827967 ], - [ -0.88384817685271, 0.201423804475563, -0.422185801827502 ], - [ 0.204275039956405, 0.718770569884226, 0.664560438123663 ], - [ 0.718770569884334, 0.664560438123474, 0.204275039956637 ], - [ -0.20427503995626, -0.718770569884265, 0.664560438123664 ], - [ 0.66456043812381, 0.204275039956156, 0.71877056988416 ], - [ -0.718770569884325, 0.664560438123579, -0.204275039956328 ], - [ 0.664560438123492, -0.204275039956373, -0.718770569884393 ], - [ -0.718770569884361, -0.664560438123554, 0.20427503995628 ], - [ -0.664560438123554, -0.204275039956662, 0.718770569884254 ], - [ 0.71877056988409, -0.664560438123802, -0.204275039956432 ], - [ -0.664560438123505, 0.204275039956682, -0.718770569884293 ], - [ 0.204275039956376, -0.718770569884165, -0.664560438123738 ], - [ -0.204275039956367, 0.718770569884538, -0.664560438123337 ], - [ -0.898847927472069, 0.43770082336828, 0.022144807560617 ], - [ 0.437700823367923, 0.022144807560963, -0.898847927472234 ], - [ 0.898847927472182, -0.437700823368065, 0.02214480756027 ], - [ 0.022144807560315, -0.898847927472293, 0.437700823367834 ], - [ -0.437700823367766, 0.022144807560623, 0.898847927472319 ], - [ 0.022144807560559, 
0.89884792747216, -0.437700823368094 ], - [ -0.437700823368255, -0.022144807560327, -0.898847927472088 ], - [ -0.022144807560661, 0.898847927472103, 0.437700823368207 ], - [ 0.43770082336803, -0.022144807560607, 0.898847927472191 ], - [ -0.022144807560733, -0.898847927472195, -0.437700823368015 ], - [ -0.898847927472245, -0.437700823367908, -0.022144807560796 ], - [ 0.898847927472313, 0.437700823367778, -0.022144807560634 ], - ]), - #degree 21 - np.array([ - [ 0.892653535762723, 0.412534053657361, -0.181618610454253 ], - [ 0.412534053425032, -0.181618610641782, 0.892653535831938 ], - [ -0.892653535806407, -0.412534053627853, -0.181618610306575 ], - [ -0.181618610613849, 0.892653535740475, 0.41253405363524 ], - [ -0.412534053477435, -0.181618610422654, -0.892653535852304 ], - [ -0.181618610451384, -0.892653535762812, -0.412534053658432 ], - [ -0.41253405331709, 0.181618610611827, 0.892653535887918 ], - [ 0.181618610400136, -0.8926535358123, 0.412534053573911 ], - [ 0.412534053327996, 0.1816186104204, -0.892653535921825 ], - [ 0.181618610580789, 0.892653535810904, -0.412534053497399 ], - [ 0.892653535867644, -0.412534053472558, 0.181618610358339 ], - [ -0.892653535855064, 0.41253405353516, 0.181618610277971 ], - [ -0.292093742593433, -0.29576702799317, 0.909507070170347 ], - [ -0.295767028026887, 0.90950707008926, -0.292093742811776 ], - [ 0.292093742447864, 0.295767028039713, 0.909507070201962 ], - [ 0.909507070147612, -0.292093742926721, -0.295767027733934 ], - [ 0.295767028145396, 0.909507070084441, 0.292093742706783 ], - [ 0.909507070188854, 0.292093742689207, 0.295767027841675 ], - [ 0.295767027907311, -0.909507070148419, -0.292093742748651 ], - [ -0.909507070101221, 0.292093743159272, -0.295767027646927 ], - [ -0.295767027835333, -0.909507070047293, 0.292093743136414 ], - [ -0.909507070218591, -0.292093742721776, 0.295767027718069 ], - [ -0.292093742540896, 0.295767027793147, -0.909507070252266 ], - [ 0.292093742861938, -0.295767027747614, -0.909507070163969 ], - [ -0.575225718038192, 0.024120572825078, 0.817639022597403 ], - [ 0.024120572786144, 0.817639022511238, -0.575225718162301 ], - [ 0.575225718116478, -0.024120572979213, 0.817639022537781 ], - [ 0.817639022556003, -0.57522571810348, 0.024120572671469 ], - [ -0.024120573041503, 0.817639022440757, 0.575225718251777 ], - [ 0.817639022458379, 0.575225718229118, -0.024120572984526 ], - [ -0.024120572818239, -0.81763902258126, -0.575225718061424 ], - [ -0.817639022543578, 0.575225718123882, 0.024120572606111 ], - [ 0.02412057271295, -0.817639022527296, 0.575225718142546 ], - [ -0.817639022600495, -0.575225718035174, -0.024120572792228 ], - [ -0.575225717925469, -0.024120572711052, -0.81763902268007 ], - [ 0.57522571790823, 0.024120572594155, -0.817639022695646 ], - [ -0.1288331617248, 0.05224764072024, 0.990288947973853 ], - [ 0.052247640694409, 0.990288947958895, -0.128833161850251 ], - [ 0.128833161840325, -0.052247640320038, 0.990288947979938 ], - [ 0.990288947949717, -0.128833161924796, 0.052247640684558 ], - [ -0.05224764038851, 0.990288947967581, 0.128833161907538 ], - [ 0.99028894797773, 0.128833161878001, -0.052247640268992 ], - [ -0.052247640390409, -0.99028894796219, -0.128833161948209 ], - [ -0.990288947960626, 0.128833161896649, 0.052247640547187 ], - [ 0.052247640527808, -0.990288947953251, 0.1288331619612 ], - [ -0.990288947970868, -0.128833161936205, -0.052247640255526 ], - [ -0.128833161790478, -0.052247640337643, -0.990288947985494 ], - [ 0.128833161857416, 0.052247640551545, -0.9902889479655 ], - [ 0.71800638603475, 
0.657446876255993, -0.228539787596286 ], - [ 0.657446876286737, -0.228539787831922, 0.718006385931596 ], - [ -0.718006386109442, -0.657446876171434, -0.228539787604877 ], - [ -0.228539787737219, 0.718006385947422, 0.657446876302374 ], - [ -0.657446876241021, -0.2285397877138, -0.718006386011054 ], - [ -0.228539787678997, -0.718006386031359, -0.657446876230945 ], - [ -0.657446876361185, 0.228539787860549, 0.718006385854315 ], - [ 0.228539787703065, -0.718006385857385, 0.657446876412577 ], - [ 0.657446876304454, 0.228539787874017, -0.718006385901975 ], - [ 0.228539787784967, 0.718006385813853, -0.657446876431648 ], - [ 0.71800638588076, -0.657446876363485, 0.228539787770851 ], - [ -0.718006385891018, 0.657446876371558, 0.228539787715401 ], - [ 0.863176473117803, 0.468181816653138, 0.189029528940001 ], - [ 0.468181816438486, 0.189029529197492, 0.86317647317784 ], - [ -0.863176473194446, -0.46818181657642, 0.189029528780033 ], - [ 0.189029529125527, 0.863176473064389, 0.468181816676708 ], - [ -0.468181816392671, 0.189029528897443, -0.863176473268398 ], - [ 0.189029528792174, -0.863176473143688, -0.4681818166651 ], - [ -0.468181816411213, -0.189029529128138, 0.863176473207821 ], - [ -0.189029528897852, -0.86317647308972, 0.468181816721931 ], - [ 0.468181816508867, -0.189029528930555, -0.863176473198123 ], - [ -0.189029529001823, 0.863176473106659, -0.468181816648722 ], - [ 0.863176473135229, -0.468181816648642, -0.189029528871561 ], - [ -0.863176473123334, 0.468181816698762, -0.189029528801744 ], - [ 0.772632856847133, -0.51705945069559, 0.368358511462152 ], - [ -0.517059450567132, 0.368358511585515, 0.772632856874286 ], - [ -0.772632856806081, 0.517059450647391, 0.368358511615915 ], - [ 0.368358511648001, 0.772632856806054, -0.517059450624573 ], - [ 0.517059450494007, 0.368358511816588, -0.772632856813056 ], - [ 0.368358511720496, -0.772632856802476, 0.517059450578273 ], - [ 0.517059450583445, -0.368358511487117, 0.77263285691028 ], - [ -0.36835851156733, -0.772632856859467, -0.517059450602229 ], - [ -0.517059450502369, -0.368358511665956, -0.772632856879275 ], - [ -0.368358511469803, 0.772632856855651, 0.517059450677412 ], - [ 0.772632856934749, 0.517059450691919, -0.368358511283531 ], - [ -0.772632856927485, -0.517059450633778, -0.368358511380378 ], - [ -0.847819231914648, -0.066325775900167, -0.526121128113002 ], - [ -0.066325775913631, -0.526121128257686, -0.847819231823809 ], - [ 0.847819231883018, 0.066325775819852, -0.526121128174097 ], - [ -0.526121128348762, -0.847819231766957, -0.06632577591791 ], - [ 0.06632577584612, -0.526121128407098, 0.847819231736372 ], - [ -0.52612112845924, 0.84781923170908, 0.066325775781366 ], - [ 0.066325775945785, 0.52612112834438, -0.847819231767496 ], - [ 0.526121128449532, 0.847819231700692, -0.066325775965613 ], - [ -0.066325775877211, 0.526121128306388, 0.847819231796436 ], - [ 0.526121128504669, -0.847819231665213, 0.06632577598176 ], - [ -0.847819231821725, 0.066325775941005, 0.526121128257594 ], - [ 0.847819231850264, -0.066325775996655, 0.52612112820459 ], - [ 0.00980574322923, 0.942983815842593, 0.332694109443892 ], - [ 0.942983815808923, 0.332694109539748, 0.00980574321495 ], - [ -0.00980574337969, -0.942983815787291, 0.332694109596207 ], - [ 0.332694109226554, 0.009805743204272, 0.942983815919532 ], - [ -0.94298381577404, 0.332694109635647, -0.009805743315804 ], - [ 0.332694109397996, -0.00980574329891, -0.942983815858062 ], - [ -0.942983815776114, -0.332694109630098, 0.009805743304667 ], - [ -0.332694109319027, -0.009805743188507, 
0.94298381588707 ], - [ 0.942983815775082, -0.332694109635199, -0.009805743230763 ], - [ -0.332694109455765, 0.009805743389762, -0.942983815836735 ], - [ 0.00980574330114, -0.942983815752524, -0.332694109697065 ], - [ -0.009805743287713, 0.942983815791379, -0.332694109587331 ], - [ 0.785599248371152, -0.405156945312269, -0.467634120465896 ], - [ -0.405156944932125, -0.467634120649859, 0.785599248457698 ], - [ -0.78559924820179, 0.405156945434051, -0.467634120644904 ], - [ -0.467634120611242, 0.785599248334623, -0.405156945215339 ], - [ 0.405156945136423, -0.467634120868201, -0.785599248222366 ], - [ -0.467634120811804, -0.785599248145609, 0.405156945350347 ], - [ 0.405156944841985, 0.467634120861332, 0.785599248378305 ], - [ 0.467634120786726, -0.785599248249857, -0.405156945177156 ], - [ -0.405156944999643, 0.467634120871098, -0.785599248291182 ], - [ 0.467634120893713, 0.78559924823424, 0.405156945083953 ], - [ 0.785599248313341, 0.405156945117104, 0.467634120732106 ], - [ -0.7855992482811, -0.40515694519737, 0.467634120716727 ], - [ -0.737331999131492, 0.620851501013764, -0.26624225199189 ], - [ 0.620851500949186, -0.266242252154895, -0.73733199912701 ], - [ 0.737331999060061, -0.620851501088737, -0.266242252014883 ], - [ -0.266242251948631, -0.737331999103255, 0.62085150106585 ], - [ -0.620851501079221, -0.2662422522338, 0.737331998989025 ], - [ -0.266242252011624, 0.737331998996222, -0.620851501165951 ], - [ -0.620851501072124, 0.26624225222256, -0.73733199899906 ], - [ 0.266242252113864, 0.737331998832974, 0.620851501315983 ], - [ 0.620851501187387, 0.266242252328374, 0.737331998863797 ], - [ 0.26624225193225, -0.73733199893899, -0.620851501267959 ], - [ -0.737331998947943, -0.620851501183297, 0.266242252104879 ], - [ 0.737331998835007, 0.620851501305786, 0.26624225213201 ], - [ 0.726871469165659, -0.027488282350428, -0.686223186468061 ], - [ -0.027488282182755, -0.686223186448325, 0.726871469190633 ], - [ -0.726871469172931, 0.027488282371885, -0.686223186459499 ], - [ -0.686223186449712, 0.726871469185406, -0.027488282286341 ], - [ 0.027488282351607, -0.68622318649112, -0.726871469143845 ], - [ -0.686223186545622, -0.726871469089794, 0.027488282420281 ], - [ 0.027488282266836, 0.686223186470335, 0.726871469166674 ], - [ 0.686223186661183, -0.726871468983422, -0.027488282348185 ], - [ -0.027488282251029, 0.686223186523092, -0.726871469117465 ], - [ 0.686223186609112, 0.726871469033498, 0.027488282323948 ], - [ 0.726871469070107, 0.02748828233555, 0.686223186569869 ], - [ -0.726871469080183, -0.027488282309716, 0.686223186560232 ], - [ 0.665363385720515, 0.580860267739271, 0.468927408352716 ], - [ 0.580860267577087, 0.468927408488638, 0.665363385766308 ], - [ -0.66536338567738, -0.580860267719575, 0.468927408438318 ], - [ 0.468927408340783, 0.665363385821863, 0.580860267632813 ], - [ -0.580860267528453, 0.468927408678832, -0.665363385674723 ], - [ 0.468927408372614, -0.665363385698803, -0.580860267748078 ], - [ -0.580860267640877, -0.468927408552762, 0.665363385665427 ], - [ -0.468927408468336, -0.665363385847947, 0.580860267499961 ], - [ 0.580860267386752, -0.468927408654519, -0.665363385815563 ], - [ -0.468927408375699, 0.665363385651356, -0.580860267799938 ], - [ 0.665363385651819, -0.580860267791212, -0.46892740838585 ], - [ -0.665363385751734, 0.580860267548017, -0.468927408545326 ], - [ -0.580125367305304, -0.779099597924434, 0.237609710918707 ], - [ -0.779099598053518, 0.237609710909934, -0.580125367135539 ], - [ 0.580125367186808, 0.779099597977732, 0.237609711033258 ], - [ 
0.237609710695932, -0.58012536727611, -0.779099598014114 ], - [ 0.779099598064732, 0.23760971114732, 0.58012536702325 ], - [ 0.237609710819285, 0.580125367047426, 0.779099598146774 ], - [ 0.779099598170224, -0.237609710849642, -0.580125367003499 ], - [ -0.237609710811802, 0.580125367157256, -0.779099598067276 ], - [ -0.779099598074961, -0.237609711045128, 0.580125367051369 ], - [ -0.237609710609253, -0.580125367022359, 0.779099598229495 ], - [ -0.580125367090094, 0.779099598151966, -0.237609710698086 ], - [ 0.580125367218411, -0.779099597966716, -0.237609710992215 ], - [ 0.9586680253602, 0.101113605900539, -0.265954236389956 ], - [ 0.101113605889893, -0.265954236477199, 0.95866802533712 ], - [ -0.95866802532641, -0.101113606095432, -0.26595423643766 ], - [ -0.265954236634179, 0.958668025294555, 0.101113605880558 ], - [ -0.101113606003171, -0.265954236656317, -0.958668025275482 ], - [ -0.265954236715455, -0.958668025246162, -0.101113606125602 ], - [ -0.101113605825438, 0.265954236414664, 0.958668025361267 ], - [ 0.265954236286739, -0.958668025393583, 0.101113605855522 ], - [ 0.101113605802444, 0.265954236260664, -0.958668025406415 ], - [ 0.265954236515854, 0.958668025322577, -0.101113605926106 ], - [ 0.9586680254495, -0.101113605909101, 0.265954236064808 ], - [ -0.9586680254786, 0.101113605789497, 0.265954236005386 ], - [ -0.784431814417085, 0.284319025007229, 0.551207239202516 ], - [ 0.284319024822848, 0.551207239320709, -0.784431814400862 ], - [ 0.784431814443422, -0.284319024888131, 0.551207239226467 ], - [ 0.551207239434677, -0.784431814291888, 0.284319024902556 ], - [ -0.284319024640161, 0.551207239347504, 0.784431814448249 ], - [ 0.551207239408357, 0.784431814400998, -0.284319024652546 ], - [ -0.28431902471494, -0.551207239160137, -0.784431814552804 ], - [ -0.551207239417649, 0.784431814426743, 0.284319024563503 ], - [ 0.284319024477106, -0.551207239394067, 0.784431814474629 ], - [ -0.551207239227164, -0.784431814510832, -0.284319024700797 ], - [ -0.7844318146549, -0.284319024757729, -0.551207238992772 ], - [ 0.784431814542139, 0.284319024689884, -0.55120723918824 ], - [ 0.166663878535118, 0.97946877886665, 0.113419851953285 ], - [ 0.979468778892362, 0.113419852011248, 0.166663878344564 ], - [ -0.166663878322335, -0.979468778877222, 0.113419852174659 ], - [ 0.113419851852603, 0.166663878465092, 0.979468778890224 ], - [ -0.979468778908051, 0.113419852233229, -0.166663878101297 ], - [ 0.113419852023532, -0.166663878213165, -0.979468778913298 ], - [ -0.979468778891418, -0.113419852088755, 0.166663878297368 ], - [ -0.113419851942299, -0.166663878383785, 0.979468778893673 ], - [ 0.979468778887792, -0.113419852252651, -0.166663878207142 ], - [ -0.113419851887333, 0.166663878420061, -0.979468778893865 ], - [ 0.166663878513312, -0.97946877885884, -0.113419852052775 ], - [ -0.166663878525992, 0.979468778852403, -0.113419852089727 ], - [ 0.90354263539087, 0.099002690679599, 0.416904273507865 ], - [ 0.09900269051118, 0.416904273753692, 0.903542635295897 ], - [ -0.903542635383533, -0.099002690647923, 0.416904273531288 ], - [ 0.41690427395825, 0.903542635193768, 0.09900269058185 ], - [ -0.099002690414933, 0.416904273699732, -0.903542635331341 ], - [ 0.416904273843964, -0.903542635237517, -0.099002690663845 ], - [ -0.099002690464192, -0.416904273937254, 0.903542635216348 ], - [ -0.416904274206036, -0.903542635110147, 0.099002690301575 ], - [ 0.099002690128044, -0.41690427406438, -0.903542635194523 ], - [ -0.416904274113744, 0.903542635131386, -0.099002690496392 ], - [ 0.903542635279275, 
-0.099002690467102, -0.416904273800183 ], - [ -0.903542635234399, 0.099002690245829, -0.416904273949988 ], - [ 0.278762404536092, 0.349312185537063, -0.894579520698175 ], - [ 0.349312185586056, -0.894579520608515, 0.278762404762431 ], - [ -0.278762404540525, -0.349312185503473, -0.89457952070991 ], - [ -0.894579520734144, 0.278762404727917, 0.349312185291866 ], - [ -0.349312185466701, -0.894579520677723, -0.278762404689896 ], - [ -0.894579520788864, -0.278762404658677, -0.349312185206984 ], - [ -0.349312185551041, 0.894579520682798, 0.278762404567923 ], - [ 0.894579520785219, -0.278762404680469, 0.349312185198929 ], - [ 0.349312185549623, 0.89457952067923, -0.278762404581149 ], - [ 0.894579520781805, 0.278762404555908, -0.349312185307075 ], - [ 0.27876240443795, -0.3493121855065, 0.894579520740692 ], - [ -0.278762404443259, 0.349312185428787, 0.894579520769382 ], - [ 0.555896230179415, -0.676833211736671, 0.48257246581476 ], - [ -0.676833211681567, 0.482572466040116, 0.555896230050876 ], - [ -0.555896230314892, 0.676833211522987, 0.482572465958401 ], - [ 0.482572465910283, 0.555896230164672, -0.676833211680673 ], - [ 0.676833211457692, 0.482572466092895, -0.555896230277639 ], - [ 0.482572465902981, -0.555896230367909, 0.676833211518957 ], - [ 0.676833211635592, -0.482572466071981, 0.555896230079191 ], - [ -0.482572466150586, -0.555896230230084, -0.676833211455616 ], - [ -0.676833211438286, -0.482572466327737, -0.5558962300974 ], - [ -0.482572465972373, 0.55589623026777, 0.676833211551727 ], - [ 0.555896230192691, 0.676833211589453, -0.482572466005949 ], - [ -0.555896230194338, -0.676833211455537, -0.482572466191875 ], - ]), + # degree 1 + np.array( + [ + [1, 0, 0], + [-1, 0, 0], + ] + ), + # degree 2 + np.array( + [ + [0.577350269189626, 0.577350269189626, 0.577350269189626], + [0.577350269189626, -0.577350269189626, -0.577350269189626], + [-0.577350269189626, 0.577350269189626, -0.577350269189626], + [-0.577350269189626, -0.577350269189626, 0.577350269189626], + ] + ), + # degree 3 + np.array( + [ + [1, 0, 0], + [-1, 0, 0], + [0, 1, 0], + [0, -1, 0], + [0, 0, 1], + [0, 0, -1], + ] + ), + # degree 4 + np.array( + [ + [0.850650808352, 0, -0.525731112119], + [0.525731112119, -0.850650808352, 0], + [0, -0.525731112119, 0.850650808352], + [0.850650808352, 0, 0.525731112119], + [-0.525731112119, -0.850650808352, 0], + [0, 0.525731112119, -0.850650808352], + [-0.850650808352, 0, -0.525731112119], + [-0.525731112119, 0.850650808352, 0], + [0, 0.525731112119, 0.850650808352], + [-0.850650808352, 0, 0.525731112119], + [0.525731112119, 0.850650808352, 0], + [0, -0.525731112119, -0.850650808352], + ] + ), + # degree 5 + np.array( + [ + [0.850650808352, 0, -0.525731112119], + [0.525731112119, -0.850650808352, 0], + [0, -0.525731112119, 0.850650808352], + [0.850650808352, 0, 0.525731112119], + [-0.525731112119, -0.850650808352, 0], + [0, 0.525731112119, -0.850650808352], + [-0.850650808352, 0, -0.525731112119], + [-0.525731112119, 0.850650808352, 0], + [0, 0.525731112119, 0.850650808352], + [-0.850650808352, 0, 0.525731112119], + [0.525731112119, 0.850650808352, 0], + [0, -0.525731112119, -0.850650808352], + ] + ), + # degree 6 + np.array( + [ + [0.866246818107821, 0.422518653761112, 0.266635401516705], + [0.866246818107821, -0.422518653761112, -0.266635401516705], + [0.866246818107821, 0.266635401516705, -0.422518653761112], + [0.866246818107821, -0.266635401516705, 0.422518653761112], + [-0.866246818107821, 0.422518653761112, -0.266635401516705], + [-0.866246818107821, -0.422518653761112, 
0.266635401516705], + [-0.866246818107821, 0.266635401516705, 0.422518653761112], + [-0.866246818107821, -0.266635401516705, -0.422518653761112], + [0.266635401516705, 0.866246818107821, 0.422518653761112], + [-0.266635401516705, 0.866246818107821, -0.422518653761112], + [-0.422518653761112, 0.866246818107821, 0.266635401516705], + [0.422518653761112, 0.866246818107821, -0.266635401516705], + [-0.266635401516705, -0.866246818107821, 0.422518653761112], + [0.266635401516705, -0.866246818107821, -0.422518653761112], + [0.422518653761112, -0.866246818107821, 0.266635401516705], + [-0.422518653761112, -0.866246818107821, -0.266635401516705], + [0.422518653761112, 0.266635401516705, 0.866246818107821], + [-0.422518653761112, -0.266635401516705, 0.866246818107821], + [0.266635401516705, -0.422518653761112, 0.866246818107821], + [-0.266635401516705, 0.422518653761112, 0.866246818107821], + [0.422518653761112, -0.266635401516705, -0.866246818107821], + [-0.422518653761112, 0.266635401516705, -0.866246818107821], + [0.266635401516705, 0.422518653761112, -0.866246818107821], + [-0.266635401516705, -0.422518653761112, -0.866246818107821], + ] + ), + # degree 7 + np.array( + [ + [0.866246818107821, 0.422518653761112, 0.266635401516705], + [0.866246818107821, -0.422518653761112, -0.266635401516705], + [0.866246818107821, 0.266635401516705, -0.422518653761112], + [0.866246818107821, -0.266635401516705, 0.422518653761112], + [-0.866246818107821, 0.422518653761112, -0.266635401516705], + [-0.866246818107821, -0.422518653761112, 0.266635401516705], + [-0.866246818107821, 0.266635401516705, 0.422518653761112], + [-0.866246818107821, -0.266635401516705, -0.422518653761112], + [0.266635401516705, 0.866246818107821, 0.422518653761112], + [-0.266635401516705, 0.866246818107821, -0.422518653761112], + [-0.422518653761112, 0.866246818107821, 0.266635401516705], + [0.422518653761112, 0.866246818107821, -0.266635401516705], + [-0.266635401516705, -0.866246818107821, 0.422518653761112], + [0.266635401516705, -0.866246818107821, -0.422518653761112], + [0.422518653761112, -0.866246818107821, 0.266635401516705], + [-0.422518653761112, -0.866246818107821, -0.266635401516705], + [0.422518653761112, 0.266635401516705, 0.866246818107821], + [-0.422518653761112, -0.266635401516705, 0.866246818107821], + [0.266635401516705, -0.422518653761112, 0.866246818107821], + [-0.266635401516705, 0.422518653761112, 0.866246818107821], + [0.422518653761112, -0.266635401516705, -0.866246818107821], + [-0.422518653761112, 0.266635401516705, -0.866246818107821], + [0.266635401516705, 0.422518653761112, -0.866246818107821], + [-0.266635401516705, -0.422518653761112, -0.866246818107821], + ] + ), + # degree 8 + np.array( + [ + [0.507475446410817, -0.306200013239571, 0.805425492011663], + [-0.306200013239569, 0.805425492011663, 0.507475446410817], + [-0.507475446410817, 0.30620001323957, 0.805425492011663], + [0.805425492011663, 0.507475446410817, -0.306200013239569], + [0.306200013239569, 0.805425492011664, -0.507475446410817], + [0.805425492011663, -0.507475446410817, 0.306200013239569], + [0.306200013239569, -0.805425492011663, 0.507475446410816], + [-0.805425492011663, -0.507475446410817, -0.306200013239569], + [-0.30620001323957, -0.805425492011664, -0.507475446410816], + [-0.805425492011663, 0.507475446410818, 0.306200013239569], + [0.507475446410817, 0.30620001323957, -0.805425492011663], + [-0.507475446410817, -0.30620001323957, -0.805425492011663], + [0.626363670265271, -0.243527775409194, -0.74051520928072], + [-0.243527775409195, 
-0.74051520928072, 0.626363670265271], + [-0.626363670265271, 0.243527775409194, -0.74051520928072], + [-0.74051520928072, 0.62636367026527, -0.243527775409195], + [0.243527775409195, -0.740515209280719, -0.626363670265271], + [-0.74051520928072, -0.62636367026527, 0.243527775409195], + [0.243527775409195, 0.740515209280719, 0.626363670265271], + [0.74051520928072, -0.62636367026527, -0.243527775409195], + [-0.243527775409195, 0.74051520928072, -0.626363670265271], + [0.74051520928072, 0.62636367026527, 0.243527775409195], + [0.626363670265271, 0.243527775409194, 0.74051520928072], + [-0.626363670265271, -0.243527775409194, 0.74051520928072], + [-0.286248723426035, 0.957120327092458, -0.044523564585421], + [0.957120327092458, -0.04452356458542, -0.286248723426035], + [0.286248723426035, -0.957120327092458, -0.044523564585421], + [-0.04452356458542, -0.286248723426035, 0.957120327092458], + [-0.957120327092458, -0.044523564585419, 0.286248723426035], + [-0.044523564585421, 0.286248723426034, -0.957120327092458], + [-0.957120327092458, 0.04452356458542, -0.286248723426034], + [0.044523564585421, 0.286248723426034, 0.957120327092458], + [0.957120327092458, 0.04452356458542, 0.286248723426034], + [0.044523564585421, -0.286248723426034, -0.957120327092458], + [-0.286248723426034, -0.957120327092458, 0.044523564585421], + [0.286248723426035, 0.957120327092458, 0.044523564585421], + ] + ), + # degree 9 + np.array( + [ + [0.93336469319931, 0.353542188921472, -0.0619537742318597], + [0.93336469319931, -0.353542188921472, 0.0619537742318597], + [0.93336469319931, -0.0619537742318597, -0.353542188921472], + [0.93336469319931, 0.0619537742318597, 0.353542188921472], + [-0.93336469319931, 0.353542188921472, 0.0619537742318597], + [-0.93336469319931, -0.353542188921472, -0.0619537742318597], + [-0.93336469319931, -0.0619537742318597, 0.353542188921472], + [-0.93336469319931, 0.0619537742318597, -0.353542188921472], + [-0.0619537742318597, 0.93336469319931, 0.353542188921472], + [0.0619537742318597, 0.93336469319931, -0.353542188921472], + [-0.353542188921472, 0.93336469319931, -0.0619537742318597], + [0.353542188921472, 0.93336469319931, 0.0619537742318597], + [0.0619537742318597, -0.93336469319931, 0.353542188921472], + [-0.0619537742318597, -0.93336469319931, -0.353542188921472], + [0.353542188921472, -0.93336469319931, -0.0619537742318597], + [-0.353542188921472, -0.93336469319931, 0.0619537742318597], + [0.353542188921472, -0.0619537742318597, 0.93336469319931], + [-0.353542188921472, 0.0619537742318597, 0.93336469319931], + [-0.0619537742318597, -0.353542188921472, 0.93336469319931], + [0.0619537742318597, 0.353542188921472, 0.93336469319931], + [0.353542188921472, 0.0619537742318597, -0.93336469319931], + [-0.353542188921472, -0.0619537742318597, -0.93336469319931], + [-0.0619537742318597, 0.353542188921472, -0.93336469319931], + [0.0619537742318597, -0.353542188921472, -0.93336469319931], + [0.70684169771255, 0.639740098619792, 0.301840057965769], + [0.70684169771255, -0.639740098619792, -0.301840057965769], + [0.70684169771255, 0.301840057965769, -0.639740098619792], + [0.70684169771255, -0.301840057965769, 0.639740098619792], + [-0.70684169771255, 0.639740098619792, -0.301840057965769], + [-0.70684169771255, -0.639740098619792, 0.301840057965769], + [-0.70684169771255, 0.301840057965769, 0.639740098619792], + [-0.70684169771255, -0.301840057965769, -0.639740098619792], + [0.301840057965769, 0.70684169771255, 0.639740098619792], + [-0.301840057965769, 0.70684169771255, -0.639740098619792], + 
[-0.639740098619792, 0.70684169771255, 0.301840057965769], + [0.639740098619792, 0.70684169771255, -0.301840057965769], + [-0.301840057965769, -0.70684169771255, 0.639740098619792], + [0.301840057965769, -0.70684169771255, -0.639740098619792], + [0.639740098619792, -0.70684169771255, 0.301840057965769], + [-0.639740098619792, -0.70684169771255, -0.301840057965769], + [0.639740098619792, 0.301840057965769, 0.70684169771255], + [-0.639740098619792, -0.301840057965769, 0.70684169771255], + [0.301840057965769, -0.639740098619792, 0.70684169771255], + [-0.301840057965769, 0.639740098619792, 0.70684169771255], + [0.639740098619792, -0.301840057965769, -0.70684169771255], + [-0.639740098619792, 0.301840057965769, -0.70684169771255], + [0.301840057965769, 0.639740098619792, -0.70684169771255], + [-0.301840057965769, -0.639740098619792, -0.70684169771255], + ] + ), + # degree 10 + np.array( + [ + [-0.753828667197017, 0.54595190806126, -0.365621190026287], + [0.545951908061258, -0.36562119002629, -0.753828667197017], + [0.753828667197016, -0.545951908061261, -0.365621190026288], + [-0.365621190026289, -0.753828667197017, 0.545951908061259], + [-0.545951908061258, -0.365621190026288, 0.753828667197018], + [-0.365621190026289, 0.753828667197017, -0.545951908061259], + [-0.545951908061258, 0.365621190026289, -0.753828667197017], + [0.365621190026287, 0.753828667197017, 0.54595190806126], + [0.545951908061259, 0.365621190026289, 0.753828667197017], + [0.365621190026287, -0.753828667197018, -0.545951908061259], + [-0.753828667197017, -0.545951908061261, 0.365621190026288], + [0.753828667197016, 0.545951908061261, 0.365621190026287], + [0.70018101936373, -0.713151065847793, 0.034089549761256], + [-0.713151065847794, 0.034089549761254, 0.700181019363729], + [-0.70018101936373, 0.713151065847793, 0.034089549761256], + [0.034089549761255, 0.70018101936373, -0.713151065847793], + [0.713151065847793, 0.034089549761254, -0.70018101936373], + [0.034089549761257, -0.700181019363729, 0.713151065847794], + [0.713151065847794, -0.034089549761255, 0.700181019363728], + [-0.034089549761256, -0.700181019363729, -0.713151065847794], + [-0.713151065847794, -0.034089549761254, -0.700181019363729], + [-0.034089549761257, 0.700181019363729, 0.713151065847794], + [0.70018101936373, 0.713151065847793, -0.034089549761257], + [-0.700181019363729, -0.713151065847794, -0.034089549761257], + [0.276230218261792, 0.077050720725736, -0.957997939953259], + [0.077050720725735, -0.957997939953258, 0.276230218261793], + [-0.276230218261792, -0.077050720725734, -0.957997939953259], + [-0.957997939953259, 0.276230218261791, 0.077050720725738], + [-0.077050720725735, -0.957997939953259, -0.276230218261792], + [-0.957997939953258, -0.276230218261793, -0.077050720725736], + [-0.077050720725736, 0.957997939953258, 0.276230218261794], + [0.957997939953259, -0.27623021826179, 0.077050720725737], + [0.077050720725734, 0.957997939953259, -0.276230218261792], + [0.957997939953258, 0.276230218261793, -0.077050720725738], + [0.276230218261793, -0.077050720725736, 0.957997939953258], + [-0.276230218261791, 0.077050720725735, 0.957997939953259], + [0.451819102555243, -0.783355937521819, 0.42686411621907], + [-0.783355937521818, 0.426864116219071, 0.451819102555243], + [-0.451819102555243, 0.783355937521819, 0.42686411621907], + [0.426864116219071, 0.451819102555242, -0.783355937521819], + [0.783355937521818, 0.42686411621907, -0.451819102555244], + [0.426864116219072, -0.451819102555242, 0.783355937521818], + [0.783355937521819, -0.42686411621907, 
0.451819102555242], + [-0.426864116219072, -0.451819102555241, -0.783355937521819], + [-0.783355937521818, -0.42686411621907, -0.451819102555243], + [-0.426864116219072, 0.451819102555241, 0.783355937521819], + [0.451819102555243, 0.783355937521818, -0.426864116219071], + [-0.451819102555242, -0.783355937521819, -0.426864116219071], + [-0.33858435995926, -0.933210037239527, 0.120331448866784], + [-0.933210037239526, 0.120331448866787, -0.33858435995926], + [0.338584359959261, 0.933210037239526, 0.120331448866786], + [0.120331448866785, -0.338584359959261, -0.933210037239526], + [0.933210037239526, 0.120331448866789, 0.33858435995926], + [0.120331448866785, 0.338584359959261, 0.933210037239526], + [0.933210037239526, -0.120331448866787, -0.338584359959262], + [-0.120331448866784, 0.338584359959262, -0.933210037239526], + [-0.933210037239526, -0.120331448866787, 0.338584359959261], + [-0.120331448866784, -0.338584359959262, 0.933210037239526], + [-0.338584359959262, 0.933210037239526, -0.120331448866784], + [0.338584359959261, -0.933210037239527, -0.120331448866783], + ] + ), + # degree 11 + np.array( + [ + [-0.674940520480437, 0.725629052064501, 0.133857284499464], + [0.09672433446143, -0.910327382989987, -0.402428203412229], + [0.906960315916358, 0.135127022135053, 0.398953221871704], + [-0.132758704758026, -0.307658524060733, 0.942189661842955], + [-0.226055801127587, -0.958831174708704, -0.171876563798827], + [0.275738264019853, -0.180692733507538, -0.944096682449892], + [0.830881650513589, 0.333278644528177, -0.445601871563928], + [-0.616471328612787, -0.2675443371664, 0.740528951931372], + [0.430277293287436, -0.892644471615357, -0.13434023290057], + [-0.690987198523076, 0.175109339053207, 0.701336874015319], + [0.810517041535507, -0.381449337547215, 0.444475565431127], + [-0.086734443854626, -0.706008517835924, -0.702872043114784], + [0.871320852056737, 0.46045780600396, 0.169642511361809], + [-0.600735266749549, 0.303266118552509, -0.739693720820614], + [-0.899100947083419, -0.418081246639828, 0.12971336924846], + [0.896927087079571, -0.188066327344843, -0.400191025613991], + [0.150494960966991, 0.903072153139254, 0.402258564791324], + [0.248601716402621, -0.224283612281953, 0.94228129975259], + [0.842584674708423, -0.510756382085546, -0.1708185707275], + [0.260034500418337, 0.209356489957684, -0.942630319215749], + [-0.058802461572434, 0.894595213188746, -0.442991732488095], + [0.061611769180132, -0.671290108790159, 0.738629528071408], + [0.982337536097614, 0.133784014710179, -0.130823555148513], + [-0.382277582532576, -0.605243847900137, 0.698243320392029], + [0.611839278216357, 0.651571608497249, 0.448449703569971], + [0.646865348569582, -0.298464129297652, -0.701772316597447], + [-0.169201016881282, 0.970430912746818, 0.172147783812972], + [-0.471725450862325, -0.47529570366279, -0.742676977621112], + [0.119369755955723, -0.984692604411347, 0.127009197228668], + [0.457289212231729, 0.796155990558714, -0.396260287026038], + [-0.813631436350979, 0.420942272793499, 0.40101307803722], + [0.287154555386871, 0.16417332397066, 0.943710432821951], + [0.746667577045155, 0.644035989066398, -0.166448713352744], + [-0.115779644740906, 0.314952464646105, -0.942019118105898], + [-0.867579212111466, 0.221916315040665, -0.445038717226738], + [0.655140022433912, -0.151162631680508, 0.740230646345257], + [0.176736512358047, 0.976002671721061, -0.12721238144483], + [0.455284607701078, -0.55278635410423, 0.697956426080188], + [-0.432023930219742, 0.781838026058859, 0.449538234998843], + 
[0.485961267092557, 0.525163287076294, -0.698602296584415], + [-0.975758639968897, 0.138431863354196, 0.1695042646494], + [0.308602378401872, -0.593188152818631, -0.743567338847214], + [0.972979693579006, -0.191167383224118, 0.129481842256537], + [-0.614624689780931, 0.68217777986423, -0.39606813475866], + [-0.653028964396532, -0.644975511259979, 0.396937981974668], + [-0.070378900922493, 0.320878001965403, 0.944502047726543], + [-0.381252925250545, 0.909662131759037, -0.164805986030565], + [-0.332341796304234, -0.009834857390798, -0.943107738283054], + [-0.477746621168896, -0.755138676192789, -0.448913962446598], + [0.343877558432071, 0.574039599276676, 0.743119615720828], + [-0.873212544495548, 0.47009394139203, -0.128497231106812], + [0.664216892966437, 0.259987346974329, 0.700872669256879], + [-0.878489109322641, -0.170673846340671, 0.446236846278739], + [-0.347082716608212, 0.626648635925969, -0.697742842975825], + [-0.433716795977713, -0.885744934523588, 0.165365207503367], + [0.661861683362982, 0.112512128799614, -0.741134355544863], + [0.482068945127674, 0.865869532174741, 0.133714192945202], + [-0.8374660393934, -0.372486946227971, -0.399880116725617], + [0.410355219266256, -0.82161905066793, 0.39566492086167], + [-0.329899568015879, 0.02926988290883, 0.943562159572669], + [-0.982429034616553, -0.080964254903198, -0.168160582094488], + [-0.090370421487683, -0.316160436207578, -0.944391743662116], + [0.571959920493404, -0.686312971502271, -0.449262010965652], + [-0.442021476996821, 0.502111749619808, 0.743303978710785], + [-0.716515724344093, -0.684793506171761, -0.13290248557738], + [-0.044218043628816, 0.709851625611568, 0.702961900983442], + [-0.110556556362806, -0.889624975730714, 0.443107944412334], + [-0.701028131184281, -0.134257385503649, -0.700381691455451], + [0.707841110014082, -0.686721956709281, 0.165450648676302], + [0.099860111408803, 0.666551337869757, -0.738740327945793], + ] + ), + # degree 12 + np.array( + [ + [-0.893804977761136, -0.426862191124497, 0.137482113446834], + [-0.426862191241092, 0.137482113445288, -0.893804977705691], + [0.893804977770157, 0.426862191128157, 0.137482113376823], + [0.1374821132964, -0.893804977739491, -0.426862191218271], + [0.426862191272731, 0.137482113377345, 0.893804977701032], + [0.137482113529033, 0.893804977707775, 0.426862191209756], + [0.426862191185983, -0.137482113474993, -0.893804977727441], + [-0.137482113324291, 0.893804977725279, -0.426862191239047], + [-0.426862191217414, -0.137482113347288, 0.893804977732073], + [-0.137482113501071, -0.893804977693655, 0.426862191248328], + [-0.893804977672548, 0.426862191328071, -0.137482113390703], + [0.893804977663553, -0.42686219133326, -0.137482113433065], + [0.983086600385574, 0.022300380107522, -0.181778516853323], + [0.022300380232394, -0.181778516808726, 0.983086600390988], + [-0.983086600396613, -0.022300380113323, -0.181778516792915], + [-0.181778516710471, 0.983086600409631, 0.022300380211455], + [-0.022300380272854, -0.181778516836686, -0.9830866003849], + [-0.18177851693601, -0.983086600368179, -0.022300380200376], + [-0.0223003801708, 0.181778516841875, 0.983086600386256], + [0.181778516710979, -0.983086600409044, 0.022300380233212], + [0.022300380212558, 0.181778516804081, -0.983086600392297], + [0.181778516934384, 0.983086600367503, -0.022300380243431], + [0.983086600391629, -0.022300380332372, 0.181778516792996], + [-0.98308660038057, 0.022300380337865, 0.181778516852128], + [-0.897951986971875, 0.376695603035365, 0.227558018419664], + [0.376695602927528, 
0.227558018339206, -0.897951987037503], + [0.897951986986053, -0.376695603028569, 0.227558018374966], + [0.227558018305554, -0.897951987041904, 0.376695602937366], + [-0.376695602875261, 0.227558018455254, 0.89795198703002], + [0.227558018486567, 0.89795198699048, -0.3766956029506], + [-0.376695602982511, -0.22755801836891, -0.89795198700691], + [-0.227558018280939, 0.897951987054767, 0.376695602921573], + [0.376695602931437, -0.22755801842558, 0.897951987013974], + [-0.227558018511349, -0.897951987002348, -0.376695602907339], + [-0.897951987072194, -0.376695602830637, -0.227558018362707], + [0.897951987057819, 0.376695602823051, -0.227558018431989], + [-0.171330151245221, 0.459786194953055, -0.871345301361568], + [0.459786194843117, -0.871345301414649, -0.171330151270292], + [0.171330151191219, -0.459786194982334, -0.871345301356736], + [-0.871345301364754, -0.171330151162981, 0.459786194977662], + [-0.459786195042432, -0.871345301303738, 0.171330151299472], + [-0.871345301353407, 0.171330151362727, -0.459786194924734], + [-0.459786194855202, 0.87134530140841, -0.171330151269592], + [0.871345301392835, 0.171330151178183, 0.45978619491878], + [0.459786195054412, 0.871345301309038, 0.171330151240368], + [0.871345301325486, -0.171330151377355, -0.459786194972196], + [-0.17133015129661, -0.459786194913003, 0.871345301372597], + [0.171330151350736, 0.459786194942983, 0.871345301346135], + [-0.397191702297223, -0.548095590649226, -0.736091010091219], + [-0.548095590778902, -0.736091010056557, -0.397191702182515], + [0.397191702250221, 0.548095590625205, -0.736091010134467], + [-0.736091010174764, -0.397191702137083, -0.548095590653075], + [0.548095590610212, -0.736091010169131, 0.397191702206669], + [-0.736091010049194, 0.397191702305889, 0.548095590699385], + [0.548095590752529, 0.736091010044117, -0.397191702241962], + [0.736091010139925, 0.397191702119602, -0.548095590712531], + [-0.548095590584386, 0.736091010182625, 0.3971917022173], + [0.736091010083782, -0.39719170228798, 0.548095590665912], + [-0.39719170212526, 0.548095590740419, 0.736091010116106], + [0.397191702171386, -0.548095590716295, 0.736091010109179], + [0.379474725534956, 0.69627727809449, 0.609259291836815], + [0.696277278210441, 0.609259291787114, 0.379474725402001], + [-0.379474725495576, -0.696277278074161, 0.609259291884576], + [0.609259291925953, 0.379474725376213, 0.696277278103008], + [-0.696277278071056, 0.609259291933888, -0.379474725422102], + [0.60925929179591, -0.379474725515542, -0.696277278140864], + [-0.696277278185906, -0.609259291774849, 0.379474725466713], + [-0.609259291882878, -0.379474725353089, 0.696277278153303], + [0.696277278046548, -0.609259291946589, -0.379474725446676], + [-0.609259291838737, 0.379474725493095, -0.696277278115623], + [0.37947472533629, -0.696277278181595, -0.609259291861008], + [-0.379474725375237, 0.696277278161216, -0.609259291860039], + [-0.678701446470328, 0.729764213479081, 0.082513873284097], + [0.729764213389772, 0.082513873179234, -0.678701446579104], + [0.678701446474772, -0.72976421347722, 0.082513873263995], + [0.082513873217671, -0.678701446552547, 0.729764213410125], + [-0.729764213370974, 0.082513873368402, 0.678701446576318], + [0.082513873326892, 0.678701446534692, -0.729764213414381], + [-0.729764213431284, -0.082513873201736, -0.678701446531733], + [-0.082513873171694, 0.678701446577399, 0.72976421339221], + [0.729764213412797, -0.08251387334668, 0.67870144653399], + [-0.082513873373655, -0.678701446558336, -0.729764213387104], + [-0.678701446641541, 
-0.729764213324827, -0.082513873240061], + [0.678701446637016, 0.729764213321344, -0.082513873308075], + ] + ), + # degree 13 + np.array( + [ + [0.276790129286922, -0.235256466916603, 0.931687511509759], + [0.198886780634501, 0.360548603139528, 0.911289609983006], + [-0.258871339062373, 0.204230077441409, 0.944073993540935], + [-0.20028291392731, -0.228346161950354, 0.952756414153864], + [-0.883545166667525, -0.414277696639041, -0.218453492821483], + [0.397750057908559, -0.901619535998689, -0.16993264471327], + [0.876539487069282, 0.434392104192327, -0.207321073274483], + [-0.411742357517625, 0.88489597883979, -0.217778184534166], + [0.501114093867204, 0.377868932752059, 0.778524074507957], + [-0.394238847790386, 0.473687133880952, 0.787525383774109], + [-0.495364292002136, -0.406429808740612, 0.767742814213388], + [0.370186583802172, -0.559306270968252, 0.741713144300723], + [0.411742357517961, -0.884895978839253, 0.217778184535715], + [0.883545166668397, 0.414277696639157, 0.218453492817737], + [-0.39775005791059, 0.901619535997218, 0.169932644716324], + [-0.876539487069878, -0.434392104191278, 0.20732107327416], + [-0.69101430131565, -0.702815226887987, -0.168967429499392], + [0.684400344460127, -0.714441044251654, -0.145499700314004], + [0.660710489482765, 0.731357715035063, -0.169048932993191], + [-0.773611287956309, 0.615222357857778, -0.151746583284428], + [0.683629784686022, -0.21996733132084, -0.695891292258878], + [0.256574099503526, 0.681472791071418, -0.685393730999406], + [-0.644474509637892, 0.354062227985534, -0.677711254990588], + [-0.220535080416141, -0.731547754140859, -0.645137320046912], + [0.394238847792041, -0.473687133882522, -0.787525383772336], + [0.495364292000968, 0.406429808741285, -0.767742814213785], + [-0.370186583802439, 0.559306270970003, -0.74171314429927], + [-0.50111409386464, -0.377868932752239, -0.77852407450952], + [-0.488574873968534, -0.006884557978444, -0.872494811095214], + [0.055542048727444, -0.584131720249991, -0.809756268404849], + [0.526812107464791, 0.049707819617671, -0.848527039107984], + [0.004245864108125, 0.4886546223943, -0.872466980836902], + [-0.710317361514613, -0.479530914625401, 0.515266288291253], + [0.521404384476562, -0.728039165451723, 0.445080264016476], + [0.738099355388852, 0.407803273205931, 0.53749961110413], + [-0.496057991262554, 0.699670113703365, 0.514186932248264], + [-0.973220809307327, 0.194260751789571, -0.122898399685852], + [-0.376203572666605, -0.908865964003535, -0.180093118660339], + [0.914477900370762, -0.368657988534049, -0.166797653531193], + [0.28746218785413, 0.946817914340553, -0.144572914606861], + [-0.098900669929334, 0.99509705928004, 0.000707177308311], + [-0.986068201425202, -0.161328561779779, 0.04052896855503], + [0.098900669927371, -0.995097059280236, -0.000707177307522], + [0.98606820142538, 0.161328561777872, -0.040528968558297], + [0.815232440848265, 0.131832381174928, -0.563929331266187], + [-0.113644567080339, 0.787251605615581, -0.606069155978764], + [-0.76050444170531, -0.010569874890521, -0.649246695710724], + [0.179848227912241, -0.83248278540524, -0.52404868754798], + [-0.92768989180951, -0.047241289482188, -0.370350813692261], + [-0.062273759773745, -0.944434685686409, -0.322746190242513], + [0.939840260740896, -0.044569841802216, -0.33869427732427], + [-0.00824273878155, 0.93408705946015, -0.356950168239866], + [-0.287462187854123, -0.94681791433958, 0.144572914613243], + [0.973220809307003, -0.194260751794934, 0.122898399679932], + [0.376203572664097, 0.908865964003331, 
0.180093118666611], + [-0.914477900372906, 0.368657988531957, 0.16679765352406], + [-0.198886780630987, -0.360548603140065, -0.91128960998356], + [0.258871339064112, -0.204230077444254, -0.944073993539843], + [0.200282913924527, 0.228346161951352, -0.952756414154209], + [-0.276790129284401, 0.235256466920766, -0.931687511509456], + [0.496057991258595, -0.69967011370777, -0.514186932246089], + [0.710317361512836, 0.479530914628761, -0.515266288290576], + [-0.521404384476695, 0.728039165452453, -0.445080264015126], + [-0.738099355384499, -0.407803273209131, -0.537499611107679], + [-0.815232440849446, -0.131832381170712, 0.563929331265466], + [0.113644567080183, -0.787251605613717, 0.606069155981215], + [0.760504441709935, 0.010569874889864, 0.649246695705317], + [-0.179848227916839, 0.832482785402468, 0.524048687550806], + [0.644474509638734, -0.354062227985804, 0.677711254989647], + [0.220535080413518, 0.73154775414237, 0.645137320046095], + [-0.683629784685343, 0.219967331325312, 0.695891292258132], + [-0.256574099500943, -0.681472791069379, 0.6853937310024], + [0.00824273878347, -0.934087059459458, 0.356950168241634], + [0.927689891812602, 0.047241289479133, 0.370350813684907], + [0.062273759768788, 0.944434685686016, 0.322746190244617], + [-0.939840260741931, 0.04456984180196, 0.338694277321433], + [-0.684400344460716, 0.714441044251305, 0.145499700312953], + [-0.660710489482671, -0.731357715034246, 0.169048932997096], + [0.773611287955743, -0.615222357858877, 0.151746583282855], + [0.691014301313431, 0.702815226889319, 0.168967429502926], + [0.823586023578098, -0.394634588904438, 0.407393670798948], + [0.494068620358303, 0.708839608416629, 0.503430837272612], + [-0.75887513050105, 0.450605887274021, 0.470173234734822], + [-0.431499072601357, -0.787935048711447, 0.439279989706176], + [0.488574873974618, 0.006884557979146, 0.872494811091801], + [-0.055542048725634, 0.584131720247444, 0.80975626840681], + [-0.52681210746758, -0.049707819615904, 0.848527039106356], + [-0.004245864106237, -0.488654622389235, 0.872466980839748], + [-0.49406862035774, -0.708839608420214, -0.503430837268118], + [0.758875130496518, -0.450605887275878, -0.470173234740358], + [0.431499072601226, 0.787935048714215, -0.43927998970134], + [-0.823586023577754, 0.394634588903444, -0.407393670800605], + [-0.05223814787874, -0.056184830047506, -0.997052877624217], + [0.052238147881538, 0.05618483004769, 0.99705287762406], + ] + ), + # degree 14 + np.array( + [ + [-0.625520988160254, -0.7673610045544, 0.14099851793647], + [-0.76724274137005, 0.141111638293461, -0.625640536852518], + [0.625492928633992, 0.767336602497947, 0.141255565185161], + [0.141259978285753, -0.625497336538309, -0.767332196977417], + [0.767217177597722, 0.141142445065633, 0.625664936367606], + [0.140994104732436, 0.625522897885037, 0.76736025871308], + [0.767367121351956, -0.141003470846495, -0.625512367805189], + [-0.141107421738627, 0.625630007825311, -0.767252102531351], + [-0.767341557579575, -0.14125061251247, 0.625487968290521], + [-0.141146661279494, -0.625655569171042, 0.767224040795719], + [-0.625631916042134, 0.767247698300479, -0.141122907715456], + [0.625659975568208, -0.76722329624468, -0.141131175405856], + [0.557188048071509, -0.044753456478336, -0.829179478291342], + [-0.044855878114542, -0.829283214592494, 0.55702540354432], + [-0.557023176560509, 0.04489683751235, -0.829282493940292], + [-0.82927698135379, 0.557030331904205, -0.044909882380585], + [0.04500608513203, -0.829178759033645, -0.557168769645708], + [-0.829184990273806, 
-0.557180524667655, 0.044744997884746], + [0.044745112627007, 0.829186887034329, 0.557177692721376], + [0.829285903726696, -0.557022572534015, -0.044841315410966], + [-0.044895319644503, 0.829275086591665, -0.557034326619468], + [0.829176067900875, 0.557172765298515, 0.045006199906779], + [0.55701504778817, 0.04485436035495, 0.829290252501915], + [-0.55717991929916, -0.044997741388974, 0.829171719729799], + [-0.256065565410913, 0.860770382492113, 0.439891776275906], + [0.860817193749452, 0.439942893453515, -0.255820267854343], + [0.255978113844099, -0.860846435024418, 0.439793838677361], + [0.439780410927513, -0.2559784640674, 0.860853190792787], + [-0.86089686693789, 0.439742722239698, 0.255896312466096], + [0.439905203705212, 0.256058129693811, -0.860765732339981], + [-0.860766305814323, -0.439898638597135, -0.256067480432698], + [-0.439951565414546, 0.255829619022141, 0.860809982586329], + [0.860845979002674, -0.439786977094207, 0.255991435820162], + [-0.439734049218326, -0.255909284650278, -0.860897441039197], + [-0.255822182689152, -0.860816739802146, -0.439942668220034], + [0.255909634255987, 0.860892792334745, -0.43974294673258], + [-0.214847470746312, -0.032398468989078, 0.976110087808274], + [-0.032361689068532, 0.976149872834738, -0.214672184610297], + [0.214653391258715, 0.032229687143749, 0.976158372851326], + [0.976156890684175, -0.214662164023714, -0.032216146673044], + [0.032184871976157, 0.976118589466139, 0.214840948877335], + [0.976111569264229, 0.214838964338531, 0.032410241444204], + [0.032404387317597, -0.976112740167589, -0.214834527404447], + [-0.97615046971745, 0.214667748038907, -0.032373112644706], + [-0.032227570225248, -0.976155722133346, 0.214665763138194], + [-0.976117990230955, -0.214844548351237, 0.032179017872419], + [-0.214659241112501, 0.032359572226461, -0.976152789418913], + [0.21485332060012, -0.032190790383644, -0.976115671240647], + [-0.531657953418075, -0.827333953094149, -0.181268724894593], + [-0.827232187812406, -0.181173291587112, -0.531848799813059], + [0.531693969367769, 0.827365274479422, -0.181019958909332], + [-0.181013937184585, -0.531695771783126, -0.827365433658478], + [0.82726500032424, -0.181115392520565, 0.53181748168959], + [-0.181274746488222, 0.531662962384716, 0.827329414872902], + [0.827337912964399, 0.181265235803751, -0.531652980863198], + [0.181178425057189, 0.531838819184424, -0.827237480233043], + [-0.827370725476244, 0.181023448305968, 0.53168429898609], + [0.181110258615433, -0.531806009787378, 0.82727349901848], + [-0.531843826870477, 0.827237640803175, 0.181162991414264], + [0.531807810920692, -0.827268962188108, 0.181125692390543], + [-0.660052978431453, -0.64107030142389, -0.391610692264717], + [-0.640943278162024, -0.391490626360013, -0.660247532105318], + [0.660130816198753, 0.641137993287446, -0.391368597447617], + [-0.391366127194665, -0.660129990434437, -0.641140351415881], + [0.64101419265168, -0.391488664009925, 0.660179847292265], + [-0.391613162232706, 0.660059082674287, 0.641062507518011], + [0.641074532205542, 0.391604771858404, -0.660052381872206], + [0.391493582576226, 0.660240832413506, -0.64094837390819], + [-0.641145446695277, 0.391374518513393, 0.660120066685087], + [0.391485706851, -0.660169924653076, 0.641026217806203], + [-0.660246935065265, 0.640950733115446, 0.391479427883123], + [0.660169097297863, -0.641018424977353, 0.391499861829449], + [-0.887809544786451, -0.296234001309576, 0.352192601646022], + [-0.296066792023988, 0.352356402512996, -0.887800326801429], + [0.88773949759831, 
0.296173084548554, 0.352420329142482], + [0.352416311777685, -0.887743835667625, -0.296164861904896], + [0.296002975181616, 0.352256528867817, 0.887861237217634], + [0.352196618754376, 0.887807646454613, 0.296234914611202], + [0.29624330876316, -0.35220289417087, -0.887802356017778], + [-0.352357435214616, 0.887795037857324, -0.296081422255585], + [-0.296179491920463, -0.352410037210515, 0.887741445601714], + [-0.352255495317536, -0.887858848644387, 0.296011369549315], + [-0.887793137385775, 0.296073200109863, -0.352369132285204], + [0.887863184573985, -0.29601228334985, -0.352243798503465], + [-0.26223504413332, -0.963196832316083, -0.059030871962556], + [-0.963146753177879, -0.05898607663367, -0.262428989645347], + [0.262246759182379, 0.963207020345643, -0.058812186281487], + [-0.058802018270752, -0.262250202293796, -0.963206703695603], + [0.963157426232092, -0.058856981708733, 0.262418802676392], + [-0.059041039930677, 0.26223953025181, 0.9631949877243], + [0.963198910461706, 0.05903143538266, -0.262227284091577], + [0.058993521775316, 0.262416743924601, -0.963149633700058], + [-0.963209583515922, 0.058811622961071, 0.262237471059664], + [0.058849536426177, -0.26240607188442, 0.963161349671286], + [-0.262421229412009, 0.963149318669471, 0.058978710569357], + [0.262409514362814, -0.96315950669899, 0.058864347675232], + [-0.715507563967586, -0.551203770138786, 0.429212452859839], + [-0.551069607492995, 0.429343180584727, -0.715532473744488], + [0.715422202362423, 0.551129535145429, 0.429450006237378], + [0.429450819917231, -0.715428271411138, -0.551121022769127], + [0.5509918383738, 0.429319279234025, 0.715606701005125], + [0.429211638866897, 0.7155060331503, 0.551206391097486], + [0.551211869430264, -0.429219462039715, -0.715497119774448], + [-0.429341247317559, 0.715523561571074, -0.551082685436994], + [-0.551134100310698, -0.42944299777979, 0.715422892513669], + [-0.429321211466422, -0.715601323311163, 0.550997317108093], + [-0.715522029029489, 0.551074173985694, -0.429354726001132], + [0.71560739063481, -0.550999938995098, -0.429307733096245], + ] + ), + # degree 15 + np.array( + [ + [0.854403279867469, -0.505354134007206, 0.120881076242474], + [-0.50543491755569, 0.120816219805996, 0.85436466754382], + [-0.854386776665562, 0.505324765203946, 0.121120260611542], + [0.120833358636621, 0.854397789834015, -0.505374827397788], + [0.505397909754575, 0.121184507524897, -0.854334400543285], + [0.121167891781777, -0.854359592169892, 0.505359307095908], + [0.505550243990606, -0.121029099883223, 0.85426629793203], + [-0.120901230058257, -0.8542712308899, -0.505572503845152], + [-0.505512743080475, -0.120971801893292, -0.854296605243135], + [-0.121100221709937, 0.854233310670545, 0.505588950870808], + [0.854228086018077, 0.50562286374044, -0.120995440909188], + [-0.854244251915001, -0.505593339178042, -0.121004683582819], + [-0.264987898778375, 0.883813698575362, -0.385557725524417], + [0.883849543661418, -0.385514772323787, -0.264930829632268], + [0.26493557758679, -0.883717531264112, -0.385814028600849], + [-0.385585969828214, -0.26499188213212, 0.883800182323873], + [-0.883729220902574, -0.385857204128221, 0.264833687708496], + [-0.385785570894326, 0.264874341279828, -0.883748310675226], + [-0.88388533425828, 0.385579930632135, -0.264716514363662], + [0.385705565188833, 0.2647713629504, 0.883814088111154], + [0.883764779677347, 0.385791993289806, 0.26481003023928], + [0.385667159435638, -0.264653128949093, -0.883866258814251], + [-0.26465693400637, -0.883897033692944, 0.385594010730408], + 
[0.264708980625706, 0.88380076582133, 0.385778902883153], + [-0.973352164893031, 0.228026239253951, 0.024281624940474], + [0.228112345886926, 0.024344549147457, -0.973330416960638], + [0.973355442809202, -0.228032180463189, 0.024093705130315], + [0.02435678809852, -0.973342833359315, 0.228058053182924], + [-0.228119646411613, 0.024031592823588, 0.973336483168797], + [0.024019836271027, 0.973350319061891, -0.228061842156086], + [-0.228243300844141, -0.024101340294447, -0.97330576953791], + [-0.024325527828938, 0.973285859331229, 0.228304412400888], + [0.228251100231509, -0.024273911824481, 0.973299651930403], + [-0.024050630999231, -0.973293331950969, -0.228301680082122], + [-0.973284068418876, -0.228330589256741, -0.024150862752081], + [0.973280766897442, 0.228336792285628, -0.024225153791815], + [0.176494164360597, -0.643915961757687, 0.744460908403072], + [-0.643955474734765, 0.744429879896195, 0.176480878502065], + [-0.176394545795758, 0.643730246363376, 0.744645106161623], + [0.744482176984942, 0.176540768465681, -0.643878595094843], + [0.643722173544997, 0.744675565408302, -0.176295392935637], + [0.7446232602985, -0.176308754645389, 0.643779017410342], + [0.643979646991902, -0.744473128172097, 0.176210032886433], + [-0.744568815199564, -0.17638194509082, -0.643821938798528], + [-0.643745300418696, -0.744632317149074, -0.176393595252333], + [-0.744536607308663, 0.176149174255325, 0.643922905933989], + [0.17619570702358, 0.643971118562494, -0.744483895919737], + [-0.176295790346183, -0.643784799068587, -0.744621331143847], + [-0.538268091669357, -0.714443097207928, 0.447033021534844], + [-0.714408571342234, 0.447040121797618, -0.538308018420605], + [0.538328211219438, 0.714331668550684, 0.447138685768607], + [0.447120197809599, -0.538252620284606, -0.714400199795228], + [0.714269338389924, 0.44713116789972, 0.538417153263762], + [0.447050760030329, 0.538390645652783, 0.714339646547694], + [0.714335378036911, -0.44690804426239, -0.538514779424324], + [-0.44721697306067, 0.538430741435079, -0.714205373603506], + [-0.714197054202002, -0.447264479166707, 0.53840231560137], + [-0.446955147518652, -0.538568616287532, 0.714265316011294], + [-0.53855313587161, 0.7142734853459, -0.446960745451628], + [0.538492423492547, -0.714162749879013, -0.44721077416177], + [-0.854262559171519, -0.121196786481334, 0.505516388403308], + [-0.121135225917774, 0.505562840819898, -0.854243800693903], + [0.854330941426722, 0.121071937454801, 0.505430735592792], + [0.50560092757097, -0.854220500665318, -0.12114057240441], + [0.120980483865727, 0.505385380088754, 0.854370727562784], + [0.505347417325379, 0.854380006035553, 0.121073502837149], + [0.121020187472529, -0.505348640639146, -0.854386836057462], + [-0.505616684568734, 0.854250931602746, -0.120859894677939], + [-0.120865731027902, -0.505598482368904, 0.854260879175297], + [-0.50533076030876, -0.85441039802093, 0.120928468275617], + [-0.854368086396689, 0.120928425096259, -0.505402304061426], + [0.854300186149896, -0.120804448918389, -0.50554671106217], + [0.744463297304691, 0.643945879165515, 0.176374895243003], + [0.643874092586096, 0.176352654639277, 0.744530653565125], + [-0.744439438351756, -0.643989673955645, 0.176315689786883], + [0.176272538481235, 0.744499406704124, 0.643932159155441], + [-0.643927227994187, 0.176337348613599, -0.744488323972679], + [0.176417156092134, -0.744445359352701, -0.643955040336351], + [-0.643773276436742, -0.176537581494637, 0.74457400630557], + [-0.176186289075531, -0.744659935538757, 0.643770123526409], + 
[0.643827610309512, -0.176153056179147, -0.744618096074686], + [-0.176503857743247, 0.744606426388638, -0.643745025598251], + [0.744642563797528, -0.643711305756388, -0.176474380640507], + [-0.744666089545738, 0.643755051780831, -0.176215346628265], + [-0.228336071531986, 0.973285242484899, -0.024051511354805], + [0.97330645157172, -0.02400727784605, -0.228250305452786], + [0.228333198106367, -0.973279105669999, -0.024325564920999], + [-0.024087022235214, -0.228306334617004, 0.973291340212985], + [-0.973298936142401, -0.024368394387641, 0.228244084828147], + [-0.024288551979359, 0.228299553239789, -0.97328792257649], + [-0.973337174231582, 0.024064219708824, -0.228113258248362], + [0.024218299052141, 0.228063963157171, 0.973344904286279], + [0.973329707418057, 0.024311563193311, 0.228118891266323], + [0.024157446706033, -0.228056100974521, -0.97334825862943], + [-0.228026586903388, -0.973357047279715, 0.0240818225246], + [0.228030283046768, 0.973350913461235, 0.024293811512186], + [0.714188221641478, 0.538577770578359, -0.447067298186109], + [0.538527478643698, -0.447091804432198, 0.714210804423472], + [-0.714248768779612, -0.538467220657919, -0.447103733571689], + [-0.447161335031565, 0.714165750281327, 0.538529499264337], + [-0.538389584312319, -0.447080411787951, -0.714321888856505], + [-0.447010969457209, -0.714305113088417, -0.538469496444014], + [-0.538432015415055, 0.446913681352591, 0.714394237235966], + [0.447232748299163, -0.714300498031757, 0.538291433482237], + [0.538293039926151, 0.447257878415203, -0.714283552493402], + [0.446938450981038, 0.714439434305571, -0.538351479745163], + [0.714416613789271, -0.538354159540915, 0.44697170027516], + [-0.714357130519037, 0.538243224338143, 0.447200314770336], + [-0.88382262479637, 0.264636203965444, 0.385778754532718], + [0.264703577640278, 0.38583706665855, -0.88377699335113], + [0.883874390523727, -0.264731681104579, 0.385594604209984], + [0.385842662030848, -0.883779863837697, 0.264685837233447], + [-0.26482277877838, 0.385535295796556, 0.883872972510847], + [0.385530295763018, 0.883900556837248, -0.264737977388365], + [-0.264823043649479, -0.385616295049092, -0.883837557781314], + [-0.385806659371906, 0.883713819704025, 0.264958688191976], + [0.264941401779006, -0.385755452567231, 0.883741356075422], + [-0.385565266715756, -0.883834666939721, -0.264906977291953], + [-0.883791950673863, -0.264913750483099, -0.38565851828926], + [0.883739852971877, 0.26500888802554, -0.385712537437807], + ] + ), + # degree 16 + np.array( + [ + [0.938311825813856, -0.17507925577492, -0.298191501782276], + [-0.175109632245629, -0.298282531121024, 0.938277223598034], + [-0.938311652301346, 0.175147761450008, -0.298151815044902], + [-0.298182757815715, 0.938327057553728, -0.175012502421904], + [0.175097712410131, -0.298058347845738, -0.938350687316958], + [-0.298185477757762, -0.938323612741539, 0.175026336949732], + [0.175121225661409, 0.298070999742225, 0.938342280532811], + [0.298159022282375, -0.938297484887434, -0.175211378870018], + [-0.175136638135111, 0.298288500226525, -0.938270285480331], + [0.298175056505462, 0.938292628074833, 0.175210101816042], + [0.938309721676758, 0.175091137054814, 0.298191146635404], + [-0.938307020714082, -0.175144295988174, 0.298168426332282], + [0.318319389865683, -0.189552295411868, 0.928839433561922], + [-0.189466106261457, 0.928833946336168, 0.318386706242113], + [-0.318293314473071, 0.18936285961738, 0.928887007853633], + [0.928852943553566, 0.318350700348959, -0.189433473386317], + [0.189441607397533, 
0.928892798895752, -0.318229548512164], + [0.928866264406345, -0.318313837307129, 0.189430102746667], + [0.18945182591494, -0.928887156552102, 0.318239934719146], + [-0.928865750332054, -0.318289122686796, -0.189474146625178], + [-0.189481041982253, -0.928834132900175, -0.318377273511944], + [-0.928863874908086, 0.318277395441538, 0.189503038080361], + [0.318275484124591, 0.18957282380822, -0.92885028970154], + [-0.318345902583112, -0.189353418017315, -0.928870911049379], + [0.415270907116288, 0.626546860524453, 0.659537038588256], + [0.626612654947257, 0.659451415891007, 0.415307609777736], + [-0.415241828112963, -0.626676394380167, 0.659432271664102], + [0.659494217922308, 0.41519684716212, 0.626641009377521], + [-0.626618996427069, 0.659521812332477, -0.415186238180433], + [0.659478785687794, -0.415192215022902, -0.626660319321504], + [-0.626602233185435, -0.65952877581014, 0.415200475969626], + [-0.659472693683341, -0.415326178073293, 0.626577953724091], + [0.626606052873236, -0.65944383659479, -0.415329605108723], + [-0.659498633823103, 0.415315781516604, -0.626557542136963], + [0.415250963158486, -0.626542854390271, -0.659553401331872], + [-0.415267233073285, 0.626674158557439, -0.659418398387537], + [0.081476869754028, 0.884767493032223, 0.458855100188022], + [0.88480215017059, 0.458780629597686, 0.081519868495058], + [-0.08148097265168, -0.88484396510395, 0.458706887363658], + [0.458778051156021, 0.08139667888042, 0.884814828336823], + [-0.884809515892886, 0.45878451702782, -0.081417980329578], + [0.458732327572868, -0.081386172952098, -0.884839500978449], + [-0.884806469025575, -0.458784799888689, 0.081449492089205], + [-0.458770768146743, -0.081567624478124, 0.884802862185155], + [0.884821176813587, -0.458741101923224, -0.081535798692882], + [-0.458810899116744, 0.081573887356361, -0.884781475706435], + [0.081470600041761, -0.884777903494754, -0.458836139396478], + [-0.081497545017818, 0.88485010959699, -0.458692090298344], + [-0.722581612146772, 0.69116944690793, -0.012673178305347], + [0.691146231887784, -0.012722477090735, -0.722602950951623], + [0.722589739174094, -0.691157232223568, -0.012874361552029], + [-0.012719991090033, -0.722649829139429, 0.691097262526357], + [-0.6911640879369, -0.012832809701898, 0.722583920760425], + [-0.012740894622282, 0.722658126679523, -0.691088200990487], + [-0.691184825451665, 0.012806932405418, -0.722564543516851], + [0.01278690708865, 0.722509435119358, 0.69124280189425], + [0.691166758903022, 0.012679269543203, 0.722584076430794], + [0.012798402734516, -0.722517774658893, -0.691233872281593], + [-0.722587198973115, -0.691163495604889, 0.01267920436853], + [0.722578352800658, 0.691170335944389, 0.012809792129789], + [0.560117573995459, 0.806868022890413, 0.187702682288658], + [0.806883478716379, 0.18757144265397, 0.560139273462648], + [-0.560134093540899, -0.806891631206385, 0.18755184014617], + [0.187652131237362, 0.560025149416763, 0.806943932168034], + [-0.806885441512999, 0.18768574188158, -0.560098157976558], + [0.187630222901067, -0.560004720839195, -0.806963203679022], + [-0.806874677594158, -0.187697516958668, 0.560109718523856], + [-0.187614808802038, -0.560215760321792, 0.806820293129301], + [0.806892320702248, -0.18757613331143, -0.560124965524367], + [-0.187636487681617, 0.56022497423671, -0.806808853900342], + [0.56009182108872, -0.806880865199227, -0.18772432267788], + [-0.560129384097476, 0.806896245083186, -0.187546054987136], + [-0.099485634221032, -0.358895129517995, -0.928060824834181], + [-0.359050794288811, 
-0.927994608087772, -0.099541621850345], + [0.099434389660615, 0.359143761945999, -0.927970129049474], + [-0.928019026720099, -0.09942019096838, -0.359021324816913], + [0.358990815531993, -0.928035748444477, 0.099374262124424], + [-0.928007207203491, 0.099420259668564, 0.359051856067911], + [0.359002982562248, 0.928031348467288, -0.099371398165657], + [0.928017938922059, 0.099510949379702, -0.358998991631458], + [-0.359042863742385, 0.92799619207621, 0.099555459356689], + [0.928013665632084, -0.099489549105096, 0.359015969030581], + [-0.099451875312545, 0.358926751348054, 0.928052213867059], + [0.099465503317397, -0.359120063291987, 0.927975966170987], + [0.787833199437607, 0.557450082325166, -0.261855409681697], + [0.557405388687852, -0.261977292048617, 0.787824302184578], + [-0.787861477876718, -0.557364111687839, -0.26195331393273], + [-0.261861028070608, 0.787802657602316, 0.557490604990374], + [-0.557427204478003, -0.261835304855293, -0.787856068605919], + [-0.261850091868655, -0.787804146924511, -0.557493637162722], + [-0.557398047481063, 0.261806624190095, 0.787886227950765], + [0.26192814680606, -0.787893374500188, 0.557330849971047], + [0.557399834363592, 0.261935778537884, -0.787842035292097], + [0.261909535328364, 0.787908371337394, -0.55731839524686], + [0.787858967733566, -0.557444321449493, 0.261790136264747], + [-0.787856023927293, 0.557369329488324, 0.261958615256708], + [-0.507282732168614, -0.717049946047353, -0.478020506377115], + [-0.71706431400176, -0.477906271006066, -0.507370048109131], + [0.507331753192767, 0.71711626280308, -0.477868975583995], + [-0.477891616916408, -0.50725750267016, -0.717153699332196], + [0.717108744361459, -0.47798851765986, 0.507229756368514], + [-0.477913676926975, 0.507235340412842, 0.717154674280526], + [0.717103637758922, 0.477942092943937, -0.507280719627002], + [0.477949791330649, 0.507362311781387, -0.7170407809538], + [-0.717073605236621, 0.477889924387354, 0.507372313830785], + [0.477966885504482, -0.507396895429057, 0.717004914118516], + [-0.507289494490155, 0.717039874321013, 0.478028437871252], + [0.507342973335893, -0.717147616692481, 0.47781000751239], + [-0.469705390085658, -0.33624876406351, 0.816280353304085], + [-0.336180458859188, 0.816354017519737, -0.4696262526314], + [0.469729267279509, 0.336087427571651, 0.816333054879763], + [0.816299320102214, -0.469688480988201, -0.336226338688182], + [0.336166188592078, 0.816261044646191, 0.469798042397566], + [0.816308187841148, 0.469684487990511, 0.336210386818421], + [0.336161196424763, -0.816254520485116, -0.469812949806501], + [-0.81631474754769, 0.469749196906201, -0.336104038866138], + [-0.336166711539314, -0.816355082377068, 0.469634242288587], + [-0.816302029136435, -0.469752338316787, 0.33613053695499], + [-0.469725914764869, 0.336254309274991, -0.816266258332602], + [0.469715709020586, -0.336082571137018, -0.816342855715183], + [0.220975783117544, 0.56198189964132, -0.797085972622227], + [0.56189854338611, -0.797188442616427, 0.220818056099052], + [-0.22090980871236, -0.561819935638318, -0.79721842448229], + [-0.7971433029262, 0.220906560624346, 0.561927794358875], + [-0.561911046458035, -0.797113560704263, -0.221056434445611], + [-0.797166608166814, -0.22092145416411, -0.561888876837612], + [-0.561903189214556, 0.797117195899141, 0.221063298519679], + [0.797149071206196, -0.221019917708182, 0.56187503437274], + [0.56187154518738, 0.797190222992272, -0.220880318440273], + [0.797151311779493, 0.220966501329483, -0.561892864715723], + [0.220989674227739, -0.56195922843892, 
0.79709810529009], + [-0.220934514736207, 0.561821479644177, 0.797210489901321], + [-0.025586321091663, 0.991400659992677, -0.128335776535923], + [0.991391023154192, -0.128410509654448, -0.025584765380375], + [0.0255553186148, -0.991378867053065, -0.128510185009118], + [-0.128427355734578, -0.025687167640031, 0.991386193023514], + [-0.99138841235829, -0.128432289728374, 0.02557660643704], + [-0.128471046150121, 0.025696657527584, -0.991380286314492], + [-0.991388770492313, 0.128433611757029, -0.025556077805202], + [0.128434643068809, 0.02546732329508, 0.991390920829907], + [0.991386054149539, 0.128448345934336, 0.025587380962899], + [0.128392989158359, -0.02544975216483, -0.991396767419448], + [-0.025589705051665, -0.991398220731893, 0.128353943940207], + [0.025571746935955, 0.991376476866512, 0.128525354986419], + ] + ), + # degree 17 + np.array( + [ + [-0.053895316433783, -0.14060350667641, -0.988597971258691], + [-0.140602010826056, -0.988598302765153, -0.053893137981829], + [0.05389273741214, 0.140602992486377, -0.988598184986247], + [-0.988598098647216, -0.053895299659618, -0.140602617421263], + [0.140604478516356, -0.988597884351918, 0.053894375180116], + [-0.98859806586327, 0.053892813420617, 0.140603800919506], + [0.14060181241573, 0.988598276619905, -0.053894135205652], + [0.988598002509875, 0.053895652551635, -0.140603158106489], + [-0.140604846635692, 0.988597850035353, 0.053894044272357], + [0.988598167360928, -0.0538924507278, 0.140603226297146], + [-0.053892835910884, 0.140602751584219, 0.988598213878838], + [0.053896097450443, -0.140603793852246, 0.988597887836084], + [-0.712137820619482, 0.484725955627139, 0.50783902211694], + [0.484727142303201, 0.507838589157962, -0.71213732164283], + [0.712137878427749, -0.484726895412166, 0.507838044038163], + [0.507839760738435, -0.712137969376798, 0.484724963236905], + [-0.484727642870466, 0.507838390853387, 0.712137122338588], + [0.507839607814555, 0.712136191976364, -0.48472773472555], + [-0.484726768067281, -0.507839236872381, -0.712137114474401], + [-0.507840112632748, 0.712136257912531, 0.48472710896699], + [0.48472854912246, -0.507840095411427, 0.712135289926112], + [-0.507838861015403, -0.712137466171353, -0.484726645149226], + [-0.712136671868904, -0.484727632936327, -0.507839032024349], + [0.712137765857364, 0.484726401555971, -0.507838673275561], + [-0.703005448039525, 0.261790111709517, 0.66124827216248], + [0.26179085361446, 0.661247136036676, -0.7030062404041], + [0.703006433944545, -0.261790569085573, 0.661247042919986], + [0.661249487589413, -0.70300443378585, 0.261789765346499], + [-0.261791711051733, 0.661247232073399, 0.703005830772316], + [0.661247423359215, 0.703005908959219, -0.261791017930756], + [-0.261791042135151, -0.661248438610085, -0.703004944999334], + [-0.661249044674904, 0.703004424763402, 0.261790908321135], + [0.26179224626734, -0.661248916662998, 0.703004046934519], + [-0.661247254530942, -0.703006260525483, -0.261790500280794], + [-0.70300546948428, -0.261791326803081, -0.66124776830313], + [0.703005158463527, 0.261791025358218, -0.661248218308045], + [0.062800447246381, 0.786218819998244, -0.614748786827777], + [0.786220043108977, -0.614747449388309, 0.062798226760693], + [-0.062799502252198, -0.786219239565021, -0.614748346768559], + [-0.61474770709614, 0.062799571514381, 0.786219734194995], + [-0.786218534519124, -0.614749234835089, -0.062799635733612], + [-0.61474933069654, -0.062799956628617, -0.786218433932708], + [-0.786219538571286, 0.614747943528109, 0.062799706183351], + [0.614749395150454, 
-0.062799770458141, 0.786218398406292], + [0.786217798458002, 0.614750179051967, -0.062799607828601], + [0.614747800214019, 0.062800058129605, -0.786219622517107], + [0.062800526363459, -0.786218909415802, 0.614748664386918], + [-0.062801412397757, 0.786218712810953, 0.614748825315458], + [0.829543607739232, 0.321465368220585, 0.456637076783941], + [0.321463595502047, 0.456637380632479, 0.829544127443504], + [-0.82954344503853, -0.321464459743646, 0.456638011903666], + [0.456635000556537, 0.829545159774775, 0.321464312422039], + [-0.32146420025779, 0.456637459573867, -0.829543849634571], + [0.456637068558535, -0.829544000114897, -0.321464367374743], + [-0.321462954195433, -0.456636421102337, 0.829544904150941], + [-0.456636713034899, -0.829544190971737, 0.321464379883261], + [0.321462955106589, -0.456636688799517, -0.82954475643955], + [-0.456637112396323, 0.829544098578271, -0.321464051017078], + [0.829544701976578, -0.321462298506758, -0.45663724997129], + [-0.829544861446795, 0.321463589390512, -0.456636051515194], + [-0.249500423448462, 0.954025094362385, -0.166089307379737], + [0.954025470855406, -0.166087567010738, -0.249500142371853], + [0.249500943484422, -0.954025029612664, -0.166088898102612], + [-0.166086877408662, -0.249500137449683, 0.954025592196158], + [-0.954024855383494, -0.166089937003122, 0.249500918108135], + [-0.166090151998567, 0.249500107118379, -0.954025030047436], + [-0.954025593688894, 0.166087862658579, -0.249499475879328], + [0.166089692874499, 0.249499687822531, 0.954025219633797], + [0.954024931419817, 0.166090647913648, 0.249500154118264], + [0.166087956122076, -0.249500002352048, -0.954025439732882], + [-0.249499759982538, -0.954025225930409, 0.166089548307795], + [0.249498374708179, 0.954025720257113, 0.166088789826025], + [0.860787215766444, 0.418630333044569, -0.289471956203095], + [0.418631425510959, -0.289473932939102, 0.860786019707239], + [-0.860786771736426, -0.418630687137034, -0.289472764506019], + [-0.289474446503673, 0.860786651964262, 0.418629770347917], + [-0.418629889262302, -0.289472838226515, -0.860787134978979], + [-0.289472399693171, -0.860787556030986, -0.418629326729602], + [-0.418629257388446, 0.289472594933548, 0.86078752409688], + [0.289473185156189, -0.860787817544824, 0.418628245872098], + [0.418628942424652, 0.289472756316209, -0.860787623002977], + [0.289473542762772, 0.860786603163078, -0.418630495610795], + [0.860788012261758, -0.418628676927689, 0.289471982755196], + [-0.860787839361109, 0.418628479474405, 0.289472782452827], + [-0.16910412959425, -0.878917391692094, 0.445991044680649], + [-0.878918175066698, 0.445989574417742, -0.169103935637546], + [0.169102333488308, 0.878918206143462, 0.445990120651083], + [0.445989469034572, -0.169103982598388, -0.878918219506016], + [0.878916540171611, 0.445992443170219, 0.169104867014003], + [0.445990224108014, 0.169104198005711, 0.87891779491425], + [0.878918285318947, -0.445988968934561, -0.169104959479873], + [-0.445991404870479, 0.169104651147508, -0.87891710857278], + [-0.878917501870672, -0.445990557689425, 0.169104841318319], + [-0.4459886381463, -0.169104239878744, 0.878918591622365], + [-0.16910368279187, 0.878918081785917, -0.445989854117772], + [0.169104218306708, -0.878917936541782, -0.445989937303537], + [0.699159749436449, 0.682605593469953, 0.2126622875159], + [0.682603600110598, 0.212662432840561, 0.699161651389995], + [-0.699161274801242, -0.682604056820824, 0.21266220498729], + [0.212660843412531, 0.699162101347243, 0.68260363441662], + [-0.682604762820295, 
0.212661985195386, -0.699160652373835], + [0.212662594223091, -0.699161699678606, -0.682603500372528], + [-0.682602562402764, -0.212662368159073, 0.69916268419457], + [-0.212661230546804, -0.699160950060591, 0.682604693019826], + [0.682603395227417, -0.21266176997579, -0.699162053042617], + [-0.212661876797938, 0.699162212981133, -0.682603198129121], + [0.699162077611852, -0.682602648532129, -0.212664085934605], + [-0.69916198280867, 0.682603270588311, -0.212662400948523], + [-0.893254372981228, -0.172342415041176, -0.415204428116666], + [-0.17234169138316, -0.415205105182347, -0.893254197886418], + [0.893254479512939, 0.172341865725213, -0.415204426937409], + [-0.415203760144359, -0.893254961141014, -0.172340975855865], + [0.172343621116966, -0.415205895670259, 0.893253458129858], + [-0.41520633444977, 0.89325370222595, 0.172341298859036], + [0.172340599563611, 0.415204277853847, -0.893254793098767], + [0.415204013461881, 0.893254652987798, -0.172341962739188], + [-0.17234119462921, 0.415204328749048, 0.893254654632054], + [0.415206142771325, -0.893254015914866, 0.172340134782712], + [-0.89325441464337, 0.172340274858685, 0.415205226823752], + [0.893254705389659, -0.172340550786628, 0.415204486793911], + [-0.030119107290242, 0.538031004327585, -0.842386774444073], + [0.538032715913301, -0.84238566918646, -0.030119444819523], + [0.030118087641353, -0.538031590262412, -0.842386436664628], + [-0.842386183209587, -0.030119347292783, 0.538031916577671], + [-0.538030304105545, -0.842387233636645, 0.030118772718924], + [-0.842387312723823, 0.030117901022641, -0.538030229076328], + [-0.538031723308682, 0.842386324999934, -0.030118834084264], + [0.842387103098144, 0.030119789658303, 0.53803045155907], + [0.538029173032331, 0.842387968746045, 0.030118417588877], + [0.842386330532407, -0.030117441179441, -0.538031792619125], + [-0.030117059116644, -0.538030739137179, 0.842387017049566], + [0.030118346812524, 0.538030710181824, 0.84238698950454], + [0.951905881051384, -0.301774121097739, 0.052986540323701], + [-0.301774405343499, 0.052986798530194, 0.951905776566724], + [-0.951905698855431, 0.3017745999, 0.05298708655653], + [0.052987612958423, 0.951905238066977, -0.301775960960479], + [0.301774562398047, 0.052986834212903, -0.951905724790833], + [0.052986766829206, -0.951905252173644, 0.301776065030379], + [0.301777293645336, -0.052987994727859, 0.951904794322844], + [-0.052986574701187, -0.951905591847301, -0.301775027315507], + [-0.301776941841401, -0.052986526316734, -0.951904987591586], + [-0.052988794896112, 0.951905240176443, 0.301775746772478], + [0.95190486181224, 0.301777462059556, -0.052985823114433], + [-0.951905018594348, -0.301776824324304, -0.052986638651951], + [0.553606146300219, 0.45440115669048, 0.697882385203248], + [0.454399619298559, 0.697882738116233, 0.55360696330584], + [-0.553605814018882, -0.454401128197097, 0.697882667342941], + [0.697880969772289, 0.55360789125326, 0.454401204632875], + [-0.454400796454347, 0.697882268834231, -0.553606588678677], + [0.697882520653254, -0.553605103541103, -0.454402219074583], + [-0.454400180900896, -0.697882141032518, 0.553607255032936], + [-0.697881635390884, -0.553607048847703, 0.454401208680482], + [0.454401271818775, -0.69788172578063, -0.553606883077631], + [-0.69788232191633, 0.553606343141444, -0.454401014072625], + [0.553606029480292, -0.454400306845994, -0.697883031217504], + [-0.553606960810672, 0.454400842716203, -0.697881943512493], + ] + ), + # degree 18 + np.array( + [ + [-0.866376343641697, 0.223696804580225, 
0.446488265017841], + [0.223696806212017, 0.446488265347841, -0.866376343050305], + [0.866376343115579, -0.223696806225293, 0.44648826521453], + [0.44648826367979, -0.866376344067145, 0.223696805603153], + [-0.223696804286002, 0.446488265023544, 0.866376343714725], + [0.446488262849567, 0.866376344947941, -0.22369680384892], + [-0.2236968055886, -0.446488263582537, -0.866376344121022], + [-0.446488264810465, 0.866376343829741, 0.223696804265844], + [0.223696803801399, -0.446488262808774, 0.866376344981234], + [-0.446488265014924, -0.866376343219064, -0.223696806222901], + [-0.8663763449212, -0.223696804074408, -0.446488262788483], + [0.866376344172558, 0.223696805482214, -0.446488263535836], + [-0.806844783933568, -0.461758079243128, -0.368484695601989], + [-0.461758081774945, -0.368484698390835, -0.806844781210945], + [0.80684478133506, 0.461758081613586, -0.368484698321273], + [-0.368484697968357, -0.806844781706494, -0.461758081246195], + [0.461758078945765, -0.368484695716793, 0.806844784051319], + [-0.368484697702554, 0.806844783105505, 0.461758079013772], + [0.461758081217295, 0.368484698443883, -0.806844781505862], + [0.368484695481328, 0.806844784182969, -0.461758078903629], + [-0.461758078967836, 0.36848469793606, 0.806844783025151], + [0.36848469846061, -0.806844781213308, 0.461758081715136], + [-0.806844782774103, 0.461758079314706, 0.368484698051091], + [0.806844781709987, -0.461758081098712, 0.368484698145526], + [-0.134842418858112, -0.040021669507572, 0.990058477084218], + [-0.040021669975618, 0.990058477016276, -0.134842419218046], + [0.134842418981357, 0.040021669788942, 0.990058477056058], + [0.990058476924117, -0.134842420436143, -0.040021668151402], + [0.040021669677461, 0.990058477116921, 0.13484241856757], + [0.990058477286021, 0.134842417855397, 0.040021667893743], + [0.040021668037836, -0.990058476927568, -0.134842420444507], + [-0.990058477115239, 0.134842418635191, -0.040021669491235], + [-0.040021667837798, -0.990058477270892, 0.134842417983082], + [-0.990058477042031, -0.134842419087429, 0.040021669778575], + [-0.134842418122745, 0.040021667670891, -0.990058477258617], + [0.134842420378113, -0.040021667867212, -0.990058476943508], + [0.049794077313207, -0.279738156561879, -0.958784185115654], + [-0.279738157129975, -0.958784185068512, 0.049794075029415], + [-0.049794075085005, 0.279738157111834, -0.958784185070918], + [-0.958784184460955, 0.049794077429761, -0.279738158785068], + [0.279738156684233, -0.958784185083851, -0.049794077238191], + [-0.958784184306963, -0.049794076856858, 0.279738159414846], + [0.279738159012938, 0.958784184390379, 0.049794077508567], + [0.958784185034086, -0.049794077337113, -0.279738156837192], + [-0.279738159575992, 0.95878418424315, -0.049794077180261], + [0.958784185016722, 0.049794074909289, 0.279738157328865], + [0.049794077031905, 0.279738159517178, 0.958784184268015], + [-0.049794077785621, -0.279738158888691, 0.958784184412241], + [0.205470768670777, -0.192901743072287, 0.959463746444603], + [-0.192901744385898, 0.959463746331714, 0.205470767964668], + [-0.205470768086678, 0.19290174463045, 0.959463746256418], + [0.959463745738735, 0.205470770340502, -0.192901744804646], + [0.19290174290288, 0.959463746447685, -0.205470768815433], + [0.95946374536634, -0.205470771694041, 0.192901745215149], + [0.192901744892626, -0.959463745685675, 0.205470770505673], + [-0.959463746372533, -0.205470769064203, -0.192901743011692], + [-0.192901745122065, -0.959463745348892, -0.205470771862908], + [-0.959463746220563, 0.205470768260652, 
0.192901744623478], + [0.205470771726444, 0.192901745460598, -0.959463745310053], + [-0.205470770652743, -0.192901744949698, -0.959463745642705], + [-0.278905392074019, 0.772004854137857, -0.571156972696319], + [0.772004854268172, -0.571156972466399, -0.278905392184152], + [0.278905392160238, -0.772004854249339, -0.571156972503532], + [-0.571156971675365, -0.278905392078835, 0.772004854891456], + [-0.772004854266013, -0.571156972533567, 0.278905392052577], + [-0.571156970567139, 0.278905391582234, -0.77200485589077], + [-0.772004855078365, 0.571156971421921, -0.27890539208049], + [0.57115697255647, 0.278905391952095, 0.77200485428537], + [0.772004855995114, 0.571156970376128, 0.278905391684575], + [0.571156972406906, -0.278905392034109, -0.772004854366394], + [-0.278905391724262, -0.772004855939801, 0.571156970431511], + [0.278905392185009, 0.772004854970249, 0.571156971517017], + [0.912363859945553, -0.393198149041577, -0.113962286110494], + [-0.393198146993911, -0.113962287143254, 0.912363860699027], + [-0.912363860783175, 0.393198146824102, -0.113962287055462], + [-0.113962288369946, 0.912363860162702, -0.393198147882843], + [0.393198149035756, -0.113962285681562, -0.912363860001638], + [-0.113962285259825, -0.912363861029922, 0.393198146771995], + [0.393198147677495, 0.113962288066404, 0.912363860289116], + [0.113962286031302, -0.912363859929225, -0.393198149102416], + [-0.39319814691232, 0.113962285090844, -0.912363860990554], + [0.113962287370876, 0.912363860762456, 0.393198146780759], + [0.9123638610767, 0.393198146641003, 0.113962285337288], + [-0.912363860199082, -0.39319814784926, 0.113962288194565], + [0.848662336981788, -0.012909984472825, -0.528777429633226], + [-0.01290998542466, -0.528777432035493, 0.848662335470524], + [-0.848662335552813, 0.012909985214183, -0.528777431908562], + [-0.52877743352546, 0.848662334565693, -0.012909983878239], + [0.012909984639682, -0.528777429281808, -0.848662337198209], + [-0.528777430386149, -0.848662336521067, 0.012909983920382], + [0.012909983944448, 0.52877743344648, 0.848662334613896], + [0.528777429496827, -0.848662337067122, -0.012909984449961], + [-0.012909983871647, 0.528777430419671, -0.848662336500922], + [0.528777432240356, 0.848662335344326, 0.012909985329594], + [0.848662336343559, 0.012909983743557, 0.528777430675359], + [-0.848662334668199, -0.012909983655303, 0.528777433366386], + [-0.69585113208617, 0.211164782101034, 0.686440555892948], + [0.211164781099711, 0.68644055554441, -0.695851132733858], + [0.695851132741401, -0.211164781335084, 0.686440555464357], + [0.686440553889191, -0.695851134384757, 0.211164781040182], + [-0.211164781930503, 0.686440555960218, 0.695851132071559], + [0.686440553598939, 0.695851134525998, -0.21116478151828], + [-0.21116478087036, -0.686440553737906, -0.69585113458553], + [-0.686440555776475, 0.695851132224679, 0.211164782023223], + [0.211164781498505, -0.686440553499572, 0.695851134630023], + [-0.686440555292332, -0.695851132981083, -0.211164781104467], + [-0.695851134744882, -0.211164781531153, -0.686440553373094], + [0.695851134495813, 0.211164781101486, -0.686440553757753], + [-0.261718169263029, -0.581630098396244, 0.770201290908541], + [-0.581630098290833, 0.770201291506502, -0.261718167737572], + [0.261718167857864, 0.581630098126426, 0.770201291589781], + [0.770201292726794, -0.261718168321791, -0.581630096412025], + [0.581630098450626, 0.770201290888376, 0.261718169201518], + [0.770201293263127, 0.261718168077775, 0.581630095811608], + [0.581630096213803, -0.770201292881278, 
-0.261718168307686], + [-0.770201291051568, 0.26171816913029, -0.581630098266577], + [-0.581630095716607, -0.770201293304276, 0.261718168167806], + [-0.770201291705965, -0.261718167641045, 0.581630098070137], + [-0.261718168076348, 0.581630095637746, -0.770201293394907], + [0.261718168494542, -0.581630096129926, -0.770201292881124], + [0.506136437086844, 0.700992881596967, 0.502428987025446], + [0.700992883568509, 0.502428985302136, 0.506136436066968], + [-0.506136436123196, -0.700992883503112, 0.502428985336736], + [0.502428986281426, 0.506136435764488, 0.700992883085013], + [-0.700992881635171, 0.502428986938925, -0.50613643711982], + [0.502428986199081, -0.506136436342322, -0.700992882726821], + [-0.700992883178434, -0.502428986124795, 0.506136435790584], + [-0.502428987099143, -0.506136437034413, 0.700992881582003], + [0.700992882671006, -0.502428986197914, -0.506136436420782], + [-0.502428985277898, 0.506136435955935, -0.700992883666051], + [0.506136436300189, -0.700992882789867, -0.502428986153563], + [-0.506136435852246, 0.700992882991532, -0.502428986323445], + [-0.440748149182578, 0.602242024157979, 0.665616716534547], + [0.602242022260099, 0.66561671834234, -0.440748149045733], + [0.440748149100016, -0.602242022337998, 0.665616718235914], + [0.665616715634027, -0.440748149390786, 0.602242025000887], + [-0.602242023804998, 0.665616716814167, 0.440748149242614], + [0.665616716586012, 0.440748149783209, -0.602242023661529], + [-0.602242024940208, -0.665616715760932, -0.440748149282046], + [-0.665616716462371, 0.440748149424008, 0.602242024061062], + [0.602242023852026, -0.665616716460744, 0.440748149712092], + [-0.665616718266293, -0.44074814917988, -0.602242022245974], + [-0.440748149655782, -0.602242023883194, -0.66561671646983], + [0.44074814928254, 0.602242025306933, -0.665616715428797], + [-0.89025783677553, -0.293518547758229, 0.348264046639405], + [-0.293518546899673, 0.348264043649922, -0.890257838228066], + [0.890257838178446, 0.293518546762444, 0.348264043892422], + [0.34826404446276, -0.890257837322353, -0.293518548682307], + [0.293518547908785, 0.348264046686625, 0.89025783670742], + [0.348264047178787, 0.890257836270502, 0.293518548650024], + [0.293518548932545, -0.348264044336184, -0.890257837289365], + [-0.348264046901224, 0.890257836626627, -0.29351854789921], + [-0.2935185489462, -0.348264047080228, 0.890257836211408], + [-0.348264043786589, -0.890257838192766, 0.293518546844585], + [-0.890257836357219, 0.293518548692058, -0.348264046921688], + [0.890257837186443, -0.293518548811097, -0.348264044701638], + [0.661971946522154, 0.031389655564508, 0.748871037990662], + [0.03138965429721, 0.748871040172752, 0.661971944113708], + [-0.661971944196008, -0.031389654112142, 0.748871040107759], + [0.748871039164329, 0.661971945218693, 0.031389655052549], + [-0.031389655768972, 0.748871037783183, -0.661971946747175], + [0.748871037422933, -0.661971947171443, -0.031389655416215], + [-0.031389655026768, -0.748871039044161, 0.661971945355858], + [-0.748871037767735, -0.661971946761125, 0.031389655843332], + [0.03138965553856, -0.748871037222178, -0.661971947392751], + [-0.748871040238931, 0.661971944045087, -0.031389654165497], + [0.66197194707148, -0.031389655223358, -0.748871037519379], + [-0.661971945551351, 0.031389654961479, -0.74887103887409], + [-0.125732546862956, -0.877697090664539, -0.462427446956124], + [-0.877697091831705, -0.462427445382079, -0.125732544504481], + [0.125732544403638, 0.877697091976424, -0.46242744513482], + [-0.462427446167756, -0.125732547895101, 
-0.877697090932044], + [0.877697090790478, -0.462427446687307, 0.125732546972493], + [-0.462427443232932, 0.125732547131528, 0.877697092587683], + [0.87769709111192, 0.462427445862905, -0.12573254776065], + [0.462427446678181, 0.125732547366796, -0.877697090738801], + [-0.87769709250851, 0.462427443357225, 0.125732547227075], + [0.462427444949274, -0.125732544734265, 0.877697092026818], + [-0.125732546895942, 0.877697092616795, 0.462427443241732], + [0.125732547889573, -0.877697091021935, 0.462427445998644], + ] + ), + # degree 19 + np.array( + [ + [0.553035945587524, -0.472050222255944, 0.686527370580538], + [-0.472050227459673, 0.686527365766638, 0.553035947121696], + [-0.55303594558747, 0.472050222505474, 0.686527370409006], + [0.686527372366403, 0.553035941501725, -0.472050224445432], + [0.472050228567412, 0.686527364805305, -0.553035947369552], + [0.68652737203169, -0.553035941518164, 0.472050224912964], + [0.472050228340927, -0.686527365268236, 0.553035946988198], + [-0.686527371732145, -0.553035942965273, -0.47205022365323], + [-0.472050227580466, -0.686527365608527, -0.553035947214868], + [-0.686527371021655, 0.553035942983048, 0.472050224665708], + [0.553035946644886, 0.472050221691609, -0.686527370116806], + [-0.553035947212832, -0.472050222465287, -0.68652736912732], + [0.534151654424436, 0.792082393152326, 0.29544456761586], + [0.792082397489039, 0.295444568376044, 0.534151647573148], + [-0.53415165460592, -0.792082392760173, 0.295444568339099], + [0.295444567949351, 0.534151645341887, 0.792082399152876], + [-0.792082397600766, 0.29544456929757, -0.534151646897765], + [0.295444567829592, -0.534151645364488, -0.792082399182305], + [-0.792082397865911, -0.295444567933543, 0.534151647259045], + [-0.295444567962042, -0.53415164560035, 0.792082398973845], + [0.792082397128777, -0.29544456908432, -0.534151647715618], + [-0.295444567489444, 0.53415164476261, -0.792082399715064], + [0.534151654464793, -0.792082393125927, -0.29544456761367], + [-0.534151654460867, 0.792082392713663, -0.295444568726043], + [-0.987783901989363, -0.008366313346394, -0.155605166275604], + [-0.008366316491905, -0.155605166254194, -0.987783901966094], + [0.987783902042018, 0.008366312354305, -0.155605165994688], + [-0.155605167507252, -0.98778390181532, -0.008366310987655], + [0.008366315777747, -0.155605166766477, 0.987783901891443], + [-0.155605168424393, 0.987783901667492, 0.008366311383278], + [0.008366317026602, 0.155605166706053, -0.987783901890384], + [0.155605166835858, 0.987783901919836, -0.008366311135093], + [-0.008366315838957, 0.155605165685948, 0.98778390206114], + [0.155605167761508, -0.987783901773982, 0.008366311139443], + [-0.98778390211314, 0.008366313179595, 0.155605165498836], + [0.987783902165643, -0.008366312162939, 0.155605165220208], + [0.950764981387945, 0.202727494112491, -0.234408859255043], + [0.202727496789996, -0.234408860732757, 0.950764980452705], + [-0.950764980986237, -0.202727494847485, -0.234408860248721], + [-0.23440885021567, 0.950764983233011, 0.202727495911382], + [-0.20272749729896, -0.234408861592541, -0.950764980132203], + [-0.23440885011577, -0.950764983322899, -0.20272749560533], + [-0.202727496759051, 0.234408860491485, 0.950764980518789], + [0.234408850569327, -0.950764983253747, 0.20272749540521], + [0.202727497565203, 0.234408861679376, -0.950764980054025], + [0.234408850341461, 0.950764983305224, -0.202727495427267], + [0.950764981380539, -0.202727493695606, 0.234408859645621], + [-0.950764980970666, 0.202727494426432, 0.234408860676023], + 
[0.512072989115983, -0.124051607170076, -0.849936734455185], + [-0.12405160965336, -0.849936734716267, 0.512072988081055], + [-0.51207298893537, 0.124051606674421, -0.849936734636344], + [-0.849936734725645, 0.512072989351902, -0.124051604343177], + [0.124051609706947, -0.849936734419284, -0.512072988561004], + [-0.849936734185619, -0.512072990133951, 0.124051604814925], + [0.124051609905272, 0.849936734159209, 0.512072988944631], + [0.849936734486865, -0.512072989718667, -0.124051604465195], + [-0.124051609776913, 0.849936734911909, -0.512072987726399], + [0.849936733865973, 0.512072990727649, 0.124051604554246], + [0.512072989657044, 0.124051606837459, 0.849936734177751], + [-0.512072989396574, -0.124051606970032, 0.84993673431533], + [0.391883697914976, 0.850423194793585, -0.351009340424947], + [0.850423195330397, -0.351009335923244, 0.39188370078221], + [-0.391883697466306, -0.850423195007668, -0.351009340407185], + [-0.351009335872243, 0.391883705326061, 0.850423193257595], + [-0.850423194593128, -0.351009337444654, -0.391883701019427], + [-0.35100933799945, -0.391883705264188, -0.850423192408108], + [-0.850423194468673, 0.351009337760337, 0.391883701006749], + [0.351009335527498, -0.391883705866852, 0.850423193150685], + [0.850423195361416, 0.351009336170377, -0.391883700493539], + [0.351009336873483, 0.39188370616407, -0.850423192458173], + [0.391883698323181, -0.850423194786778, 0.3510093399857], + [-0.391883698167036, 0.850423194811902, 0.351009340099156], + [-0.637143378120116, -0.628499374133282, 0.446135464216598], + [-0.628499375204954, 0.446135468086576, -0.637143374353178], + [0.63714337757707, 0.628499374826823, 0.446135464015109], + [0.446135466991108, -0.63714337381196, -0.628499376531226], + [0.628499375911897, 0.446135468292267, 0.637143373511799], + [0.446135467311664, 0.637143373480954, 0.628499376639239], + [0.62849937527089, -0.446135468666392, -0.637143373882143], + [-0.446135467006437, 0.637143373286424, -0.628499377053108], + [-0.628499376195147, -0.446135467887251, 0.637143373515989], + [-0.446135467633094, -0.637143373382135, 0.628499376511253], + [-0.637143377816856, 0.628499373935058, -0.446135464928946], + [0.637143377542956, -0.62849937478419, -0.446135464123887], + [-0.420378708184596, 0.903565957647232, -0.082766550526719], + [0.903565960547129, -0.082766548074817, -0.420378702434272], + [0.42037870768752, -0.903565957904322, -0.082766550244743], + [-0.08276654593922, -0.420378701283585, 0.9035659612781], + [-0.903565960760554, -0.082766547146253, 0.420378702158358], + [-0.082766545039106, 0.420378701254078, -0.903565961374279], + [-0.903565960509685, 0.082766547722836, -0.420378702584056], + [0.082766546052241, 0.420378700439882, 0.903565961660275], + [0.90356596090935, 0.082766547862683, 0.420378701697478], + [0.082766545679396, -0.420378701270528, -0.903565961307975], + [-0.420378707505505, -0.903565957945495, 0.082766550719722], + [0.420378706956033, 0.903565958233438, 0.082766550367062], + [0.491848298473796, 0.355367007972287, 0.794858189196825], + [0.355367012626596, 0.794858187708896, 0.491848297515583], + [-0.491848298344631, -0.355367008156744, 0.794858189194284], + [0.794858192062911, 0.491848294626225, 0.355367006886901], + [-0.355367012548889, 0.794858187634159, -0.491848297692508], + [0.794858192091183, -0.491848294618182, -0.355367006834796], + [-0.355367012605403, -0.79485818761909, 0.491848297676028], + [-0.79485819260841, -0.491848293926967, 0.355367006634583], + [0.35536701250799, -0.794858187986535, -0.491848297152596], + 
[-0.794858192358054, 0.491848294578517, -0.355367006292778], + [0.491848297979809, -0.355367007868558, -0.794858189548874], + [-0.491848297571808, 0.355367007417215, -0.794858190003127], + [0.060667255805915, 0.97798263888706, 0.199673338501868], + [0.977982638810576, 0.199673341482313, 0.060667247229371], + [-0.06066725576913, -0.977982638936182, 0.199673338272451], + [0.19967333790937, 0.060667250976018, 0.977982639307643], + [-0.977982639072168, 0.199673340242081, -0.060667247094362], + [0.199673337373138, -0.060667251086811, -0.977982639410252], + [-0.977982638978921, -0.199673340702871, 0.060667247080943], + [-0.199673337990036, -0.060667251306886, 0.977982639270649], + [0.977982638897865, -0.199673341052594, -0.060667247236562], + [-0.199673337084201, 0.060667250789575, -0.977982639487682], + [0.06066725570001, -0.977982638898456, -0.199673338478232], + [-0.060667256074209, 0.977982638961939, -0.199673338053604], + [-0.708312961873346, 0.702414591990534, 0.070046334671986], + [0.702414584158394, 0.070046328146925, -0.70831297028554], + [0.70831296180624, -0.702414591950002, 0.070046335757007], + [0.070046325730793, -0.7083129711293, 0.702414583548491], + [-0.702414584241602, 0.070046328819927, 0.70831297013647], + [0.070046325138075, 0.708312971393231, -0.70241458334145], + [-0.702414584340882, -0.070046327329757, -0.708312970185382], + [-0.070046326094986, 0.708312970542407, 0.702414584103993], + [0.702414584126282, -0.070046328999645, 0.708312970233058], + [-0.07004632593766, -0.708312970292399, -0.70241458437179], + [-0.70831296129047, -0.702414592488956, -0.070046335567964], + [0.708312961059513, 0.702414592640383, -0.070046336384914], + [-0.608778246497891, -0.729529462544733, -0.311730348009535], + [-0.729529461162802, -0.311730341531525, -0.608778251471052], + [0.608778246679673, 0.729529462023489, -0.31173034887438], + [-0.311730343069402, -0.608778253416134, -0.729529458882528], + [0.729529460955067, -0.311730341992774, 0.608778251483804], + [-0.311730342453046, 0.608778253837742, 0.729529458794075], + [0.729529461285603, 0.311730341286902, -0.608778251449154], + [0.311730342676067, 0.608778254584565, -0.729529458075568], + [-0.729529460737167, 0.311730342625706, 0.608778251420826], + [0.311730342500045, -0.608778254449614, 0.729529458263397], + [-0.608778247292532, 0.72952946202083, 0.31173034768375], + [0.608778247330452, -0.729529461617846, 0.311730348552781], + [0.230102774190651, -0.807756554170623, 0.542754145543051], + [-0.807756552084345, 0.542754149424728, 0.230102772358463], + [-0.230102773683601, 0.807756554197333, 0.542754145718266], + [0.542754144206019, 0.230102772513564, -0.807756555546758], + [0.807756552132751, 0.542754149180793, -0.230102772763921], + [0.54275414387689, -0.230102773432955, 0.807756555506005], + [0.807756552229309, -0.542754148882616, 0.230102773128283], + [-0.542754145084005, -0.230102772500065, -0.80775655496066], + [-0.807756552237738, -0.542754149346909, -0.230102772003543], + [-0.542754144288786, 0.230102773227955, 0.807756555287639], + [0.230102774097675, 0.807756554025896, -0.542754145797859], + [-0.230102773562357, -0.807756553761761, -0.542754146417909], + [-0.496383809474105, -0.862518230775131, -0.098312843883766], + [-0.862518224287333, -0.098312838975785, -0.49638382171939], + [0.496383809596231, 0.862518230686221, -0.098312844047173], + [-0.098312839350041, -0.496383823019562, -0.862518223496418], + [0.862518224300261, -0.098312838333147, 0.496383821824206], + [-0.098312838299782, 0.496383823078636, 0.862518223582133], + 
[0.862518224470515, 0.098312838524506, -0.496383821490472], + [0.09831283917121, 0.496383824314041, -0.862518222771822], + [-0.862518224078588, 0.098312839378387, 0.496383822002367], + [0.098312838470056, -0.49638382381015, 0.862518223141735], + [-0.496383810069422, 0.862518230414379, 0.098312844042943], + [0.496383810403814, -0.862518230215463, 0.098312844099726], + [0.278692551327958, 0.919313188465131, 0.277837584477674], + [0.919313191744972, 0.277837581526559, 0.278692543450923], + [-0.278692551566547, -0.91931318841363, 0.277837584408758], + [0.277837583051005, 0.278692544908351, 0.919313190842426], + [-0.919313192180326, 0.277837580345951, -0.278692543191822], + [0.277837582008532, -0.278692545046071, -0.919313191115735], + [-0.919313192196504, -0.277837580255645, 0.278692543228489], + [-0.277837582825575, -0.278692545265575, 0.919313190802263], + [0.919313191814052, -0.277837581086655, -0.278692543661607], + [-0.277837581528602, 0.278692544535811, -0.919313191415468], + [0.278692551299389, -0.91931318860489, -0.277837584043894], + [-0.278692551719555, 0.919313188501633, -0.277837583964092], + [0.711723818982073, -0.147355178359107, -0.686830151423428], + [-0.14735518004562, -0.686830151696651, 0.711723818369232], + [-0.711723818994987, 0.147355179083635, -0.686830151254603], + [-0.686830156031755, 0.711723816221312, -0.147355170213896], + [0.147355179878181, -0.68683015150873, -0.711723818585246], + [-0.686830155899656, -0.711723816480405, 0.147355169578202], + [0.147355179832049, 0.686830151151262, 0.711723818939762], + [0.686830156307707, -0.711723816117428, -0.147355169429431], + [-0.147355180410728, 0.686830151596083, -0.711723818390689], + [0.686830155813336, 0.711723816667769, 0.147355169075579], + [0.711723819167954, 0.147355177853636, 0.686830151339256], + [-0.711723818958232, -0.147355177932743, 0.686830151539607], + [0.910866815770901, -0.407547474081887, 0.065013077890936], + [-0.407547470055014, 0.06501307469253, 0.910866817800923], + [-0.91086681602966, 0.40754747351676, 0.065013077808199], + [0.065013071417123, 0.910866817243773, -0.407547471822745], + [0.407547469547224, 0.065013074424327, -0.910866818047266], + [0.065013071503944, -0.910866817193855, 0.407547471920462], + [0.407547469994702, -0.065013074730498, 0.910866817825199], + [-0.065013071237167, -0.910866817002829, -0.407547472389962], + [-0.407547469492954, -0.065013074760909, -0.910866818047525], + [-0.065013070894069, 0.910866817046896, 0.407547472346204], + [0.910866815571027, 0.407547474607393, -0.065013077397032], + [-0.910866815826998, -0.407547474069762, -0.065013077180997], + ] + ), + # degree 20 + np.array( + [ + [-0.251581299355938, 0.965702462813156, -0.064230858090044], + [0.965702462812973, -0.064230858090163, -0.251581299356609], + [0.25158129935621, -0.965702462813076, -0.064230858090184], + [-0.064230858090037, -0.251581299356469, 0.965702462813018], + [-0.965702462812988, -0.064230858090212, 0.25158129935654], + [-0.064230858090283, 0.251581299356213, -0.965702462813068], + [-0.965702462813129, 0.06423085809035, -0.251581299355962], + [0.064230858090209, 0.251581299356322, 0.965702462813045], + [0.96570246281309, 0.064230858089911, 0.251581299356226], + [0.0642308580902, -0.2515812993563, -0.965702462813051], + [-0.2515812993566, -0.965702462812992, 0.064230858089919], + [0.251581299356516, 0.965702462812981, 0.064230858090402], + [-0.774265533845772, 0.381515182343397, -0.504934697500583], + [0.381515182343197, -0.504934697500657, -0.774265533845823], + [0.774265533845583, 
-0.381515182343386, -0.504934697500883], + [-0.504934697500797, -0.774265533845681, 0.3815151823433], + [-0.381515182343153, -0.504934697500805, 0.774265533845748], + [-0.504934697500622, 0.774265533845887, -0.381515182343114], + [-0.381515182343272, 0.504934697500883, -0.774265533845639], + [0.504934697500808, 0.774265533845615, 0.381515182343419], + [0.38151518234349, 0.504934697500621, 0.774265533845703], + [0.50493469750058, -0.774265533845806, -0.381515182343333], + [-0.774265533845719, -0.381515182343321, 0.504934697500723], + [0.774265533845894, 0.38151518234298, 0.504934697500711], + [0.621892089865857, 0.451716799694261, -0.639689113113747], + [0.451716799694191, -0.639689113113918, 0.621892089865731], + [-0.621892089865648, -0.451716799694225, -0.639689113113976], + [-0.639689113113901, 0.621892089865499, 0.451716799694535], + [-0.451716799694008, -0.6396891131138, -0.621892089865986], + [-0.639689113113879, -0.621892089865655, -0.451716799694351], + [-0.451716799694347, 0.639689113113675, 0.621892089865869], + [0.639689113113788, -0.621892089865995, 0.451716799694013], + [0.451716799694587, 0.639689113113955, -0.621892089865406], + [0.639689113114061, 0.6218920898659, -0.451716799693757], + [0.621892089865889, -0.451716799694281, 0.639689113113701], + [-0.621892089865898, 0.451716799693713, 0.639689113114094], + [0.281811042675091, 0.858047847696197, -0.429344182783814], + [0.858047847696408, -0.429344182783659, 0.281811042674688], + [-0.281811042675114, -0.858047847696306, -0.429344182783581], + [-0.429344182783315, 0.281811042674947, 0.858047847696495], + [-0.858047847696386, -0.429344182783329, -0.281811042675257], + [-0.429344182783979, -0.281811042674793, -0.858047847696213], + [-0.858047847696136, 0.429344182783948, 0.281811042675075], + [0.429344182783574, -0.281811042675002, 0.858047847696347], + [0.85804784769643, 0.429344182783432, -0.281811042674964], + [0.429344182783407, 0.2818110426754, -0.8580478476963], + [0.28181104267478, -0.858047847696515, 0.429344182783383], + [-0.281811042675193, 0.858047847696227, 0.429344182783688], + [-0.649612004107369, -0.615311084069471, 0.44653836782617], + [-0.615311084069575, 0.446538367826544, -0.649612004107014], + [0.649612004107338, 0.615311084069274, 0.446538367826487], + [0.44653836782629, -0.649612004107234, -0.615311084069526], + [0.615311084069631, 0.446538367826189, 0.649612004107205], + [0.4465383678263, 0.649612004107223, 0.615311084069531], + [0.615311084069337, -0.44653836782627, -0.649612004107428], + [-0.446538367826248, 0.649612004107346, -0.615311084069439], + [-0.615311084069373, -0.446538367826536, 0.649612004107211], + [-0.446538367826286, -0.649612004107303, 0.615311084069457], + [-0.649612004107121, 0.615311084069723, -0.446538367826183], + [0.649612004107125, -0.615311084069551, -0.446538367826415], + [0.993363116319503, -0.113468728148246, -0.018829946054775], + [-0.113468728148035, -0.018829946054639, 0.993363116319529], + [-0.993363116319523, 0.113468728148204, -0.018829946053964], + [-0.018829946053903, 0.993363116319554, -0.113468728147943], + [0.113468728148066, -0.018829946054323, -0.993363116319532], + [-0.018829946054743, -0.993363116319533, 0.113468728147986], + [0.113468728148219, 0.018829946054485, 0.993363116319511], + [0.018829946054344, -0.99336311631951, -0.113468728148254], + [-0.113468728148178, 0.018829946054246, -0.993363116319521], + [0.018829946054485, 0.993363116319503, 0.113468728148287], + [0.99336311631954, 0.113468728147985, 0.018829946054382], + [-0.993363116319531, 
-0.113468728148037, 0.018829946054542], + [0.246398885891569, -0.720801569649804, 0.647867799957501], + [-0.720801569649392, 0.647867799957886, 0.246398885891762], + [-0.246398885891682, 0.720801569649632, 0.647867799957649], + [0.647867799957437, 0.246398885891663, -0.720801569649829], + [0.720801569649864, 0.647867799957577, -0.246398885891192], + [0.647867799957658, -0.246398885891679, 0.720801569649625], + [0.720801569649656, -0.647867799957734, 0.246398885891389], + [-0.647867799957904, -0.246398885891433, -0.720801569649489], + [-0.720801569649865, -0.647867799957373, -0.246398885891727], + [-0.647867799957474, 0.246398885891166, 0.720801569649966], + [0.246398885891794, 0.720801569649507, -0.647867799957745], + [-0.246398885891456, -0.720801569649666, -0.647867799957697], + [-0.793544204802179, -0.387628773401269, -0.469075184865183], + [-0.387628773401353, -0.469075184864794, -0.793544204802368], + [0.793544204802171, 0.387628773401536, -0.469075184864975], + [-0.469075184865097, -0.793544204802034, -0.387628773401668], + [0.38762877340168, -0.469075184864988, 0.793544204802093], + [-0.46907518486511, 0.793544204802104, 0.387628773401512], + [0.387628773401425, 0.469075184865298, -0.793544204802035], + [0.469075184865068, 0.793544204802337, -0.387628773401084], + [-0.387628773401491, 0.469075184864931, 0.793544204802219], + [0.469075184864784, -0.793544204802296, 0.387628773401512], + [-0.793544204802265, 0.387628773401224, 0.469075184865075], + [0.793544204802185, -0.387628773401823, 0.469075184864715], + [0.164945057653003, -0.958376909717154, 0.233038251960587], + [-0.958376909716935, 0.233038251961126, 0.164945057653512], + [-0.164945057653238, 0.958376909717001, 0.233038251961048], + [0.233038251960668, 0.164945057653504, -0.958376909717048], + [0.958376909717102, 0.233038251960514, -0.164945057653409], + [0.233038251960742, -0.164945057653288, 0.958376909717067], + [0.958376909717099, -0.233038251960827, 0.164945057652982], + [-0.233038251961122, -0.164945057653226, -0.958376909716986], + [-0.958376909717093, -0.233038251960632, -0.164945057653293], + [-0.233038251960434, 0.164945057653261, 0.958376909717147], + [0.164945057653494, 0.958376909716965, -0.233038251961015], + [-0.164945057653458, -0.958376909717031, -0.233038251960769], + [0.560484250466976, 0.813252649483695, -0.156452974040834], + [0.81325264948369, -0.156452974041446, 0.560484250466813], + [-0.56048425046724, -0.813252649483431, -0.156452974041263], + [-0.15645297404103, 0.560484250467047, 0.813252649483609], + [-0.81325264948382, -0.156452974040726, -0.560484250466826], + [-0.156452974041097, -0.560484250466778, -0.813252649483781], + [-0.81325264948363, 0.156452974040967, 0.560484250467035], + [0.156452974041285, -0.560484250467053, 0.813252649483555], + [0.813252649483481, 0.156452974041151, -0.560484250467199], + [0.156452974040881, 0.560484250466996, -0.813252649483672], + [0.560484250466836, -0.813252649483737, 0.156452974041122], + [-0.56048425046674, 0.813252649483823, 0.156452974041018], + [0.366630058651312, 0.922018832550933, -0.124353015704282], + [0.92201883255088, -0.124353015704762, 0.366630058651284], + [-0.366630058651761, -0.922018832550708, -0.124353015704629], + [-0.124353015704377, 0.366630058651577, 0.922018832550815], + [-0.922018832550933, -0.124353015704203, -0.366630058651341], + [-0.124353015704534, -0.366630058651111, -0.922018832550979], + [-0.922018832550883, 0.124353015704478, 0.366630058651372], + [0.12435301570463, -0.366630058651537, 0.922018832550797], + [0.922018832550745, 
0.124353015704463, -0.366630058651723], + [0.124353015704299, 0.366630058651563, -0.922018832550831], + [0.366630058651286, -0.922018832550923, 0.124353015704438], + [-0.366630058651229, 0.922018832550938, 0.124353015704492], + [-0.804671953651735, -0.070836250755727, 0.589478814365005], + [-0.070836250756058, 0.589478814365003, -0.804671953651707], + [0.804671953651921, 0.070836250755383, 0.589478814364792], + [0.589478814364726, -0.804671953651941, -0.070836250755714], + [0.070836250755939, 0.589478814364776, 0.804671953651884], + [0.589478814365018, 0.804671953651715, 0.070836250755846], + [0.070836250755601, -0.589478814364811, -0.804671953651888], + [-0.589478814364784, 0.804671953651884, -0.070836250755875], + [-0.070836250755551, -0.589478814364944, 0.804671953651795], + [-0.589478814364978, -0.804671953651759, 0.07083625075567], + [-0.804671953651836, 0.070836250756193, -0.589478814364811], + [0.804671953651731, -0.070836250755764, -0.589478814365006], + [-0.830597137771463, -0.481356221636722, 0.280008183125909], + [-0.481356221636763, 0.280008183126324, -0.830597137771299], + [0.830597137771467, 0.481356221636628, 0.280008183126056], + [0.280008183125864, -0.830597137771343, -0.481356221636956], + [0.481356221637075, 0.280008183125899, 0.830597137771262], + [0.280008183126004, 0.830597137771351, 0.481356221636859], + [0.481356221636653, -0.280008183125859, -0.83059713777152], + [-0.280008183126012, 0.83059713777152, -0.481356221636564], + [-0.481356221636741, -0.280008183126112, 0.830597137771384], + [-0.280008183126053, -0.830597137771314, 0.481356221636894], + [-0.830597137771366, 0.48135622163684, -0.280008183125994], + [0.830597137771194, -0.481356221637029, -0.280008183126178], + [0.622576105404642, 0.027441908430236, -0.782077959439399], + [0.027441908430276, -0.782077959439431, 0.622576105404601], + [-0.622576105404963, -0.027441908430045, -0.78207795943915], + [-0.782077959439118, 0.622576105404988, 0.027441908430397], + [-0.027441908430201, -0.782077959439296, -0.622576105404774], + [-0.782077959439408, -0.622576105404628, -0.027441908430289], + [-0.027441908430238, 0.782077959439221, 0.622576105404866], + [0.782077959439263, -0.62257610540482, 0.027441908430083], + [0.027441908430419, 0.782077959439269, -0.622576105404798], + [0.782077959439451, 0.622576105404591, -0.027441908429928], + [0.622576105404788, -0.02744190843038, 0.782077959439278], + [-0.622576105404572, 0.027441908429868, 0.782077959439468], + [-0.93186959347387, 0.318712863282032, -0.173323891998229], + [0.318712863281944, -0.173323891998258, -0.931869593473894], + [0.931869593473744, -0.318712863282051, -0.173323891998871], + [-0.173323891998841, -0.931869593473836, 0.318712863281799], + [-0.318712863281924, -0.173323891998617, 0.931869593473834], + [-0.173323891998245, 0.931869593473975, -0.318712863281714], + [-0.318712863281997, 0.173323891998515, -0.931869593473828], + [0.173323891998501, 0.931869593473801, 0.318712863282084], + [0.318712863282089, 0.173323891998539, 0.931869593473793], + [0.173323891998443, -0.931869593473824, -0.31871286328205], + [-0.931869593473865, -0.318712863281928, 0.173323891998448], + [0.931869593473897, 0.318712863281802, 0.173323891998503], + [0.883848176852703, 0.201423804475213, 0.422185801827685], + [0.201423804475703, 0.422185801827661, 0.883848176852602], + [-0.883848176852534, -0.201423804475554, 0.422185801827875], + [0.42218580182791, 0.883848176852484, 0.201423804475701], + [-0.201423804475472, 0.422185801827744, -0.883848176852615], + [0.422185801827623, 
-0.883848176852647, -0.201423804475586], + [-0.201423804475397, -0.422185801827833, 0.88384817685259], + [-0.42218580182793, -0.883848176852523, 0.201423804475489], + [0.201423804475479, -0.422185801827682, -0.883848176852643], + [-0.422185801827514, 0.883848176852769, -0.20142380447528], + [0.883848176852476, -0.201423804475614, -0.422185801827967], + [-0.88384817685271, 0.201423804475563, -0.422185801827502], + [0.204275039956405, 0.718770569884226, 0.664560438123663], + [0.718770569884334, 0.664560438123474, 0.204275039956637], + [-0.20427503995626, -0.718770569884265, 0.664560438123664], + [0.66456043812381, 0.204275039956156, 0.71877056988416], + [-0.718770569884325, 0.664560438123579, -0.204275039956328], + [0.664560438123492, -0.204275039956373, -0.718770569884393], + [-0.718770569884361, -0.664560438123554, 0.20427503995628], + [-0.664560438123554, -0.204275039956662, 0.718770569884254], + [0.71877056988409, -0.664560438123802, -0.204275039956432], + [-0.664560438123505, 0.204275039956682, -0.718770569884293], + [0.204275039956376, -0.718770569884165, -0.664560438123738], + [-0.204275039956367, 0.718770569884538, -0.664560438123337], + [-0.898847927472069, 0.43770082336828, 0.022144807560617], + [0.437700823367923, 0.022144807560963, -0.898847927472234], + [0.898847927472182, -0.437700823368065, 0.02214480756027], + [0.022144807560315, -0.898847927472293, 0.437700823367834], + [-0.437700823367766, 0.022144807560623, 0.898847927472319], + [0.022144807560559, 0.89884792747216, -0.437700823368094], + [-0.437700823368255, -0.022144807560327, -0.898847927472088], + [-0.022144807560661, 0.898847927472103, 0.437700823368207], + [0.43770082336803, -0.022144807560607, 0.898847927472191], + [-0.022144807560733, -0.898847927472195, -0.437700823368015], + [-0.898847927472245, -0.437700823367908, -0.022144807560796], + [0.898847927472313, 0.437700823367778, -0.022144807560634], + ] + ), + # degree 21 + np.array( + [ + [0.892653535762723, 0.412534053657361, -0.181618610454253], + [0.412534053425032, -0.181618610641782, 0.892653535831938], + [-0.892653535806407, -0.412534053627853, -0.181618610306575], + [-0.181618610613849, 0.892653535740475, 0.41253405363524], + [-0.412534053477435, -0.181618610422654, -0.892653535852304], + [-0.181618610451384, -0.892653535762812, -0.412534053658432], + [-0.41253405331709, 0.181618610611827, 0.892653535887918], + [0.181618610400136, -0.8926535358123, 0.412534053573911], + [0.412534053327996, 0.1816186104204, -0.892653535921825], + [0.181618610580789, 0.892653535810904, -0.412534053497399], + [0.892653535867644, -0.412534053472558, 0.181618610358339], + [-0.892653535855064, 0.41253405353516, 0.181618610277971], + [-0.292093742593433, -0.29576702799317, 0.909507070170347], + [-0.295767028026887, 0.90950707008926, -0.292093742811776], + [0.292093742447864, 0.295767028039713, 0.909507070201962], + [0.909507070147612, -0.292093742926721, -0.295767027733934], + [0.295767028145396, 0.909507070084441, 0.292093742706783], + [0.909507070188854, 0.292093742689207, 0.295767027841675], + [0.295767027907311, -0.909507070148419, -0.292093742748651], + [-0.909507070101221, 0.292093743159272, -0.295767027646927], + [-0.295767027835333, -0.909507070047293, 0.292093743136414], + [-0.909507070218591, -0.292093742721776, 0.295767027718069], + [-0.292093742540896, 0.295767027793147, -0.909507070252266], + [0.292093742861938, -0.295767027747614, -0.909507070163969], + [-0.575225718038192, 0.024120572825078, 0.817639022597403], + [0.024120572786144, 0.817639022511238, 
-0.575225718162301], + [0.575225718116478, -0.024120572979213, 0.817639022537781], + [0.817639022556003, -0.57522571810348, 0.024120572671469], + [-0.024120573041503, 0.817639022440757, 0.575225718251777], + [0.817639022458379, 0.575225718229118, -0.024120572984526], + [-0.024120572818239, -0.81763902258126, -0.575225718061424], + [-0.817639022543578, 0.575225718123882, 0.024120572606111], + [0.02412057271295, -0.817639022527296, 0.575225718142546], + [-0.817639022600495, -0.575225718035174, -0.024120572792228], + [-0.575225717925469, -0.024120572711052, -0.81763902268007], + [0.57522571790823, 0.024120572594155, -0.817639022695646], + [-0.1288331617248, 0.05224764072024, 0.990288947973853], + [0.052247640694409, 0.990288947958895, -0.128833161850251], + [0.128833161840325, -0.052247640320038, 0.990288947979938], + [0.990288947949717, -0.128833161924796, 0.052247640684558], + [-0.05224764038851, 0.990288947967581, 0.128833161907538], + [0.99028894797773, 0.128833161878001, -0.052247640268992], + [-0.052247640390409, -0.99028894796219, -0.128833161948209], + [-0.990288947960626, 0.128833161896649, 0.052247640547187], + [0.052247640527808, -0.990288947953251, 0.1288331619612], + [-0.990288947970868, -0.128833161936205, -0.052247640255526], + [-0.128833161790478, -0.052247640337643, -0.990288947985494], + [0.128833161857416, 0.052247640551545, -0.9902889479655], + [0.71800638603475, 0.657446876255993, -0.228539787596286], + [0.657446876286737, -0.228539787831922, 0.718006385931596], + [-0.718006386109442, -0.657446876171434, -0.228539787604877], + [-0.228539787737219, 0.718006385947422, 0.657446876302374], + [-0.657446876241021, -0.2285397877138, -0.718006386011054], + [-0.228539787678997, -0.718006386031359, -0.657446876230945], + [-0.657446876361185, 0.228539787860549, 0.718006385854315], + [0.228539787703065, -0.718006385857385, 0.657446876412577], + [0.657446876304454, 0.228539787874017, -0.718006385901975], + [0.228539787784967, 0.718006385813853, -0.657446876431648], + [0.71800638588076, -0.657446876363485, 0.228539787770851], + [-0.718006385891018, 0.657446876371558, 0.228539787715401], + [0.863176473117803, 0.468181816653138, 0.189029528940001], + [0.468181816438486, 0.189029529197492, 0.86317647317784], + [-0.863176473194446, -0.46818181657642, 0.189029528780033], + [0.189029529125527, 0.863176473064389, 0.468181816676708], + [-0.468181816392671, 0.189029528897443, -0.863176473268398], + [0.189029528792174, -0.863176473143688, -0.4681818166651], + [-0.468181816411213, -0.189029529128138, 0.863176473207821], + [-0.189029528897852, -0.86317647308972, 0.468181816721931], + [0.468181816508867, -0.189029528930555, -0.863176473198123], + [-0.189029529001823, 0.863176473106659, -0.468181816648722], + [0.863176473135229, -0.468181816648642, -0.189029528871561], + [-0.863176473123334, 0.468181816698762, -0.189029528801744], + [0.772632856847133, -0.51705945069559, 0.368358511462152], + [-0.517059450567132, 0.368358511585515, 0.772632856874286], + [-0.772632856806081, 0.517059450647391, 0.368358511615915], + [0.368358511648001, 0.772632856806054, -0.517059450624573], + [0.517059450494007, 0.368358511816588, -0.772632856813056], + [0.368358511720496, -0.772632856802476, 0.517059450578273], + [0.517059450583445, -0.368358511487117, 0.77263285691028], + [-0.36835851156733, -0.772632856859467, -0.517059450602229], + [-0.517059450502369, -0.368358511665956, -0.772632856879275], + [-0.368358511469803, 0.772632856855651, 0.517059450677412], + [0.772632856934749, 0.517059450691919, 
-0.368358511283531], + [-0.772632856927485, -0.517059450633778, -0.368358511380378], + [-0.847819231914648, -0.066325775900167, -0.526121128113002], + [-0.066325775913631, -0.526121128257686, -0.847819231823809], + [0.847819231883018, 0.066325775819852, -0.526121128174097], + [-0.526121128348762, -0.847819231766957, -0.06632577591791], + [0.06632577584612, -0.526121128407098, 0.847819231736372], + [-0.52612112845924, 0.84781923170908, 0.066325775781366], + [0.066325775945785, 0.52612112834438, -0.847819231767496], + [0.526121128449532, 0.847819231700692, -0.066325775965613], + [-0.066325775877211, 0.526121128306388, 0.847819231796436], + [0.526121128504669, -0.847819231665213, 0.06632577598176], + [-0.847819231821725, 0.066325775941005, 0.526121128257594], + [0.847819231850264, -0.066325775996655, 0.52612112820459], + [0.00980574322923, 0.942983815842593, 0.332694109443892], + [0.942983815808923, 0.332694109539748, 0.00980574321495], + [-0.00980574337969, -0.942983815787291, 0.332694109596207], + [0.332694109226554, 0.009805743204272, 0.942983815919532], + [-0.94298381577404, 0.332694109635647, -0.009805743315804], + [0.332694109397996, -0.00980574329891, -0.942983815858062], + [-0.942983815776114, -0.332694109630098, 0.009805743304667], + [-0.332694109319027, -0.009805743188507, 0.94298381588707], + [0.942983815775082, -0.332694109635199, -0.009805743230763], + [-0.332694109455765, 0.009805743389762, -0.942983815836735], + [0.00980574330114, -0.942983815752524, -0.332694109697065], + [-0.009805743287713, 0.942983815791379, -0.332694109587331], + [0.785599248371152, -0.405156945312269, -0.467634120465896], + [-0.405156944932125, -0.467634120649859, 0.785599248457698], + [-0.78559924820179, 0.405156945434051, -0.467634120644904], + [-0.467634120611242, 0.785599248334623, -0.405156945215339], + [0.405156945136423, -0.467634120868201, -0.785599248222366], + [-0.467634120811804, -0.785599248145609, 0.405156945350347], + [0.405156944841985, 0.467634120861332, 0.785599248378305], + [0.467634120786726, -0.785599248249857, -0.405156945177156], + [-0.405156944999643, 0.467634120871098, -0.785599248291182], + [0.467634120893713, 0.78559924823424, 0.405156945083953], + [0.785599248313341, 0.405156945117104, 0.467634120732106], + [-0.7855992482811, -0.40515694519737, 0.467634120716727], + [-0.737331999131492, 0.620851501013764, -0.26624225199189], + [0.620851500949186, -0.266242252154895, -0.73733199912701], + [0.737331999060061, -0.620851501088737, -0.266242252014883], + [-0.266242251948631, -0.737331999103255, 0.62085150106585], + [-0.620851501079221, -0.2662422522338, 0.737331998989025], + [-0.266242252011624, 0.737331998996222, -0.620851501165951], + [-0.620851501072124, 0.26624225222256, -0.73733199899906], + [0.266242252113864, 0.737331998832974, 0.620851501315983], + [0.620851501187387, 0.266242252328374, 0.737331998863797], + [0.26624225193225, -0.73733199893899, -0.620851501267959], + [-0.737331998947943, -0.620851501183297, 0.266242252104879], + [0.737331998835007, 0.620851501305786, 0.26624225213201], + [0.726871469165659, -0.027488282350428, -0.686223186468061], + [-0.027488282182755, -0.686223186448325, 0.726871469190633], + [-0.726871469172931, 0.027488282371885, -0.686223186459499], + [-0.686223186449712, 0.726871469185406, -0.027488282286341], + [0.027488282351607, -0.68622318649112, -0.726871469143845], + [-0.686223186545622, -0.726871469089794, 0.027488282420281], + [0.027488282266836, 0.686223186470335, 0.726871469166674], + [0.686223186661183, -0.726871468983422, 
-0.027488282348185], + [-0.027488282251029, 0.686223186523092, -0.726871469117465], + [0.686223186609112, 0.726871469033498, 0.027488282323948], + [0.726871469070107, 0.02748828233555, 0.686223186569869], + [-0.726871469080183, -0.027488282309716, 0.686223186560232], + [0.665363385720515, 0.580860267739271, 0.468927408352716], + [0.580860267577087, 0.468927408488638, 0.665363385766308], + [-0.66536338567738, -0.580860267719575, 0.468927408438318], + [0.468927408340783, 0.665363385821863, 0.580860267632813], + [-0.580860267528453, 0.468927408678832, -0.665363385674723], + [0.468927408372614, -0.665363385698803, -0.580860267748078], + [-0.580860267640877, -0.468927408552762, 0.665363385665427], + [-0.468927408468336, -0.665363385847947, 0.580860267499961], + [0.580860267386752, -0.468927408654519, -0.665363385815563], + [-0.468927408375699, 0.665363385651356, -0.580860267799938], + [0.665363385651819, -0.580860267791212, -0.46892740838585], + [-0.665363385751734, 0.580860267548017, -0.468927408545326], + [-0.580125367305304, -0.779099597924434, 0.237609710918707], + [-0.779099598053518, 0.237609710909934, -0.580125367135539], + [0.580125367186808, 0.779099597977732, 0.237609711033258], + [0.237609710695932, -0.58012536727611, -0.779099598014114], + [0.779099598064732, 0.23760971114732, 0.58012536702325], + [0.237609710819285, 0.580125367047426, 0.779099598146774], + [0.779099598170224, -0.237609710849642, -0.580125367003499], + [-0.237609710811802, 0.580125367157256, -0.779099598067276], + [-0.779099598074961, -0.237609711045128, 0.580125367051369], + [-0.237609710609253, -0.580125367022359, 0.779099598229495], + [-0.580125367090094, 0.779099598151966, -0.237609710698086], + [0.580125367218411, -0.779099597966716, -0.237609710992215], + [0.9586680253602, 0.101113605900539, -0.265954236389956], + [0.101113605889893, -0.265954236477199, 0.95866802533712], + [-0.95866802532641, -0.101113606095432, -0.26595423643766], + [-0.265954236634179, 0.958668025294555, 0.101113605880558], + [-0.101113606003171, -0.265954236656317, -0.958668025275482], + [-0.265954236715455, -0.958668025246162, -0.101113606125602], + [-0.101113605825438, 0.265954236414664, 0.958668025361267], + [0.265954236286739, -0.958668025393583, 0.101113605855522], + [0.101113605802444, 0.265954236260664, -0.958668025406415], + [0.265954236515854, 0.958668025322577, -0.101113605926106], + [0.9586680254495, -0.101113605909101, 0.265954236064808], + [-0.9586680254786, 0.101113605789497, 0.265954236005386], + [-0.784431814417085, 0.284319025007229, 0.551207239202516], + [0.284319024822848, 0.551207239320709, -0.784431814400862], + [0.784431814443422, -0.284319024888131, 0.551207239226467], + [0.551207239434677, -0.784431814291888, 0.284319024902556], + [-0.284319024640161, 0.551207239347504, 0.784431814448249], + [0.551207239408357, 0.784431814400998, -0.284319024652546], + [-0.28431902471494, -0.551207239160137, -0.784431814552804], + [-0.551207239417649, 0.784431814426743, 0.284319024563503], + [0.284319024477106, -0.551207239394067, 0.784431814474629], + [-0.551207239227164, -0.784431814510832, -0.284319024700797], + [-0.7844318146549, -0.284319024757729, -0.551207238992772], + [0.784431814542139, 0.284319024689884, -0.55120723918824], + [0.166663878535118, 0.97946877886665, 0.113419851953285], + [0.979468778892362, 0.113419852011248, 0.166663878344564], + [-0.166663878322335, -0.979468778877222, 0.113419852174659], + [0.113419851852603, 0.166663878465092, 0.979468778890224], + [-0.979468778908051, 0.113419852233229, 
-0.166663878101297], + [0.113419852023532, -0.166663878213165, -0.979468778913298], + [-0.979468778891418, -0.113419852088755, 0.166663878297368], + [-0.113419851942299, -0.166663878383785, 0.979468778893673], + [0.979468778887792, -0.113419852252651, -0.166663878207142], + [-0.113419851887333, 0.166663878420061, -0.979468778893865], + [0.166663878513312, -0.97946877885884, -0.113419852052775], + [-0.166663878525992, 0.979468778852403, -0.113419852089727], + [0.90354263539087, 0.099002690679599, 0.416904273507865], + [0.09900269051118, 0.416904273753692, 0.903542635295897], + [-0.903542635383533, -0.099002690647923, 0.416904273531288], + [0.41690427395825, 0.903542635193768, 0.09900269058185], + [-0.099002690414933, 0.416904273699732, -0.903542635331341], + [0.416904273843964, -0.903542635237517, -0.099002690663845], + [-0.099002690464192, -0.416904273937254, 0.903542635216348], + [-0.416904274206036, -0.903542635110147, 0.099002690301575], + [0.099002690128044, -0.41690427406438, -0.903542635194523], + [-0.416904274113744, 0.903542635131386, -0.099002690496392], + [0.903542635279275, -0.099002690467102, -0.416904273800183], + [-0.903542635234399, 0.099002690245829, -0.416904273949988], + [0.278762404536092, 0.349312185537063, -0.894579520698175], + [0.349312185586056, -0.894579520608515, 0.278762404762431], + [-0.278762404540525, -0.349312185503473, -0.89457952070991], + [-0.894579520734144, 0.278762404727917, 0.349312185291866], + [-0.349312185466701, -0.894579520677723, -0.278762404689896], + [-0.894579520788864, -0.278762404658677, -0.349312185206984], + [-0.349312185551041, 0.894579520682798, 0.278762404567923], + [0.894579520785219, -0.278762404680469, 0.349312185198929], + [0.349312185549623, 0.89457952067923, -0.278762404581149], + [0.894579520781805, 0.278762404555908, -0.349312185307075], + [0.27876240443795, -0.3493121855065, 0.894579520740692], + [-0.278762404443259, 0.349312185428787, 0.894579520769382], + [0.555896230179415, -0.676833211736671, 0.48257246581476], + [-0.676833211681567, 0.482572466040116, 0.555896230050876], + [-0.555896230314892, 0.676833211522987, 0.482572465958401], + [0.482572465910283, 0.555896230164672, -0.676833211680673], + [0.676833211457692, 0.482572466092895, -0.555896230277639], + [0.482572465902981, -0.555896230367909, 0.676833211518957], + [0.676833211635592, -0.482572466071981, 0.555896230079191], + [-0.482572466150586, -0.555896230230084, -0.676833211455616], + [-0.676833211438286, -0.482572466327737, -0.5558962300974], + [-0.482572465972373, 0.55589623026777, 0.676833211551727], + [0.555896230192691, 0.676833211589453, -0.482572466005949], + [-0.555896230194338, -0.676833211455537, -0.482572466191875], + ] + ), ] diff --git a/py4DSTEM/process/diffraction/utils.py b/py4DSTEM/process/diffraction/utils.py index 20a14f68d..cfb11f044 100644 --- a/py4DSTEM/process/diffraction/utils.py +++ b/py4DSTEM/process/diffraction/utils.py @@ -8,7 +8,6 @@ from emdfile import tqdmnd - @dataclass class Orientation: """ @@ -68,6 +67,16 @@ def get_orientation(self, ind_x, ind_y): orientation.angles = self.angles[ind_x, ind_y] return orientation + def get_orientation_single(self, ind_x, ind_y, ind_match): + orientation = Orientation(num_matches=1) + orientation.matrix = self.matrix[ind_x, ind_y, ind_match] + orientation.family = self.family[ind_x, ind_y, ind_match] + orientation.corr = self.corr[ind_x, ind_y, ind_match] + orientation.inds = self.inds[ind_x, ind_y, ind_match] + orientation.mirror = self.mirror[ind_x, ind_y, ind_match] + orientation.angles = 
self.angles[ind_x, ind_y, ind_match] + return orientation + # def __copy__(self): # return OrientationMap(self.name) # def __deepcopy__(self, memo): @@ -76,9 +85,9 @@ def get_orientation(self, ind_x, ind_y): def sort_orientation_maps( orientation_map, - sort = "intensity", - cluster_thresh = 0.1, - ): + sort="intensity", + cluster_thresh=0.1, +): """ Sort the orientation maps along the ind_match direction, either by intensity or by clustering similar angles (greedily, in order of intensity). @@ -102,15 +111,21 @@ def sort_orientation_maps( desc="Sorting orientations", unit=" probe positions", # disable=not progress_bar, - ): - inds = np.argsort(orientation_map.corr[rx,ry])[::-1] - - orientation_sort.matrix[rx,ry,:,:,:] = orientation_sort.matrix[rx,ry,inds,:,:] - orientation_sort.family[rx,ry,:,:,:] = orientation_sort.family[rx,ry,inds,:,:] - orientation_sort.corr[rx,ry,:] = orientation_sort.corr[rx,ry,inds] - orientation_sort.inds[rx,ry,:,:] = orientation_sort.inds[rx,ry,inds,:] - orientation_sort.mirror[rx,ry,:] = orientation_sort.mirror[rx,ry,inds] - orientation_sort.angles[rx,ry,:,:] = orientation_sort.angles[rx,ry,inds,:] + ): + inds = np.argsort(orientation_map.corr[rx, ry])[::-1] + + orientation_sort.matrix[rx, ry, :, :, :] = orientation_sort.matrix[ + rx, ry, inds, :, : + ] + orientation_sort.family[rx, ry, :, :, :] = orientation_sort.family[ + rx, ry, inds, :, : + ] + orientation_sort.corr[rx, ry, :] = orientation_sort.corr[rx, ry, inds] + orientation_sort.inds[rx, ry, :, :] = orientation_sort.inds[rx, ry, inds, :] + orientation_sort.mirror[rx, ry, :] = orientation_sort.mirror[rx, ry, inds] + orientation_sort.angles[rx, ry, :, :] = orientation_sort.angles[ + rx, ry, inds, : + ] # elif sort == "cluster": # mask = np.zeros_like(orientation_map.corr, dtype='bool') @@ -118,26 +133,23 @@ def sort_orientation_maps( else: err_msg = "Invalid sorting method: " + sort - raise Exception(err_msg) - + raise Exception(err_msg) return orientation_sort - - def calc_1D_profile( k, g_coords, g_int, - remove_origin = True, - k_broadening = 0.0, - int_scale = None, - normalize_intensity = True, - ): + remove_origin=True, + k_broadening=0.0, + int_scale=None, + normalize_intensity=True, +): """ Utility function to calculate a 1D histogram from the diffraction vector lengths - stored in a Crystal class. + stored in a Crystal class. Args: k (np.array): k coordinates. 
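The get_orientation_single accessor added to OrientationMap above mirrors get_orientation but slices out one match along the ind_match axis, which pairs naturally with sort_orientation_maps: once the match stack is sorted by correlation, index 0 at each probe position is the best match. A minimal usage sketch, assuming an OrientationMap named orientation_map produced by py4DSTEM's orientation matching (variable names and probe coordinates are illustrative):

    from py4DSTEM.process.diffraction.utils import sort_orientation_maps

    # Sort the matches at each probe position by correlation, descending.
    om_sorted = sort_orientation_maps(orientation_map, sort="intensity")

    # Single best-correlated match at probe position (3, 5):
    best = om_sorted.get_orientation_single(3, 5, 0)
    print(best.matrix, best.corr, best.mirror)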
@@ -162,39 +174,45 @@ def calc_1D_profile( # get discrete plot from structure factor amplitudes int_profile = np.zeros_like(k) - k_px = (g_coords - k_min) / k_step; - kf = np.floor(k_px).astype('int') - dk = k_px - kf; + k_px = (g_coords - k_min) / k_step + kf = np.floor(k_px).astype("int") + dk = k_px - kf sub = np.logical_and(kf >= 0, kf < k_num) if int_scale.shape[0] > 1: int_profile = np.bincount( - np.floor(k_px[sub]).astype('int'), - weights = (1-dk[sub])*g_int[sub]*int_scale[sub], - minlength = k_num) + np.floor(k_px[sub]).astype("int"), + weights=(1 - dk[sub]) * g_int[sub] * int_scale[sub], + minlength=k_num, + ) else: int_profile = np.bincount( - np.floor(k_px[sub]).astype('int'), - weights = (1-dk[sub])*g_int[sub], - minlength = k_num) - sub = np.logical_and(k_px >= -1, k_px < k_num-1) + np.floor(k_px[sub]).astype("int"), + weights=(1 - dk[sub]) * g_int[sub], + minlength=k_num, + ) + sub = np.logical_and(k_px >= -1, k_px < k_num - 1) if int_scale.shape[0] > 1: int_profile += np.bincount( - np.floor(k_px[sub] + 1).astype('int'), - weights = dk[sub]*g_int[sub]*int_scale[sub], - minlength = k_num) + np.floor(k_px[sub] + 1).astype("int"), + weights=dk[sub] * g_int[sub] * int_scale[sub], + minlength=k_num, + ) else: int_profile += np.bincount( - np.floor(k_px[sub] + 1).astype('int'), - weights = dk[sub]*g_int[sub], - minlength = k_num) + np.floor(k_px[sub] + 1).astype("int"), + weights=dk[sub] * g_int[sub], + minlength=k_num, + ) if remove_origin is True: int_profile[0:2] = 0 # Apply broadening if needed if k_broadening > 0.0: - int_profile = gaussian_filter(int_profile, k_broadening/k_step, mode='constant') + int_profile = gaussian_filter( + int_profile, k_broadening / k_step, mode="constant" + ) if normalize_intensity: int_profile /= np.max(int_profile) diff --git a/py4DSTEM/process/fit/fit.py b/py4DSTEM/process/fit/fit.py index 32809ddb1..349d88530 100644 --- a/py4DSTEM/process/fit/fit.py +++ b/py4DSTEM/process/fit/fit.py @@ -4,10 +4,12 @@ from scipy.optimize import curve_fit from inspect import signature + def gaussian(x, A, mu, sigma): - return A*np.exp(-0.5*((x-mu)/sigma)**2) + return A * np.exp(-0.5 * ((x - mu) / sigma) ** 2) + -def fit_1D_gaussian(xdata,ydata,xmin,xmax): +def fit_1D_gaussian(xdata, ydata, xmin, xmax): """ Fits a 1D gaussian to the subset of the 1D curve f(xdata)=ydata within the window (xmin,xmax). Returns A,mu,sigma. Retrieve the full curve with @@ -15,19 +17,23 @@ def fit_1D_gaussian(xdata,ydata,xmin,xmax): >>> fit_gaussian = py4DSTEM.process.fit.gaussian(xdata,A,mu,sigma) """ - mask = (xmin<=xdata)*(xmax>xdata) + mask = (xmin <= xdata) * (xmax > xdata) inds = np.nonzero(mask)[0] _xdata = xdata[inds] _ydata = ydata[inds] scale = np.max(_ydata) - _ydata = _ydata/scale + _ydata = _ydata / scale - p0 = [np.max(_ydata),_xdata[np.argmax(_ydata)],(xmax-xmin)/8.] # TODO: better guess for std + p0 = [ + np.max(_ydata), + _xdata[np.argmax(_ydata)], + (xmax - xmin) / 8.0, + ] # TODO: better guess for std + popt, pcov = curve_fit(gaussian, _xdata, _ydata, p0=p0) + A, mu, sigma = scale * popt[0], popt[1], popt[2] + return A, mu, sigma - popt,pcov = curve_fit(gaussian,_xdata,_ydata,p0=p0) - A,mu,sigma = scale*popt[0],popt[1],popt[2] - return A,mu,sigma def fit_2D( function, @@ -37,7 +43,7 @@ def fit_2D( robust=False, robust_steps=3, robust_thresh=2, - ): +): """ Performs a 2D fit. 
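fit_1D_gaussian above fits only the points with xmin <= x < xmax, normalizes that window to unit peak height, and rescales A on the way out, so the returned triple plugs straight back into gaussian(). A short sketch with synthetic data, assuming the py4DSTEM.process.fit namespace re-exports both names as the docstring example suggests:

    import numpy as np
    from py4DSTEM.process.fit import fit_1D_gaussian, gaussian

    # Synthetic 1D curve: a Gaussian peak plus a little noise.
    xdata = np.linspace(0, 10, 200)
    ydata = gaussian(xdata, 3.0, 4.2, 0.5) + 0.01 * np.random.rand(200)

    # Fit inside the window [3, 6); the initial sigma guess is (xmax - xmin) / 8.
    A, mu, sigma = fit_1D_gaussian(xdata, ydata, 3.0, 6.0)
    fit_curve = gaussian(xdata, A, mu, sigma)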
@@ -70,74 +76,82 @@ def fit_2D( """ # get shape shape = data.shape - shape1D = [1,np.prod(shape)] + shape1D = [1, np.prod(shape)] # x and y coordinates normalized from 0 to 1 - x,y = np.linspace(0, 1, shape[0]),np.linspace(0, 1, shape[1]) - ry,rx = np.meshgrid(y,x) - rx_1D = rx.reshape((1,np.prod(shape))) - ry_1D = ry.reshape((1,np.prod(shape))) + x, y = np.linspace(0, 1, shape[0]), np.linspace(0, 1, shape[1]) + ry, rx = np.meshgrid(y, x) + rx_1D = rx.reshape((1, np.prod(shape))) + ry_1D = ry.reshape((1, np.prod(shape))) xy = np.vstack((rx_1D, ry_1D)) # if robust fitting is turned off, set number of robust iterations to 0 - if robust==False: - robust_steps=0 + if robust == False: + robust_steps = 0 # least squares fitting - for k in range(robust_steps+1): - + for k in range(robust_steps + 1): # in 1st iteration, set up params and mask if k == 0: if popt is None: - popt = np.zeros((1,len(signature(function).parameters)-1)) + popt = np.zeros((1, len(signature(function).parameters) - 1)) if data_mask is not None: mask = data_mask else: - mask = np.ones(shape,dtype=bool) + mask = np.ones(shape, dtype=bool) # otherwise, get fitting error and add high error pixels to mask else: - fit_mean_square_error = ( - function(xy,*popt).reshape(shape) - data)**2 - _mask = fit_mean_square_error > np.mean( - fit_mean_square_error) * robust_thresh**2 + fit_mean_square_error = (function(xy, *popt).reshape(shape) - data) ** 2 + _mask = ( + fit_mean_square_error + > np.mean(fit_mean_square_error) * robust_thresh**2 + ) mask[_mask] == False # perform fitting popt, pcov = curve_fit( function, - np.vstack(( - rx_1D[mask.reshape(shape1D)], - ry_1D[mask.reshape(shape1D)])), + np.vstack((rx_1D[mask.reshape(shape1D)], ry_1D[mask.reshape(shape1D)])), data[mask], - p0=popt) + p0=popt, + ) - fit_ar = function(xy,*popt).reshape(shape) + fit_ar = function(xy, *popt).reshape(shape) return popt, pcov, fit_ar, mask # Functions for fitting + def plane(xy, mx, my, b): - return mx*xy[0] + my*xy[1] + b + return mx * xy[0] + my * xy[1] + b + def parabola(xy, c0, cx1, cx2, cy1, cy2, cxy): - return c0 + \ - cx1*xy[0] + cy1*xy[1] + \ - cx2*xy[0]**2 + cy2*xy[1]**2 + cxy*xy[0]*xy[1] + return ( + c0 + + cx1 * xy[0] + + cy1 * xy[1] + + cx2 * xy[0] ** 2 + + cy2 * xy[1] ** 2 + + cxy * xy[0] * xy[1] + ) + def bezier_two(xy, c00, c01, c02, c10, c11, c12, c20, c21, c22): + return ( + c00 * ((1 - xy[0]) ** 2) * ((1 - xy[1]) ** 2) + + c10 * 2 * (1 - xy[0]) * xy[0] * ((1 - xy[1]) ** 2) + + c20 * (xy[0] ** 2) * ((1 - xy[1]) ** 2) + + c01 * 2 * ((1 - xy[0]) ** 2) * (1 - xy[1]) * xy[1] + + c11 * 4 * (1 - xy[0]) * xy[0] * (1 - xy[1]) * xy[1] + + c21 * 2 * (xy[0] ** 2) * (1 - xy[1]) * xy[1] + + c02 * ((1 - xy[0]) ** 2) * (xy[1] ** 2) + + c12 * 2 * (1 - xy[0]) * xy[0] * (xy[1] ** 2) + + c22 * (xy[0] ** 2) * (xy[1] ** 2) + ) - return \ - c00 *((1-xy[0])**2) * ((1-xy[1])**2) + \ - c10*2*(1-xy[0])*xy[0] * ((1-xy[1])**2) + \ - c20 *(xy[0]**2) * ((1-xy[1])**2) + \ - c01*2*((1-xy[0])**2) * (1-xy[1])*xy[1] + \ - c11*4*(1-xy[0])*xy[0] * (1-xy[1])*xy[1] + \ - c21*2*(xy[0]**2) * (1-xy[1])*xy[1] + \ - c02 *((1-xy[0])**2) * (xy[1]**2) + \ - c12*2*(1-xy[0])*xy[0] * (xy[1]**2) + \ - c22 *(xy[0]**2) * (xy[1]**2) def polar_gaussian_2D( tq, @@ -147,17 +161,17 @@ def polar_gaussian_2D( sigma_t, sigma_q, C, - ): +): # unpack position - t,q = tq + t, q = tq # set theta value to its closest periodic reflection to mu_t - #t = np.square(t-mu_t) - #t2 = np.min(np.vstack([t,1-t])) - t2 = np.square(t-mu_t) - return \ - I0 * np.exp( - - ( t2/(2*sigma_t**2) + \ - 
(q-mu_q)**2/(2*sigma_q**2) ) ) + C + # t = np.square(t-mu_t) + # t2 = np.min(np.vstack([t,1-t])) + t2 = np.square(t - mu_t) + return ( + I0 * np.exp(-(t2 / (2 * sigma_t**2) + (q - mu_q) ** 2 / (2 * sigma_q**2))) + + C + ) def polar_twofold_gaussian_2D( @@ -167,18 +181,18 @@ def polar_twofold_gaussian_2D( mu_q, sigma_t, sigma_q, - ): - +): # unpack position - t,q = tq + t, q = tq # theta periodicity - dt = np.mod(t - mu_t + np.pi/2, np.pi) - np.pi/2 + dt = np.mod(t - mu_t + np.pi / 2, np.pi) - np.pi / 2 # output intensity return I0 * np.exp( - (dt**2 / (-2.0*sigma_t**2)) + \ - ((q - mu_q)**2 / (-2.0*sigma_q**2)) ) + (dt**2 / (-2.0 * sigma_t**2)) + ((q - mu_q) ** 2 / (-2.0 * sigma_q**2)) + ) + def polar_twofold_gaussian_2D_background( tq, @@ -188,29 +202,28 @@ def polar_twofold_gaussian_2D_background( sigma_t, sigma_q, C, - ): - +): # unpack position - t,q = tq + t, q = tq # theta periodicity - dt = np.mod(t - mu_t + np.pi/2, np.pi) - np.pi/2 + dt = np.mod(t - mu_t + np.pi / 2, np.pi) - np.pi / 2 # output intensity return C + I0 * np.exp( - (dt**2 / (-2.0*sigma_t**2)) + \ - ((q - mu_q)**2 / (-2.0*sigma_q**2)) ) + (dt**2 / (-2.0 * sigma_t**2)) + ((q - mu_q) ** 2 / (-2.0 * sigma_q**2)) + ) def fit_2D_polar_gaussian( data, - mask = None, - p0 = None, - robust = False, - robust_steps = 3, - robust_thresh = 2, - constant_background = False, - ): + mask=None, + p0=None, + robust=False, + robust_steps=3, + robust_thresh=2, + constant_background=False, +): """ NOTE - this cannot work without using pixel coordinates - something is wrong in the workflow. @@ -252,20 +265,20 @@ def fit_2D_polar_gaussian( if constant_background: return fit_2D( polar_twofold_gaussian_2D_background, - data = data, - data_mask = mask, - popt = p0, - robust = robust, - robust_steps = robust_steps, - robust_thresh = robust_thresh + data=data, + data_mask=mask, + popt=p0, + robust=robust, + robust_steps=robust_steps, + robust_thresh=robust_thresh, ) else: return fit_2D( polar_twofold_gaussian_2D, - data = data, - data_mask = mask, - popt = p0, - robust = robust, - robust_steps = robust_steps, - robust_thresh = robust_thresh + data=data, + data_mask=mask, + popt=p0, + robust=robust, + robust_steps=robust_steps, + robust_thresh=robust_thresh, ) diff --git a/py4DSTEM/process/latticevectors/__init__.py b/py4DSTEM/process/latticevectors/__init__.py index d1b365220..560a3b7e6 100644 --- a/py4DSTEM/process/latticevectors/__init__.py +++ b/py4DSTEM/process/latticevectors/__init__.py @@ -2,4 +2,3 @@ from py4DSTEM.process.latticevectors.index import * from py4DSTEM.process.latticevectors.fit import * from py4DSTEM.process.latticevectors.strain import * - diff --git a/py4DSTEM/process/latticevectors/fit.py b/py4DSTEM/process/latticevectors/fit.py index fef72aca3..659bc8940 100644 --- a/py4DSTEM/process/latticevectors/fit.py +++ b/py4DSTEM/process/latticevectors/fit.py @@ -6,6 +6,7 @@ from emdfile import tqdmnd, PointList, PointListArray from py4DSTEM.data import RealSlice + def fit_lattice_vectors(braggpeaks, x0=0, y0=0, minNumPeaks=5): """ Fits lattice vectors g1,g2 to braggpeaks given some known (h,k) indexing. 
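One behavioral note on the fit_2D hunk above: the robust branch ends with mask[_mask] == False, an unchanged context line that compares rather than assigns, so the result is discarded, the mask never changes, and every robust iteration refits the same pixels. A sketch of the presumed intent, with the update pulled into a hypothetical helper so it runs standalone:

    import numpy as np

    def robust_mask_update(fit_error_sq, mask, robust_thresh):
        # Hypothetical helper: flag pixels whose squared fit error exceeds
        # robust_thresh**2 times the mean error, and drop them from the mask.
        outliers = fit_error_sq > np.mean(fit_error_sq) * robust_thresh**2
        mask[outliers] = False  # assignment; `==` here would be a no-op comparison
        return mask

With assignment, each of the robust_steps passes shrinks the active pixel set before curve_fit is called again, so high-error pixels stop influencing subsequent fits.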
@@ -33,42 +34,45 @@ def fit_lattice_vectors(braggpeaks, x0=0, y0=0, minNumPeaks=5): * **error**: *(float)* the fit error """ assert isinstance(braggpeaks, PointList) - assert np.all([name in braggpeaks.dtype.names for name in ('qx','qy','intensity','h','k')]) + assert np.all( + [name in braggpeaks.dtype.names for name in ("qx", "qy", "intensity", "h", "k")] + ) braggpeaks = braggpeaks.copy() # Remove unindexed peaks - if 'index_mask' in braggpeaks.dtype.names: - deletemask = braggpeaks.data['index_mask'] == False + if "index_mask" in braggpeaks.dtype.names: + deletemask = braggpeaks.data["index_mask"] == False braggpeaks.remove(deletemask) # Check to ensure enough peaks are present if braggpeaks.length < minNumPeaks: - return None,None,None,None,None,None,None + return None, None, None, None, None, None, None # Get M, the matrix of (h,k) indices - h,k = braggpeaks.data['h'],braggpeaks.data['k'] - M = np.vstack((np.ones_like(h,dtype=int),h,k)).T + h, k = braggpeaks.data["h"], braggpeaks.data["k"] + M = np.vstack((np.ones_like(h, dtype=int), h, k)).T # Get alpha, the matrix of measured Bragg peak positions - alpha = np.vstack((braggpeaks.data['qx']-x0, braggpeaks.data['qy']-y0)).T + alpha = np.vstack((braggpeaks.data["qx"] - x0, braggpeaks.data["qy"] - y0)).T # Get weighted matrices - weights = braggpeaks.data['intensity'] - weighted_M = M*weights[:,np.newaxis] - weighted_alpha = alpha*weights[:,np.newaxis] + weights = braggpeaks.data["intensity"] + weighted_M = M * weights[:, np.newaxis] + weighted_alpha = alpha * weights[:, np.newaxis] # Solve for lattice vectors beta = lstsq(weighted_M, weighted_alpha, rcond=None)[0] - x0,y0 = beta[0,0],beta[0,1] - g1x,g1y = beta[1,0],beta[1,1] - g2x,g2y = beta[2,0],beta[2,1] + x0, y0 = beta[0, 0], beta[0, 1] + g1x, g1y = beta[1, 0], beta[1, 1] + g2x, g2y = beta[2, 0], beta[2, 1] # Calculate the error - alpha_calculated = np.matmul(M,beta) - error = np.sqrt(np.sum((alpha-alpha_calculated)**2,axis=1)) - error = np.sum(error*weights)/np.sum(weights) + alpha_calculated = np.matmul(M, beta) + error = np.sqrt(np.sum((alpha - alpha_calculated) ** 2, axis=1)) + error = np.sum(error * weights) / np.sum(weights) + + return x0, y0, g1x, g1y, g2x, g2y, error - return x0,y0,g1x,g1y,g2x,g2y,error def fit_lattice_vectors_all_DPs(braggpeaks, x0=0, y0=0, minNumPeaks=5): """ @@ -100,39 +104,38 @@ def fit_lattice_vectors_all_DPs(braggpeaks, x0=0, y0=0, minNumPeaks=5): fits """ assert isinstance(braggpeaks, PointListArray) - assert np.all([name in braggpeaks.dtype.names for name in ('qx','qy','intensity','h','k')]) + assert np.all( + [name in braggpeaks.dtype.names for name in ("qx", "qy", "intensity", "h", "k")] + ) # Make RealSlice to contain outputs - slicelabels = ('x0','y0','g1x','g1y','g2x','g2y','error','mask') + slicelabels = ("x0", "y0", "g1x", "g1y", "g2x", "g2y", "error", "mask") g1g2_map = RealSlice( - data=np.zeros( - (8, braggpeaks.shape[0],braggpeaks.shape[1]) - ), - slicelabels=slicelabels, name='g1g2_map' + data=np.zeros((8, braggpeaks.shape[0], braggpeaks.shape[1])), + slicelabels=slicelabels, + name="g1g2_map", ) # Fit lattice vectors - for (Rx, Ry) in tqdmnd(braggpeaks.shape[0],braggpeaks.shape[1]): - braggpeaks_curr = braggpeaks.get_pointlist(Rx,Ry) - qx0,qy0,g1x,g1y,g2x,g2y,error = fit_lattice_vectors( - braggpeaks_curr, - x0, - y0, - minNumPeaks + for Rx, Ry in tqdmnd(braggpeaks.shape[0], braggpeaks.shape[1]): + braggpeaks_curr = braggpeaks.get_pointlist(Rx, Ry) + qx0, qy0, g1x, g1y, g2x, g2y, error = fit_lattice_vectors( + braggpeaks_curr, x0, y0, 
minNumPeaks ) # Store data if g1x is not None: - g1g2_map.get_slice('x0').data[Rx,Ry] = qx0 - g1g2_map.get_slice('y0').data[Rx,Ry] = qx0 - g1g2_map.get_slice('g1x').data[Rx,Ry] = g1x - g1g2_map.get_slice('g1y').data[Rx,Ry] = g1y - g1g2_map.get_slice('g2x').data[Rx,Ry] = g2x - g1g2_map.get_slice('g2y').data[Rx,Ry] = g2y - g1g2_map.get_slice('error').data[Rx,Ry] = error - g1g2_map.get_slice('mask').data[Rx,Ry] = 1 + g1g2_map.get_slice("x0").data[Rx, Ry] = qx0 + g1g2_map.get_slice("y0").data[Rx, Ry] = qy0 + g1g2_map.get_slice("g1x").data[Rx, Ry] = g1x + g1g2_map.get_slice("g1y").data[Rx, Ry] = g1y + g1g2_map.get_slice("g2x").data[Rx, Ry] = g2x + g1g2_map.get_slice("g2y").data[Rx, Ry] = g2y + g1g2_map.get_slice("error").data[Rx, Ry] = error + g1g2_map.get_slice("mask").data[Rx, Ry] = 1 return g1g2_map + def fit_lattice_vectors_masked(braggpeaks, mask, x0=0, y0=0, minNumPeaks=5): """ Fits lattice vectors g1,g2 to each diffraction pattern in braggpeaks corresponding @@ -165,27 +168,33 @@ def fit_lattice_vectors_masked(braggpeaks, mask, x0=0, y0=0, minNumPeaks=5): fits """ assert isinstance(braggpeaks, PointListArray) - assert np.all([name in braggpeaks.dtype.names for name in ('qx','qy','intensity')]) + assert np.all( + [name in braggpeaks.dtype.names for name in ("qx", "qy", "intensity")] + ) # Make RealSlice to contain outputs - slicelabels = ('x0','y0','g1x','g1y','g2x','g2y','error','mask') - g1g2_map = RealSlice(data=np.zeros((braggpeaks.shape[0],braggpeaks.shape[1],8)), - slicelabels=slicelabels, name='g1g2_map') + slicelabels = ("x0", "y0", "g1x", "g1y", "g2x", "g2y", "error", "mask") + g1g2_map = RealSlice( + data=np.zeros((braggpeaks.shape[0], braggpeaks.shape[1], 8)), + slicelabels=slicelabels, + name="g1g2_map", + ) # Fit lattice vectors - for (Rx, Ry) in tqdmnd(braggpeaks.shape[0],braggpeaks.shape[1]): - if mask[Rx,Ry]: - braggpeaks_curr = braggpeaks.get_pointlist(Rx,Ry) - qx0,qy0,g1x,g1y,g2x,g2y,error = fit_lattice_vectors(braggpeaks_curr, x0, y0, minNumPeaks) + for Rx, Ry in tqdmnd(braggpeaks.shape[0], braggpeaks.shape[1]): + if mask[Rx, Ry]: + braggpeaks_curr = braggpeaks.get_pointlist(Rx, Ry) + qx0, qy0, g1x, g1y, g2x, g2y, error = fit_lattice_vectors( + braggpeaks_curr, x0, y0, minNumPeaks + ) # Store data if g1x is not None: - g1g2_map.get_slice('x0').data[Rx,Ry] = qx0 - g1g2_map.get_slice('y0').data[Rx,Ry] = qx0 - g1g2_map.get_slice('g1x').data[Rx,Ry] = g1x - g1g2_map.get_slice('g1y').data[Rx,Ry] = g1y - g1g2_map.get_slice('g2x').data[Rx,Ry] = g2x - g1g2_map.get_slice('g2y').data[Rx,Ry] = g2y - g1g2_map.get_slice('error').data[Rx,Ry] = error - g1g2_map.get_slice('mask').data[Rx,Ry] = 1 + g1g2_map.get_slice("x0").data[Rx, Ry] = qx0 + g1g2_map.get_slice("y0").data[Rx, Ry] = qy0 + g1g2_map.get_slice("g1x").data[Rx, Ry] = g1x + g1g2_map.get_slice("g1y").data[Rx, Ry] = g1y + g1g2_map.get_slice("g2x").data[Rx, Ry] = g2x + g1g2_map.get_slice("g2y").data[Rx, Ry] = g2y + g1g2_map.get_slice("error").data[Rx, Ry] = error + g1g2_map.get_slice("mask").data[Rx, Ry] = 1 return g1g2_map - diff --git a/py4DSTEM/process/latticevectors/index.py b/py4DSTEM/process/latticevectors/index.py index 189e7f10f..4ac7939e7 100644 --- a/py4DSTEM/process/latticevectors/index.py +++ b/py4DSTEM/process/latticevectors/index.py @@ -5,7 +5,8 @@ from emdfile import tqdmnd, PointList, PointListArray -def get_selected_lattice_vectors(gx,gy,i0,i1,i2): + +def get_selected_lattice_vectors(gx, gy, i0, i1, i2): """ From a set of reciprocal lattice points (gx,gy), and indices in those arrays which specify the center
beam, the first basis lattice vector, and the second basis lattice @@ -24,13 +25,14 @@ def get_selected_lattice_vectors(gx,gy,i0,i1,i2): * **g1**: *(2-tuple)* the first lattice vector, (g1x,g1y) * **g2**: *(2-tuple)* the second lattice vector, (g2x,g2y) """ - for i in (i0,i1,i2): - assert isinstance(i,(int,np.integer)) + for i in (i0, i1, i2): + assert isinstance(i, (int, np.integer)) g1x = gx[i1] - gx[i0] g1y = gy[i1] - gy[i0] g2x = gx[i2] - gx[i0] g2y = gy[i2] - gy[i0] - return (g1x,g1y),(g2x,g2y) + return (g1x, g1y), (g2x, g2y) + def index_bragg_directions(x0, y0, gx, gy, g1, g2): """ @@ -62,31 +64,32 @@ def index_bragg_directions(x0, y0, gx, gy, g1, g2): coords 'h' and 'k' contain h and k. """ # Get beta, the matrix of lattice vectors - beta = np.array([[g1[0],g2[0]],[g1[1],g2[1]]]) + beta = np.array([[g1[0], g2[0]], [g1[1], g2[1]]]) # Get alpha, the matrix of measured bragg angles - alpha = np.vstack([gx-x0,gy-y0]) + alpha = np.vstack([gx - x0, gy - y0]) # Calculate M, the matrix of peak positions M = lstsq(beta, alpha, rcond=None)[0].T M = np.round(M).astype(int) # Get h,k - h = M[:,0] - k = M[:,1] + h = M[:, 0] + k = M[:, 1] # Store in a PointList - coords = [('qx',float),('qy',float),('h',int),('k',int)] - temp_array = np.zeros([], dtype = coords) - bragg_directions = PointList(data = temp_array) - bragg_directions.add_data_by_field((gx,gy,h,k)) - mask = np.zeros(bragg_directions['qx'].shape[0]) + coords = [("qx", float), ("qy", float), ("h", int), ("k", int)] + temp_array = np.zeros([], dtype=coords) + bragg_directions = PointList(data=temp_array) + bragg_directions.add_data_by_field((gx, gy, h, k)) + mask = np.zeros(bragg_directions["qx"].shape[0]) mask[0] = 1 bragg_directions.remove(mask) - return h,k, bragg_directions + return h, k, bragg_directions + -def generate_lattice(ux,uy,vx,vy,x0,y0,Q_Nx,Q_Ny,h_max=None,k_max=None): +def generate_lattice(ux, uy, vx, vy, x0, y0, Q_Nx, Q_Ny, h_max=None, k_max=None): """ Returns a full reciprocal lattice stretching to the limits of the diffraction pattern by making linear combinations of the lattice vectors up to (±h_max,±k_max). 
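Note: the linear-combination step is compact enough to sketch directly. An illustrative NumPy fragment assuming two known lattice vectors u and v (values invented), mirroring the meshgrid-and-matmul construction used below:

    import numpy as np

    # illustrative lattice vectors and index range
    u = np.array([10.0, 2.0])      # (ux, uy)
    v = np.array([-1.0, 8.0])      # (vx, vy)
    h_max = k_max = 2

    hlist, klist = np.meshgrid(np.arange(-h_max, h_max + 1), np.arange(-k_max, k_max + 1))
    M_ideal = np.vstack((hlist.ravel(), klist.ravel())).T   # every (h,k) combination
    ideal_peaks = M_ideal @ np.vstack((u, v))               # rows are (qx,qy) = h*u + k*v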
@@ -117,52 +120,52 @@ def generate_lattice(ux,uy,vx,vy,x0,y0,Q_Nx,Q_Ny,h_max=None,k_max=None): """ # Matrix of lattice vectors - beta = np.array([[ux,uy],[vx,vy]]) + beta = np.array([[ux, uy], [vx, vy]]) # If no max index is specified, (over)estimate based on image size if (h_max is None) or (k_max is None): - (y,x) = np.mgrid[0:Q_Ny,0:Q_Nx] + (y, x) = np.mgrid[0:Q_Ny, 0:Q_Nx] x = x - x0 y = y - y0 - h_max = np.max(np.ceil(np.abs((x/ux,y/uy)))) - k_max = np.max(np.ceil(np.abs((x/vx,y/vy)))) + h_max = np.max(np.ceil(np.abs((x / ux, y / uy)))) + k_max = np.max(np.ceil(np.abs((x / vx, y / vy)))) - (hlist,klist) = np.meshgrid(np.arange(-h_max,h_max+1),np.arange(-k_max,k_max+1)) + (hlist, klist) = np.meshgrid( + np.arange(-h_max, h_max + 1), np.arange(-k_max, k_max + 1) + ) - M_ideal = np.vstack((hlist.ravel(),klist.ravel())).T - ideal_peaks = np.matmul(M_ideal,beta) + M_ideal = np.vstack((hlist.ravel(), klist.ravel())).T + ideal_peaks = np.matmul(M_ideal, beta) - coords = [('qx',float),('qy',float),('h',int),('k',int)] + coords = [("qx", float), ("qy", float), ("h", int), ("k", int)] - ideal_data = np.zeros(len(ideal_peaks[:,0]),dtype=coords) - ideal_data['qx'] = ideal_peaks[:,0] - ideal_data['qy'] = ideal_peaks[:,1] - ideal_data['h'] = M_ideal[:,0] - ideal_data['k'] = M_ideal[:,1] + ideal_data = np.zeros(len(ideal_peaks[:, 0]), dtype=coords) + ideal_data["qx"] = ideal_peaks[:, 0] + ideal_data["qy"] = ideal_peaks[:, 1] + ideal_data["h"] = M_ideal[:, 0] + ideal_data["k"] = M_ideal[:, 1] ideal_lattice = PointList(data=ideal_data) - #shift to the DP center - ideal_lattice.data['qx'] += x0 - ideal_lattice.data['qy'] += y0 + # shift to the DP center + ideal_lattice.data["qx"] += x0 + ideal_lattice.data["qy"] += y0 # trim peaks outside the image - deletePeaks = (ideal_lattice.data['qx'] > Q_Nx) | \ - (ideal_lattice.data['qx'] < 0) | \ - (ideal_lattice.data['qy'] > Q_Ny) | \ - (ideal_lattice.data['qy'] < 0) + deletePeaks = ( + (ideal_lattice.data["qx"] > Q_Nx) + | (ideal_lattice.data["qx"] < 0) + | (ideal_lattice.data["qy"] > Q_Ny) + | (ideal_lattice.data["qy"] < 0) + ) ideal_lattice.remove(deletePeaks) return ideal_lattice + def add_indices_to_braggvectors( - braggpeaks, - lattice, - maxPeakSpacing, - qx_shift=0, - qy_shift=0, - mask=None - ): + braggpeaks, lattice, maxPeakSpacing, qx_shift=0, qy_shift=0, mask=None +): """ Using the peak positions (qx,qy) and indices (h,k) in the PointList lattice, identify the indices for each peak in the PointListArray braggpeaks. 
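Note: the matching rule reduces to a nearest-neighbour test per measured peak. A standalone sketch for a single peak with invented values; the real function loops this over every scan position and every PointList entry:

    import numpy as np

    lattice_q = np.array([[10.0, 0.0], [0.0, 8.0], [10.0, 8.0]])  # candidate (qx,qy)
    lattice_hk = np.array([[1, 0], [0, 1], [1, 1]])               # their (h,k) labels
    peak = np.array([9.7, 0.4])                                   # one measured peak
    maxPeakSpacing = 1.0

    r2 = np.sum((lattice_q - peak) ** 2, axis=1)  # squared distance to every candidate
    ind = np.argmin(r2)
    if r2[ind] <= maxPeakSpacing**2:              # keep the match only if close enough
        h, k = lattice_hk[ind]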
@@ -195,40 +198,50 @@ def add_indices_to_braggvectors( # assert np.all([name in lattice.dtype.names for name in ('qx','qy','h','k')]) if mask is None: - mask = np.ones(braggpeaks.Rshape,dtype=bool) - - assert mask.shape == braggpeaks.Rshape, 'mask must have same shape as pointlistarray' - assert mask.dtype == bool, 'mask must be boolean' - - - coords = [('qx',float),('qy',float),('intensity',float),('h',int),('k',int)] - - indexed_braggpeaks = PointListArray( - dtype = coords, - shape = braggpeaks.Rshape, + mask = np.ones(braggpeaks.Rshape, dtype=bool) + + assert ( + mask.shape == braggpeaks.Rshape + ), "mask must have same shape as pointlistarray" + assert mask.dtype == bool, "mask must be boolean" + + coords = [ + ("qx", float), + ("qy", float), + ("intensity", float), + ("h", int), + ("k", int), + ] + + indexed_braggpeaks = PointListArray( + dtype=coords, + shape=braggpeaks.Rshape, ) # loop over all the scan positions - for Rx, Ry in tqdmnd(mask.shape[0],mask.shape[1]): - if mask[Rx,Ry]: - pl = braggpeaks.cal[Rx,Ry] + for Rx, Ry in tqdmnd(mask.shape[0], mask.shape[1]): + if mask[Rx, Ry]: + pl = braggpeaks.cal[Rx, Ry] for i in range(pl.data.shape[0]): - r2 = (pl.data['qx'][i]-lattice.data['qx'] + qx_shift)**2 + \ - (pl.data['qy'][i]-lattice.data['qy'] + qy_shift)**2 + r2 = (pl.data["qx"][i] - lattice.data["qx"] + qx_shift) ** 2 + ( + pl.data["qy"][i] - lattice.data["qy"] + qy_shift + ) ** 2 ind = np.argmin(r2) if r2[ind] <= maxPeakSpacing**2: - indexed_braggpeaks[Rx,Ry].add_data_by_field(( - pl.data['qx'][i], - pl.data['qy'][i], - pl.data['intensity'][i], - lattice.data['h'][ind], - lattice.data['k'][ind] - )) + indexed_braggpeaks[Rx, Ry].add_data_by_field( + ( + pl.data["qx"][i], + pl.data["qy"][i], + pl.data["intensity"][i], + lattice.data["h"][ind], + lattice.data["k"][ind], + ) + ) return indexed_braggpeaks -def bragg_vector_intensity_map_by_index(braggpeaks,h,k, symmetric=False): +def bragg_vector_intensity_map_by_index(braggpeaks, h, k, symmetric=False): """ Returns a correlation intensity map for an indexed (h,k) Bragg vector Used to obtain a darkfield image corresponding to the (h,k) reflection @@ -245,22 +258,23 @@ def bragg_vector_intensity_map_by_index(braggpeaks,h,k, symmetric=False): (numpy array): a map of the intensity of the (h,k) Bragg vector correlation. Same shape as the pointlistarray. 
""" - assert isinstance(braggpeaks,PointListArray), "braggpeaks must be a PointListArray" - assert np.all([name in braggpeaks.dtype.names for name in ('h','k','intensity')]) - intensity_map = np.zeros(braggpeaks.shape,dtype=float) + assert isinstance(braggpeaks, PointListArray), "braggpeaks must be a PointListArray" + assert np.all([name in braggpeaks.dtype.names for name in ("h", "k", "intensity")]) + intensity_map = np.zeros(braggpeaks.shape, dtype=float) for Rx in range(braggpeaks.shape[0]): for Ry in range(braggpeaks.shape[1]): - pl = braggpeaks.get_pointlist(Rx,Ry) + pl = braggpeaks.get_pointlist(Rx, Ry) if pl.length > 0: if symmetric: - matches = np.logical_and(np.abs(pl.data['h']) == np.abs(h), np.abs(pl.data['k']) == np.abs(k)) + matches = np.logical_and( + np.abs(pl.data["h"]) == np.abs(h), + np.abs(pl.data["k"]) == np.abs(k), + ) else: - matches = np.logical_and(pl.data['h'] == h, pl.data['k'] == k) + matches = np.logical_and(pl.data["h"] == h, pl.data["k"] == k) - if len(matches)>0: - intensity_map[Rx,Ry] = np.sum(pl.data['intensity'][matches]) + if len(matches) > 0: + intensity_map[Rx, Ry] = np.sum(pl.data["intensity"][matches]) return intensity_map - - diff --git a/py4DSTEM/process/latticevectors/initialguess.py b/py4DSTEM/process/latticevectors/initialguess.py index b4486d531..d8054143f 100644 --- a/py4DSTEM/process/latticevectors/initialguess.py +++ b/py4DSTEM/process/latticevectors/initialguess.py @@ -6,8 +6,15 @@ from py4DSTEM.process.utils import get_maxima_1D -def get_radon_scores(braggvectormap, mask=None, N_angles=200, sigma=2, minSpacing=2, - minRelativeIntensity=0.05): + +def get_radon_scores( + braggvectormap, + mask=None, + N_angles=200, + sigma=2, + minSpacing=2, + minRelativeIntensity=0.05, +): """ Calculates a score function, score(angle), representing the likelihood that angle is a principle lattice direction of the lattice in braggvectormap. @@ -42,9 +49,9 @@ def get_radon_scores(braggvectormap, mask=None, N_angles=200, sigma=2, minSpacin * **sinogram**: *(ndarray)* the radon transform of braggvectormap*mask """ # Get sinogram - thetas = np.linspace(0,180,N_angles) + thetas = np.linspace(0, 180, N_angles) if mask is not None: - sinogram = radon(braggvectormap*mask, theta=thetas, circle=False) + sinogram = radon(braggvectormap * mask, theta=thetas, circle=False) else: sinogram = radon(braggvectormap, theta=thetas, circle=False) @@ -55,22 +62,24 @@ def get_radon_scores(braggvectormap, mask=None, N_angles=200, sigma=2, minSpacin theta = thetas[i] # Get radon transform slice - ind = np.argmin(np.abs(thetas-theta)) - sinogram_theta = sinogram[:,ind] - sinogram_theta = gaussian_filter(sinogram_theta,2) + ind = np.argmin(np.abs(thetas - theta)) + sinogram_theta = sinogram[:, ind] + sinogram_theta = gaussian_filter(sinogram_theta, 2) # Get maxima - maxima = get_maxima_1D(sinogram_theta,sigma,minSpacing,minRelativeIntensity) + maxima = get_maxima_1D(sinogram_theta, sigma, minSpacing, minRelativeIntensity) # Calculate metrics N_maxima[i] = len(maxima) total_intensity[i] = np.sum(sinogram_theta[maxima]) - scores = total_intensity/N_maxima + scores = total_intensity / N_maxima return scores, np.radians(thetas), sinogram -def get_lattice_directions_from_scores(thetas, scores, sigma=2, minSpacing=2, - minRelativeIntensity=0.05, index1=0, index2=0): + +def get_lattice_directions_from_scores( + thetas, scores, sigma=2, minSpacing=2, minRelativeIntensity=0.05, index1=0, index2=0 +): """ Get the lattice directions from the scores of the radon transform slices. 
@@ -91,37 +100,54 @@ def get_lattice_directions_from_scores(thetas, scores, sigma=2, minSpacing=2, * **theta1**: *(float)* the first lattice direction, in radians * **theta2**: *(float)* the second lattice direction, in radians """ - assert len(thetas)==len(scores), "Size of thetas and scores must match" + assert len(thetas) == len(scores), "Size of thetas and scores must match" # Get first lattice direction - maxima1 = get_maxima_1D(scores, sigma, minSpacing, minRelativeIntensity) # Get maxima + maxima1 = get_maxima_1D( + scores, sigma, minSpacing, minRelativeIntensity + ) # Get maxima thetas_max1 = thetas[maxima1] scores_max1 = scores[maxima1] - dtype = np.dtype([('thetas',thetas.dtype),('scores',scores.dtype)]) # Sort by intensity - ar_structured = np.empty(len(thetas_max1),dtype=dtype) - ar_structured['thetas'] = thetas_max1 - ar_structured['scores'] = scores_max1 - ar_structured = np.sort(ar_structured, order='scores')[::-1] - theta1 = ar_structured['thetas'][index1] # Get direction 1 + dtype = np.dtype( + [("thetas", thetas.dtype), ("scores", scores.dtype)] + ) # Sort by intensity + ar_structured = np.empty(len(thetas_max1), dtype=dtype) + ar_structured["thetas"] = thetas_max1 + ar_structured["scores"] = scores_max1 + ar_structured = np.sort(ar_structured, order="scores")[::-1] + theta1 = ar_structured["thetas"][index1] # Get direction 1 # Apply sin**2 damping - scores_damped = scores*np.sin(thetas-theta1)**2 + scores_damped = scores * np.sin(thetas - theta1) ** 2 # Get second lattice direction - maxima2 = get_maxima_1D(scores_damped, sigma, minSpacing, minRelativeIntensity) # Get maxima + maxima2 = get_maxima_1D( + scores_damped, sigma, minSpacing, minRelativeIntensity + ) # Get maxima thetas_max2 = thetas[maxima2] scores_max2 = scores[maxima2] - dtype = np.dtype([('thetas',thetas.dtype),('scores',scores.dtype)]) # Sort by intensity - ar_structured = np.empty(len(thetas_max2),dtype=dtype) - ar_structured['thetas'] = thetas_max2 - ar_structured['scores'] = scores_max2 - ar_structured = np.sort(ar_structured, order='scores')[::-1] - theta2 = ar_structured['thetas'][index2] # Get direction 2 + dtype = np.dtype( + [("thetas", thetas.dtype), ("scores", scores.dtype)] + ) # Sort by intensity + ar_structured = np.empty(len(thetas_max2), dtype=dtype) + ar_structured["thetas"] = thetas_max2 + ar_structured["scores"] = scores_max2 + ar_structured = np.sort(ar_structured, order="scores")[::-1] + theta2 = ar_structured["thetas"][index2] # Get direction 2 return theta1, theta2 -def get_lattice_vector_lengths(u_theta, v_theta, thetas, sinogram, spacing_thresh=1.5, - sigma=1, minSpacing=2, minRelativeIntensity=0.1): + +def get_lattice_vector_lengths( + u_theta, + v_theta, + thetas, + sinogram, + spacing_thresh=1.5, + sigma=1, + minSpacing=2, + minRelativeIntensity=0.1, +): """ Gets the lengths of the two lattice vectors from their angles and the sinogram. @@ -155,34 +181,49 @@ def get_lattice_vector_lengths(u_theta, v_theta, thetas, sinogram, spacing_thres * **u_length**: *(float)* the length of u, in pixels * **v_length**: *(float)* the length of v, in pixels """ - assert len(thetas)==sinogram.shape[1], "thetas must corresponding to the number of sinogram projection directions." + assert ( + len(thetas) == sinogram.shape[1] + ), "thetas must correspond to the number of sinogram projection directions."
# Get u projected spacing - ind = np.argmin(np.abs(thetas-u_theta)) - sinogram_slice = sinogram[:,ind] + ind = np.argmin(np.abs(thetas - u_theta)) + sinogram_slice = sinogram[:, ind] maxima = get_maxima_1D(sinogram_slice, sigma, minSpacing, minRelativeIntensity) spacings = np.sort(np.arange(sinogram_slice.shape[0])[maxima]) spacings = spacings[1:] - spacings[:-1] - mask = np.array([max(i,np.median(spacings))/min(i,np.median(spacings)) for i in spacings]) < spacing_thresh + mask = ( + np.array( + [ + max(i, np.median(spacings)) / min(i, np.median(spacings)) + for i in spacings + ] + ) + < spacing_thresh + ) spacings = spacings[mask] u_projected_spacing = np.mean(spacings) # Get v projected spacing - ind = np.argmin(np.abs(thetas-v_theta)) - sinogram_slice = sinogram[:,ind] + ind = np.argmin(np.abs(thetas - v_theta)) + sinogram_slice = sinogram[:, ind] maxima = get_maxima_1D(sinogram_slice, sigma, minSpacing, minRelativeIntensity) spacings = np.sort(np.arange(sinogram_slice.shape[0])[maxima]) spacings = spacings[1:] - spacings[:-1] - mask = np.array([max(i,np.median(spacings))/min(i,np.median(spacings)) for i in spacings]) < spacing_thresh + mask = ( + np.array( + [ + max(i, np.median(spacings)) / min(i, np.median(spacings)) + for i in spacings + ] + ) + < spacing_thresh + ) spacings = spacings[mask] v_projected_spacing = np.mean(spacings) # Get u and v lengths - sin_uv = np.sin(np.abs(u_theta-v_theta)) + sin_uv = np.sin(np.abs(u_theta - v_theta)) u_length = v_projected_spacing / sin_uv v_length = u_projected_spacing / sin_uv return u_length, v_length - - - diff --git a/py4DSTEM/process/latticevectors/strain.py b/py4DSTEM/process/latticevectors/strain.py index 7a586bd69..6f4000449 100644 --- a/py4DSTEM/process/latticevectors/strain.py +++ b/py4DSTEM/process/latticevectors/strain.py @@ -5,6 +5,7 @@ from py4DSTEM.data import RealSlice + def get_reference_g1g2(g1g2_map, mask): """ Gets a pair of reference lattice vectors from a region of real space specified by @@ -25,13 +26,16 @@ def get_reference_g1g2(g1g2_map, mask): * **g2**: *(2-tuple)* second reference lattice vector (x,y) """ assert isinstance(g1g2_map, RealSlice) - assert np.all([name in g1g2_map.slicelabels for name in ('g1x','g1y','g2x','g2y')]) + assert np.all( + [name in g1g2_map.slicelabels for name in ("g1x", "g1y", "g2x", "g2y")] + ) assert mask.dtype == bool - g1x = np.median(g1g2_map.get_slice('g1x').data[mask]) - g1y = np.median(g1g2_map.get_slice('g1y').data[mask]) - g2x = np.median(g1g2_map.get_slice('g2x').data[mask]) - g2y = np.median(g1g2_map.get_slice('g2y').data[mask]) - return (g1x,g1y),(g2x,g2y) + g1x = np.median(g1g2_map.get_slice("g1x").data[mask]) + g1y = np.median(g1g2_map.get_slice("g1y").data[mask]) + g2x = np.median(g1g2_map.get_slice("g2x").data[mask]) + g2y = np.median(g1g2_map.get_slice("g2y").data[mask]) + return (g1x, g1y), (g2x, g2y) + def get_strain_from_reference_g1g2(g1g2_map, g1, g2): """ @@ -67,37 +71,52 @@ def get_strain_from_reference_g1g2(g1g2_map, g1, g2): Note 1: the strain matrix has been symmetrized, so e_xy and e_yx are identical """ assert isinstance(g1g2_map, RealSlice) - assert np.all([name in g1g2_map.slicelabels for name in ('g1x','g1y','g2x','g2y','mask')]) + assert np.all( + [name in g1g2_map.slicelabels for name in ("g1x", "g1y", "g2x", "g2y", "mask")] + ) # Get RealSlice for output storage - R_Nx,R_Ny = g1g2_map.get_slice('g1x').shape + R_Nx, R_Ny = g1g2_map.get_slice("g1x").shape strain_map = RealSlice( data=np.zeros((5, R_Nx, R_Ny)), - 
slicelabels=('e_xx','e_yy','e_xy','theta','mask'), - name='strain_map' + slicelabels=("e_xx", "e_yy", "e_xy", "theta", "mask"), + name="strain_map", ) # Get reference lattice matrix - g1x,g1y = g1 - g2x,g2y = g2 - M = np.array([[g1x,g1y],[g2x,g2y]]) + g1x, g1y = g1 + g2x, g2y = g2 + M = np.array([[g1x, g1y], [g2x, g2y]]) for Rx in range(R_Nx): for Ry in range(R_Ny): # Get lattice vectors for DP at Rx,Ry - alpha = np.array([[g1g2_map.get_slice('g1x').data[Rx,Ry],g1g2_map.get_slice('g1y').data[Rx,Ry]], - [g1g2_map.get_slice('g2x').data[Rx,Ry],g1g2_map.get_slice('g2y').data[Rx,Ry]]]) + alpha = np.array( + [ + [ + g1g2_map.get_slice("g1x").data[Rx, Ry], + g1g2_map.get_slice("g1y").data[Rx, Ry], + ], + [ + g1g2_map.get_slice("g2x").data[Rx, Ry], + g1g2_map.get_slice("g2y").data[Rx, Ry], + ], + ] + ) # Get transformation matrix beta = lstsq(M, alpha, rcond=None)[0].T # Get the infinitesimal strain matrix - strain_map.get_slice('e_xx').data[Rx,Ry] = 1 - beta[0,0] - strain_map.get_slice('e_yy').data[Rx,Ry] = 1 - beta[1,1] - strain_map.get_slice('e_xy').data[Rx,Ry] = -(beta[0,1]+beta[1,0])/2. - strain_map.get_slice('theta').data[Rx,Ry] = (beta[0,1]-beta[1,0])/2. - strain_map.get_slice('mask').data[Rx,Ry] = g1g2_map.get_slice('mask').data[Rx,Ry] + strain_map.get_slice("e_xx").data[Rx, Ry] = 1 - beta[0, 0] + strain_map.get_slice("e_yy").data[Rx, Ry] = 1 - beta[1, 1] + strain_map.get_slice("e_xy").data[Rx, Ry] = -(beta[0, 1] + beta[1, 0]) / 2.0 + strain_map.get_slice("theta").data[Rx, Ry] = (beta[0, 1] - beta[1, 0]) / 2.0 + strain_map.get_slice("mask").data[Rx, Ry] = g1g2_map.get_slice("mask").data[ + Rx, Ry + ] return strain_map + def get_strain_from_reference_region(g1g2_map, mask): """ Gets a strain map from the reference region of real space specified by mask and the @@ -133,13 +152,15 @@ def get_strain_from_reference_region(g1g2_map, mask): """ assert isinstance(g1g2_map, RealSlice) assert np.all( - [name in g1g2_map.slicelabels for name in ('g1x','g1y','g2x','g2y','mask')]) + [name in g1g2_map.slicelabels for name in ("g1x", "g1y", "g2x", "g2y", "mask")] + ) assert mask.dtype == bool - g1,g2 = get_reference_g1g2(g1g2_map,mask) - strain_map = get_strain_from_reference_g1g2(g1g2_map,g1,g2) + g1, g2 = get_reference_g1g2(g1g2_map, mask) + strain_map = get_strain_from_reference_g1g2(g1g2_map, g1, g2) return strain_map + def get_rotated_strain_map(unrotated_strain_map, xaxis_x, xaxis_y, flip_theta): """ Starting from a strain map defined with respect to the xy coordinate system of @@ -164,29 +185,47 @@ def get_rotated_strain_map(unrotated_strain_map, xaxis_x, xaxis_y, flip_theta): system """ assert isinstance(unrotated_strain_map, RealSlice) - assert np.all([key in ['e_xx','e_xy','e_yy','theta','mask'] for key in unrotated_strain_map.slicelabels]) - theta = -np.arctan2(xaxis_y,xaxis_x) + assert np.all( + [ + key in ["e_xx", "e_xy", "e_yy", "theta", "mask"] + for key in unrotated_strain_map.slicelabels + ] + ) + theta = -np.arctan2(xaxis_y, xaxis_x) cost = np.cos(theta) sint = np.sin(theta) cost2 = cost**2 sint2 = sint**2 - Rx,Ry = unrotated_strain_map.get_slice('e_xx').data.shape + Rx, Ry = unrotated_strain_map.get_slice("e_xx").data.shape rotated_strain_map = RealSlice( - data=np.zeros((5, Rx,Ry)), - slicelabels=['e_xx','e_xy','e_yy','theta','mask'], - name=unrotated_strain_map.name+"_rotated".format(np.degrees(theta)) + data=np.zeros((5, Rx, Ry)), + slicelabels=["e_xx", "e_xy", "e_yy", "theta", "mask"], + name=unrotated_strain_map.name + "_rotated", ) -
rotated_strain_map.data[0,:,:] = cost2*unrotated_strain_map.get_slice('e_xx').data - 2*cost*sint*unrotated_strain_map.get_slice('e_xy').data + sint2*unrotated_strain_map.get_slice('e_yy').data - rotated_strain_map.data[1,:,:] = cost*sint*(unrotated_strain_map.get_slice('e_xx').data-unrotated_strain_map.get_slice('e_yy').data) + (cost2-sint2)*unrotated_strain_map.get_slice('e_xy').data - rotated_strain_map.data[2,:,:] = sint2*unrotated_strain_map.get_slice('e_xx').data + 2*cost*sint*unrotated_strain_map.get_slice('e_xy').data + cost2*unrotated_strain_map.get_slice('e_yy').data + rotated_strain_map.data[0, :, :] = ( + cost2 * unrotated_strain_map.get_slice("e_xx").data + - 2 * cost * sint * unrotated_strain_map.get_slice("e_xy").data + + sint2 * unrotated_strain_map.get_slice("e_yy").data + ) + rotated_strain_map.data[1, :, :] = ( + cost + * sint + * ( + unrotated_strain_map.get_slice("e_xx").data + - unrotated_strain_map.get_slice("e_yy").data + ) + + (cost2 - sint2) * unrotated_strain_map.get_slice("e_xy").data + ) + rotated_strain_map.data[2, :, :] = ( + sint2 * unrotated_strain_map.get_slice("e_xx").data + + 2 * cost * sint * unrotated_strain_map.get_slice("e_xy").data + + cost2 * unrotated_strain_map.get_slice("e_yy").data + ) if flip_theta == True: - rotated_strain_map.data[3,:,:] = -unrotated_strain_map.get_slice('theta').data - else: - rotated_strain_map.data[3,:,:] = unrotated_strain_map.get_slice('theta').data - rotated_strain_map.data[4,:,:] = unrotated_strain_map.get_slice('mask').data + rotated_strain_map.data[3, :, :] = -unrotated_strain_map.get_slice("theta").data + else: + rotated_strain_map.data[3, :, :] = unrotated_strain_map.get_slice("theta").data + rotated_strain_map.data[4, :, :] = unrotated_strain_map.get_slice("mask").data return rotated_strain_map - - - diff --git a/py4DSTEM/process/phase/iterative_multislice_ptychography.py b/py4DSTEM/process/phase/iterative_multislice_ptychography.py index 92f8c0bf3..a352502d0 100644 --- a/py4DSTEM/process/phase/iterative_multislice_ptychography.py +++ b/py4DSTEM/process/phase/iterative_multislice_ptychography.py @@ -974,7 +974,7 @@ def _gradient_descent_adjoint( ) # back-transmit - exit_waves *= xp.conj(obj) #/ xp.abs(obj) ** 2 + exit_waves *= xp.conj(obj) # / xp.abs(obj) ** 2 if s > 0: # back-propagate @@ -1076,7 +1076,7 @@ def _projection_sets_adjoint( ) # back-transmit - exit_waves_copy *= xp.conj(obj) # / xp.abs(obj) ** 2 + exit_waves_copy *= xp.conj(obj) # / xp.abs(obj) ** 2 if s > 0: # back-propagate @@ -3067,4 +3067,4 @@ def _return_object_fft( obj = np.angle(obj) obj = self._crop_rotate_object_fov(np.sum(obj, axis=0)) - return np.abs(np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(obj)))) \ No newline at end of file + return np.abs(np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(obj)))) diff --git a/py4DSTEM/process/phase/iterative_ptychographic_constraints.py b/py4DSTEM/process/phase/iterative_ptychographic_constraints.py index 9af22ba92..67dba6115 100644 --- a/py4DSTEM/process/phase/iterative_ptychographic_constraints.py +++ b/py4DSTEM/process/phase/iterative_ptychographic_constraints.py @@ -364,7 +364,7 @@ def _probe_amplitude_constraint( erf = self._erf probe_intensity = xp.abs(current_probe) ** 2 - #current_probe_sum = xp.sum(probe_intensity) + # current_probe_sum = xp.sum(probe_intensity) X = xp.fft.fftfreq(current_probe.shape[0])[:, None] Y = xp.fft.fftfreq(current_probe.shape[1])[None] @@ -374,10 +374,10 @@ def _probe_amplitude_constraint( tophat_mask = 0.5 * (1 - erf(sigma * r / (1 - r**2))) updated_probe = 
current_probe * tophat_mask - #updated_probe_sum = xp.sum(xp.abs(updated_probe) ** 2) - #normalization = xp.sqrt(current_probe_sum / updated_probe_sum) + # updated_probe_sum = xp.sum(xp.abs(updated_probe) ** 2) + # normalization = xp.sqrt(current_probe_sum / updated_probe_sum) - return updated_probe #* normalization + return updated_probe # * normalization def _probe_fourier_amplitude_constraint( self, @@ -406,7 +406,7 @@ def _probe_fourier_amplitude_constraint( xp = self._xp asnumpy = self._asnumpy - #current_probe_sum = xp.sum(xp.abs(current_probe) ** 2) + # current_probe_sum = xp.sum(xp.abs(current_probe) ** 2) current_probe_fft = xp.fft.fft2(current_probe) updated_probe_fft, _, _, _ = regularize_probe_amplitude( @@ -419,10 +419,10 @@ def _probe_fourier_amplitude_constraint( updated_probe_fft = xp.asarray(updated_probe_fft) updated_probe = xp.fft.ifft2(updated_probe_fft) - #updated_probe_sum = xp.sum(xp.abs(updated_probe) ** 2) - #normalization = xp.sqrt(current_probe_sum / updated_probe_sum) + # updated_probe_sum = xp.sum(xp.abs(updated_probe) ** 2) + # normalization = xp.sqrt(current_probe_sum / updated_probe_sum) - return updated_probe #* normalization + return updated_probe # * normalization def _probe_aperture_constraint( self, @@ -444,16 +444,16 @@ def _probe_aperture_constraint( """ xp = self._xp - #current_probe_sum = xp.sum(xp.abs(current_probe) ** 2) + # current_probe_sum = xp.sum(xp.abs(current_probe) ** 2) current_probe_fft_phase = xp.angle(xp.fft.fft2(current_probe)) updated_probe = xp.fft.ifft2( xp.exp(1j * current_probe_fft_phase) * initial_probe_aperture ) - #updated_probe_sum = xp.sum(xp.abs(updated_probe) ** 2) - #normalization = xp.sqrt(current_probe_sum / updated_probe_sum) + # updated_probe_sum = xp.sum(xp.abs(updated_probe) ** 2) + # normalization = xp.sqrt(current_probe_sum / updated_probe_sum) - return updated_probe #* normalization + return updated_probe # * normalization def _probe_aberration_fitting_constraint( self, diff --git a/py4DSTEM/process/polar/__init__.py b/py4DSTEM/process/polar/__init__.py index 79e13a054..06d32c88e 100644 --- a/py4DSTEM/process/polar/__init__.py +++ b/py4DSTEM/process/polar/__init__.py @@ -1,3 +1,10 @@ from py4DSTEM.process.polar.polar_datacube import PolarDatacube from py4DSTEM.process.polar.polar_fits import fit_amorphous_ring, plot_amorphous_ring -from py4DSTEM.process.polar.polar_peaks import find_peaks_single_pattern, find_peaks, refine_peaks, plot_radial_peaks, plot_radial_background, make_orientation_histogram \ No newline at end of file +from py4DSTEM.process.polar.polar_peaks import ( + find_peaks_single_pattern, + find_peaks, + refine_peaks, + plot_radial_peaks, + plot_radial_background, + make_orientation_histogram, +) diff --git a/py4DSTEM/process/polar/polar_analysis.py b/py4DSTEM/process/polar/polar_analysis.py index 0f355bbd1..0c2454289 100644 --- a/py4DSTEM/process/polar/polar_analysis.py +++ b/py4DSTEM/process/polar/polar_analysis.py @@ -31,7 +31,7 @@ def calculate_radial_statistics( -------- self: PolarDatacube Polar datacube used for measuring FEM properties. 
- + Returns -------- radial_avg: np.array @@ -43,7 +43,9 @@ def calculate_radial_statistics( """ # Get the dimensioned radial bins - self.scattering_vector = self.radial_bins * self.qstep * self.calibration.get_Q_pixel_size() + self.scattering_vector = ( + self.radial_bins * self.qstep * self.calibration.get_Q_pixel_size() + ) self.scattering_vector_units = self.calibration.get_Q_pixel_units() # init radial data arrays @@ -87,9 +89,9 @@ def calculate_radial_statistics( if returnfig: fig,ax = plot_radial_mean( self, - figsize = figsize, - returnfig = True, - ) + figsize=figsize, + returnfig=True, + ) else: plot_radial_mean( self, @@ -161,15 +163,15 @@ def plot_radial_var_norm( """ Plotting function for the global FEM. """ - fig,ax = plt.subplots(figsize=figsize) + fig, ax = plt.subplots(figsize=figsize) ax.plot( self.scattering_vector, self.radial_var_norm, - ) + ) - ax.set_xlabel('Scattering Vector (' + self.scattering_vector_units + ')') - ax.set_ylabel('Normalized Variance') - ax.set_xlim((self.scattering_vector[0],self.scattering_vector[-1])) + ax.set_xlabel("Scattering Vector (" + self.scattering_vector_units + ")") + ax.set_ylabel("Normalized Variance") + ax.set_xlim((self.scattering_vector[0], self.scattering_vector[-1])) if returnfig: return fig, ax @@ -404,9 +406,9 @@ def calculate_pair_dist_function( def calculate_FEM_local( self, - figsize = (8,6), - returnfig = False, - ): + figsize=(8, 6), + returnfig=False, +): """ Calculate fluctuation electron microscopy (FEM) statistics, including radial mean, variance, and normalized variance. This function computes the radial average and variance @@ -416,7 +418,7 @@ def calculate_FEM_local( -------- self: PolarDatacube Polar datacube used for measuring FEM properties. - + Returns -------- radial_avg: np.array diff --git a/py4DSTEM/process/polar/polar_datacube.py b/py4DSTEM/process/polar/polar_datacube.py index 591943444..a5d48c99e 100644 --- a/py4DSTEM/process/polar/polar_datacube.py +++ b/py4DSTEM/process/polar/polar_datacube.py @@ -1,7 +1,6 @@ import numpy as np from py4DSTEM.datacube import DataCube -from scipy.ndimage import binary_opening,binary_closing, gaussian_filter1d - +from scipy.ndimage import binary_opening, binary_closing, gaussian_filter1d class PolarDatacube: @@ -13,16 +12,16 @@ class PolarDatacube: def __init__( self, datacube, - qmin = 0.0, - qmax = None, - qstep = 1.0, - n_annular = 180, - qscale = None, - mask = None, - mask_thresh = 0.1, - ellipse = True, - two_fold_symmetry = False, - ): + qmin=0.0, + qmax=None, + qstep=1.0, + n_annular=180, + qscale=None, + mask=None, + mask_thresh=0.1, + ellipse=True, + two_fold_symmetry=False, + ): """ Parameters ---------- @@ -57,12 +56,12 @@ def __init__( """ # attach datacube - assert(isinstance(datacube,DataCube)) + assert isinstance(datacube, DataCube) self._datacube = datacube self._datacube.polar = self # check for calibrations - assert(hasattr(self._datacube,'calibration')), "No .calibration found" + assert hasattr(self._datacube, "calibration"), "No .calibration found" self.calibration = self._datacube.calibration # setup data getter @@ -75,14 +74,14 @@ def __init__( if qmax is None: qmax = np.min(self._datacube.Qshape) / np.sqrt(2) self._n_annular = n_annular - self.two_fold_symmetry = two_fold_symmetry #implicitly calls set_annular_bins - self.set_radial_bins(qmin,qmax,qstep) + self.two_fold_symmetry = two_fold_symmetry # implicitly calls set_annular_bins + self.set_radial_bins(qmin, qmax, qstep) # cartesian - self._xa,self._ya = np.meshgrid( + self._xa, self._ya = 
np.meshgrid( np.arange(self._datacube.Q_Nx), np.arange(self._datacube.Q_Ny), - indexing = 'ij' + indexing="ij", ) # ellipse @@ -112,7 +111,6 @@ def __init__( make_orientation_histogram, ) - # sampling methods + properties def set_radial_bins( self, @@ -124,11 +122,7 @@ def set_radial_bins( self._qmax = qmax self._qstep = qstep - self.radial_bins = np.arange( - self._qmin, - self._qmax, - self._qstep - ) + self.radial_bins = np.arange(self._qmin, self._qmax, self._qstep) self._radial_step = self._datacube.calibration.get_Q_pixel_size() * self._qstep self.set_polar_shape() self.qscale = self._qscale @@ -136,46 +130,31 @@ def set_radial_bins( @property def qmin(self): return self._qmin + @qmin.setter def qmin(self, x): - self.set_radial_bins( - x, - self._qmax, - self._qstep - ) + self.set_radial_bins(x, self._qmax, self._qstep) @property def qmax(self): return self._qmax + @qmax.setter def qmax(self, x): - self.set_radial_bins( - self._qmin, - x, - self._qstep - ) + self.set_radial_bins(self._qmin, x, self._qstep) @property def qstep(self): return self._qstep + @qstep.setter def qstep(self, x): - self.set_radial_bins( - self._qmin, - self._qmax, - x - ) + self.set_radial_bins(self._qmin, self._qmax, x) - def set_annular_bins( - self, - n_annular - ): + def set_annular_bins(self, n_annular): self._n_annular = n_annular self._annular_bins = np.linspace( - 0, - self._annular_range, - self._n_annular, - endpoint = False + 0, self._annular_range, self._n_annular, endpoint=False ) self._annular_step = self.annular_bins[1] - self.annular_bins[0] self.set_polar_shape() @@ -183,15 +162,20 @@ def set_annular_bins( @property def annular_bins(self): return self._annular_bins + @property def annular_step(self): return self._annular_step + @property def two_fold_symmetry(self): return self._two_fold_symmetry + @two_fold_symmetry.setter - def two_fold_symmetry(self,x): - assert(isinstance(x,bool)), f"two_fold_symmetry must be boolean, not type {type(x)}" + def two_fold_symmetry(self, x): + assert isinstance( + x, bool + ), f"two_fold_symmetry must be boolean, not type {type(x)}" self._two_fold_symmetry = x if x: self._annular_range = np.pi @@ -202,93 +186,93 @@ def two_fold_symmetry(self): @property def n_annular(self): return self._n_annular + @n_annular.setter def n_annular(self, x): self.set_annular_bins(x) def set_polar_shape(self): - if hasattr(self,'radial_bins') and hasattr(self,'annular_bins'): + if hasattr(self, "radial_bins") and hasattr(self, "annular_bins"): # set shape - self.polar_shape = np.array(( - self.annular_bins.shape[0], - self.radial_bins.shape[0] - )) + self.polar_shape = np.array( + (self.annular_bins.shape[0], self.radial_bins.shape[0]) + ) self.polar_size = np.prod(self.polar_shape) # set KDE params - self._annular_bin_step = 1 / (self._annular_step * (self.radial_bins + self.qstep * 0.5)) + self._annular_bin_step = 1 / ( + self._annular_step * (self.radial_bins + self.qstep * 0.5) + ) self._sigma_KDE = self._annular_bin_step * 0.5 # set array indices self._annular_indices = np.arange(self.polar_shape[0]).astype(int) self._radial_indices = np.arange(self.polar_shape[1]).astype(int) - # coordinate grid properties @property def tt(self): return self._annular_bins + @property def tt_deg(self): - return self._annular_bins * 180/np.pi + return self._annular_bins * 180 / np.pi + @property def qq(self): return self.radial_bins * self.calibration.get_Q_pixel_size() - - # scaling property @property def qscale(self): return self._qscale + @qscale.setter - def qscale(self,x): + def
qscale(self, x): self._qscale = x if x is not None: - self._qscale_ar = (self.qq / self.qq[-1])**x - + self._qscale_ar = (self.qq / self.qq[-1]) ** x # expose raw data @property def data_raw(self): return self._datacube - # expose transformed data @property def data(self): return self._polar_data_getter def _set_polar_data_getter(self): - self._polar_data_getter = PolarDataGetter( - polarcube = self - ) - + self._polar_data_getter = PolarDataGetter(polarcube=self) # mask properties @property def mask(self): return self._mask + @mask.setter - def mask(self,x): + def mask(self, x): if x is None: self._mask = x else: - assert(x.shape == self._datacube.Qshape), "Mask shape must match diffraction space" + assert ( + x.shape == self._datacube.Qshape + ), "Mask shape must match diffraction space" self._mask = x - self._mask_polar = self.transform( - x - ) + self._mask_polar = self.transform(x) + @property def mask_polar(self): return self._mask_polar + @property def mask_thresh(self): return self._mask_thresh + @mask_thresh.setter - def mask_thresh(self,x): + def mask_thresh(self, x): self._mask_thresh = x self.mask = self.mask - # expose transformation @property def transform(self): @@ -317,52 +301,41 @@ def transform(self): """ return self._polar_data_getter._transform - def __repr__(self): - space = ' '*len(self.__class__.__name__)+' ' + space = " " * len(self.__class__.__name__) + " " string = f"{self.__class__.__name__}( " - string += "Retrieves diffraction images in polar coordinates, using .data[x,y] )" + string += ( + "Retrieves diffraction images in polar coordinates, using .data[x,y] )" + ) return string - - - - class PolarDataGetter: - def __init__( self, polarcube, ): self._polarcube = polarcube - - def __getitem__(self,pos): - + def __getitem__(self, pos): # unpack scan position - x,y = pos + x, y = pos # get the data - cartesian_data = self._polarcube._datacube[x,y] + cartesian_data = self._polarcube._datacube[x, y] # transform - ans = self._transform( - cartesian_data, - origin = [x,y], - returnval = 'masked' - ) + ans = self._transform(cartesian_data, origin=[x, y], returnval="masked") # return return ans - def _transform( self, cartesian_data, - origin = None, - ellipse = None, - mask = None, - mask_thresh = None, - returnval = 'masked', - ): + origin=None, + ellipse=None, + mask=None, + mask_thresh=None, + returnval="masked", + ): """ Return a transformed copy of the diffraction pattern `cartesian_data`. 
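Note: the geometry of this transform can be sketched with a simple nearest-bin accumulation; the actual implementation additionally spreads each pixel bilinearly over four neighbouring bins, tracks a normalization array, and supports elliptical coordinates. All values below are illustrative:

    import numpy as np

    im = np.random.rand(128, 128)    # stand-in diffraction pattern
    x0, y0 = 63.5, 63.5              # assumed pattern origin
    n_annular, n_radial, qstep = 180, 60, 1.0

    x, y = np.meshgrid(np.arange(128) - x0, np.arange(128) - y0, indexing="ij")
    rr = np.hypot(x, y)
    tt = np.mod(np.arctan2(y, x), 2 * np.pi)

    r_ind = (rr / qstep).astype(int)
    t_ind = (tt / (2 * np.pi / n_annular)).astype(int) % n_annular
    sub = r_ind < n_radial           # keep pixels inside the radial range

    im_polar = np.zeros((n_annular, n_radial))
    np.add.at(im_polar, (t_ind[sub], r_ind[sub]), im[sub])  # accumulate per polar bin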
@@ -410,46 +383,44 @@ def _transform( # get calibrations if origin is None: origin = self._polarcube.calibration.get_origin_mean() - elif isinstance(origin,list): - origin = self._polarcube.calibration.get_origin(origin[0],origin[1]) - elif isinstance(origin,tuple): + elif isinstance(origin, list): + origin = self._polarcube.calibration.get_origin(origin[0], origin[1]) + elif isinstance(origin, tuple): pass else: raise Exception(f"Invalid type for `origin`, {type(origin)}") if ellipse is None: ellipse = self._polarcube.calibration.get_ellipse() - elif isinstance(ellipse,tuple): + elif isinstance(ellipse, tuple): pass else: raise Exception(f"Invalid type for `ellipse`, {type(ellipse)}") - # combine passed mask with default mask mask0 = self._polarcube.mask if mask is None and mask0 is None: - mask = np.ones_like(cartesian_data,dtype=bool) + mask = np.ones_like(cartesian_data, dtype=bool) elif mask is None: mask = mask0 elif mask0 is None: mask = mask else: - mask = mask*mask0 + mask = mask * mask0 if mask_thresh is None: mask_thresh = self._polarcube.mask_thresh - # transform data ans = self._transform_array( - cartesian_data * mask.astype('float'), + cartesian_data * mask.astype("float"), origin, ellipse, ) # transform normalization array ans_norm = self._transform_array( - mask.astype('float'), + mask.astype("float"), origin, ellipse, ) @@ -462,76 +433,66 @@ def _transform( ans = np.divide( ans, ans_norm, - out = np.full_like(ans, np.nan), - where = np.logical_not(mask_bool), + out=np.full_like(ans, np.nan), + where=np.logical_not(mask_bool), ) # radial power law scaling of output if self._polarcube.qscale is not None: - ans *= self._polarcube._qscale_ar[np.newaxis,:] + ans *= self._polarcube._qscale_ar[np.newaxis, :] # return - if returnval == 'masked': - ans = np.ma.array( - data = ans, - mask = mask_bool - ) + if returnval == "masked": + ans = np.ma.array(data=ans, mask=mask_bool) return ans - elif returnval == 'nan': + elif returnval == "nan": ans[mask_bool] = np.nan return ans - elif returnval == 'all': + elif returnval == "all": return ans, ans_norm, norm_array, mask_bool - elif returnval == 'zeros': + elif returnval == "zeros": ans[mask_bool] = 0 return ans - elif returnval == 'all_zeros': + elif returnval == "all_zeros": ans[mask_bool] = 0 return ans, ans_norm, norm_array, mask_bool else: raise Exception(f"Unexpected value {returnval} encountered for `returnval`") - def _transform_array( self, data, origin, ellipse, - ): - + ): # set origin x = self._polarcube._xa - origin[0] y = self._polarcube._ya - origin[1] # circular if (ellipse is None) or (self._polarcube.ellipse) is False: - # get polar coords rr = np.sqrt(x**2 + y**2) - tt = np.mod( - np.arctan2(y, x), - self._polarcube._annular_range) + tt = np.mod(np.arctan2(y, x), self._polarcube._annular_range) # elliptical else: # unpack ellipse - a,b,theta = ellipse + a, b, theta = ellipse # Get polar coords - xc = x*np.cos(theta) + y*np.sin(theta) - yc = (y*np.cos(theta) - x*np.sin(theta))*(a/b) - rr = (b/a) * np.hypot(xc,yc) - tt = np.mod( - np.arctan2(yc,xc) + theta, - self._polarcube._annular_range) + xc = x * np.cos(theta) + y * np.sin(theta) + yc = (y * np.cos(theta) - x * np.sin(theta)) * (a / b) + rr = (b / a) * np.hypot(xc, yc) + tt = np.mod(np.arctan2(yc, xc) + theta, self._polarcube._annular_range) # transform to bin sampling r_ind = (rr - self._polarcube.radial_bins[0]) / self._polarcube.qstep t_ind = tt / self._polarcube.annular_step # get integers and increments - r_ind_floor = np.floor(r_ind).astype('int') - 
t_ind_floor = np.floor(t_ind).astype('int') + r_ind_floor = np.floor(r_ind).astype("int") + t_ind_floor = np.floor(t_ind).astype("int") dr = r_ind - r_ind_floor dt = t_ind - t_ind_floor @@ -541,29 +502,37 @@ def _transform_array( r_ind_floor < self._polarcube.polar_shape[1], ) im = np.bincount( - r_ind_floor[sub] + \ - np.mod(t_ind_floor[sub],self._polarcube.polar_shape[0]) * self._polarcube.polar_shape[1], - weights = data[sub] * (1 - dr[sub]) * (1 - dt[sub]), - minlength = self._polarcube.polar_size, + r_ind_floor[sub] + + np.mod(t_ind_floor[sub], self._polarcube.polar_shape[0]) + * self._polarcube.polar_shape[1], + weights=data[sub] * (1 - dr[sub]) * (1 - dt[sub]), + minlength=self._polarcube.polar_size, ) im += np.bincount( - r_ind_floor[sub] + \ - np.mod(t_ind_floor[sub] + 1,self._polarcube.polar_shape[0]) * self._polarcube.polar_shape[1], - weights = data[sub] * (1 - dr[sub]) * ( dt[sub]), - minlength = self._polarcube.polar_size, + r_ind_floor[sub] + + np.mod(t_ind_floor[sub] + 1, self._polarcube.polar_shape[0]) + * self._polarcube.polar_shape[1], + weights=data[sub] * (1 - dr[sub]) * (dt[sub]), + minlength=self._polarcube.polar_size, + ) + sub = np.logical_and( + r_ind_floor >= -1, r_ind_floor < self._polarcube.polar_shape[1] - 1 ) - sub = np.logical_and(r_ind_floor >= -1, r_ind_floor < self._polarcube.polar_shape[1]-1) im += np.bincount( - r_ind_floor[sub] + 1 + \ - np.mod(t_ind_floor[sub],self._polarcube.polar_shape[0]) * self._polarcube.polar_shape[1], - weights = data[sub] * ( dr[sub]) * (1 - dt[sub]), - minlength = self._polarcube.polar_size, + r_ind_floor[sub] + + 1 + + np.mod(t_ind_floor[sub], self._polarcube.polar_shape[0]) + * self._polarcube.polar_shape[1], + weights=data[sub] * (dr[sub]) * (1 - dt[sub]), + minlength=self._polarcube.polar_size, ) im += np.bincount( - r_ind_floor[sub] + 1 + \ - np.mod(t_ind_floor[sub] + 1,self._polarcube.polar_shape[0]) * self._polarcube.polar_shape[1], - weights = data[sub] * ( dr[sub]) * ( dt[sub]), - minlength = self._polarcube.polar_size, + r_ind_floor[sub] + + 1 + + np.mod(t_ind_floor[sub] + 1, self._polarcube.polar_shape[0]) + * self._polarcube.polar_shape[1], + weights=data[sub] * (dr[sub]) * (dt[sub]), + minlength=self._polarcube.polar_size, ) # reshape to 2D @@ -574,20 +543,17 @@ def _transform_array( # Use 5% (= exp(-(1/2*.1669)^2)) cutoff value # for adjacent pixel in kernel if self._polarcube._sigma_KDE[a0] > 0.1669: - ans[:,a0] = gaussian_filter1d( - ans[:,a0], - sigma = self._polarcube._sigma_KDE[a0], - mode = 'wrap', - ) + ans[:, a0] = gaussian_filter1d( + ans[:, a0], + sigma=self._polarcube._sigma_KDE[a0], + mode="wrap", + ) # return return ans - - - def __repr__(self): - space = ' '*len(self.__class__.__name__)+' ' + space = " " * len(self.__class__.__name__) + " " string = f"{self.__class__.__name__}( " string += "Retrieves the diffraction pattern at scan position (x,y) in polar coordinates when sliced with [x,y]." 
- return string \ No newline at end of file + return string diff --git a/py4DSTEM/process/polar/polar_fits.py b/py4DSTEM/process/polar/polar_fits.py index e231dda07..3e39c5584 100644 --- a/py4DSTEM/process/polar/polar_fits.py +++ b/py4DSTEM/process/polar/polar_fits.py @@ -1,27 +1,27 @@ - import numpy as np import matplotlib.pyplot as plt + # from scipy.optimize import leastsq from scipy.optimize import curve_fit def fit_amorphous_ring( im, - center = None, - radial_range = None, - coefs = None, - mask_dp = None, - show_fit_mask = False, - maxfev = None, - verbose = False, - plot_result = True, - plot_log_scale = False, - plot_int_scale = (-3,3), - figsize = (8,8), - return_all_coefs = True, - ): + center=None, + radial_range=None, + coefs=None, + mask_dp=None, + show_fit_mask=False, + maxfev=None, + verbose=False, + plot_result=True, + plot_log_scale=False, + plot_int_scale=(-3, 3), + figsize=(8, 8), + return_all_coefs=True, +): """ - Fit an amorphous halo with a two-sided Gaussian model, plus a background + Fit an amorphous halo with a two-sided Gaussian model, plus a background Gaussian function. Parameters @@ -29,7 +29,7 @@ def fit_amorphous_ring( im: np.array 2D image array to perform fitting on center: np.array - (x,y) center coordinates for fitting mask. If not specified + (x,y) center coordinates for fitting mask. If not specified by the user, we will assume the center coordinate is (im.shape-1)/2. radial_range: np.array (radius_inner, radius_outer) radial range to perform fitting over. @@ -54,7 +54,7 @@ def fit_amorphous_ring( Figure size for plots return_all_coefs: bool Set to True to return the 11 parameter fit, rather than the 5 parameter ellipse - + Returns -------- params_ellipse: np.array @@ -65,30 +65,28 @@ def fit_amorphous_ring( # Default values if center is None: - center = np.array(( - (im.shape[0]-1)/2, - (im.shape[1]-1)/2)) + center = np.array(((im.shape[0] - 1) / 2, (im.shape[1] - 1) / 2)) if radial_range is None: - radial_range = (im.shape[0]/4, im.shape[0]/2) + radial_range = (im.shape[0] / 4, im.shape[0] / 2) # coordinates - xa,ya = np.meshgrid( + xa, ya = np.meshgrid( np.arange(im.shape[0]), np.arange(im.shape[1]), - indexing = 'ij', - ) + indexing="ij", + ) # Make fitting mask - ra2 = (xa - center[0])**2 + (ya - center[1])**2 + ra2 = (xa - center[0]) ** 2 + (ya - center[1]) ** 2 mask = np.logical_and( - ra2 >= radial_range[0]**2, - ra2 <= radial_range[1]**2, - ) + ra2 >= radial_range[0] ** 2, + ra2 <= radial_range[1] ** 2, + ) if mask_dp is not None: # Logical AND the radial mask with the user-provided mask mask = np.logical_and(mask, mask_dp) vals = im[mask] - basis = np.vstack((xa[mask],ya[mask])) + basis = np.vstack((xa[mask], ya[mask])) # initial fitting parameters if coefs is None: @@ -106,28 +104,28 @@ def fit_amorphous_ring( # Gaussian model parameters int_min = np.min(vals) int_max = np.max(vals) - int0 = (int_max - int_min)/2 - int12 = (int_max - int_min)/2 + int0 = (int_max - int_min) / 2 + int12 = (int_max - int_min) / 2 k_bg = int_min sigma0 = np.mean(radial_range) - sigma1 = (radial_range[1] - radial_range[0])/4 - sigma2 = (radial_range[1] - radial_range[0])/4 - - coefs = ( - x0,y0, - a,b,t, - int0,int12,k_bg, - sigma0,sigma1,sigma2) - lb = ( - 0,0, - radial_range[0],radial_range[0],-np.inf, - 0,0,0, - 1,1,1) + sigma1 = (radial_range[1] - radial_range[0]) / 4 + sigma2 = (radial_range[1] - radial_range[0]) / 4 + + coefs = (x0, y0, a, b, t, int0, int12, k_bg, sigma0, sigma1, sigma2) + lb = (0, 0, radial_range[0], radial_range[0], -np.inf, 0, 0, 0, 1, 
1, 1) ub = ( - im.shape[0],im.shape[1], - radial_range[1],radial_range[1],np.inf, - np.inf,np.inf,np.inf, - np.inf,np.inf,np.inf) + im.shape[0], + im.shape[1], + radial_range[1], + radial_range[1], + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + np.inf, + ) if show_fit_mask: # show image preview of fitting mask @@ -135,28 +133,39 @@ def fit_amorphous_ring( # Generate hybrid image for plotting if plot_log_scale: int_med = np.median(np.log(vals)) - int_std = np.sqrt(np.median((np.log(vals) - int_med)**2)) + int_std = np.sqrt(np.median((np.log(vals) - int_med) ** 2)) int_range = ( - int_med + plot_int_scale[0]*int_std, - int_med + plot_int_scale[1]*int_std) + int_med + plot_int_scale[0] * int_std, + int_med + plot_int_scale[1] * int_std, + ) im_plot = np.tile( np.clip( - (np.log(im[:,:,None]) - int_range[0]) / (int_range[1] - int_range[0]), - 0,1), - (1,1,3)) + (np.log(im[:, :, None]) - int_range[0]) + / (int_range[1] - int_range[0]), + 0, + 1, + ), + (1, 1, 3), + ) else: int_med = np.median(vals) - int_std = np.sqrt(np.median((vals - int_med)**2)) + int_std = np.sqrt(np.median((vals - int_med) ** 2)) int_range = ( - int_med + plot_int_scale[0]*int_std, - int_med + plot_int_scale[1]*int_std) - im_plot = np.tile(np.clip( - (im[:,:,None] - int_range[0]) / (int_range[1] - int_range[0]), - 0,1),(1,1,3)) - im_plot[:,:,0] *= 1-mask - - fig,ax = plt.subplots(figsize=figsize) + int_med + plot_int_scale[0] * int_std, + int_med + plot_int_scale[1] * int_std, + ) + im_plot = np.tile( + np.clip( + (im[:, :, None] - int_range[0]) / (int_range[1] - int_range[0]), + 0, + 1, + ), + (1, 1, 3), + ) + im_plot[:, :, 0] *= 1 - mask + + fig, ax = plt.subplots(figsize=figsize) ax.imshow(im_plot) else: @@ -165,43 +174,43 @@ def fit_amorphous_ring( if maxfev is None: coefs = curve_fit( - amorphous_model, - basis, - vals / int_mean, + amorphous_model, + basis, + vals / int_mean, p0=coefs, - xtol = 1e-8, - bounds = (lb,ub), + xtol=1e-8, + bounds=(lb, ub), )[0] else: coefs = curve_fit( - amorphous_model, - basis, - vals / int_mean, + amorphous_model, + basis, + vals / int_mean, p0=coefs, - xtol = 1e-8, - bounds = (lb,ub), - maxfev = maxfev, + xtol=1e-8, + bounds=(lb, ub), + maxfev=maxfev, )[0] - coefs[4] = np.mod(coefs[4],2*np.pi) + coefs[4] = np.mod(coefs[4], 2 * np.pi) coefs[5:8] *= int_mean # bounds=bounds if verbose: - print('x0 = ' + str(np.round(coefs[0],3)) + ' px') - print('y0 = ' + str(np.round(coefs[1],3)) + ' px') - print('a = ' + str(np.round(coefs[2],3)) + ' px') - print('b = ' + str(np.round(coefs[3],3)) + ' px') - print('t = ' + str(np.round(np.rad2deg(coefs[4]),3)) + ' deg') + print("x0 = " + str(np.round(coefs[0], 3)) + " px") + print("y0 = " + str(np.round(coefs[1], 3)) + " px") + print("a = " + str(np.round(coefs[2], 3)) + " px") + print("b = " + str(np.round(coefs[3], 3)) + " px") + print("t = " + str(np.round(np.rad2deg(coefs[4]), 3)) + " deg") if plot_result and not show_fit_mask: plot_amorphous_ring( - im = im, - coefs = coefs, - radial_range = radial_range, - plot_log_scale = plot_log_scale, - plot_int_scale = plot_int_scale, - figsize = figsize, - ) + im=im, + coefs=coefs, + radial_range=radial_range, + plot_log_scale=plot_log_scale, + plot_int_scale=plot_int_scale, + figsize=figsize, + ) # Return fit parameters if return_all_coefs: @@ -213,13 +222,13 @@ def fit_amorphous_ring( def plot_amorphous_ring( im, coefs, - radial_range = (0,np.inf), - plot_log_scale = True, - plot_int_scale = (-3,3), - figsize = (8,8), - ): + radial_range=(0, np.inf), + plot_log_scale=True, + 
plot_int_scale=(-3, 3), + figsize=(8, 8), +): """ - Fit an amorphous halo with a two-sided Gaussian model, plus a background + Plot the results of fitting an amorphous halo with a two-sided Gaussian model, plus a background Gaussian function. Parameters @@ -236,7 +245,7 @@ def plot_amorphous_ring( Figure size for plots return_all_coefs: bool Set to True to return the 11 parameter fit, rather than the 5 parameter ellipse - + Returns -------- @@ -246,62 +255,69 @@ def plot_amorphous_ring( center = coefs[0:2] # coordinates - xa,ya = np.meshgrid( + xa, ya = np.meshgrid( np.arange(im.shape[0]), np.arange(im.shape[1]), - indexing = 'ij', - ) + indexing="ij", + ) # Make fitting mask - ra2 = (xa - center[0])**2 + (ya - center[1])**2 + ra2 = (xa - center[0]) ** 2 + (ya - center[1]) ** 2 mask = np.logical_and( - ra2 >= radial_range[0]**2, - ra2 <= radial_range[1]**2, - ) + ra2 >= radial_range[0] ** 2, + ra2 <= radial_range[1] ** 2, + ) vals = im[mask] - basis = np.vstack((xa[mask],ya[mask])) + basis = np.vstack((xa[mask], ya[mask])) # Generate resulting best fit image - im_fit = np.reshape(amorphous_model( - np.vstack((xa.ravel(),ya.ravel())), - coefs),im.shape) + im_fit = np.reshape( + amorphous_model(np.vstack((xa.ravel(), ya.ravel())), coefs), im.shape + ) # plotting arrays - phi = np.linspace(0,2*np.pi,360) + phi = np.linspace(0, 2 * np.pi, 360) cp = np.cos(phi) sp = np.sin(phi) # plotting intensity range if plot_log_scale: int_med = np.median(np.log(vals)) - int_std = np.sqrt(np.median((np.log(vals) - int_med)**2)) + int_std = np.sqrt(np.median((np.log(vals) - int_med) ** 2)) int_range = ( - int_med + plot_int_scale[0]*int_std, - int_med + plot_int_scale[1]*int_std) - im_plot = np.tile(np.clip( - (np.log(im[:,:,None]) - int_range[0]) / (int_range[1] - int_range[0]), - 0,1),(1,1,3)) + int_med + plot_int_scale[0] * int_std, + int_med + plot_int_scale[1] * int_std, + ) + im_plot = np.tile( + np.clip( + (np.log(im[:, :, None]) - int_range[0]) / (int_range[1] - int_range[0]), + 0, + 1, + ), + (1, 1, 3), + ) else: int_med = np.median(vals) - int_std = np.sqrt(np.median((vals - int_med)**2)) + int_std = np.sqrt(np.median((vals - int_med) ** 2)) int_range = ( - int_med + plot_int_scale[0]*int_std, - int_med + plot_int_scale[1]*int_std) + int_med + plot_int_scale[0] * int_std, + int_med + plot_int_scale[1] * int_std, + ) im_plot = np.clip( - (im[:,:,None] - int_range[0]) / (int_range[1] - int_range[0]), - 0,1) + (im[:, :, None] - int_range[0]) / (int_range[1] - int_range[0]), 0, 1 + ) # vals_mean = np.mean(vals) # vals_std = np.std(vals) - # vmin = vals_mean - + # vmin = vals_mean - # plotting - fig,ax = plt.subplots(figsize=figsize) + fig, ax = plt.subplots(figsize=figsize) ax.imshow( im_plot, - vmin = 0, - vmax = 1, - cmap = 'gray', - ) + vmin=0, + vmax=1, + cmap="gray", + ) x0 = coefs[0] y0 = coefs[1] @@ -312,39 +328,38 @@ def plot_amorphous_ring( s2 = coefs[10] ax.plot( - y0 + np.array((-1,1))*a*np.sin(t), - x0 + np.array((-1,1))*a*np.cos(t), - c = 'r', - ) + y0 + np.array((-1, 1)) * a * np.sin(t), + x0 + np.array((-1, 1)) * a * np.cos(t), + c="r", + ) ax.plot( - y0 + np.array((-1,1))*b*np.cos(t), - x0 + np.array((1,-1))*b*np.sin(t), - c = 'r', - linestyle = 'dashed', - ) + y0 + np.array((-1, 1)) * b * np.cos(t), + x0 + np.array((1, -1)) * b * np.sin(t), + c="r", + linestyle="dashed", + ) ax.plot( - y0 + a*np.sin(t)*cp + b*np.cos(t)*sp, - x0 + a*np.cos(t)*cp - b*np.sin(t)*sp, - c = 'r', - ) + y0 + a * np.sin(t) * cp + b * np.cos(t) * sp, + x0 + a * np.cos(t) * cp - b * np.sin(t) * sp, + c="r", + ) scale = 1 - s1 / a
ax.plot( - y0 + scale*a*np.sin(t)*cp + scale*b*np.cos(t)*sp, - x0 + scale*a*np.cos(t)*cp - scale*b*np.sin(t)*sp, - c = 'r', - linestyle='dashed', - ) + y0 + scale * a * np.sin(t) * cp + scale * b * np.cos(t) * sp, + x0 + scale * a * np.cos(t) * cp - scale * b * np.sin(t) * sp, + c="r", + linestyle="dashed", + ) scale = 1 + s2 / a ax.plot( - y0 + scale*a*np.sin(t)*cp + scale*b*np.cos(t)*sp, - x0 + scale*a*np.cos(t)*cp - scale*b*np.sin(t)*sp, - c = 'r', - linestyle='dashed', - ) - ax.set_xlim((0,im.shape[1]-1)) - ax.set_ylim((im.shape[0]-1,0)) - + y0 + scale * a * np.sin(t) * cp + scale * b * np.cos(t) * sp, + x0 + scale * a * np.cos(t) * cp - scale * b * np.sin(t) * sp, + c="r", + linestyle="dashed", + ) + ax.set_xlim((0, im.shape[1] - 1)) + ax.set_ylim((im.shape[0] - 1, 0)) def amorphous_model(basis, *coefs): @@ -365,20 +380,19 @@ def amorphous_model(basis, *coefs): sigma1 = coefs[9] sigma2 = coefs[10] - x0 = basis[0,:] - x0 - y0 = basis[1,:] - y0 - x = np.cos(t)*x0 - (b/a)*np.sin(t)*y0 - y = np.sin(t)*x0 + (b/a)*np.cos(t)*y0 + x0 = basis[0, :] - x0 + y0 = basis[1, :] - y0 + x = np.cos(t) * x0 - (b / a) * np.sin(t) * y0 + y = np.sin(t) * x0 + (b / a) * np.cos(t) * y0 r2 = x**2 + y**2 dr = np.sqrt(r2) - b dr2 = dr**2 sub = dr < 0 - int_model = k_bg + \ - int0*np.exp(r2/(-2*sigma0**2)) - int_model[sub] += int12*np.exp(dr2[sub]/(-2*sigma1**2)) + int_model = k_bg + int0 * np.exp(r2 / (-2 * sigma0**2)) + int_model[sub] += int12 * np.exp(dr2[sub] / (-2 * sigma1**2)) sub = np.logical_not(sub) - int_model[sub] += int12*np.exp(dr2[sub]/(-2*sigma2**2)) + int_model[sub] += int12 * np.exp(dr2[sub] / (-2 * sigma2**2)) - return int_model \ No newline at end of file + return int_model diff --git a/py4DSTEM/process/polar/polar_peaks.py b/py4DSTEM/process/polar/polar_peaks.py index 6a6e0860a..be9ae989e 100644 --- a/py4DSTEM/process/polar/polar_peaks.py +++ b/py4DSTEM/process/polar/polar_peaks.py @@ -1,4 +1,3 @@ - import numpy as np import matplotlib.pyplot as plt @@ -10,34 +9,38 @@ # from emdfile import tqdmnd, PointList, PointListArray from py4DSTEM import tqdmnd, PointList, PointListArray -from py4DSTEM.process.fit import polar_twofold_gaussian_2D, polar_twofold_gaussian_2D_background +from py4DSTEM.process.fit import ( + polar_twofold_gaussian_2D, + polar_twofold_gaussian_2D_background, +) + def find_peaks_single_pattern( self, x, y, - mask = None, - bragg_peaks = None, - bragg_mask_radius = None, - sigma_annular_deg = 10.0, - sigma_radial_px = 3.0, - sigma_annular_deg_max = None, - radial_background_subtract = True, - radial_background_thresh = 0.25, - num_peaks_max = 100, - threshold_abs = 1.0, - threshold_prom_annular = None, - threshold_prom_radial = None, - remove_masked_peaks = False, - scale_sigma_annular = 0.5, - scale_sigma_radial = 0.25, - return_background = False, - plot_result = True, - plot_power_scale = 1.0, - plot_scale_size = 10.0, - figsize = (12,6), - returnfig = False, - ): + mask=None, + bragg_peaks=None, + bragg_mask_radius=None, + sigma_annular_deg=10.0, + sigma_radial_px=3.0, + sigma_annular_deg_max=None, + radial_background_subtract=True, + radial_background_thresh=0.25, + num_peaks_max=100, + threshold_abs=1.0, + threshold_prom_annular=None, + threshold_prom_radial=None, + remove_masked_peaks=False, + scale_sigma_annular=0.5, + scale_sigma_radial=0.25, + return_background=False, + plot_result=True, + plot_power_scale=1.0, + plot_scale_size=10.0, + figsize=(12, 6), + returnfig=False, +): """ Peak detection function for polar transformations. 
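A minimal call sketch for the method whose signature appears above; `polardata` is a hypothetical polar-transformed datacube object carrying this method, and the keyword values simply echo the defaults shown:

import numpy as np

peaks = polardata.find_peaks_single_pattern(
    x=0, y=0,                 # probe position to inspect
    sigma_annular_deg=10.0,   # periodic smoothing along the annular direction
    sigma_radial_px=3.0,      # non-periodic smoothing along the radial direction
    threshold_abs=1.0,        # absolute threshold for local maxima
    plot_result=False,
)
# peaks is a PointList with fields qt, qr, intensity,
# prom_annular, sigma_annular, prom_radial, sigma_radial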
@@ -56,7 +59,7 @@ def find_peaks_single_pattern( sigma_radial_px: float smoothing along the radial direction in pixels, not periodic sigma_annular_deg_max: float - Specify this value for the max annular sigma. Peaks larger than this will be split + Specify this value for the max annular sigma. Peaks larger than this will be split into multiple peaks, depending on the ratio. radial_background_subtract: bool If true, subtract radial background estimate @@ -96,7 +99,7 @@ def find_peaks_single_pattern( peaks_polar : pointlist The detected peaks fig, ax : (optional) - Figure and axes handles + Figure and axes handles """ @@ -106,24 +109,23 @@ def find_peaks_single_pattern( bragg_peaks, x, y, - radius = bragg_mask_radius, + radius=bragg_mask_radius, ) if mask is None: mask = mask_bragg else: mask = np.logical_or(mask, mask_bragg) - # Convert sigma values into units of bins sigma_annular = np.deg2rad(sigma_annular_deg) / self.annular_step sigma_radial = sigma_radial_px / self.qstep - + # Get transformed image and normalization array im_polar, im_polar_norm, norm_array, mask_bool = self.transform( - self._datacube.data[x,y], - mask = mask, - returnval = 'all_zeros', - ) + self._datacube.data[x, y], + mask=mask, + returnval="all_zeros", + ) # Change sign convention of mask mask_bool = np.logical_not(mask_bool) @@ -131,219 +133,231 @@ def find_peaks_single_pattern( if radial_background_subtract: sig_bg = np.zeros(im_polar.shape[1]) for a0 in range(im_polar.shape[1]): - if np.any(mask_bool[:,a0]): - vals = np.sort(im_polar[mask_bool[:,a0],a0]) - ind = np.round(radial_background_thresh * (vals.shape[0]-1)).astype('int') + if np.any(mask_bool[:, a0]): + vals = np.sort(im_polar[mask_bool[:, a0], a0]) + ind = np.round(radial_background_thresh * (vals.shape[0] - 1)).astype( + "int" + ) sig_bg[a0] = vals[ind] - sig_bg_mask = np.sum(mask_bool, axis=0) >= (im_polar.shape[0]//2) - im_polar = np.maximum(im_polar - sig_bg[None,:], 0) + sig_bg_mask = np.sum(mask_bool, axis=0) >= (im_polar.shape[0] // 2) + im_polar = np.maximum(im_polar - sig_bg[None, :], 0) # apply smoothing and normalization im_polar_sm = gaussian_filter( im_polar * norm_array, - sigma = (sigma_annular, sigma_radial), - mode = ('wrap', 'nearest'), - ) + sigma=(sigma_annular, sigma_radial), + mode=("wrap", "nearest"), + ) im_mask = gaussian_filter( norm_array, - sigma = (sigma_annular, sigma_radial), - mode = ('wrap', 'nearest'), - ) + sigma=(sigma_annular, sigma_radial), + mode=("wrap", "nearest"), + ) sub = im_mask > 0.001 * np.max(im_mask) im_polar_sm[sub] /= im_mask[sub] # Find local maxima peaks = peak_local_max( im_polar_sm, - num_peaks = num_peaks_max, - threshold_abs = threshold_abs, - ) + num_peaks=num_peaks_max, + threshold_abs=threshold_abs, + ) # check if peaks should be removed from the polar transformation mask if remove_masked_peaks: peaks = np.delete( - peaks, - mask_bool[peaks[:,0],peaks[:,1]] == False, - axis = 0, - ) + peaks, + mask_bool[peaks[:, 0], peaks[:, 1]] == False, + axis=0, + ) # peak intensity - peaks_int = im_polar_sm[peaks[:,0],peaks[:,1]] + peaks_int = im_polar_sm[peaks[:, 0], peaks[:, 1]] # Estimate prominance of peaks, and their size in units of pixels - peaks_prom = np.zeros((peaks.shape[0],4)) - annular_ind_center = np.atleast_1d(np.array(im_polar_sm.shape[0]//2).astype('int')) + peaks_prom = np.zeros((peaks.shape[0], 4)) + annular_ind_center = np.atleast_1d( + np.array(im_polar_sm.shape[0] // 2).astype("int") + ) for a0 in range(peaks.shape[0]): - # annular trace_annular = np.roll( - 
np.squeeze(im_polar_sm[:,peaks[a0,1]]), - annular_ind_center - peaks[a0,0]) + np.squeeze(im_polar_sm[:, peaks[a0, 1]]), annular_ind_center - peaks[a0, 0] + ) p_annular = peak_prominences( - trace_annular, + trace_annular, annular_ind_center, - ) + ) sigma_annular = scale_sigma_annular * np.minimum( - annular_ind_center - p_annular[1], - p_annular[2] - annular_ind_center) + annular_ind_center - p_annular[1], p_annular[2] - annular_ind_center + ) # radial - trace_radial = im_polar_sm[peaks[a0,0],:] + trace_radial = im_polar_sm[peaks[a0, 0], :] p_radial = peak_prominences( - trace_radial, - np.atleast_1d(peaks[a0,1]), - ) + trace_radial, + np.atleast_1d(peaks[a0, 1]), + ) sigma_radial = scale_sigma_radial * np.minimum( - peaks[a0,1] - p_radial[1], - p_radial[2] - peaks[a0,1]) + peaks[a0, 1] - p_radial[1], p_radial[2] - peaks[a0, 1] + ) # output - peaks_prom[a0,0] = p_annular[0] - peaks_prom[a0,1] = sigma_annular[0] - peaks_prom[a0,2] = p_radial[0] - peaks_prom[a0,3] = sigma_radial[0] + peaks_prom[a0, 0] = p_annular[0] + peaks_prom[a0, 1] = sigma_annular[0] + peaks_prom[a0, 2] = p_radial[0] + peaks_prom[a0, 3] = sigma_radial[0] # if needed, remove peaks using prominance criteria if threshold_prom_annular is not None: - remove = peaks_prom[:,0] < threshold_prom_annular + remove = peaks_prom[:, 0] < threshold_prom_annular peaks = np.delete( peaks, remove, - axis = 0, - ) + axis=0, + ) peaks_int = np.delete( peaks_int, remove, - ) + ) peaks_prom = np.delete( peaks_prom, remove, - axis = 0, - ) + axis=0, + ) if threshold_prom_radial is not None: - remove = peaks_prom[:,2] < threshold_prom_radial + remove = peaks_prom[:, 2] < threshold_prom_radial peaks = np.delete( peaks, remove, - axis = 0, - ) + axis=0, + ) peaks_int = np.delete( peaks_int, remove, - ) + ) peaks_prom = np.delete( peaks_prom, remove, - axis = 0, - ) + axis=0, + ) # combine peaks into one array peaks_all = np.column_stack((peaks, peaks_int, peaks_prom)) # Split peaks into multiple peaks if they have sigma values larger than user-specified threshold if sigma_annular_deg_max is not None: - peaks_new = np.zeros((0,peaks_all.shape[1])) + peaks_new = np.zeros((0, peaks_all.shape[1])) for a0 in range(peaks_all.shape[0]): - if peaks_all[a0,4] >= (1.5*sigma_annular_deg_max): - num = np.round(peaks_all[a0,4] / sigma_annular_deg_max) - sigma_annular_new = peaks_all[a0,4] / num + if peaks_all[a0, 4] >= (1.5 * sigma_annular_deg_max): + num = np.round(peaks_all[a0, 4] / sigma_annular_deg_max) + sigma_annular_new = peaks_all[a0, 4] / num v = np.arange(num) v -= np.mean(v) - t_new = np.mod(peaks_all[a0,0] + 2*v*sigma_annular_new, - self._n_annular) - - for a1 in range(num.astype('int')): - peaks_new = np.vstack(( - peaks_new, - np.array(( - t_new[a1], - peaks_all[a0,1], - peaks_all[a0,2], - peaks_all[a0,3], - sigma_annular_new, - peaks_all[a0,5], - peaks_all[a0,6], - )), - )) + t_new = np.mod( + peaks_all[a0, 0] + 2 * v * sigma_annular_new, self._n_annular + ) + + for a1 in range(num.astype("int")): + peaks_new = np.vstack( + ( + peaks_new, + np.array( + ( + t_new[a1], + peaks_all[a0, 1], + peaks_all[a0, 2], + peaks_all[a0, 3], + sigma_annular_new, + peaks_all[a0, 5], + peaks_all[a0, 6], + ) + ), + ) + ) else: - peaks_new = np.vstack(( - peaks_new, - peaks_all[a0,:] - )) + peaks_new = np.vstack((peaks_new, peaks_all[a0, :])) peaks_all = peaks_new - # Output data as a pointlist peaks_polar = PointList( - peaks_all.ravel().view([ - ('qt', float), - ('qr', float), - ('intensity', float), - ('prom_annular', float), - ('sigma_annular', float), - 
('prom_radial', float), - ('sigma_radial', float), - ]), - name = 'peaks_polar') - + peaks_all.ravel().view( + [ + ("qt", float), + ("qr", float), + ("intensity", float), + ("prom_annular", float), + ("sigma_annular", float), + ("prom_radial", float), + ("sigma_radial", float), + ] + ), + name="peaks_polar", + ) if plot_result: # init im_plot = im_polar.copy() im_plot = np.maximum(im_plot, 0) ** plot_power_scale - t = np.linspace(0,2*np.pi,180+1) + t = np.linspace(0, 2 * np.pi, 180 + 1) ct = np.cos(t) st = np.sin(t) - - fig,ax = plt.subplots(figsize=figsize) + fig, ax = plt.subplots(figsize=figsize) ax.imshow( im_plot, - cmap = 'gray', - ) + cmap="gray", + ) # peaks ax.scatter( - peaks_polar['qr'], - peaks_polar['qt'], - s = peaks_polar['intensity'] * plot_scale_size, - marker='o', - color = (1,0,0), - ) + peaks_polar["qr"], + peaks_polar["qt"], + s=peaks_polar["intensity"] * plot_scale_size, + marker="o", + color=(1, 0, 0), + ) for a0 in range(peaks_polar.data.shape[0]): ax.plot( - peaks_polar['qr'][a0] + st * peaks_polar['sigma_radial'][a0], - peaks_polar['qt'][a0] + ct * peaks_polar['sigma_annular'][a0], - linewidth = 1, - color = 'r', - ) - if peaks_polar['qt'][a0] - peaks_polar['sigma_annular'][a0] < 0: + peaks_polar["qr"][a0] + st * peaks_polar["sigma_radial"][a0], + peaks_polar["qt"][a0] + ct * peaks_polar["sigma_annular"][a0], + linewidth=1, + color="r", + ) + if peaks_polar["qt"][a0] - peaks_polar["sigma_annular"][a0] < 0: ax.plot( - peaks_polar['qr'][a0] + st * peaks_polar['sigma_radial'][a0], - peaks_polar['qt'][a0] + ct * peaks_polar['sigma_annular'][a0] + im_plot.shape[0], - linewidth = 1, - color = 'r', - ) - if peaks_polar['qt'][a0] + peaks_polar['sigma_annular'][a0] > im_plot.shape[0]: + peaks_polar["qr"][a0] + st * peaks_polar["sigma_radial"][a0], + peaks_polar["qt"][a0] + + ct * peaks_polar["sigma_annular"][a0] + + im_plot.shape[0], + linewidth=1, + color="r", + ) + if ( + peaks_polar["qt"][a0] + peaks_polar["sigma_annular"][a0] + > im_plot.shape[0] + ): ax.plot( - peaks_polar['qr'][a0] + st * peaks_polar['sigma_radial'][a0], - peaks_polar['qt'][a0] + ct * peaks_polar['sigma_annular'][a0] - im_plot.shape[0], - linewidth = 1, - color = 'r', - ) + peaks_polar["qr"][a0] + st * peaks_polar["sigma_radial"][a0], + peaks_polar["qt"][a0] + + ct * peaks_polar["sigma_annular"][a0] + - im_plot.shape[0], + linewidth=1, + color="r", + ) # plot appearance - ax.set_xlim((0,im_plot.shape[1]-1)) - ax.set_ylim((im_plot.shape[0]-1,0)) + ax.set_xlim((0, im_plot.shape[1] - 1)) + ax.set_ylim((im_plot.shape[0] - 1, 0)) if returnfig and plot_result: if return_background: return peaks_polar, sig_bg, sig_bg_mask, fig, ax else: - return peaks_polar, fig, ax + return peaks_polar, fig, ax else: if return_background: return peaks_polar, sig_bg, sig_bg_mask @@ -353,23 +367,23 @@ def find_peaks_single_pattern( def find_peaks( self, - mask = None, - bragg_peaks = None, - bragg_mask_radius = None, - sigma_annular_deg = 10.0, - sigma_radial_px = 3.0, - sigma_annular_deg_max = None, - radial_background_subtract = True, - radial_background_thresh = 0.25, - num_peaks_max = 100, - threshold_abs = 1.0, - threshold_prom_annular = None, - threshold_prom_radial = None, - remove_masked_peaks = False, - scale_sigma_annular = 0.5, - scale_sigma_radial = 0.25, - progress_bar = True, - ): + mask=None, + bragg_peaks=None, + bragg_mask_radius=None, + sigma_annular_deg=10.0, + sigma_radial_px=3.0, + sigma_annular_deg_max=None, + radial_background_subtract=True, + radial_background_thresh=0.25, + num_peaks_max=100, + 
threshold_abs=1.0, + threshold_prom_annular=None, + threshold_prom_radial=None, + remove_masked_peaks=False, + scale_sigma_annular=0.5, + scale_sigma_radial=0.25, + progress_bar=True, +): """ Peak detection function for polar transformations. Loop through all probe positions, find peaks. Store the peak positions and background signals. @@ -380,7 +394,7 @@ def find_peaks( smoothing along the annular direction in degrees, periodic sigma_radial_px: float smoothing along the radial direction in pixels, not periodic - + Returns -------- @@ -390,27 +404,33 @@ def find_peaks( self.bragg_peaks = bragg_peaks self.bragg_mask_radius = bragg_mask_radius self.peaks = PointListArray( - dtype = [ - ('qt', ' min_num_pixels_fit: try: # perform fitting p0, pcov = curve_fit( - polar_twofold_gaussian_2D, - tq[:,mask_peak.ravel()], - im_polar[mask_peak], - p0 = p0, + polar_twofold_gaussian_2D, + tq[:, mask_peak.ravel()], + im_polar[mask_peak], + p0=p0, # bounds = bounds, - ) + ) # Output parameters - self.peaks[rx,ry]['intensity'][a0] = p0[0] - self.peaks[rx,ry]['qt'][a0] = p0[1] / t_step - self.peaks[rx,ry]['qr'][a0] = p0[2] / q_step - self.peaks[rx,ry]['sigma_annular'][a0] = p0[3] / t_step - self.peaks[rx,ry]['sigma_radial'][a0] = p0[4] / q_step - + self.peaks[rx, ry]["intensity"][a0] = p0[0] + self.peaks[rx, ry]["qt"][a0] = p0[1] / t_step + self.peaks[rx, ry]["qr"][a0] = p0[2] / q_step + self.peaks[rx, ry]["sigma_annular"][a0] = p0[3] / t_step + self.peaks[rx, ry]["sigma_radial"][a0] = p0[4] / q_step + except: pass else: # initial parameters p0 = [ - p['intensity'][a0], - p['qt'][a0] * t_step, - p['qr'][a0] * q_step, - p['sigma_annular'][a0] * t_step, - p['sigma_radial'][a0] * q_step, + p["intensity"][a0], + p["qt"][a0] * t_step, + p["qr"][a0] * q_step, + p["sigma_annular"][a0] * t_step, + p["sigma_radial"][a0] * q_step, 0, - ] + ] # Mask around peak for fitting - dt = np.mod(tt - p0[1] + np.pi/2, np.pi) - np.pi/2 - mask_peak = np.logical_and(mask_bool, - dt**2/(fit_range_sigma_annular*p0[3])**2 \ - + (qq-p0[2])**2/(fit_range_sigma_radial*p0[4])**2 <= 1) + dt = np.mod(tt - p0[1] + np.pi / 2, np.pi) - np.pi / 2 + mask_peak = np.logical_and( + mask_bool, + dt**2 / (fit_range_sigma_annular * p0[3]) ** 2 + + (qq - p0[2]) ** 2 / (fit_range_sigma_radial * p0[4]) ** 2 + <= 1, + ) if np.sum(mask_peak) > min_num_pixels_fit: try: # perform fitting p0, pcov = curve_fit( - polar_twofold_gaussian_2D_background, - tq[:,mask_peak.ravel()], - im_polar[mask_peak], - p0 = p0, + polar_twofold_gaussian_2D_background, + tq[:, mask_peak.ravel()], + im_polar[mask_peak], + p0=p0, # bounds = bounds, - ) + ) # Output parameters - self.peaks[rx,ry]['intensity'][a0] = p0[0] - self.peaks[rx,ry]['qt'][a0] = p0[1] / t_step - self.peaks[rx,ry]['qr'][a0] = p0[2] / q_step - self.peaks[rx,ry]['sigma_annular'][a0] = p0[3] / t_step - self.peaks[rx,ry]['sigma_radial'][a0] = p0[4] / q_step + self.peaks[rx, ry]["intensity"][a0] = p0[0] + self.peaks[rx, ry]["qt"][a0] = p0[1] / t_step + self.peaks[rx, ry]["qr"][a0] = p0[2] / q_step + self.peaks[rx, ry]["sigma_annular"][a0] = p0[3] / t_step + self.peaks[rx, ry]["sigma_radial"][a0] = p0[4] / q_step except: pass @@ -625,27 +650,32 @@ def refine_peaks_local( def plot_radial_peaks( self, - q_pixel_units = False, - qmin = None, - qmax = None, - qstep = None, - label_y_axis = False, - figsize = (8,4), - returnfig = False, - ): + q_pixel_units=False, + qmin=None, + qmax=None, + qstep=None, + label_y_axis=False, + figsize=(8, 4), + returnfig=False, +): """ Calculate and plot the total peak signal as a 
function of the radial coordinate. """ - + # Get all peak data vects = np.concatenate( - [self.peaks[i,j].data for i in range(self._datacube.Rshape[0]) for j in range(self._datacube.Rshape[1])]) + [ + self.peaks[i, j].data + for i in range(self._datacube.Rshape[0]) + for j in range(self._datacube.Rshape[1]) + ] + ) if q_pixel_units: - qr = vects['qr'] + qr = vects["qr"] else: - qr = (vects['qr'] + self.qmin) * self._radial_step - intensity = vects['intensity'] + qr = (vects["qr"] + self.qmin) * self._radial_step + intensity = vects["intensity"] # bins if qmin is None: @@ -654,7 +684,7 @@ def plot_radial_peaks( qmax = self.qq[-1] if qstep is None: qstep = self.qq[1] - self.qq[0] - q_bins = np.arange(qmin,qmax,qstep) + q_bins = np.arange(qmin, qmax, qstep) q_num = q_bins.shape[0] if q_pixel_units: q_bins /= self._radial_step @@ -677,48 +707,45 @@ def plot_radial_peaks( minlength=q_num, ) - # plotting - fig,ax = plt.subplots(figsize = figsize) + fig, ax = plt.subplots(figsize=figsize) ax.plot( q_bins, int_peaks, - color = 'r', - linewidth = 2, - ) - ax.set_xlim((q_bins[0],q_bins[-1])) + color="r", + linewidth=2, + ) + ax.set_xlim((q_bins[0], q_bins[-1])) if q_pixel_units: ax.set_xlabel( - 'Scattering Angle (pixels)', - fontsize = 14, - ) + "Scattering Angle (pixels)", + fontsize=14, + ) else: ax.set_xlabel( - 'Scattering Angle (' + self.calibration.get_Q_pixel_units() +')', - fontsize = 14, - ) - ax.set_ylabel( - 'Total Peak Signal', - fontsize = 14, + "Scattering Angle (" + self.calibration.get_Q_pixel_units() + ")", + fontsize=14, ) + ax.set_ylabel( + "Total Peak Signal", + fontsize=14, + ) if not label_y_axis: - ax.tick_params( - left = False, - labelleft = False) + ax.tick_params(left=False, labelleft=False) if returnfig: - return fig,ax + return fig, ax def model_radial_background( self, - ring_position = None, - ring_sigma = None, - ring_int = None, - refine_model = True, - plot_result = True, - figsize = (8,4), - ): + ring_position=None, + ring_sigma=None, + ring_int=None, + refine_model=True, + plot_result=True, + figsize=(8, 4), +): """ User provided radial background model, of the form: @@ -729,20 +756,20 @@ def model_radial_background( + int_n * exp( - (q - q_n)**2 / (2*sn**2) ) where n is the number of amorphous halos / rings included in the fit. 
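For example, two rings give 3 + 3*2 = 9 coefficients, stored in self.background_coefs in the order (int_const, int_0, s_0, int_1, s_1, q_1, int_2, s_2, q_2): the constant offset and central Gaussian first, then one (intensity, sigma, position) triple per ring, matching the indexing used below.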
- + """ # Get mean radial background and mask self.background_radial_mean = np.sum( - self.background_radial * self.background_radial_mask, - axis=(0,1)) - background_radial_mean_norm = np.sum( - self.background_radial_mask, - axis=(0,1)) - self.background_mask = \ - background_radial_mean_norm > (np.max(background_radial_mean_norm)*0.05) - self.background_radial_mean[self.background_mask] \ - /= background_radial_mean_norm[self.background_mask] + self.background_radial * self.background_radial_mask, axis=(0, 1) + ) + background_radial_mean_norm = np.sum(self.background_radial_mask, axis=(0, 1)) + self.background_mask = background_radial_mean_norm > ( + np.max(background_radial_mean_norm) * 0.05 + ) + self.background_radial_mean[self.background_mask] /= background_radial_mean_norm[ + self.background_mask + ] self.background_radial_mean[np.logical_not(self.background_mask)] = 0 # init @@ -751,11 +778,15 @@ def model_radial_background( num_rings = ring_position.shape[0] else: num_rings = 0 - self.background_coefs = np.zeros(3 + 3*num_rings) + self.background_coefs = np.zeros(3 + 3 * num_rings) if ring_sigma is None: - ring_sigma = np.atleast_1d(np.ones(num_rings)) \ - * self.polar_shape[1] * 0.05 * self._radial_step + ring_sigma = ( + np.atleast_1d(np.ones(num_rings)) + * self.polar_shape[1] + * 0.05 + * self._radial_step + ) else: ring_sigma = np.atleast_1d(np.array(ring_sigma)) @@ -770,7 +801,7 @@ def model_radial_background( # Additional Gaussians if ring_int is None: # Estimate peak intensities - sig_0 = int_const + int_0*np.exp(self.qq**2/(-2*sigma_0**2)) + sig_0 = int_const + int_0 * np.exp(self.qq**2 / (-2 * sigma_0**2)) sig_peaks = np.maximum(self.background_radial_mean - sig_0, 0.0) ring_int = np.atleast_1d(np.zeros(num_rings)) @@ -781,23 +812,24 @@ def model_radial_background( else: ring_int = np.atleast_1d(np.array(ring_int)) for a0 in range(num_rings): - self.background_coefs[3*a0+3] = ring_int[a0] - self.background_coefs[3*a0+4] = ring_sigma[a0] - self.background_coefs[3*a0+5] = ring_position[a0] + self.background_coefs[3 * a0 + 3] = ring_int[a0] + self.background_coefs[3 * a0 + 4] = ring_sigma[a0] + self.background_coefs[3 * a0 + 5] = ring_position[a0] lb = np.zeros_like(self.background_coefs) ub = np.ones_like(self.background_coefs) * np.inf # Create background model def background_model(q, *coefs): coefs = np.squeeze(np.array(coefs)) - num_rings = np.round((coefs.shape[0] - 3)/3).astype('int') + num_rings = np.round((coefs.shape[0] - 3) / 3).astype("int") - sig = np.ones(q.shape[0])*coefs[0] - sig += coefs[1]*np.exp(q**2/(-2*coefs[2]**2)) + sig = np.ones(q.shape[0]) * coefs[0] + sig += coefs[1] * np.exp(q**2 / (-2 * coefs[2] ** 2)) for a0 in range(num_rings): - sig += coefs[3*a0+3]*np.exp( - (q-coefs[3*a0+5])**2 / (-2*coefs[3*a0+4]**2)) + sig += coefs[3 * a0 + 3] * np.exp( + (q - coefs[3 * a0 + 5]) ** 2 / (-2 * coefs[3 * a0 + 4] ** 2) + ) return sig @@ -806,35 +838,34 @@ def background_model(q, *coefs): # Refine background model coefficients if refine_model: self.background_coefs = curve_fit( - self.background_model, - self.qq[self.background_mask], - self.background_radial_mean[self.background_mask], - p0 = self.background_coefs, - xtol = 1e-12, - bounds = (lb,ub), + self.background_model, + self.qq[self.background_mask], + self.background_radial_mean[self.background_mask], + p0=self.background_coefs, + xtol=1e-12, + bounds=(lb, ub), )[0] # plotting if plot_result: self.plot_radial_background( - q_pixel_units = False, - plot_background_model = True, - figsize = figsize, - ) 
- + q_pixel_units=False, + plot_background_model=True, + figsize=figsize, + ) def refine_peaks( self, - mask = None, + mask=None, # reset_fits_to_init_positions = False, - scale_sigma_estimate = 0.5, - min_num_pixels_fit = 10, - maxfev = None, - progress_bar = True, - ): + scale_sigma_estimate=0.5, + min_num_pixels_fit=10, + maxfev=None, + progress_bar=True, +): """ - Use global fitting model for all images. Requires an background model + Use global fitting model for all images. Requires an background model specified with self.model_radial_background(). TODO: add fitting reset @@ -868,35 +899,37 @@ def refine_peaks( q_step = self._radial_step # Background model params - num_rings = np.round((self.background_coefs.shape[0]-3)/3).astype('int') + num_rings = np.round((self.background_coefs.shape[0] - 3) / 3).astype("int") # basis - qq,tt = np.meshgrid( + qq, tt = np.meshgrid( self.qq, self.tt, - ) - basis = np.zeros((qq.size,3)) - basis[:,0] = tt.ravel() - basis[:,1] = qq.ravel() - basis[:,2] = num_rings + ) + basis = np.zeros((qq.size, 3)) + basis[:, 0] = tt.ravel() + basis[:, 1] = qq.ravel() + basis[:, 2] = num_rings # init self.peaks_refine = PointListArray( - dtype = [ - ('qt', 'float'), - ('qr', 'float'), - ('intensity', 'float'), - ('sigma_annular', 'float'), - ('sigma_radial', 'float')], - shape = self._datacube.Rshape, - name = 'peaks_polardata_refined', + dtype=[ + ("qt", "float"), + ("qr", "float"), + ("intensity", "float"), + ("sigma_annular", "float"), + ("sigma_radial", "float"), + ], + shape=self._datacube.Rshape, + name="peaks_polardata_refined", + ) + self.background_refine = np.zeros( + ( + self._datacube.Rshape[0], + self._datacube.Rshape[1], + np.round(3 * num_rings + 3).astype("int"), ) - self.background_refine = np.zeros(( - self._datacube.Rshape[0], - self._datacube.Rshape[1], - np.round(3*num_rings+3).astype('int'), - )) - + ) # Main loop over probe positions for rx, ry in tqdmnd( @@ -904,138 +937,143 @@ def refine_peaks( self._datacube.shape[1], desc="Refining peaks ", unit=" probe positions", - disable=not progress_bar): - + disable=not progress_bar, + ): # Get transformed image and normalization array im_polar, im_polar_norm, norm_array, mask_bool = self.transform( - self._datacube.data[rx,ry], - mask = mask, - returnval = 'all_zeros', - ) + self._datacube.data[rx, ry], + mask=mask, + returnval="all_zeros", + ) # Change sign convention of mask mask_bool = np.logical_not(mask_bool) # Get initial peaks, in dimensioned units - p = self.peaks[rx,ry] - qt = p.data['qt'] * t_step - qr = (p.data['qr'] + self.qmin) * q_step - int_peaks = p.data['intensity'] - s_annular = p.data['sigma_annular'] * t_step - s_radial = p.data['sigma_radial'] * q_step - num_peaks = p['qt'].shape[0] + p = self.peaks[rx, ry] + qt = p.data["qt"] * t_step + qr = (p.data["qr"] + self.qmin) * q_step + int_peaks = p.data["intensity"] + s_annular = p.data["sigma_annular"] * t_step + s_radial = p.data["sigma_radial"] * q_step + num_peaks = p["qt"].shape[0] # unified coefficients # Note we sharpen sigma estimate for refinement - coefs_all = np.hstack(( - self.background_coefs, - qt, - qr, - int_peaks, - s_annular * scale_sigma_estimate, - s_radial * scale_sigma_estimate, - )) + coefs_all = np.hstack( + ( + self.background_coefs, + qt, + qr, + int_peaks, + s_annular * scale_sigma_estimate, + s_radial * scale_sigma_estimate, + ) + ) # bounds lb = np.zeros_like(coefs_all) ub = np.ones_like(coefs_all) * np.inf - # Construct fitting model def fit_image(basis, *coefs): coefs = np.squeeze(np.array(coefs)) - 
num_rings = np.round(basis[0,2]).astype('int') - num_peaks = np.round((coefs.shape[0] - (3*num_rings+3))/5).astype('int') - - coefs_bg = coefs[:(3*num_rings+3)] - coefs_peaks = coefs[(3*num_rings+3):] + num_rings = np.round(basis[0, 2]).astype("int") + num_peaks = np.round((coefs.shape[0] - (3 * num_rings + 3)) / 5).astype( + "int" + ) + coefs_bg = coefs[: (3 * num_rings + 3)] + coefs_peaks = coefs[(3 * num_rings + 3) :] # Background - sig = self.background_model( - basis[:,1], - coefs_bg) + sig = self.background_model(basis[:, 1], coefs_bg) # add peaks for a0 in range(num_peaks): - dt = np.mod(basis[:,0] - coefs_peaks[num_peaks*0+a0] + np.pi/2, np.pi) - np.pi/2 - dq = basis[:,1] - coefs_peaks[num_peaks*1+a0] + dt = ( + np.mod( + basis[:, 0] - coefs_peaks[num_peaks * 0 + a0] + np.pi / 2, np.pi + ) + - np.pi / 2 + ) + dq = basis[:, 1] - coefs_peaks[num_peaks * 1 + a0] - sig += coefs_peaks[num_peaks*2+a0] \ - * np.exp( - dt**2 / (-2*coefs_peaks[num_peaks*3+a0]**2) + \ - dq**2 / (-2*coefs_peaks[num_peaks*4+a0]**2)) + sig += coefs_peaks[num_peaks * 2 + a0] * np.exp( + dt**2 / (-2 * coefs_peaks[num_peaks * 3 + a0] ** 2) + + dq**2 / (-2 * coefs_peaks[num_peaks * 4 + a0] ** 2) + ) return sig # refine fitting model try: with warnings.catch_warnings(): - warnings.simplefilter('ignore') + warnings.simplefilter("ignore") if maxfev is None: coefs_all = curve_fit( - fit_image, - basis[mask_bool.ravel(),:], - im_polar[mask_bool], - p0 = coefs_all, - xtol = 1e-12, - bounds = (lb,ub), + fit_image, + basis[mask_bool.ravel(), :], + im_polar[mask_bool], + p0=coefs_all, + xtol=1e-12, + bounds=(lb, ub), )[0] else: coefs_all = curve_fit( - fit_image, - basis[mask_bool.ravel(),:], - im_polar[mask_bool], - p0 = coefs_all, - xtol = 1e-12, - maxfev = maxfev, - bounds = (lb,ub), + fit_image, + basis[mask_bool.ravel(), :], + im_polar[mask_bool], + p0=coefs_all, + xtol=1e-12, + maxfev=maxfev, + bounds=(lb, ub), )[0] # Output refined peak parameters - coefs_peaks = np.reshape( - coefs_all[(3*num_rings+3):], - (5,num_peaks)).T - self.peaks_refine[rx,ry] = PointList( - coefs_peaks.ravel().view([ - ('qt', float), - ('qr', float), - ('intensity', float), - ('sigma_annular', float), - ('sigma_radial', float), - ]), - name = 'peaks_polar') + coefs_peaks = np.reshape(coefs_all[(3 * num_rings + 3) :], (5, num_peaks)).T + self.peaks_refine[rx, ry] = PointList( + coefs_peaks.ravel().view( + [ + ("qt", float), + ("qr", float), + ("intensity", float), + ("sigma_annular", float), + ("sigma_radial", float), + ] + ), + name="peaks_polar", + ) except: # if fitting has failed, we will still output the last iteration # TODO - add a flag for unconverged fits - coefs_peaks = np.reshape( - coefs_all[(3*num_rings+3):], - (5,num_peaks)).T - self.peaks_refine[rx,ry] = PointList( - coefs_peaks.ravel().view([ - ('qt', float), - ('qr', float), - ('intensity', float), - ('sigma_annular', float), - ('sigma_radial', float), - ]), - name = 'peaks_polar') + coefs_peaks = np.reshape(coefs_all[(3 * num_rings + 3) :], (5, num_peaks)).T + self.peaks_refine[rx, ry] = PointList( + coefs_peaks.ravel().view( + [ + ("qt", float), + ("qr", float), + ("intensity", float), + ("sigma_annular", float), + ("sigma_radial", float), + ] + ), + name="peaks_polar", + ) # mean background signal, # # but none of the peaks. 
# pass # Output refined parameters for background - coefs_bg = coefs_all[:(3*num_rings+3)] - self.background_refine[rx,ry] = coefs_bg - + coefs_bg = coefs_all[: (3 * num_rings + 3)] + self.background_refine[rx, ry] = coefs_bg # # Testing # im_fit = np.reshape( # fit_image(basis,coefs_all), # self.polar_shape) - # fig,ax = plt.subplots(figsize=(8,6)) # ax.imshow( # np.vstack(( @@ -1048,102 +1086,93 @@ def fit_image(basis, *coefs): def plot_radial_background( self, - q_pixel_units = False, - label_y_axis = False, - plot_background_model = False, - figsize = (8,4), - returnfig = False, - ): + q_pixel_units=False, + label_y_axis=False, + plot_background_model=False, + figsize=(8, 4), + returnfig=False, +): """ Calculate and plot the mean background signal, background standard deviation. """ - + # mean self.background_radial_mean = np.sum( - self.background_radial * self.background_radial_mask, - axis=(0,1)) - background_radial_mean_norm = np.sum( - self.background_radial_mask, - axis=(0,1)) - self.background_mask = \ - background_radial_mean_norm > (np.max(background_radial_mean_norm)*0.05) - self.background_radial_mean[self.background_mask] \ - /= background_radial_mean_norm[self.background_mask] + self.background_radial * self.background_radial_mask, axis=(0, 1) + ) + background_radial_mean_norm = np.sum(self.background_radial_mask, axis=(0, 1)) + self.background_mask = background_radial_mean_norm > ( + np.max(background_radial_mean_norm) * 0.05 + ) + self.background_radial_mean[self.background_mask] /= background_radial_mean_norm[ + self.background_mask + ] self.background_radial_mean[np.logical_not(self.background_mask)] = 0 # variance and standard deviation self.background_radial_var = np.sum( - (self.background_radial - self.background_radial_mean[None,None,:])**2 \ + (self.background_radial - self.background_radial_mean[None, None, :]) ** 2 * self.background_radial_mask, - axis=(0,1)) - self.background_radial_var[self.background_mask] \ - /= self.background_radial_var[self.background_mask] + axis=(0, 1), + ) + self.background_radial_var[self.background_mask] /= self.background_radial_var[ + self.background_mask + ] self.background_radial_var[np.logical_not(self.background_mask)] = 0 self.background_radial_std = np.sqrt(self.background_radial_var) - if q_pixel_units: q_axis = np.arange(self.qq.shape[0]) else: q_axis = self.qq[self.background_mask] - fig,ax = plt.subplots(figsize = figsize) + fig, ax = plt.subplots(figsize=figsize) ax.fill_between( - q_axis, - self.background_radial_mean[self.background_mask] \ - - self.background_radial_std[self.background_mask], - self.background_radial_mean[self.background_mask] \ - + self.background_radial_std[self.background_mask], - color = 'r', + q_axis, + self.background_radial_mean[self.background_mask] + - self.background_radial_std[self.background_mask], + self.background_radial_mean[self.background_mask] + + self.background_radial_std[self.background_mask], + color="r", alpha=0.2, - ) + ) ax.plot( q_axis, self.background_radial_mean[self.background_mask], - color = 'r', - linewidth = 2, - ) + color="r", + linewidth=2, + ) # overlay fitting model if plot_background_model: sig = self.background_model( self.qq, self.background_coefs, - ) - ax.plot( - q_axis, - sig, - color = 'k', - linewidth = 2, - linestyle = '--' - ) + ) + ax.plot(q_axis, sig, color="k", linewidth=2, linestyle="--") # plot appearance - ax.set_xlim(( - q_axis[0], - q_axis[-1])) + ax.set_xlim((q_axis[0], q_axis[-1])) if q_pixel_units: ax.set_xlabel( - 'Scattering Angle (pixels)', 
- fontsize = 14, - ) + "Scattering Angle (pixels)", + fontsize=14, + ) else: ax.set_xlabel( - 'Scattering Angle (' + self.calibration.get_Q_pixel_units() +')', - fontsize = 14, - ) - ax.set_ylabel( - 'Background Signal', - fontsize = 14, + "Scattering Angle (" + self.calibration.get_Q_pixel_units() + ")", + fontsize=14, ) + ax.set_ylabel( + "Background Signal", + fontsize=14, + ) if not label_y_axis: - ax.tick_params( - left = False, - labelleft = False) + ax.tick_params(left=False, labelleft=False) if returnfig: - return fig,ax + return fig, ax def make_orientation_histogram( @@ -1153,9 +1182,9 @@ def make_orientation_histogram( orientation_offset_degrees: float = 0.0, orientation_separate_bins: bool = False, upsample_factor: float = 4.0, - use_refined_peaks = True, - use_peak_sigma = False, - peak_sigma_samples = 6, + use_refined_peaks=True, + use_peak_sigma=False, + peak_sigma_samples=6, theta_step_deg: float = None, sigma_x: float = 1.0, sigma_y: float = 1.0, @@ -1163,7 +1192,7 @@ def make_orientation_histogram( normalize_intensity_image: bool = False, normalize_intensity_stack: bool = True, progress_bar: bool = True, - ): +): """ Make an orientation histogram, in order to use flowline visualization of orientation maps. Use peaks attached to polardatacube. @@ -1188,7 +1217,7 @@ def make_orientation_histogram( progress_bar (bool): Enable progress bar Returns: - orient_hist (array): 4D array containing Bragg peak intensity histogram + orient_hist (array): 4D array containing Bragg peak intensity histogram [radial_bin x_probe y_probe theta] """ @@ -1197,7 +1226,7 @@ def make_orientation_histogram( # Get angles from polardatacube theta = self.tt else: - theta = np.arange(0,180,theta_step_deg) * np.pi / 180.0 + theta = np.arange(0, 180, theta_step_deg) * np.pi / 180.0 dtheta = theta[1] - theta[0] dtheta_deg = dtheta * 180 / np.pi num_theta_bins = np.size(theta) @@ -1205,24 +1234,22 @@ def make_orientation_histogram( # Input bins radial_ranges = np.array(radial_ranges) if radial_ranges.ndim == 1: - radial_ranges = radial_ranges[None,:] + radial_ranges = radial_ranges[None, :] radial_ranges_2 = radial_ranges**2 num_radii = radial_ranges.shape[0] size_input = self._datacube.shape[0:2] # Output size - size_output = np.round(np.array(size_input).astype('float') * upsample_factor).astype('int') + size_output = np.round( + np.array(size_input).astype("float") * upsample_factor + ).astype("int") # output init - orient_hist = np.zeros([ - num_radii, - size_output[0], - size_output[1], - num_theta_bins]) + orient_hist = np.zeros([num_radii, size_output[0], size_output[1], num_theta_bins]) if use_peak_sigma: - v_sigma = np.linspace(-2,2,2*peak_sigma_samples+1) - w_sigma = np.exp(-v_sigma**2/2) + v_sigma = np.linspace(-2, 2, 2 * peak_sigma_samples + 1) + w_sigma = np.exp(-(v_sigma**2) / 2) if use_refined_peaks is False: warnings.warn("Orientation histogram is using non-refined peak positions") @@ -1232,38 +1259,37 @@ def make_orientation_histogram( t = "Generating histogram " + str(a0) for rx, ry in tqdmnd( - *size_input, - desc=t, - unit=" probe positions", - disable=not progress_bar - ): - x = (rx + 0.5)*upsample_factor - 0.5 - y = (ry + 0.5)*upsample_factor - 0.5 - x = np.clip(x,0,size_output[0]-2) - y = np.clip(y,0,size_output[1]-2) + *size_input, desc=t, unit=" probe positions", disable=not progress_bar + ): + x = (rx + 0.5) * upsample_factor - 0.5 + y = (ry + 0.5) * upsample_factor - 0.5 + x = np.clip(x, 0, size_output[0] - 2) + y = np.clip(y, 0, size_output[1] - 2) - xF = 
np.floor(x).astype('int') - yF = np.floor(y).astype('int') + xF = np.floor(x).astype("int") + yF = np.floor(y).astype("int") dx = x - xF dy = y - yF add_data = False if use_refined_peaks: - q = self.peaks_refine[rx,ry]['qr'] + q = self.peaks_refine[rx, ry]["qr"] else: - q = (self.peaks[rx,ry]['qr'] + self.qmin) * self._radial_step + q = (self.peaks[rx, ry]["qr"] + self.qmin) * self._radial_step r2 = q**2 - sub = np.logical_and(r2 >= radial_ranges_2[a0,0], r2 < radial_ranges_2[a0,1]) + sub = np.logical_and( + r2 >= radial_ranges_2[a0, 0], r2 < radial_ranges_2[a0, 1] + ) if np.any(sub): add_data = True - intensity = self.peaks[rx,ry]['intensity'][sub] + intensity = self.peaks[rx, ry]["intensity"][sub] # Angles of all peaks if use_refined_peaks: - theta = self.peaks_refine[rx,ry]['qt'][sub] + theta = self.peaks_refine[rx, ry]["qt"][sub] else: - theta = self.peaks[rx,ry]['qt'][sub] * self._annular_step + theta = self.peaks[rx, ry]["qt"][sub] * self._annular_step if orientation_flip_sign: theta *= -1 theta += orientation_offset_degrees @@ -1273,75 +1299,109 @@ def make_orientation_histogram( # If needed, expand signal using peak sigma to write into multiple bins if use_peak_sigma: if use_refined_peaks: - theta_std = self.peaks_refine[rx,ry]['sigma_annular'][sub] / dtheta + theta_std = ( + self.peaks_refine[rx, ry]["sigma_annular"][sub] / dtheta + ) else: - theta_std = self.peaks[rx,ry]['sigma_annular'][sub] / dtheta - t = (t[:,None] + theta_std[:,None]*v_sigma[None,:]).ravel() - intensity = (intensity[:,None] * w_sigma[None,:]).ravel() + theta_std = self.peaks[rx, ry]["sigma_annular"][sub] / dtheta + t = (t[:, None] + theta_std[:, None] * v_sigma[None, :]).ravel() + intensity = (intensity[:, None] * w_sigma[None, :]).ravel() if add_data: - tF = np.floor(t).astype('int') + tF = np.floor(t).astype("int") dt = t - tF - orient_hist[a0,xF ,yF ,:] = orient_hist[a0,xF ,yF ,:] + \ - np.bincount(np.mod(tF ,num_theta_bins), - weights=(1-dx)*(1-dy)*(1-dt)*intensity,minlength=num_theta_bins) - orient_hist[a0,xF ,yF ,:] = orient_hist[a0,xF ,yF ,:] + \ - np.bincount(np.mod(tF+1,num_theta_bins), - weights=(1-dx)*(1-dy)*( dt)*intensity,minlength=num_theta_bins) - - orient_hist[a0,xF+1,yF ,:] = orient_hist[a0,xF+1,yF ,:] + \ - np.bincount(np.mod(tF ,num_theta_bins), - weights=( dx)*(1-dy)*(1-dt)*intensity,minlength=num_theta_bins) - orient_hist[a0,xF+1,yF ,:] = orient_hist[a0,xF+1,yF ,:] + \ - np.bincount(np.mod(tF+1,num_theta_bins), - weights=( dx)*(1-dy)*( dt)*intensity,minlength=num_theta_bins) - - orient_hist[a0,xF ,yF+1,:] = orient_hist[a0,xF ,yF+1,:] + \ - np.bincount(np.mod(tF ,num_theta_bins), - weights=(1-dx)*( dy)*(1-dt)*intensity,minlength=num_theta_bins) - orient_hist[a0,xF ,yF+1,:] = orient_hist[a0,xF ,yF+1,:] + \ - np.bincount(np.mod(tF+1,num_theta_bins), - weights=(1-dx)*( dy)*( dt)*intensity,minlength=num_theta_bins) - - orient_hist[a0,xF+1,yF+1,:] = orient_hist[a0,xF+1,yF+1,:] + \ - np.bincount(np.mod(tF ,num_theta_bins), - weights=( dx)*( dy)*(1-dt)*intensity,minlength=num_theta_bins) - orient_hist[a0,xF+1,yF+1,:] = orient_hist[a0,xF+1,yF+1,:] + \ - np.bincount(np.mod(tF+1,num_theta_bins), - weights=( dx)*( dy)*( dt)*intensity,minlength=num_theta_bins) + orient_hist[a0, xF, yF, :] = orient_hist[a0, xF, yF, :] + np.bincount( + np.mod(tF, num_theta_bins), + weights=(1 - dx) * (1 - dy) * (1 - dt) * intensity, + minlength=num_theta_bins, + ) + orient_hist[a0, xF, yF, :] = orient_hist[a0, xF, yF, :] + np.bincount( + np.mod(tF + 1, num_theta_bins), + weights=(1 - dx) * (1 - dy) * (dt) * intensity, + 
minlength=num_theta_bins, + ) + + orient_hist[a0, xF + 1, yF, :] = orient_hist[ + a0, xF + 1, yF, : + ] + np.bincount( + np.mod(tF, num_theta_bins), + weights=(dx) * (1 - dy) * (1 - dt) * intensity, + minlength=num_theta_bins, + ) + orient_hist[a0, xF + 1, yF, :] = orient_hist[ + a0, xF + 1, yF, : + ] + np.bincount( + np.mod(tF + 1, num_theta_bins), + weights=(dx) * (1 - dy) * (dt) * intensity, + minlength=num_theta_bins, + ) + + orient_hist[a0, xF, yF + 1, :] = orient_hist[ + a0, xF, yF + 1, : + ] + np.bincount( + np.mod(tF, num_theta_bins), + weights=(1 - dx) * (dy) * (1 - dt) * intensity, + minlength=num_theta_bins, + ) + orient_hist[a0, xF, yF + 1, :] = orient_hist[ + a0, xF, yF + 1, : + ] + np.bincount( + np.mod(tF + 1, num_theta_bins), + weights=(1 - dx) * (dy) * (dt) * intensity, + minlength=num_theta_bins, + ) + + orient_hist[a0, xF + 1, yF + 1, :] = orient_hist[ + a0, xF + 1, yF + 1, : + ] + np.bincount( + np.mod(tF, num_theta_bins), + weights=(dx) * (dy) * (1 - dt) * intensity, + minlength=num_theta_bins, + ) + orient_hist[a0, xF + 1, yF + 1, :] = orient_hist[ + a0, xF + 1, yF + 1, : + ] + np.bincount( + np.mod(tF + 1, num_theta_bins), + weights=(dx) * (dy) * (dt) * intensity, + minlength=num_theta_bins, + ) # smoothing / interpolation if (sigma_x is not None) or (sigma_y is not None) or (sigma_theta is not None): if num_radii > 1: - print('Interpolating orientation matrices ...', end='') + print("Interpolating orientation matrices ...", end="") else: - print('Interpolating orientation matrix ...', end='') + print("Interpolating orientation matrix ...", end="") if sigma_x is not None and sigma_x > 0: orient_hist = gaussian_filter1d( - orient_hist,sigma_x*upsample_factor, - mode='nearest', + orient_hist, + sigma_x * upsample_factor, + mode="nearest", axis=1, - truncate=3.0) + truncate=3.0, + ) if sigma_y is not None and sigma_y > 0: orient_hist = gaussian_filter1d( - orient_hist,sigma_y*upsample_factor, - mode='nearest', + orient_hist, + sigma_y * upsample_factor, + mode="nearest", axis=2, - truncate=3.0) + truncate=3.0, + ) if sigma_theta is not None and sigma_theta > 0: orient_hist = gaussian_filter1d( - orient_hist,sigma_theta/dtheta_deg, - mode='wrap', - axis=3, - truncate=2.0) - print(' done.') + orient_hist, sigma_theta / dtheta_deg, mode="wrap", axis=3, truncate=2.0 + ) + print(" done.") # normalization if normalize_intensity_stack is True: - orient_hist = orient_hist / np.max(orient_hist) + orient_hist = orient_hist / np.max(orient_hist) elif normalize_intensity_image is True: for a0 in range(num_radii): - orient_hist[a0,:,:,:] = orient_hist[a0,:,:,:] / np.max(orient_hist[a0,:,:,:]) + orient_hist[a0, :, :, :] = orient_hist[a0, :, :, :] / np.max( + orient_hist[a0, :, :, :] + ) - return orient_hist \ No newline at end of file + return orient_hist diff --git a/py4DSTEM/process/rdf/__init__.py b/py4DSTEM/process/rdf/__init__.py index 025f794da..feff32583 100644 --- a/py4DSTEM/process/rdf/__init__.py +++ b/py4DSTEM/process/rdf/__init__.py @@ -1,2 +1 @@ from py4DSTEM.process.rdf.rdf import * - diff --git a/py4DSTEM/process/rdf/amorph.py b/py4DSTEM/process/rdf/amorph.py index a537896b9..3aaf63c45 100644 --- a/py4DSTEM/process/rdf/amorph.py +++ b/py4DSTEM/process/rdf/amorph.py @@ -1,9 +1,10 @@ import numpy as np import matplotlib.pyplot as plt -from py4DSTEM.process.utils.elliptical_coords import * ## What else is used here? These fns have - ## moved around some. In general, specifying - ## the fns is better practice. 
TODO: change - ## this import +from py4DSTEM.process.utils.elliptical_coords import * ## What else is used here? These fns have + +## moved around some. In general, specifying +## the fns is better practice. TODO: change +## this import from py4DSTEM.process.calibration import fit_ellipse_amorphous_ring import matplotlib from tqdm import tqdm @@ -76,9 +77,9 @@ def calculate_coef_strain(coef_cube, r_ref): R / r_ref ) # this is a correction factor for what defines 0 strain, and must be applied to A, B and C. This has been found _experimentally_! TODO have someone else read this - A = 1 / r_ratio ** 2 - B = coef_cube[:, :, 9] / r_ratio ** 2 - C = coef_cube[:, :, 10] / r_ratio ** 2 + A = 1 / r_ratio**2 + B = coef_cube[:, :, 9] / r_ratio**2 + C = coef_cube[:, :, 10] / r_ratio**2 exx, eyy, exy = np.empty_like(A), np.empty_like(C), np.empty_like(B) diff --git a/py4DSTEM/process/rdf/rdf.py b/py4DSTEM/process/rdf/rdf.py index a1b7754c5..cee7eeee9 100644 --- a/py4DSTEM/process/rdf/rdf.py +++ b/py4DSTEM/process/rdf/rdf.py @@ -8,19 +8,21 @@ from py4DSTEM.process.utils import single_atom_scatter + def get_radial_intensity(polar_img, polar_mask): """ Takes in a radial transformed image and the radial mask (if any) applied to that image. Designed to be compatible with polar-elliptical transforms from utils """ - yMean = np.mean(polar_img,axis=0) - yNorm = np.mean(polar_mask,axis=0) + yMean = np.mean(polar_img, axis=0) + yNorm = np.mean(polar_mask, axis=0) sub = yNorm > 1e-1 yMean[sub] = yMean[sub] / yNorm[sub] return yMean -def fit_scattering_factor(scale, elements, composition, q_arr,units): + +def fit_scattering_factor(scale, elements, composition, q_arr, units): """ Scale is linear factor Elements is an 1D array of atomic numbers. @@ -31,18 +33,20 @@ def fit_scattering_factor(scale, elements, composition, q_arr,units): """ ##TODO: actually do fitting - scatter = single_atom_scatter(elements,composition,q_arr,units) + scatter = single_atom_scatter(elements, composition, q_arr, units) scatter.get_scattering_factor() - return scale*scatter.fe**2 + return scale * scatter.fe**2 -def get_phi(radialIntensity,scatter,q_arr): + +def get_phi(radialIntensity, scatter, q_arr): """ ymean scale*scatter.fe**2 """ - return ((radialIntensity-scatter)/scatter)*q_arr + return ((radialIntensity - scatter) / scatter) * q_arr + -def get_mask(left,right,midpoint,slopes,q_arr): +def get_mask(left, right, midpoint, slopes, q_arr): """ start is float stop is float @@ -50,25 +54,28 @@ def get_mask(left,right,midpoint,slopes,q_arr): slopes is [float,float] """ vec = q_arr - mask_left = (erf(slopes[0]*(vec-left)) + 1) / 2 - mask_right = (erf(slopes[1]*(right-vec)) + 1) / 2 + mask_left = (erf(slopes[0] * (vec - left)) + 1) / 2 + mask_right = (erf(slopes[1] * (right - vec)) + 1) / 2 mid_idx = np.max(np.where(q_arr < midpoint)) mask_left[mid_idx:] = 0 mask_right[0:mid_idx] = 0 return mask_left + mask_right + def get_rdf(phi, q_arr): """ phi can be masked or not masked """ - sample_freq = 1/(q_arr[1]-q_arr[0]) #this assumes regularly spaced samples in q-space - radius = (np.arange(q_arr.shape[0])/q_arr.shape[0])*sample_freq - radius = radius*0.5 #scaling factor - radius += radius[1] #shift by minimum frequency, since first frequency sampled is finite - - G_r = dst(phi,type=2) - g_r = G_r/(4*np.pi*radius) + 1 - return g_r,radius - + sample_freq = 1 / ( + q_arr[1] - q_arr[0] + ) # this assumes regularly spaced samples in q-space + radius = (np.arange(q_arr.shape[0]) / q_arr.shape[0]) * sample_freq + radius = radius * 0.5 # scaling 
factor + radius += radius[ + 1 + ] # shift by minimum frequency, since first frequency sampled is finite + G_r = dst(phi, type=2) + g_r = G_r / (4 * np.pi * radius) + 1 + return g_r, radius diff --git a/py4DSTEM/process/utils/__init__.py b/py4DSTEM/process/utils/__init__.py index 9c19bef7d..643de1bf5 100644 --- a/py4DSTEM/process/utils/__init__.py +++ b/py4DSTEM/process/utils/__init__.py @@ -11,6 +11,5 @@ get_maxima_2D, get_shifted_ar, filter_2D_maxima, - linear_interpolation_2D + linear_interpolation_2D, ) - diff --git a/py4DSTEM/process/utils/cross_correlate.py b/py4DSTEM/process/utils/cross_correlate.py index 2d874e7a8..f9aac1312 100644 --- a/py4DSTEM/process/utils/cross_correlate.py +++ b/py4DSTEM/process/utils/cross_correlate.py @@ -118,10 +118,13 @@ def align_images_fourier( y0 = xp.round((y0 + dy) * 2.0) / 2.0 # subpixel shifts - xy_shift = upsampled_correlation(cc, upsample_factor, xp.array([x0, y0]), device = device) + xy_shift = upsampled_correlation( + cc, upsample_factor, xp.array([x0, y0]), device=device + ) return xy_shift + def align_and_shift_images( image_1, image_2, @@ -151,7 +154,7 @@ def align_and_shift_images( elif device == "gpu": xp = cp - + image_1 = xp.asarray(image_1) image_2 = xp.asarray(image_2) @@ -170,6 +173,6 @@ def align_and_shift_images( - image_1.shape[1] / 2 ) - image_2_shifted = get_shifted_ar(image_2, dx, dy, device= device) + image_2_shifted = get_shifted_ar(image_2, dx, dy, device=device) return image_2_shifted diff --git a/py4DSTEM/process/utils/masks.py b/py4DSTEM/process/utils/masks.py index 0caf0a3f7..c6800edc9 100644 --- a/py4DSTEM/process/utils/masks.py +++ b/py4DSTEM/process/utils/masks.py @@ -3,7 +3,8 @@ import numpy as np from scipy.ndimage import binary_dilation -def get_beamstop_mask(dp,qx0,qy0,theta,dtheta=1,w=10,r=10): + +def get_beamstop_mask(dp, qx0, qy0, theta, dtheta=1, w=10, r=10): """ Generates a beamstop shaped mask. 
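A minimal usage sketch for this function; the synthetic pattern and the center/angle values are assumptions for illustration, and the import path follows this file's location in the diff:

import numpy as np
from py4DSTEM.process.utils.masks import get_beamstop_mask

dp = np.random.rand(256, 256)  # stand-in diffraction pattern
# wedge at theta = 30 deg with a +/- 5 deg opening, handle dilated by w pixels,
# plus an r-pixel disk over the central beam
mask = get_beamstop_mask(dp, qx0=128.0, qy0=128.0, theta=30, dtheta=5, w=10, r=12)
dp_masked = np.where(mask, 0.0, dp)  # mask is True on the beamstop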
@@ -19,38 +20,37 @@ def get_beamstop_mask(dp,qx0,qy0,theta,dtheta=1,w=10,r=10): (2d boolean array): the mask """ # Handle inputs - theta = np.mod(np.radians(theta),2*np.pi) + theta = np.mod(np.radians(theta), 2 * np.pi) dtheta = np.abs(np.radians(dtheta)) # Get a meshgrid - Q_Nx,Q_Ny = dp.shape - qyy,qxx = np.meshgrid(np.arange(Q_Ny),np.arange(Q_Nx)) - qyy,qxx = qyy-qy0,qxx-qx0 + Q_Nx, Q_Ny = dp.shape + qyy, qxx = np.meshgrid(np.arange(Q_Ny), np.arange(Q_Nx)) + qyy, qxx = qyy - qy0, qxx - qx0 # wedge handles if dtheta > 0: - qzz = qxx+qyy*1j - phi = np.mod(np.angle(qzz),2*np.pi) + qzz = qxx + qyy * 1j + phi = np.mod(np.angle(qzz), 2 * np.pi) # Handle the branch cut in the complex plane - if theta-dtheta<0: - phi,theta = np.mod(phi+dtheta,2*np.pi),theta+dtheta - elif theta+dtheta>2*np.pi: - phi,theta = np.mod(phi-dtheta,2*np.pi),theta-dtheta - mask1 = np.abs(phi-theta)<dtheta - if w>0: - mask1 = binary_dilation(mask1,iterations=w) + if theta - dtheta < 0: + phi, theta = np.mod(phi + dtheta, 2 * np.pi), theta + dtheta + elif theta + dtheta > 2 * np.pi: + phi, theta = np.mod(phi - dtheta, 2 * np.pi), theta - dtheta + mask1 = np.abs(phi - theta) < dtheta + if w > 0: + mask1 = binary_dilation(mask1, iterations=w) # straight handles else: pass - # circle mask - qrr = np.hypot(qxx,qyy) - mask2 = qrr<r + # circle mask + qrr = np.hypot(qxx, qyy) + mask2 = qrr < r diff --git a/py4DSTEM/process/utils/utils.py b/py4DSTEM/process/utils/utils.py --- a/py4DSTEM/process/utils/utils.py +++ b/py4DSTEM/process/utils/utils.py if sigma > 0: ar = gaussian_filter(ar, sigma) # Get maxima and intensity arrays - maxima_bool = np.logical_and((ar > np.roll(ar, -1)) , (ar >= np.roll(ar, +1))) + maxima_bool = np.logical_and((ar > np.roll(ar, -1)), (ar >= np.roll(ar, +1))) x = np.arange(len(ar))[maxima_bool] intensity = ar[maxima_bool] # Sort by intensity - temp_ar = np.array([(x, inten) for inten, x in sorted(zip(intensity, x), reverse=True)]) + temp_ar = np.array( + [(x, inten) for inten, x in sorted(zip(intensity, x), reverse=True)] + ) x, intensity = temp_ar[:, 0], temp_ar[:, 1] # Remove points which are too close @@ -235,7 +270,7 @@ def get_maxima_1D(ar, sigma=0, minSpacing=0, minRelativeIntensity=0, relativeToP for i in range(len(x)): if not deletemask[i]: delete = np.abs(x[i] - x) < minSpacing - delete[:i + 1] = False + delete[: i + 1] = False deletemask = deletemask | delete x = np.delete(x, deletemask.nonzero()[0]) intensity = np.delete(intensity, deletemask.nonzero()[0]) @@ -259,7 +294,6 @@ def linear_interpolation_1D(ar, x): return (1 - dx) * ar[x0] + dx * ar[x1] - def add_to_2D_array_from_floats(ar, x, y, I): """ Adds the values I to array ar, distributing the value between the four pixels nearest @@ -271,13 +305,15 @@ def add_to_2D_array_from_floats(ar, x, y, I): Nx, Ny = ar.shape x0, x1 = (np.floor(x)).astype(int), (np.ceil(x)).astype(int) y0, y1 = (np.floor(y)).astype(int), (np.ceil(y)).astype(int) - mask = np.logical_and(np.logical_and(np.logical_and((x0>=0),(y0>=0)),(x1<Nx)),(y1<Ny)) + mask = np.logical_and( + np.logical_and(np.logical_and((x0 >= 0), (y0 >= 0)), (x1 < Nx)), (y1 < Ny) + ) dx = x - x0 dy = y - y0 ar[x0[mask], y0[mask]] += (1 - dx[mask]) * (1 - dy[mask]) * I[mask] - ar[x0[mask], y1[mask]] += (1 - dx[mask]) * ( dy[mask]) * I[mask] - ar[x1[mask], y0[mask]] += ( dx[mask]) * (1 - dy[mask]) * I[mask] - ar[x1[mask], y1[mask]] += ( dx[mask]) * ( dy[mask]) * I[mask] + ar[x0[mask], y1[mask]] += (1 - dx[mask]) * (dy[mask]) * I[mask] + ar[x1[mask], y0[mask]] += (dx[mask]) * (1 - dy[mask]) * I[mask] + ar[x1[mask], y1[mask]] += (dx[mask]) * (dy[mask]) * I[mask] return ar @@ -304,7 +340,9 @@ def get_voronoi_vertices(voronoi, nx, ny, dist=10): (list of ndarrays of shape (N,2)): the (x,y) coords of the vertices of each voronoi region """ - assert isinstance(voronoi, Voronoi), "voronoi must be a scipy.spatial.Voronoi
instance" + assert isinstance( + voronoi, Voronoi + ), "voronoi must be a scipy.spatial.Voronoi instance" vertex_list = [] @@ -315,9 +353,9 @@ def get_voronoi_vertices(voronoi, nx, ny, dist=10): for i in range(len(voronoi.ridge_vertices)): ridge = voronoi.ridge_vertices[i] if -1 in ridge: - edgeridge_vertices_and_points.append([max(ridge), - voronoi.ridge_points[i, 0], - voronoi.ridge_points[i, 1]]) + edgeridge_vertices_and_points.append( + [max(ridge), voronoi.ridge_points[i, 0], voronoi.ridge_points[i, 1]] + ) edgeridge_vertices_and_points = np.array(edgeridge_vertices_and_points) # Loop over all regions @@ -335,8 +373,12 @@ def get_voronoi_vertices(voronoi, nx, ny, dist=10): # For unknown vertices, get the first vertex it connects to, # and the two voronoi points that this ridge divides index_prev = vertex_indices[(i - 1) % len(vertex_indices)] - edgeridge_index = int(np.argwhere(edgeridge_vertices_and_points[:, 0] == index_prev)) - index_vert, region0, region1 = edgeridge_vertices_and_points[edgeridge_index, :] + edgeridge_index = int( + np.argwhere(edgeridge_vertices_and_points[:, 0] == index_prev) + ) + index_vert, region0, region1 = edgeridge_vertices_and_points[ + edgeridge_index, : + ] x, y = voronoi.vertices[index_vert] # Only add new points for unknown vertices if the known index it connects to # is inside the frame. Add points by finding the line segment starting at @@ -360,8 +402,12 @@ def get_voronoi_vertices(voronoi, nx, ny, dist=10): # Repeat for the second vertec the unknown vertex connects to index_next = vertex_indices[(i + 1) % len(vertex_indices)] - edgeridge_index = int(np.argwhere(edgeridge_vertices_and_points[:, 0] == index_next)) - index_vert, region0, region1 = edgeridge_vertices_and_points[edgeridge_index, :] + edgeridge_index = int( + np.argwhere(edgeridge_vertices_and_points[:, 0] == index_next) + ) + index_vert, region0, region1 = edgeridge_vertices_and_points[ + edgeridge_index, : + ] x, y = voronoi.vertices[index_vert] if (x > 0) and (x < nx) and (y > 0) and (y < ny): x_r0, y_r0 = voronoi.points[region0] @@ -388,17 +434,20 @@ def get_voronoi_vertices(voronoi, nx, ny, dist=10): return vertex_list + def get_ewpc_filter_function(Q_Nx, Q_Ny): - ''' + """ Returns a function for computing the exit wave power cepstrum of a diffraction pattern using a Hanning window. This can be passed as the filter_function in the Bragg disk detection functions (with the probe an array of ones) to find the lattice vectors by the EWPC method (but be careful as the lengths are now in realspace units!) See https://arxiv.org/abs/1911.00984 - ''' - h = np.hanning(Q_Nx)[:,np.newaxis] * np.hanning(Q_Ny)[np.newaxis,:] - return lambda x: np.abs(np.fft.fftshift(np.fft.fft2(h*np.log(np.maximum(x,0.01)))))**2 - + """ + h = np.hanning(Q_Nx)[:, np.newaxis] * np.hanning(Q_Ny)[np.newaxis, :] + return ( + lambda x: np.abs(np.fft.fftshift(np.fft.fft2(h * np.log(np.maximum(x, 0.01))))) + ** 2 + ) def fourier_resample( @@ -408,7 +457,8 @@ def fourier_resample( force_nonnegative=False, bandlimit_nyquist=None, bandlimit_power=2, - dtype=np.float32): + dtype=np.float32, +): """ Resize a 2D array along any dimension, using Fourier interpolation / extrapolation. For 4D input arrays, only the final two axes can be resized. 
@@ -433,33 +483,37 @@ def fourier_resample( # Verify input is 2D or 4D if np.size(array.shape) != 2 and np.size(array.shape) != 4: - raise Exception('Function does not support arrays with ' \ - + str(np.size(array.shape)) + ' dimensions') + raise Exception( + "Function does not support arrays with " + + str(np.size(array.shape)) + + " dimensions" + ) # Get input size from last 2 dimensions input__size = array.shape[-2:] - if scale is not None: - assert output_size is None, 'Cannot specify both a scaling factor and output size' - assert np.size(scale) == 1, 'scale should be a single value' + assert ( + output_size is None + ), "Cannot specify both a scaling factor and output size" + assert np.size(scale) == 1, "scale should be a single value" scale = np.asarray(scale) - output_size = (input__size * scale).astype('intp') + output_size = (input__size * scale).astype("intp") else: - assert scale is None, 'Cannot specify both a scaling factor and output size' - assert np.size(output_size) == 2, 'output_size must contain two values' + assert scale is None, "Cannot specify both a scaling factor and output size" + assert np.size(output_size) == 2, "output_size must contain two values" output_size = np.asarray(output_size) scale_output = np.prod(output_size) / np.prod(input__size) - if bandlimit_nyquist is not None: kx = np.fft.fftfreq(output_size[0]) ky = np.fft.fftfreq(output_size[1]) - k2 = kx[:,None]**2 + ky[None,:]**2 - # Gaussian filter - k_filt = np.exp((k2**(bandlimit_power/2))/(-2*bandlimit_nyquist**bandlimit_power)) - + k2 = kx[:, None] ** 2 + ky[None, :] ** 2 + # Gaussian filter + k_filt = np.exp( + (k2 ** (bandlimit_power / 2)) / (-2 * bandlimit_nyquist**bandlimit_power) + ) # generate slices # named as {dimension}_{corner}_{in_/out}, @@ -468,37 +522,37 @@ def fourier_resample( # x slices if output_size[0] > input__size[0]: # x dimension increases - x0 = int((input__size[0]+1)//2) - x1 = int( input__size[0] //2) + x0 = int((input__size[0] + 1) // 2) + x1 = int(input__size[0] // 2) x_ul_out = slice(0, x0) x_ul_in_ = slice(0, x0) - x_ll_out = slice(0-x1+output_size[0], output_size[0]) - x_ll_in_ = slice(0-x1+input__size[0], input__size[0]) + x_ll_out = slice(0 - x1 + output_size[0], output_size[0]) + x_ll_in_ = slice(0 - x1 + input__size[0], input__size[0]) x_ur_out = slice(0, x0) x_ur_in_ = slice(0, x0) - x_lr_out = slice(0-x1+output_size[0], output_size[0]) - x_lr_in_ = slice(0-x1+input__size[0], input__size[0]) + x_lr_out = slice(0 - x1 + output_size[0], output_size[0]) + x_lr_in_ = slice(0 - x1 + input__size[0], input__size[0]) elif output_size[0] < input__size[0]: # x dimension decreases - x0 = int((output_size[0]+1)//2) - x1 = int( output_size[0] //2) + x0 = int((output_size[0] + 1) // 2) + x1 = int(output_size[0] // 2) x_ul_out = slice(0, x0) x_ul_in_ = slice(0, x0) - x_ll_out = slice(0-x1+output_size[0], output_size[0]) - x_ll_in_ = slice(0-x1+input__size[0], input__size[0]) + x_ll_out = slice(0 - x1 + output_size[0], output_size[0]) + x_ll_in_ = slice(0 - x1 + input__size[0], input__size[0]) x_ur_out = slice(0, x0) x_ur_in_ = slice(0, x0) - x_lr_out = slice(0-x1+output_size[0], output_size[0]) - x_lr_in_ = slice(0-x1+input__size[0], input__size[0]) + x_lr_out = slice(0 - x1 + output_size[0], output_size[0]) + x_lr_in_ = slice(0 - x1 + input__size[0], input__size[0]) else: # x dimension does not change @@ -514,11 +568,11 @@ def fourier_resample( x_lr_out = slice(None) x_lr_in_ = slice(None) - #y slices + # y slices if output_size[1] > input__size[1]: # y increases - y0 = 
int((input__size[1]+1)//2) - y1 = int( input__size[1] //2) + y0 = int((input__size[1] + 1) // 2) + y1 = int(input__size[1] // 2) y_ul_out = slice(0, y0) y_ul_in_ = slice(0, y0) @@ -526,16 +580,16 @@ def fourier_resample( y_ll_out = slice(0, y0) y_ll_in_ = slice(0, y0) - y_ur_out = slice(0-y1+output_size[1], output_size[1]) - y_ur_in_ = slice(0-y1+input__size[1], input__size[1]) + y_ur_out = slice(0 - y1 + output_size[1], output_size[1]) + y_ur_in_ = slice(0 - y1 + input__size[1], input__size[1]) - y_lr_out = slice(0-y1+output_size[1], output_size[1]) - y_lr_in_ = slice(0-y1+input__size[1], input__size[1]) + y_lr_out = slice(0 - y1 + output_size[1], output_size[1]) + y_lr_in_ = slice(0 - y1 + input__size[1], input__size[1]) elif output_size[1] < input__size[1]: # y decreases - y0 = int((output_size[1]+1)//2) - y1 = int( output_size[1] //2) + y0 = int((output_size[1] + 1) // 2) + y1 = int(output_size[1] // 2) y_ul_out = slice(0, y0) y_ul_in_ = slice(0, y0) @@ -543,11 +597,11 @@ def fourier_resample( y_ll_out = slice(0, y0) y_ll_in_ = slice(0, y0) - y_ur_out = slice(0-y1+output_size[1], output_size[1]) - y_ur_in_ = slice(0-y1+input__size[1], input__size[1]) + y_ur_out = slice(0 - y1 + output_size[1], output_size[1]) + y_ur_in_ = slice(0 - y1 + input__size[1], input__size[1]) - y_lr_out = slice(0-y1+output_size[1], output_size[1]) - y_lr_in_ = slice(0-y1+input__size[1], input__size[1]) + y_lr_out = slice(0 - y1 + output_size[1], output_size[1]) + y_lr_in_ = slice(0 - y1 + input__size[1], input__size[1]) else: # y dimension does not change @@ -564,7 +618,7 @@ def fourier_resample( y_lr_in_ = slice(None) if len(array.shape) == 2: - # image array + # image array array_resize = np.zeros(output_size, dtype=np.complex64) array_fft = np.fft.fft2(array) @@ -581,7 +635,6 @@ def fourier_resample( # Back to real space array_resize = np.real(np.fft.ifft2(array_resize)).astype(dtype) - elif len(array.shape) == 4: # This case is the same as the 2D case, but loops over the probe index arrays @@ -590,26 +643,34 @@ def fourier_resample( array_fft = np.zeros(input__size, dtype=np.complex64) array_output = np.zeros(output_size, dtype=np.complex64) - for (Rx,Ry) in tqdmnd(array.shape[0],array.shape[1],desc='Resampling 4D datacube',unit='DP',unit_scale=True): - array_fft[:,:] = np.fft.fft2(array[Rx,Ry,:,:]) - array_output[:,:] = 0 + for Rx, Ry in tqdmnd( + array.shape[0], + array.shape[1], + desc="Resampling 4D datacube", + unit="DP", + unit_scale=True, + ): + array_fft[:, :] = np.fft.fft2(array[Rx, Ry, :, :]) + array_output[:, :] = 0 # copy each quadrant into the resize array - array_output[x_ul_out,y_ul_out] = array_fft[x_ul_in_,y_ul_in_] - array_output[x_ll_out,y_ll_out] = array_fft[x_ll_in_,y_ll_in_] - array_output[x_ur_out,y_ur_out] = array_fft[x_ur_in_,y_ur_in_] - array_output[x_lr_out,y_lr_out] = array_fft[x_lr_in_,y_lr_in_] + array_output[x_ul_out, y_ul_out] = array_fft[x_ul_in_, y_ul_in_] + array_output[x_ll_out, y_ll_out] = array_fft[x_ll_in_, y_ll_in_] + array_output[x_ur_out, y_ur_out] = array_fft[x_ur_in_, y_ur_in_] + array_output[x_lr_out, y_lr_out] = array_fft[x_lr_in_, y_lr_in_] # Band limit if needed if bandlimit_nyquist is not None: array_output *= k_filt # Back to real space - array_resize[Rx,Ry,:,:] = np.real(np.fft.ifft2(array_output)).astype(dtype) + array_resize[Rx, Ry, :, :] = np.real(np.fft.ifft2(array_output)).astype( + dtype + ) # Enforce positivity if needed, after filtering if force_nonnegative: - array_resize = np.maximum(array_resize,0) + array_resize = np.maximum(array_resize, 0) # 
Normalization array_resize = array_resize * scale_output @@ -617,23 +678,19 @@ def fourier_resample( return array_resize - - - - -#import matplotlib.pyplot as plt -#from mpl_toolkits.axes_grid1 import make_axes_locatable -#from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar -#import matplotlib.font_manager as fm +# import matplotlib.pyplot as plt +# from mpl_toolkits.axes_grid1 import make_axes_locatable +# from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar +# import matplotlib.font_manager as fm # # -#try: +# try: # from IPython.display import clear_output -#except ImportError: +# except ImportError: # def clear_output(wait=True): # pass # -#def plot(img, title='Image', savePath=None, cmap='inferno', show=True, vmax=None, +# def plot(img, title='Image', savePath=None, cmap='inferno', show=True, vmax=None, # figsize=(10, 10), scale=None): # fig, ax = plt.subplots(figsize=figsize) # im = ax.imshow(img, interpolation='nearest', cmap=plt.get_cmap(cmap), vmax=vmax) @@ -658,9 +715,3 @@ def fourier_resample( # fig.savefig(savePath + '.eps', dpi=600) # if show: # plt.show() - - - - - - diff --git a/py4DSTEM/process/wholepatternfit/wp_models.py b/py4DSTEM/process/wholepatternfit/wp_models.py index b19ffc902..3d53c1743 100644 --- a/py4DSTEM/process/wholepatternfit/wp_models.py +++ b/py4DSTEM/process/wholepatternfit/wp_models.py @@ -1,11 +1,24 @@ -from inspect import signature from typing import Optional +from enum import Flag, auto import numpy as np -from pdb import set_trace +class WPFModelType(Flag): + """ + Flags to signify capabilities and other semantics of a Model + """ + + BACKGROUND = auto() + + AMORPHOUS = auto() + LATTICE = auto() + MOIRE = auto() + + DUMMY = auto() # Model has no direct contribution to pattern + META = auto() # Model depends on multiple sub-Models -class WPFModelPrototype: + +class WPFModel: """ Prototype class for a compent of a whole-pattern model. Holds the following: @@ -29,11 +42,7 @@ class WPFModelPrototype: • keyword arguments. this is to provide some pre-computed information for convenience """ - def __init__( - self, - name: str, - params: dict, - ): + def __init__(self, name: str, params: dict, model_type=WPFModelType.DUMMY): self.name = name self.params = params @@ -41,12 +50,9 @@ def __init__( self.hasJacobian = getattr(self, "jacobian", None) is not None - # # check the function obeys the spec - # assert ( - # len(signature(self.func).parameters) == len(params) + 2 - # ), f"The model function has the wrong number of arguments in its signature. It must be written as func(DP, param1, param2, ..., **kwargs). The current signature is {str(signature(self.func))}" + self.model_type = model_type - def func(self, DP: np.ndarray, *args, **kwargs) -> None: + def func(self, DP: np.ndarray, x, **kwargs) -> None: raise NotImplementedError() # Required signature for the Jacobian: @@ -61,28 +67,40 @@ def __init__( initial_value, lower_bound: Optional[float] = None, upper_bound: Optional[float] = None, - ): - + ): + """ + Object representing a fitting parameter with bounds. 
+ + Can be specified three ways: + Parameter(initial_value) - Unbounded, with an initial guess + Parameter(initial_value, deviation) - Bounded within deviation of initial_guess + Parameter(initial_value, lower_bound, upper_bound) - Both bounds specified + """ if hasattr(initial_value, "__iter__"): if len(initial_value) == 2: initial_value = ( initial_value[0], - initial_value[0]-initial_value[1], - initial_value[0]+initial_value[1], - ) + initial_value[0] - initial_value[1], + initial_value[0] + initial_value[1], + ) self.set_params(*initial_value) else: self.set_params(initial_value, lower_bound, upper_bound) + # Store a dummy offset. This must be set by WPF during setup + # This stores the index in the master parameter and Jacobian arrays + # corresponding to this parameter + self.offset = np.nan + def set_params( - self, - initial_value, - lower_bound, + self, + initial_value, + lower_bound, upper_bound, - ): + ): self.initial_value = initial_value self.lower_bound = lower_bound if lower_bound is not None else -np.inf - self.upper_bound = upper_bound if upper_bound is not None else np.inf + self.upper_bound = upper_bound if upper_bound is not None else np.inf def __str__(self): return f"Value: {self.initial_value} (Range: {self.lower_bound},{self.upper_bound})" @@ -91,22 +109,81 @@ def __repr__(self): return f"Value: {self.initial_value} (Range: {self.lower_bound},{self.upper_bound})" -class DCBackground(WPFModelPrototype): +class _BaseModel(WPFModel): + """ + Model object used by the WPF class as a container for the global Parameters. + + **This object should not be instantiated directly.** + """ + + def __init__(self, x0, y0, name="Globals"): + params = {"x center": Parameter(x0), "y center": Parameter(y0)} + + super().__init__(name, params, model_type=WPFModelType.DUMMY) + + def func(self, DP: np.ndarray, x, **kwargs) -> None: + pass + + def jacobian(self, J: np.ndarray, *args, **kwargs) -> None: + pass + + +class DCBackground(WPFModel): + """ + Model representing constant background intensity. + + Parameters + ---------- + background_value + Background intensity value. + Specified as initial_value, (initial_value, deviation), or + (initial_value, lower_bound, upper_bound). See + Parameter documentation for details. + """ + def __init__(self, background_value=0.0, name="DC Background"): params = {"DC Level": Parameter(background_value)} - super().__init__(name, params) + super().__init__(name, params, model_type=WPFModelType.BACKGROUND) - def func(self, DP: np.ndarray, level, **kwargs) -> None: - DP += level + def func(self, DP: np.ndarray, x, **kwargs) -> None: + DP += x[self.params["DC Level"].offset] - def jacobian(self, J: np.ndarray, *args, offset: int, **kwargs): - J[:, offset] = 1 + def jacobian(self, J: np.ndarray, *args, **kwargs): + J[:, self.params["DC Level"].offset] = 1 -class GaussianBackground(WPFModelPrototype): +class GaussianBackground(WPFModel): + """ + Model representing a 2D Gaussian intensity distribution + + Parameters + ---------- + WPF: WholePatternFit + Parent WPF object + sigma + parameter specifying width of the Gaussian + Specified as initial_value, (initial_value, deviation), or + (initial_value, lower_bound, upper_bound). See + Parameter documentation for details. + intensity + parameter specifying intensity of the Gaussian + Specified as initial_value, (initial_value, deviation), or + (initial_value, lower_bound, upper_bound). See + Parameter documentation for details. 
+ global_center: bool + If True, uses same center coordinate as the global model + If False, uses an independent center + x0, y0: + Center coordinates of model for local origin + Specified as initial_value, (initial_value, deviation), or + (initial_value, lower_bound, upper_bound). See + Parameter documentation for details. + """ + def __init__( self, + WPF, sigma, intensity, global_center=True, @@ -116,94 +193,90 @@ def __init__( ): params = {"sigma": Parameter(sigma), "intensity": Parameter(intensity)} if global_center: - self.func = self.global_center_func - self.jacobian = self.global_center_jacobian + params["x center"] = WPF.coordinate_model.params["x center"] + params["y center"] = WPF.coordinate_model.params["y center"] else: params["x center"] = Parameter(x0) params["y center"] = Parameter(y0) - self.func = self.local_center_func - self.jacobian = self.local_center_jacobian - - super().__init__(name, params) - def global_center_func(self, DP: np.ndarray, sigma, level, **kwargs) -> None: - DP += level * np.exp(kwargs["global_r"] ** 2 / (-2 * sigma**2)) + super().__init__(name, params, model_type=WPFModelType.BACKGROUND) - def global_center_jacobian( - self, J: np.ndarray, sigma, level, offset: int, **kwargs - ) -> None: + def func(self, DP: np.ndarray, x: np.ndarray, **kwargs) -> None: + sigma = x[self.params["sigma"].offset] + level = x[self.params["intensity"].offset] - exp_expr = np.exp(kwargs["global_r"] ** 2 / (-2 * sigma**2)) + r = kwargs["parent"]._get_distance( + x, self.params["x center"], self.params["y center"] + ) - # dF/d(global_x0) - J[:, 0] += ( - level * (kwargs["xArray"] - kwargs["global_x0"]) * exp_expr / sigma**2 - ).ravel() + DP += level * np.exp(r**2 / (-2 * sigma**2)) - # dF/d(global_y0) - J[:, 1] += ( - level * (kwargs["yArray"] - kwargs["global_y0"]) * exp_expr / sigma**2 - ).ravel() - - # dF/s(sigma) - J[:, offset] = (level * kwargs["global_r"] ** 2 * exp_expr / sigma**3).ravel() + def jacobian(self, J: np.ndarray, x: np.ndarray, **kwargs) -> None: + sigma = x[self.params["sigma"].offset] + level = x[self.params["intensity"].offset] + x0 = x[self.params["x center"].offset] + y0 = x[self.params["y center"].offset] - # dF/d(level) - J[:, offset + 1] = exp_expr.ravel() - - def local_center_func(self, DP: np.ndarray, sigma, level, x0, y0, **kwargs) -> None: - DP += level * np.exp( - ((kwargs["xArray"] - x0) ** 2 + (kwargs["yArray"] - y0) ** 2) - / (-2 * sigma**2) + r = kwargs["parent"]._get_distance( + x, self.params["x center"], self.params["y center"] ) + exp_expr = np.exp(r**2 / (-2 * sigma**2)) - def local_center_jacobian( - self, J: np.ndarray, sigma, level, x0, y0, offset: int, **kwargs - ) -> None: - - # dF/s(sigma) - J[:, offset] = ( - level - * ((kwargs["xArray"] - x0) ** 2 + (kwargs["yArray"] - y0) ** 2) - * np.exp( - ((kwargs["xArray"] - x0) ** 2 + (kwargs["yArray"] - y0) ** 2) - / (-2 * sigma**2) - ) - / sigma**3 + # dF/d(x0) + J[:, self.params["x center"].offset] += ( + level * (kwargs["xArray"] - x0) * exp_expr / sigma**2 ).ravel() - # dF/d(level) - J[:, offset + 1] = np.exp( - ((kwargs["xArray"] - x0) ** 2 + (kwargs["yArray"] - y0) ** 2) - / (-2 * sigma**2) + # dF/d(y0) + J[:, self.params["y center"].offset] += ( + level * (kwargs["yArray"] - y0) * exp_expr / sigma**2 ).ravel() - # dF/d(x0) - J[:, offset + 2] = ( - level - * (kwargs["xArray"] - x0) - * np.exp( - ((kwargs["xArray"] - x0) ** 2 + (kwargs["yArray"] - y0) ** 2) - / (-2 * sigma**2) - ) - / sigma**2 + # dF/d(sigma) + J[:, self.params["sigma"].offset] += ( + level * r**2 * exp_expr / 
sigma**3 ).ravel() - # dF/d(y0) - J[:, offset + 3] = ( - level - * (kwargs["yArray"] - y0) - * np.exp( - ((kwargs["xArray"] - x0) ** 2 + (kwargs["yArray"] - y0) ** 2) - / (-2 * sigma**2) - ) - / sigma**2 - ).ravel() + # dF/d(level) + J[:, self.params["intensity"].offset] += exp_expr.ravel() -class GaussianRing(WPFModelPrototype): +class GaussianRing(WPFModel): + """ + Model representing a halo with Gaussian falloff + + Parameters + ---------- + WPF: WholePatternFit + parent fitting object + radius: + radius of halo + Specified as initial_value, (initial_value, deviation), or + (initial_value, lower_bound, upper_bound). See + Parameter documentation for details. + sigma: + width of Gaussian falloff + Specified as initial_value, (initial_value, deviation), or + (initial_value, lower_bound, upper_bound). See + Parameter documentation for details. + intensity: + Intensity of the halo + Specified as initial_value, (initial_value, deviation), or + (initial_value, lower_bound, upper_bound). See + Parameter documentation for details. + global_center: bool + If True, uses same center coordinate as the global model + If False, uses an independent center + x0, y0: + Center coordinates of model for local origin + Specified as initial_value, (initial_value, deviation), or + (initial_value, lower_bound, upper_bound). See + Parameter documentation for details. + """ + def __init__( self, + WPF, radius, sigma, intensity, @@ -216,114 +289,126 @@ def __init__( "radius": Parameter(radius), "sigma": Parameter(sigma), "intensity": Parameter(intensity), + "x center": WPF.coordinate_model.params["x center"] + if global_center + else Parameter(x0), + "y center": WPF.coordinate_model.params["y center"] + if global_center + else Parameter(y0), } - if global_center: - self.func = self.global_center_func - self.jacobian = self.global_center_jacobian - else: - params["x center"] = Parameter(x0) - params["y center"] = Parameter(y0) - self.func = self.local_center_func - self.jacobian = self.local_center_jacobian - super().__init__(name, params) + super().__init__(name, params, model_type=WPFModelType.AMORPHOUS) + + def func(self, DP: np.ndarray, x: np.ndarray, **kwargs) -> None: + radius = x[self.params["radius"].offset] + sigma = x[self.params["sigma"].offset] + level = x[self.params["intensity"].offset] + + r = kwargs["parent"]._get_distance( + x, self.params["x center"], self.params["y center"] + ) + + DP += level * np.exp((r - radius) ** 2 / (-2 * sigma**2)) - def global_center_func( - self, DP: np.ndarray, radius, sigma, level, **kwargs - ) -> None: - DP += level * np.exp((kwargs["global_r"] - radius) ** 2 / (-2 * sigma**2)) + def jacobian(self, J: np.ndarray, x: np.ndarray, **kwargs) -> None: + radius = x[self.params["radius"].offset] + sigma = x[self.params["sigma"].offset] + level = x[self.params["intensity"].offset] - def global_center_jacobian( - self, J: np.ndarray, radius, sigma, level, offset: int, **kwargs - ) -> None: + x0 = x[self.params["x center"].offset] + y0 = x[self.params["y center"].offset] + r = kwargs["parent"]._get_distance( + x, self.params["x center"], self.params["y center"] ) - local_r = radius - kwargs["global_r"] + local_r = radius - r clipped_r = np.maximum(local_r, 0.1) exp_expr = np.exp(local_r**2 / (-2 * sigma**2)) - # dF/d(global_x0) - J[:, 0] += ( + # dF/d(x0) + J[:, self.params["x center"].offset] += ( level * exp_expr - * (kwargs["xArray"] - kwargs["global_x0"]) + * (kwargs["xArray"] - x0) * local_r / (sigma**2 * clipped_r) ).ravel() - # dF/d(global_y0) - J[:, 1] += ( + # dF/d(y0) + 
J[:, self.params["y center"].offset] += ( level * exp_expr - * (kwargs["yArray"] - kwargs["global_y0"]) + * (kwargs["yArray"] - y0) * local_r / (sigma**2 * clipped_r) ).ravel() # dF/d(radius) - J[:, offset] += (-1.0 * level * exp_expr * local_r / (sigma**2)).ravel() - - # dF/d(sigma) - J[:, offset + 1] = ( - level * local_r ** 2 * exp_expr / sigma**3 - ).ravel() - - # dF/d(level) - J[:, offset + 2] = exp_expr.ravel() - - def local_center_func( - self, DP: np.ndarray, radius, sigma, level, x0, y0, **kwargs - ) -> None: - local_r = np.hypot(kwargs["xArray"] - x0, kwargs["yArray"] - y0) - DP += level * np.exp((local_r - radius) ** 2 / (-2 * sigma**2)) - - def local_center_jacobian( - self, J: np.ndarray, radius, sigma, level, x0, y0, offset: int, **kwargs - ) -> None: - return NotImplementedError() - # dF/d(radius) - - # dF/s(sigma) - J[:, offset] = ( - level - * ((kwargs["xArray"] - x0) ** 2 + (kwargs["yArray"] - y0) ** 2) - * np.exp( - ((kwargs["xArray"] - x0) ** 2 + (kwargs["yArray"] - y0) ** 2) - / (-2 * sigma**2) - ) - / sigma**3 + J[:, self.params["radius"].offset] += ( + -1.0 * level * exp_expr * local_r / (sigma**2) ).ravel() - # dF/d(level) - J[:, offset + 1] = np.exp( - ((kwargs["xArray"] - x0) ** 2 + (kwargs["yArray"] - y0) ** 2) - / (-2 * sigma**2) + # dF/d(sigma) + J[:, self.params["sigma"].offset] += ( + level * local_r**2 * exp_expr / sigma**3 ).ravel() - # dF/d(x0) - J[:, offset + 2] = ( - level - * (kwargs["xArray"] - x0) - * np.exp( - ((kwargs["xArray"] - x0) ** 2 + (kwargs["yArray"] - y0) ** 2) - / (-2 * sigma**2) - ) - / sigma**2 - ).ravel() + # dF/d(intensity) + J[:, self.params["intensity"].offset] += exp_expr.ravel() - # dF/d(y0) - J[:, offset + 3] = ( - level - * (kwargs["yArray"] - y0) - * np.exp( - ((kwargs["xArray"] - x0) ** 2 + (kwargs["yArray"] - y0) ** 2) - / (-2 * sigma**2) - ) - / sigma**2 - ).ravel() +class SyntheticDiskLattice(WPFModel): + """ + Model representing a lattice of diffraction disks with a soft edge + + Parameters + ---------- + + WPF: WholePatternFit + parent fitting object + ux, uy, vx, vy + x and y components of the lattice vectors u and v. + Specified as initial_value, (initial_value, deviation), or + (initial_value, lower_bound, upper_bound). See + Parameter documentation for details. + disk_radius + Radius of each diffraction disk. + Specified as initial_value, (initial_value, deviation), or + (initial_value, lower_bound, upper_bound). See + Parameter documentation for details. + disk_width + Width of the smooth falloff at the edge of the disk + Specified as initial_value, (initial_value, deviation), or + (initial_value, lower_bound, upper_bound). See + Parameter documentation for details. + u_max, v_max + Maximum lattice indices to include in the pattern. + Disks outside the pattern are automatically clipped. + intensity_0 + Initial intensity for each diffraction disk. + Each disk intensity is an independent fit variable in the final model. + Specified as initial_value, (initial_value, deviation), or + (initial_value, lower_bound, upper_bound). 
See + Parameter documentation for details. + exclude_indices: list + Indices to exclude from the pattern + include_indices: list + If specified, only the indices in the list are added to the pattern + """ -class SyntheticDiskLattice(WPFModelPrototype): def __init__( self, WPF, @@ -352,15 +437,14 @@ def __init__( params = {} if global_center: - self.func = self.global_center_func - self.jacobian = self.global_center_jacobian - - x0 = WPF.static_data["global_x0"] - y0 = WPF.static_data["global_y0"] + params["x center"] = WPF.coordinate_model.params["x center"] + params["y center"] = WPF.coordinate_model.params["y center"] else: params["x center"] = Parameter(x0) params["y center"] = Parameter(y0) - self.func = self.local_center_func + + x0 = params["x center"].initial_value + y0 = params["y center"].initial_value params["ux"] = Parameter(ux) params["uy"] = Parameter(uy) @@ -414,150 +498,607 @@ def __init__( if refine_width: params["edge width"] = Parameter(disk_width) - super().__init__(name, params) - - def global_center_func(self, DP: np.ndarray, *args, **kwargs) -> None: - # copy the global centers in the right place for the local center generator - self.local_center_func( - DP, kwargs["global_x0"], kwargs["global_y0"], *args, **kwargs - ) + super().__init__(name, params, model_type=WPFModelType.LATTICE) - def local_center_func(self, DP: np.ndarray, *args, **kwargs) -> None: + def func(self, DP: np.ndarray, x: np.ndarray, **static_data) -> None: + x0 = x[self.params["x center"].offset] + y0 = x[self.params["y center"].offset] + ux = x[self.params["ux"].offset] + uy = x[self.params["uy"].offset] + vx = x[self.params["vx"].offset] + vy = x[self.params["vy"].offset] - x0 = args[0] - y0 = args[1] - ux = args[2] - uy = args[3] - vx = args[4] - vy = args[5] + disk_radius = ( + x[self.params["disk radius"].offset] + if self.refine_radius + else self.disk_radius + ) - if self.refine_radius & self.refine_width: - disk_radius = args[-2] - elif self.refine_radius: - disk_radius = args[-1] - else: - disk_radius = self.disk_radius - disk_width = args[-1] if self.refine_width else self.disk_width + disk_width = ( + x[self.params["edge width"].offset] + if self.refine_width + else self.disk_width + ) for i, (u, v) in enumerate(zip(self.u_inds, self.v_inds)): - x = x0 + (u * ux) + (v * vx) - y = y0 + (u * uy) + (v * vy) - # if (x > 0) & (x < kwargs["Q_Nx"]) & (y > 0) & (y < kwargs["Q_Nx"]): - DP += args[i + 6] / ( + x_pos = x0 + (u * ux) + (v * vx) + y_pos = y0 + (u * uy) + (v * vy) + + DP += x[self.params[f"[{u},{v}] Intensity"].offset] / ( 1.0 + np.exp( np.minimum( 4 * ( np.sqrt( - (kwargs["xArray"] - x) ** 2 + (kwargs["yArray"] - y) ** 2 + (static_data["xArray"] - x_pos) ** 2 + + (static_data["yArray"] - y_pos) ** 2 ) - disk_radius ) - / disk_width, - 20) + / disk_width, + 20, + ) ) ) - def global_center_jacobian( - self, J: np.ndarray, *args, offset: int, **kwargs - ) -> None: - - x0 = kwargs["global_x0"] - y0 = kwargs["global_y0"] - r = np.maximum(5e-1, kwargs["global_r"]) - ux = args[0] - uy = args[1] - vx = args[2] - vy = args[3] - - if self.refine_radius & self.refine_width: - disk_radius = args[-2] - radius_ind = -2 - elif self.refine_radius: - disk_radius = args[-1] - radius_ind = -1 - else: - disk_radius = self.disk_radius - disk_width = args[-1] if self.refine_width else self.disk_width + def jacobian(self, J: np.ndarray, x: np.ndarray, **static_data) -> None: + x0 = x[self.params["x center"].offset] + y0 = x[self.params["y center"].offset] + ux = x[self.params["ux"].offset] + uy = 
x[self.params["uy"].offset] + vx = x[self.params["vx"].offset] + vy = x[self.params["vy"].offset] + WPF = static_data["parent"] + + r = np.maximum( + 5e-1, WPF._get_distance(x, self.params["x center"], self.params["y center"]) + ) + + disk_radius = ( + x[self.params["disk radius"].offset] + if self.refine_radius + else self.disk_radius + ) + + disk_width = ( + x[self.params["edge width"].offset] + if self.refine_width + else self.disk_width + ) for i, (u, v) in enumerate(zip(self.u_inds, self.v_inds)): - x = x0 + (u * ux) + (v * vx) - y = y0 + (u * uy) + (v * vy) + x_pos = x0 + (u * ux) + (v * vx) + y_pos = y0 + (u * uy) + (v * vy) - disk_intensity = args[i + 6] + disk_intensity = x[self.params[f"[{u},{v}] Intensity"].offset] - # if (x > 0) & (x < kwargs["Q_Nx"]) & (y > 0) & (y < kwargs["Q_Nx"]): r_disk = np.maximum( 5e-1, - np.sqrt((kwargs["xArray"] - x) ** 2 + (kwargs["yArray"] - y) ** 2), + np.sqrt( + (static_data["xArray"] - x_pos) ** 2 + + (static_data["yArray"] - y_pos) ** 2 + ), ) mask = r_disk < (2 * disk_radius) - top_exp = mask * np.exp(4 * ((mask * r_disk) - disk_radius) / disk_width) + top_exp = mask * np.exp( + np.minimum(30, 4 * ((mask * r_disk) - disk_radius) / disk_width) + ) - # dF/d(global_x0) + # dF/d(x0) dx = ( 4 - * args[i + 4] - * (kwargs["xArray"] - x) + * disk_intensity + * (static_data["xArray"] - x_pos) * top_exp / ((1.0 + top_exp) ** 2 * disk_width * r) ).ravel() - # dF/d(global_y0) + # dF/d(y0) dy = ( 4 - * args[i + 4] - * (kwargs["yArray"] - y) + * disk_intensity + * (static_data["yArray"] - y_pos) * top_exp / ((1.0 + top_exp) ** 2 * disk_width * r) ).ravel() - # because... reasons, sometimes we get NaN - # very far from the disk center. let's zero those: - # dx[np.isnan(dx)] = 0.0 - # dy[np.isnan(dy)] = 0.0 - - # insert global positional derivatives - J[:, 0] += disk_intensity * dx - J[:, 1] += disk_intensity * dy + # insert center position derivatives + J[:, self.params["x center"].offset] += disk_intensity * dx + J[:, self.params["y center"].offset] += disk_intensity * dy # insert lattice vector derivatives - J[:, offset] += disk_intensity * u * dx - J[:, offset + 1] += disk_intensity * u * dy - J[:, offset + 2] += disk_intensity * v * dx - J[:, offset + 3] += disk_intensity * v * dy + J[:, self.params["ux"].offset] += disk_intensity * u * dx + J[:, self.params["uy"].offset] += disk_intensity * u * dy + J[:, self.params["vx"].offset] += disk_intensity * v * dx + J[:, self.params["vy"].offset] += disk_intensity * v * dy # insert intensity derivative dI = (mask * (1.0 / (1.0 + top_exp))).ravel() - # dI[np.isnan(dI)] = 0.0 - J[:, offset + i + 4] = dI + J[:, self.params[f"[{u},{v}] Intensity"].offset] += dI # insert disk radius derivative if self.refine_radius: dR = ( - 4.0 * args[i + 4] * top_exp / (disk_width * (1.0 + top_exp) ** 2) + 4.0 * disk_intensity * top_exp / (disk_width * (1.0 + top_exp) ** 2) ).ravel() - # dR[np.isnan(dR)] = 0.0 - J[:, offset + len(args) + radius_ind] += dR + J[:, self.params["disk radius"].offset] += dR if self.refine_width: dW = ( 4.0 - * args[i + 4] + * disk_intensity * top_exp * (r_disk - disk_radius) / (disk_width**2 * (1.0 + top_exp) ** 2) ).ravel() - # dW[np.isnan(dW)] = 0.0 - J[:, offset + len(args) - 1] += dW + J[:, self.params["edge width"].offset] += dW + + +class SyntheticDiskMoire(WPFModel): + """ + Model of diffraction disks arising from interference between two lattices. + + The Moire unit cell is determined automatically using the two input lattices. 
+ + Parameters + ---------- + WPF: WholePatternFit + parent fitting object + lattice_a, lattice_b: SyntheticDiskLattice + parent lattices for the Moire + intensity_0 + Initial guess of Moire disk intensity + Specified as initial_value, (initial_value, deviation), or + (initial_value, lower_bound, upper_bound). See + Parameter documentation for details. + decorated_peaks: list + When specified, only the reflections in the list are decorated with Moire spots + If not specified, all peaks are decorated + link_moire_disk_intensities: bool + When False, each Moire disk has an independently fit intensity + When True, Moire disks arising from the same order of parent reflection share + the same intensity + link_disk_parameters: bool + When True, edge_width and disk_radius are inherited from lattice_a + refine_width: bool + Flag whether disk edge width is a fit variable + edge_width + Width of the soft edge of the diffraction disk. + Specified as initial_value, (initial_value, deviation), or + (initial_value, lower_bound, upper_bound). See + Parameter documentation for details. + refine_radius: bool + Flag whether disk radius is a fit variable + disk radius + Radius of the diffraction disks + Specified as initial_value, (initial_value, deviation), or + (initial_value, lower_bound, upper_bound). See + Parameter documentation for details. + """ + + def __init__( + self, + WPF, + lattice_a: SyntheticDiskLattice, + lattice_b: SyntheticDiskLattice, + intensity_0: float, + decorated_peaks: list = None, + link_moire_disk_intensities: bool = False, + link_disk_parameters: bool = True, + refine_width: bool = True, + edge_width: list = None, + refine_radius: bool = True, + disk_radius: list = None, + name: str = "Moire Lattice", + ): + # ensure both models share the same center coordinate + if (lattice_a.params["x center"] is not lattice_b.params["x center"]) or ( + lattice_a.params["y center"] is not lattice_b.params["y center"] + ): + raise ValueError( + "The center coordinates for each model must be linked, " + "either by passing global_center=True or linking after instantiation." 
+ ) + + self.lattice_a = lattice_a + self.lattice_b = lattice_b + + # construct a 2x4 matrix "M" that transforms the parent lattices into + # the moire lattice vectors + + lat_ab = self._get_parent_lattices(lattice_a, lattice_b) + + # pick the pairing that gives the smallest unit cell + test_peaks = np.stack((lattice_b.u_inds, lattice_b.v_inds), axis=1) + tests = np.stack( + [ + np.hstack((np.eye(2), np.vstack((b1, b2)))) + for b1 in test_peaks + for b2 in test_peaks + if not np.allclose(b1, b2) + ], + axis=0, + ) + # choose only cells where the two unit vectors are not nearly parallel, + # and penalize cells with large discrepancy in lattice vector length + lat_m = tests @ lat_ab + a_dot_b = ( + np.sum(lat_m[:, 0] * lat_m[:, 1], axis=1) + / np.minimum( + np.linalg.norm(lat_m[:, 0], axis=1), np.linalg.norm(lat_m[:, 1], axis=1) + ) + ** 2 + ) + tests = tests[ + np.abs(a_dot_b) < 0.9 + ] # this factor of 0.9 sets the parallel cutoff + # with the parallel vectors filtered, pick the cell with the smallest volume + lat_m = tests @ lat_ab + V = np.sum( + lat_m[:, 0] + * np.cross( + np.hstack((lat_m[:, 1], np.zeros((lat_m.shape[0],))[:, None])), + [0, 0, 1], + )[:, :2], + axis=1, + ) + M = tests[np.argmin(np.abs(V))] + + # ensure the moire vectors are less than 90 deg apart + if np.arccos( + ((M @ lat_ab)[0] @ (M @ lat_ab)[1]) + / (np.linalg.norm((M @ lat_ab)[0]) * np.linalg.norm((M @ lat_ab)[1])) + ) > np.radians(90): + M[1] *= -1.0 + + # ensure they are right-handed + if np.cross(*(M @ lat_ab)) < 0.0: + M = np.flipud(np.eye(2)) @ M + + # store moire construction + self.moire_matrix = M + + # generate the indices of each peak, then find unique peaks + if decorated_peaks is not None: + decorated_peaks = np.array(decorated_peaks) + parent_peaks = np.vstack( + ( + np.concatenate( + (decorated_peaks, np.zeros_like(decorated_peaks)), axis=1 + ), + np.concatenate( + (np.zeros_like(decorated_peaks), decorated_peaks), axis=1 + ), + ) + ) + else: + parent_peaks = np.vstack( + ( + np.concatenate( + ( + np.stack((lattice_a.u_inds, lattice_a.v_inds), axis=1), + np.zeros((lattice_a.u_inds.shape[0], 2)), + ), + axis=1, + ), + np.concatenate( + ( + np.zeros((lattice_b.u_inds.shape[0], 2)), + np.stack((lattice_b.u_inds, lattice_b.v_inds), axis=1), + ), + axis=1, + ), + ) + ) + + # trial indices for moire peaks + mx, my = np.mgrid[-1:2, -1:2] + moire_peaks = np.stack([mx.ravel(), my.ravel()], axis=1)[1:-1] + + # construct a giant index array with columns a_h a_k b_h b_k m_h m_k + parent_expanded = np.zeros((parent_peaks.shape[0], 6)) + parent_expanded[:, :4] = parent_peaks + moire_expanded = np.zeros((moire_peaks.shape[0], 6)) + moire_expanded[:, 4:] = moire_peaks + + all_indices = ( + parent_expanded[:, None, :] + moire_expanded[None, :, :] + ).reshape(-1, 6) + + lat_abm = np.vstack((lat_ab, M @ lat_ab)) + + all_peaks = all_indices @ lat_abm + + _, idx_unique = np.unique(all_peaks, axis=0, return_index=True) + + all_indices = all_indices[idx_unique] + + # remove peaks outside of pattern + Q_Nx = WPF.static_data["Q_Nx"] + Q_Ny = WPF.static_data["Q_Ny"] + all_peaks = all_indices @ lat_abm + all_peaks[:, 0] += lattice_a.params["x center"].initial_value + all_peaks[:, 1] += lattice_a.params["y center"].initial_value + delete_mask = np.logical_or.reduce( + [ + all_peaks[:, 0] < 0.0, + all_peaks[:, 0] >= Q_Nx, + all_peaks[:, 1] < 0.0, + all_peaks[:, 1] >= Q_Ny, + ] + ) + all_indices = all_indices[~delete_mask] + + # remove spots that coincide with primary peaks + parent_spots = parent_peaks @ lat_ab + 
self.moire_indices_uvm = np.array( + [idx for idx in all_indices if (idx @ lat_abm) not in parent_spots] + ) + + self.link_moire_disk_intensities = link_moire_disk_intensities + if link_moire_disk_intensities: + # each order of parent reflection has a separate moire intensity + max_order = int(np.max(np.abs(self.moire_indices_uvm[:, :4]))) + + params = { + f"Order {n} Moire Intensity": Parameter(intensity_0) + for n in range(max_order + 1) + } + else: + params = { + f"a ({ax},{ay}), b ({bx},{by}), moire ({mx},{my}) Intensity": Parameter( + intensity_0 + ) + for ax, ay, bx, by, mx, my in self.moire_indices_uvm + } + + params["x center"] = lattice_a.params["x center"] + params["y center"] = lattice_a.params["y center"] + + # add disk edge and width parameters if needed + if link_disk_parameters: + if (lattice_a.refine_width) and (lattice_b.refine_width): + self.refine_width = True + params["edge width"] = lattice_a.params["edge width"] + if (lattice_a.refine_radius) and (lattice_b.refine_radius): + self.refine_radius = True + params["disk radius"] = lattice_a.params["disk radius"] + else: + self.refine_width = refine_width + if self.refine_width: + params["edge width"] = Parameter(edge_width) + + self.refine_radius = refine_radius + if self.refine_radius: + params["disk radius"] = Parameter(disk_radius) + + # store some data that helps compute the derivatives + selector_matrices = np.eye(8).reshape(-1, 4, 2) + selector_parameters = [ + self.lattice_a.params["ux"], + self.lattice_a.params["uy"], + self.lattice_a.params["vx"], + self.lattice_a.params["vy"], + self.lattice_b.params["ux"], + self.lattice_b.params["uy"], + self.lattice_b.params["vx"], + self.lattice_b.params["vy"], + ] + self.parent_vector_selectors = [ + (p, m) for p, m in zip(selector_parameters, selector_matrices) + ] + + super().__init__( + name, + params, + model_type=WPFModelType.META | WPFModelType.MOIRE, + ) + + def _get_parent_lattices(self, lattice_a, lattice_b): + lat_a = np.array( + [ + [ + lattice_a.params["ux"].initial_value, + lattice_a.params["uy"].initial_value, + ], + [ + lattice_a.params["vx"].initial_value, + lattice_a.params["vy"].initial_value, + ], + ] + ) + + lat_b = np.array( + [ + [ + lattice_b.params["ux"].initial_value, + lattice_b.params["uy"].initial_value, + ], + [ + lattice_b.params["vx"].initial_value, + lattice_b.params["vy"].initial_value, + ], + ] + ) + + return np.vstack((lat_a, lat_b)) + + def func(self, DP: np.ndarray, x: np.ndarray, **static_data): + # construct the moire unit cell from the current vectors + # of the two parent lattices + + lat_ab = self._get_parent_lattices(self.lattice_a, self.lattice_b) + lat_abm = np.vstack((lat_ab, self.moire_matrix @ lat_ab)) + + # grab shared parameters + disk_radius = ( + x[self.params["disk radius"].offset] + if self.refine_radius + else self.disk_radius + ) + + disk_width = ( + x[self.params["edge width"].offset] + if self.refine_width + else self.disk_width + ) + + # compute positions of each moire peak + positions = self.moire_indices_uvm @ lat_abm + positions += np.array( + [x[self.params["x center"].offset], x[self.params["y center"].offset]] + ) + + for (x_pos, y_pos), indices in zip(positions, self.moire_indices_uvm): + # Each peak has an intensity based on the max index of parent lattice + # which it decorates + order = int(np.max(np.abs(indices[:4]))) + + if self.link_moire_disk_intensities: + intensity = x[self.params[f"Order {order} Moire Intensity"].offset] + else: + ax, ay, bx, by, mx, my = indices + intensity = x[ + self.params[ + f"a 
({ax},{ay}), b ({bx},{by}), moire ({mx},{my}) Intensity" + ].offset + ] + + DP += intensity / ( + 1.0 + + np.exp( + np.minimum( + 4 + * ( + np.sqrt( + (static_data["xArray"] - x_pos) ** 2 + + (static_data["yArray"] - y_pos) ** 2 + ) + - disk_radius + ) + / disk_width, + 20, + ) + ) + ) + + def jacobian(self, J: np.ndarray, x: np.ndarray, **static_data): + # construct the moire unit cell from the current vectors + # of the two parent lattices + lat_ab = self._get_parent_lattices(self.lattice_a, self.lattice_b) + lat_abm = np.vstack((lat_ab, self.moire_matrix @ lat_ab)) + + # grab shared parameters + disk_radius = ( + x[self.params["disk radius"].offset] + if self.refine_radius + else self.disk_radius + ) + + disk_width = ( + x[self.params["edge width"].offset] + if self.refine_width + else self.disk_width + ) + + # distance from center coordinate + r = np.maximum( + 5e-1, + static_data["parent"]._get_distance( + x, self.params["x center"], self.params["y center"] + ), + ) + + # compute positions of each moire peak + positions = self.moire_indices_uvm @ lat_abm + positions += np.array( + [x[self.params["x center"].offset], x[self.params["y center"].offset]] + ) + + for (x_pos, y_pos), indices in zip(positions, self.moire_indices_uvm): + # Each peak has an intensity based on the max index of parent lattice + # which it decorates + if self.link_moire_disk_intensities: + order = int(np.max(np.abs(indices[:4]))) + intensity_idx = self.params[f"Order {order} Moire Intensity"].offset + else: + ax, ay, bx, by, mx, my = indices + intensity_idx = self.params[ + f"a ({ax},{ay}), b ({bx},{by}), moire ({mx},{my}) Intensity" + ].offset + disk_intensity = x[intensity_idx] + + r_disk = np.maximum( + 5e-1, + np.sqrt( + (static_data["xArray"] - x_pos) ** 2 + + (static_data["yArray"] - y_pos) ** 2 + ), + ) + + mask = r_disk < (2 * disk_radius) + + # clamp the argument of the exponent at a very large finite value + top_exp = mask * np.exp( + np.minimum(30, 4 * ((mask * r_disk) - disk_radius) / disk_width) + ) + + # dF/d(x0) + dx = ( + 4 + * disk_intensity + * (static_data["xArray"] - x_pos) + * top_exp + / ((1.0 + top_exp) ** 2 * disk_width * r) + ).ravel() + + # dF/d(y0) + dy = ( + 4 + * disk_intensity + * (static_data["yArray"] - y_pos) + * top_exp + / ((1.0 + top_exp) ** 2 * disk_width * r) + ).ravel() + + # insert center position derivatives + J[:, self.params["x center"].offset] += disk_intensity * dx + J[:, self.params["y center"].offset] += disk_intensity * dy + + # insert lattice vector derivatives + for par, mat in self.parent_vector_selectors: + # find the x and y derivatives of the position of this + # disk in terms of each of the parent lattice vectors + d_abm = np.vstack((mat, self.moire_matrix @ mat)) + d_param = indices @ d_abm + J[:, par.offset] += disk_intensity * (d_param[0] * dx + d_param[1] * dy) + + # insert intensity derivative + dI = (mask * (1.0 / (1.0 + top_exp))).ravel() + J[:, intensity_idx] += dI + + # insert disk radius derivative + if self.refine_radius: + dR = ( + 4.0 * disk_intensity * top_exp / (disk_width * (1.0 + top_exp) ** 2) + ).ravel() + J[:, self.params["disk radius"].offset] += dR - # set_trace() + if self.refine_width: + dW = ( + 4.0 + * disk_intensity + * top_exp + * (r_disk - disk_radius) + / (disk_width**2 * (1.0 + top_exp) ** 2) + ).ravel() + J[:, self.params["edge width"].offset] += dW -class ComplexOverlapKernelDiskLattice(WPFModelPrototype): +class ComplexOverlapKernelDiskLattice(WPFModel): def __init__( self, WPF, @@ -570,24 +1111,29 @@ def __init__( v_max: 
int, intensity_0: float, exclude_indices: list = [], + global_center: bool = True, + x0=0.0, + y0=0.0, name="Complex Overlapped Disk Lattice", verbose=False, ): + raise NotImplementedError( + "This model type has not been updated for use with the new architecture." + ) params = {} - # if global_center: - # self.func = self.global_center_func - # self.jacobian = self.global_center_jacobian + self.probe_kernelFT = np.fft.fft2(probe_kernel) - # x0 = WPF.static_data["global_x0"] - # y0 = WPF.static_data["global_y0"] - # else: - # params["x center"] = Parameter(x0) - # params["y center"] = Parameter(y0) - # self.func = self.local_center_func + if global_center: + params["x center"] = WPF.coordinate_model.params["x center"] + params["y center"] = WPF.coordinate_model.params["y center"] + else: + params["x center"] = Parameter(x0) + params["y center"] = Parameter(y0) - self.probe_kernelFT = np.fft.fft2(probe_kernel) + x0 = params["x center"].initial_value + y0 = params["y center"].initial_value params["ux"] = Parameter(ux) params["uy"] = Parameter(uy) @@ -636,24 +1182,15 @@ def __init__( self.u_inds = self.u_inds[~delete_mask] self.v_inds = self.v_inds[~delete_mask] - self.func = self.global_center_func - - super().__init__(name, params) - - def global_center_func(self, DP: np.ndarray, *args, **kwargs) -> None: - # copy the global centers in the right place for the local center generator - self.local_center_func( - DP, kwargs["global_x0"], kwargs["global_y0"], *args, **kwargs - ) - - def local_center_func(self, DP: np.ndarray, *args, **kwargs) -> None: + super().__init__(name, params, model_type=WPFModelType.LATTICE) - x0 = args[0] - y0 = args[1] - ux = args[2] - uy = args[3] - vx = args[4] - vy = args[5] + def func(self, DP: np.ndarray, x_fit, **kwargs) -> None: + x0 = x_fit[self.params["x center"].offset] + y0 = x_fit[self.params["y center"].offset] + ux = x_fit[self.params["ux"].offset] + uy = x_fit[self.params["uy"].offset] + vx = x_fit[self.params["vx"].offset] + vy = x_fit[self.params["vy"].offset] localDP = np.zeros_like(DP, dtype=np.complex64) @@ -662,8 +1199,8 @@ def local_center_func(self, DP: np.ndarray, *args, **kwargs) -> None: y = y0 + (u * uy) + (v * vy) localDP += ( - args[2 * i + 6] - * np.exp(1j * args[2 * i + 7]) + x_fit[self.params[f"[{u},{v}] Intensity"].offset] + * np.exp(1j * x_fit[self.params[f"[{u},{v}] Phase"].offset]) * np.abs( np.fft.ifft2( self.probe_kernelFT @@ -675,7 +1212,7 @@ def local_center_func(self, DP: np.ndarray, *args, **kwargs) -> None: DP += np.abs(localDP) ** 2 -class KernelDiskLattice(WPFModelPrototype): +class KernelDiskLattice(WPFModel): def __init__( self, WPF, @@ -688,24 +1225,25 @@ def __init__( v_max: int, intensity_0: float, exclude_indices: list = [], + global_center: bool = True, + x0=0.0, + y0=0.0, name="Custom Kernel Disk Lattice", verbose=False, ): - params = {} - # if global_center: - # self.func = self.global_center_func - # self.jacobian = self.global_center_jacobian + self.probe_kernelFT = np.fft.fft2(probe_kernel) - # x0 = WPF.static_data["global_x0"] - # y0 = WPF.static_data["global_y0"] - # else: - # params["x center"] = Parameter(x0) - # params["y center"] = Parameter(y0) - # self.func = self.local_center_func + if global_center: + params["x center"] = WPF.coordinate_model.params["x center"] + params["y center"] = WPF.coordinate_model.params["y center"] + else: + params["x center"] = Parameter(x0) + params["y center"] = Parameter(y0) - self.probe_kernelFT = np.fft.fft2(probe_kernel) + x0 = params["x center"].initial_value + y0 = 
params["y center"].initial_value params["ux"] = Parameter(ux) params["uy"] = Parameter(uy) @@ -724,16 +1262,8 @@ def __init__( self.xqArray = np.tile(np.fft.fftfreq(Q_Nx)[:, np.newaxis], (1, Q_Ny)) for i, (u, v) in enumerate(zip(u_inds.ravel(), v_inds.ravel())): - x = ( - WPF.static_data["global_x0"] - + (u * params["ux"].initial_value) - + (v * params["vx"].initial_value) - ) - y = ( - WPF.static_data["global_y0"] - + (u * params["uy"].initial_value) - + (v * params["vy"].initial_value) - ) + x = x0 + (u * params["ux"].initial_value) + (v * params["vx"].initial_value) + y = y0 + (u * params["uy"].initial_value) + (v * params["vy"].initial_value) if [u, v] in exclude_indices: delete_mask[i] = True elif (x < 0) or (x > Q_Nx) or (y < 0) or (y > Q_Ny): @@ -748,31 +1278,22 @@ def __init__( self.u_inds = self.u_inds[~delete_mask] self.v_inds = self.v_inds[~delete_mask] - self.func = self.global_center_func - - super().__init__(name, params) - - def global_center_func(self, DP: np.ndarray, *args, **kwargs) -> None: - # copy the global centers in the right place for the local center generator - self.local_center_func( - DP, kwargs["global_x0"], kwargs["global_y0"], *args, **kwargs - ) - - def local_center_func(self, DP: np.ndarray, *args, **kwargs) -> None: + super().__init__(name, params, model_type=WPFModelType.LATTICE) - x0 = args[0] - y0 = args[1] - ux = args[2] - uy = args[3] - vx = args[4] - vy = args[5] + def func(self, DP: np.ndarray, x_fit: np.ndarray, **static_data) -> None: + x0 = x_fit[self.params["x center"].offset] + y0 = x_fit[self.params["y center"].offset] + ux = x_fit[self.params["ux"].offset] + uy = x_fit[self.params["uy"].offset] + vx = x_fit[self.params["vx"].offset] + vy = x_fit[self.params["vy"].offset] for i, (u, v) in enumerate(zip(self.u_inds, self.v_inds)): x = x0 + (u * ux) + (v * vx) y = y0 + (u * uy) + (v * vy) DP += ( - args[i + 6] + x_fit[self.params[f"[{u},{v}] Intensity"].offset] * np.abs( np.fft.ifft2( self.probe_kernelFT diff --git a/py4DSTEM/process/wholepatternfit/wpf.py b/py4DSTEM/process/wholepatternfit/wpf.py index 542fb1b14..f206004b4 100644 --- a/py4DSTEM/process/wholepatternfit/wpf.py +++ b/py4DSTEM/process/wholepatternfit/wpf.py @@ -1,18 +1,25 @@ +from __future__ import annotations from py4DSTEM import DataCube, RealSlice from emdfile import tqdmnd -from py4DSTEM.process.wholepatternfit.wp_models import WPFModelPrototype +from py4DSTEM.process.wholepatternfit.wp_models import ( + WPFModel, + _BaseModel, + WPFModelType, + Parameter, +) from typing import Optional import numpy as np -from scipy.optimize import least_squares, minimize +from scipy.optimize import least_squares import matplotlib.pyplot as plt import matplotlib.colors as mpl_c from matplotlib.gridspec import GridSpec -import warnings -class WholePatternFit: +__all__ = ["WholePatternFit"] + +class WholePatternFit: from py4DSTEM.process.wholepatternfit.wpf_viz import ( show_model_grid, show_lattice_points, @@ -27,7 +34,6 @@ def __init__( mask: Optional[np.ndarray] = None, use_jacobian: bool = True, meanCBED: Optional[np.ndarray] = None, - fit_power: float = 1, ): """ Perform pixelwise fits using composable models and numerical optimization. @@ -63,8 +69,6 @@ def __init__( meanCBED: Optional np.ndarray, used to specify the diffraction pattern used for initial refinement of the parameters. 
If not specified, the average across all scan positions is computed - fit_power: float, diffraction patterns are raised to this power, sets the gamma - level at which the patterns are compared """ self.datacube = datacube @@ -72,39 +76,44 @@ def __init__( meanCBED if meanCBED is not None else np.mean(datacube.data, axis=(0, 1)) ) # Global scaling parameter - self.intensity_scale = 1/np.mean(self.meanCBED) + self.intensity_scale = 1 / np.mean(self.meanCBED) self.mask = mask if mask is not None else np.ones_like(self.meanCBED) - self.model = [] - self.model_param_inds = [] - - self.nParams = 0 - self.use_jacobian = use_jacobian - if hasattr(x0, "__iter__") and hasattr(y0, "__iter__"): x0 = np.array(x0) y0 = np.array(y0) if x0.size == 2: - self.global_xy0_lb = np.array([x0[0] - x0[1], y0[0] - y0[1]]) - self.global_xy0_ub = np.array([x0[0] + x0[1], y0[0] + y0[1]]) + global_xy0_lb = np.array([x0[0] - x0[1], y0[0] - y0[1]]) + global_xy0_ub = np.array([x0[0] + x0[1], y0[0] + y0[1]]) elif x0.size == 3: - self.global_xy0_lb = np.array([x0[1], y0[1]]) - self.global_xy0_ub = np.array([x0[2], y0[2]]) + global_xy0_lb = np.array([x0[1], y0[1]]) + global_xy0_ub = np.array([x0[2], y0[2]]) else: - self.global_xy0_lb = np.array([0.0, 0.0]) - self.global_xy0_ub = np.array([datacube.Q_Nx, datacube.Q_Ny]) + global_xy0_lb = np.array([0.0, 0.0]) + global_xy0_ub = np.array([datacube.Q_Nx, datacube.Q_Ny]) x0 = x0[0] y0 = y0[0] else: - self.global_xy0_lb = np.array([0.0, 0.0]) - self.global_xy0_ub = np.array([datacube.Q_Nx, datacube.Q_Ny]) + global_xy0_lb = np.array([0.0, 0.0]) + global_xy0_ub = np.array([datacube.Q_Nx, datacube.Q_Ny]) - # set up the global arguments - self._setup_static_data(x0,y0) + # The WPF object holds a special Model that manages the shareable center coordinates + self.coordinate_model = _BaseModel( + x0=(x0, global_xy0_lb[0], global_xy0_ub[0]), + y0=(y0, global_xy0_lb[1], global_xy0_ub[1]), + ) - self.fit_power = fit_power + self.model = [ + self.coordinate_model, + ] + + self.nParams = 0 + self.use_jacobian = use_jacobian + + # set up the global arguments + self._setup_static_data() # for debugging: tracks all function evals self._track = False @@ -112,33 +121,111 @@ def __init__( self._xevals = [] # self._cost_history = [] - def add_model(self, model: WPFModelPrototype): + def add_model(self, model: WPFModel): + """ + Add a WPFModel to the current model + + Parameters + ---------- + model: WPFModel + model to add to the fitting routine + """ self.model.append(model) - # keep track of where each model's parameter list begins - self.model_param_inds.append(self.nParams) self.nParams += len(model.params.keys()) - self._scrape_model_params() + self._finalize_model() - def add_model_list(self, model_list): + def add_model_list(self, model_list: list[WPFModel]): + """ + Add multiple WPFModel objects to the current model + + Parameters + ---------- + model: list[WPFModel] + models to add to the fitting routine + """ for m in model_list: self.add_model(m) - def generate_initial_pattern(self): + def link_parameters( + self, + parent_model: WPFModel, + child_model: WPFModel | list[WPFModel], + parameters: str | list[str], + ): + """ + Link parameters of separate models together. The parameters of + the child_model are replaced with the parameters of the parent_model. + Note, this does not add the models to the WPF object, that must + be performed separately. 
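+
+        For example, to make two hypothetical lattice models share their disk
+        shape Parameters (a usage sketch, not part of this change; assumes both
+        models were created with refine_radius=True and refine_width=True):
+
+            wpf.link_parameters(lattice_a, lattice_b, ["disk radius", "edge width"])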
+ + Parameters + ---------- + parent_model: WPFModel + model from which parameters will be copied + child_model: WPFModel or list of WPFModels + model(s) whose independent parameters are to be linked + with those of the parent_model + parameters: str or list of str + names of parameters to be linked + """ + # Make sure child_model and parameters are iterable + child_model = ( + [ + child_model, + ] + if not hasattr(child_model, "__iter__") + else child_model + ) + + parameters = ( + [ + parameters, + ] + if not hasattr(parameters, "__iter__") + else parameters + ) + + for child in child_model: + for par in parameters: + child.params[par] = parent_model.params[par] + + def generate_initial_pattern(self) -> np.ndarray: + """ + Generate a diffraction pattern using the initial parameter + guesses for each model component + + Returns + ------- + initial_pattern: np.ndarray + + """ # update parameters: - self._scrape_model_params() + self._finalize_model() return self._pattern(self.x0, self.static_data.copy()) / self.intensity_scale def fit_to_mean_CBED(self, **fit_opts): + """ + Fit model parameters to the mean CBED pattern + Parameters + ---------- + fit_opts: keyword arguments passed to scipy.optimize.least_squares + + Returns + ------- + optimizer_result: dict + Output of scipy.optimize.least_squares + (also stored in self.mean_CBED_fit) + + """ # first make sure we have the latest parameters - self._scrape_model_params() + self._finalize_model() # set the current active pattern to the mean CBED: current_pattern = self.meanCBED * self.intensity_scale - shared_data = self.static_data.copy() self._fevals = [] self._xevals = [] @@ -147,6 +234,7 @@ def fit_to_mean_CBED(self, **fit_opts): default_opts = { "method": "trf", "verbose": 1, + "x_scale": "jac", } default_opts.update(fit_opts) @@ -156,7 +244,7 @@ def fit_to_mean_CBED(self, **fit_opts): self.x0, jac=self._jacobian, bounds=(self.lower_bound, self.upper_bound), - args=(current_pattern, shared_data), + args=(current_pattern, self.static_data), **default_opts, ) else: @@ -164,7 +252,7 @@ def fit_to_mean_CBED(self, **fit_opts): self._pattern_error, self.x0, bounds=(self.lower_bound, self.upper_bound), - args=(current_pattern, shared_data), + args=(current_pattern, self.static_data), **default_opts, ) @@ -181,18 +269,19 @@ def fit_to_mean_CBED(self, **fit_opts): ax.set_xlabel("Iterations") ax.set_yscale("log") - DP = self._pattern(self.mean_CBED_fit.x, shared_data) / self.intensity_scale + DP = ( + self._pattern(self.mean_CBED_fit.x, self.static_data) / self.intensity_scale + ) ax = fig.add_subplot(gs[0, 1]) CyRd = mpl_c.LinearSegmentedColormap.from_list( "CyRd", ["#00ccff", "#ffffff", "#ff0000"] ) - im = ax.matshow( + ax.matshow( err_im := -(DP - self.meanCBED), cmap=CyRd, vmin=-np.abs(err_im).max() / 4, vmax=np.abs(err_im).max() / 4, ) - # fig.colorbar(im) ax.set_title("Error") ax.axis("off") @@ -209,10 +298,15 @@ def fit_to_mean_CBED(self, **fit_opts): return opt def fit_all_patterns( - self, - resume = False, - **fit_opts - ): + self, + resume: bool = False, + real_space_mask: Optional[np.ndarray] = None, + show_fit_metrics: bool = True, + distributed: bool = True, + num_jobs: int = None, + threads_per_job: int = 1, + **fit_opts, + ): """ Apply model fitting to all patterns. @@ -220,6 +314,18 @@ def fit_all_patterns( ---------- resume: bool (optional) Set to true to continue a previous fit with more iterations. 
+ real_space_mask: np.ndarray of bools (optional) + Only perform the fitting on a subset of the probe positions, + where real_space_mask[rx,ry] == True. + distributed: bool (optional) + Whether to evaluate using a pool of worker threads + num_jobs: int (optional) + number of parallel worker threads to launch if distributed=True + Defaults to number of CPU cores + threads_per_job: int (optional) + number of threads for each parallel job. If num_jobs is not specified, + the number of workers is automatically chosen so as to not oversubscribe + the cores (num_jobs = CPU_count // threads_per_job) fit_opts: args (optional) args passed to scipy.optimize.least_squares @@ -228,12 +334,12 @@ def fit_all_patterns( fit_data: RealSlice Fitted coefficients for all probe positions fit_metrics: RealSlice - Fitting metrixs for all probe positions + Fitting metrics for all probe positions """ # make sure we have the latest parameters - self._scrape_model_params() + unique_params, unique_names = self._finalize_model() # set tracking off self._track = False @@ -242,124 +348,144 @@ def fit_all_patterns( if resume: assert hasattr(self, "fit_data"), "No existing data resuming fit!" - fit_data = np.zeros((self.datacube.R_Nx, self.datacube.R_Ny, self.x0.shape[0])) - fit_metrics = np.zeros((self.datacube.R_Nx, self.datacube.R_Ny, 4)) - - for rx, ry in tqdmnd(self.datacube.R_Nx, self.datacube.R_Ny): - current_pattern = self.datacube.data[rx, ry, :, :] * self.intensity_scale - shared_data = self.static_data.copy() - self._cost_history = ( - [] - ) # clear this so it doesn't grow: TODO make this not stupid + # init + fit_data = np.zeros((self.x0.shape[0], self.datacube.R_Nx, self.datacube.R_Ny)) + fit_metrics = np.zeros((4, self.datacube.R_Nx, self.datacube.R_Ny)) - try: - x0 = self.fit_data.data[rx, ry] if resume else self.x0 + # Default fitting options + default_opts = { + "method": "trf", + "verbose": 0, + "x_scale": "jac", + } + default_opts.update(fit_opts) - if self.hasJacobian & self.use_jacobian: - opt = least_squares( - self._pattern_error, - x0, - jac=self._jacobian, - bounds=(self.lower_bound, self.upper_bound), - args=(current_pattern, shared_data), - **fit_opts, - ) - else: - opt = least_squares( - self._pattern_error, - x0, - bounds=(self.lower_bound, self.upper_bound), - args=(current_pattern, shared_data), - **fit_opts, + # Masking function + if real_space_mask is None: + mask = np.ones( + (self.datacube.R_Nx, self.datacube.R_Ny), + dtype=bool, + ) + else: + mask = real_space_mask + + # Loop over probe positions + if not distributed: + for rx, ry in tqdmnd(self.datacube.R_Nx, self.datacube.R_Ny): + if mask[rx, ry]: + current_pattern = ( + self.datacube.data[rx, ry, :, :] * self.intensity_scale ) + x0 = self.fit_data.data[rx, ry] if resume else self.x0 + + try: + if self.hasJacobian & self.use_jacobian: + opt = least_squares( + self._pattern_error, + x0, + jac=self._jacobian, + bounds=(self.lower_bound, self.upper_bound), + args=(current_pattern, self.static_data), + **default_opts, + ) + else: + opt = least_squares( + self._pattern_error, + x0, + bounds=(self.lower_bound, self.upper_bound), + args=(current_pattern, self.static_data), + **default_opts, + ) + + fit_data_single = opt.x + fit_metrics_single = [ + opt.cost, + opt.optimality, + opt.nfev, + opt.status, + ] + except Exception as err: + fit_data_single = x0 + fit_metrics_single = [0, 0, 0, -2] + + fit_data[:, rx, ry] = fit_data_single + fit_metrics[:, rx, ry] = fit_metrics_single - fit_data[rx, ry, :] = opt.x - fit_metrics[rx, ry, :] = [ - 
-                    opt.optimality,
-                    opt.nfev,
-                    opt.status,
-                ]
-            # except LinAlgError as err:
-            # added so that sending an interupt or keyboard interupt breaks out of the for loop rather than just the probe
-            except InterruptedError:
-                break
-            except KeyboardInterrupt:
-                break
-            except:
-                warnings.warn(f'Fit on position ({rx,ry}) failed with error')
-
-
-        # Convert to RealSlices
-        model_names = []
-        for m in self.model:
-            n = m.name
-            if n in model_names:
-                i = 1
-                while n in model_names:
-                    n = m.name + "_" + str(i)
-                    i += 1
-            model_names.append(n)
-
-        param_names = ["global_x0", "global_y0"] + [
-            n + "/" + k
-            for m, n in zip(self.model, model_names)
-            for k in m.params.keys()
-        ]
+        else:
+            # distributed evaluation
+            self._fit_distributed(
+                resume=resume,
+                real_space_mask=mask,
+                num_jobs=num_jobs,
+                threads_per_job=threads_per_job,
+                fit_opts=default_opts,
+                fit_data=fit_data,
+                fit_metrics=fit_metrics,
+            )
 
-        self.fit_data = RealSlice(fit_data, name="Fit Data", slicelabels=param_names)
+        self.fit_data = RealSlice(fit_data, name="Fit Data", slicelabels=unique_names)
         self.fit_metrics = RealSlice(
             fit_metrics,
             name="Fit Metrics",
             slicelabels=["cost", "optimality", "nfev", "status"],
        )
 
-        self.show_fit_metrics()
+        if show_fit_metrics:
+            self.show_fit_metrics()
 
         return self.fit_data, self.fit_metrics
 
     def accept_mean_CBED_fit(self):
+        """
+        Sets the parameters optimized by fitting to mean CBED
+        as the initial guess for each of the component models.
+        """
         x = self.mean_CBED_fit.x
 
-        self.static_data["global_x0"] = x[0]
-        self.static_data["global_y0"] = x[1]
-        self.static_data["global_r"] = np.hypot(
-            (self.static_data["xArray"] - x[0]), (self.static_data["yArray"] - x[1])
-        )
+        for model in self.model:
+            for param in model.params.values():
+                param.initial_value = x[param.offset]
 
-        for i, m in enumerate(self.model):
-            ind = self.model_param_inds[i] + 2
-            for j, k in enumerate(m.params.keys()):
-                m.params[k].initial_value = x[ind + j]
+    def get_lattice_maps(self) -> list[RealSlice]:
+        """
+        Get the fitted reciprocal lattice vectors refined at each scan point.
 
-    def get_lattice_maps(self):
+        Returns
+        -------
+        g_maps: list[RealSlice]
+            RealSlice objects containing the lattice data for each scan position
+        """
         assert hasattr(self, "fit_data"), "Please run fitting first!"
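+        # each Parameter's .offset indexes the first axis of fit_data, which is
+        # how the fitted lattice vectors are gathered per probe position below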
- lattices = [ - (i, m) - for i, m in enumerate(self.model) - if "lattice" in type(m).__name__.lower() - ] + lattices = [m for m in self.model if WPFModelType.LATTICE in m.model_type] g_maps = [] - for (i, l) in lattices: - param_list = list(l.params.keys()) - lattice_offset = param_list.index("ux") - data_offset = self.model_param_inds[i] + 2 + lattice_offset - - # TODO: Use proper RealSlice semantics for access - data = self.fit_data.data[:, :, data_offset : data_offset + 4] + for lat in lattices: + data = np.stack( + [ + self.fit_data.data[lat.params["ux"].offset], + self.fit_data.data[lat.params["uy"].offset], + self.fit_data.data[lat.params["vx"].offset], + self.fit_data.data[lat.params["vy"].offset], + self.fit_metrics["status"].data + >= 0, # negative status indicates fit error + ], + axis=0, + ) g_map = RealSlice( - np.dstack((data, np.ones(data.shape[:2], dtype=np.bool_))), + data, slicelabels=["g1x", "g1y", "g2x", "g2y", "mask"], - name=l.name, + name=lat.name, ) g_maps.append(g_map) return g_maps - def _setup_static_data(self,x0,y0): + def _setup_static_data(self): + """ + Generate basic data that each model can access during the fitting routine + """ self.static_data = {} xArray, yArray = np.mgrid[0 : self.datacube.Q_Nx, 0 : self.datacube.Q_Ny] @@ -369,29 +495,30 @@ def _setup_static_data(self,x0,y0): self.static_data["Q_Nx"] = self.datacube.Q_Nx self.static_data["Q_Ny"] = self.datacube.Q_Ny - self.static_data["global_x0"] = x0 - self.static_data["global_y0"] = y0 - self.static_data["global_r"] = np.hypot( - (self.static_data["xArray"] - x0), - (self.static_data["yArray"] - y0), - ) + self.static_data["parent"] = self - def _pattern_error(self, x, current_pattern, shared_data): - - DP = np.zeros((self.datacube.Q_Nx, self.datacube.Q_Ny)) + def _get_distance(self, params: np.ndarray, x: Parameter, y: Parameter): + """ + Return the distance from a point in pixel coordinates specified + by two Parameter objects. 
+        This method is intended to cache the result from the _BaseModel for
+        performance; the caching itself is not yet implemented (see TODO below).
+        """
+        if (
+            x is self.model[0].params["x center"]
+            and y is self.model[0].params["y center"]
+        ):
+            # TODO: actually implement caching
+            pass
 
-        shared_data["global_x0"] = x[0]
-        shared_data["global_y0"] = x[1]
-        shared_data["global_r"] = np.hypot(
-            (shared_data["xArray"] - x[0]),
-            (shared_data["yArray"] - x[1]),
+        return np.hypot(
+            self.static_data["xArray"] - params[x.offset],
+            self.static_data["yArray"] - params[y.offset],
         )
 
-        for i, m in enumerate(self.model):
-            ind = self.model_param_inds[i] + 2
-            m.func(DP, *x[ind : ind + m.nParams].tolist(), **shared_data)
+    def _pattern_error(self, x, current_pattern, shared_data):
+        DP = self._pattern(x, shared_data)
 
-        DP = (DP**self.fit_power - current_pattern**self.fit_power) * self.mask
+        DP = (DP - current_pattern) * self.mask
 
         if self._track:
             self._fevals.append(DP)
@@ -401,58 +528,183 @@ def _pattern_error(self, x, current_pattern, shared_data):
         return DP.ravel()
 
     def _pattern(self, x, shared_data):
         DP = np.zeros((self.datacube.Q_Nx, self.datacube.Q_Ny))
 
-        shared_data["global_x0"] = x[0]
-        shared_data["global_y0"] = x[1]
-        shared_data["global_r"] = np.hypot(
-            (shared_data["xArray"] - x[0]),
-            (shared_data["yArray"] - x[1]),
-        )
-
-        for i, m in enumerate(self.model):
-            ind = self.model_param_inds[i] + 2
-            m.func(DP, *x[ind : ind + m.nParams].tolist(), **shared_data)
+        for m in self.model:
+            m.func(DP, x, **shared_data)
 
-        return (DP**self.fit_power) * self.mask
+        return DP * self.mask
 
     def _jacobian(self, x, current_pattern, shared_data):
         # TODO: automatic mixed analytic/finite difference
-        J = np.zeros(((self.datacube.Q_Nx * self.datacube.Q_Ny), self.nParams + 2))
+        J = np.zeros(((self.datacube.Q_Nx * self.datacube.Q_Ny), self.nParams))
 
-        shared_data["global_x0"] = x[0]
-        shared_data["global_y0"] = x[1]
-        shared_data["global_r"] = np.hypot(
-            (shared_data["xArray"] - x[0]),
-            (shared_data["yArray"] - x[1]),
-        )
-
-        for i, m in enumerate(self.model):
-            ind = self.model_param_inds[i] + 2
-            m.jacobian(J, *x[ind : ind + m.nParams].tolist(), offset=ind, **shared_data)
+        for m in self.model:
+            m.jacobian(J, x, **shared_data)
 
         return J * self.mask.ravel()[:, np.newaxis]
 
-    def _scrape_model_params(self):
+    def _finalize_model(self):
+        # iterate over all models and assign indices, accumulate list
+        # of unique parameters. Then, accumulate initial value and bounds vectors
+
+        # get unique names for each model
+        model_names = []
+        for m in self.model:
+            n = m.name
+            if n in model_names:
+                i = 1
+                while n in model_names:
+                    n = m.name + "_" + str(i)
+                    i += 1
+            model_names.append(n)
 
-        self.x0 = np.zeros((self.nParams + 2,))
-        self.upper_bound = np.zeros_like(self.x0)
-        self.lower_bound = np.zeros_like(self.x0)
+        unique_params = []
+        unique_names = []
+        idx = 0
+        for model, model_name in zip(self.model, model_names):
+            for param_name, param in model.params.items():
+                if param not in unique_params:
+                    unique_params.append(param)
+                    unique_names.append(model_name + "/" + param_name)
+                    param.offset = idx
+                    idx += 1
+
+        self.x0 = np.array([param.initial_value for param in unique_params])
+        self.upper_bound = np.array([param.upper_bound for param in unique_params])
+        self.lower_bound = np.array([param.lower_bound for param in unique_params])
 
-        self.x0[0:2] = np.array(
-            [self.static_data["global_x0"], self.static_data["global_y0"]]
-        )
-        self.upper_bound[0:2] = self.global_xy0_ub
-        self.lower_bound[0:2] = self.global_xy0_lb
+        self.hasJacobian = all([m.hasJacobian for m in self.model])
 
-        for i, m in enumerate(self.model):
-            ind = self.model_param_inds[i] + 2
+        self.nParams = self.x0.shape[0]
 
-            for j, v in enumerate(m.params.values()):
-                self.x0[ind + j] = v.initial_value
-                self.upper_bound[ind + j] = v.upper_bound
-                self.lower_bound[ind + j] = v.lower_bound
+        return unique_params, unique_names
 
-        self.hasJacobian = all([m.hasJacobian for m in self.model])
+    def _fit_single_pattern(
+        self,
+        data: np.ndarray,
+        initial_guess: np.ndarray,
+        mask: bool,
+        fit_opts,
+    ):
+        """
+        Apply model fitting to one pattern.
+
+        Parameters
+        ----------
+        data: np.ndarray
+            Diffraction pattern
+        initial_guess: np.ndarray
+            starting guess for fitting
+        mask: bool
+            Fitting is skipped if mask is False, and default values are returned
+        fit_opts:
+            args passed to scipy.optimize.least_squares
+
+        Returns
+        -------
+        fit_coefs: np.array
+            Fitted coefficients
+        fit_metrics: np.array
+            Fitting metrics
+
+        """
+        if mask:
+            try:
+                if self.hasJacobian & self.use_jacobian:
+                    opt = least_squares(
+                        self._pattern_error,
+                        initial_guess,
+                        jac=self._jacobian,
+                        bounds=(self.lower_bound, self.upper_bound),
+                        args=(data * self.intensity_scale, self.static_data),
+                        **fit_opts,
+                    )
+                else:
+                    opt = least_squares(
+                        self._pattern_error,
+                        initial_guess,
+                        bounds=(self.lower_bound, self.upper_bound),
+                        args=(data * self.intensity_scale, self.static_data),
+                        **fit_opts,
+                    )
+
+                fit_coefs = opt.x
+                fit_metrics_single = [
+                    opt.cost,
+                    opt.optimality,
+                    opt.nfev,
+                    opt.status,
+                ]
+            except Exception:
+                # fit failed: keep the initial guess and flag the error status
+                fit_coefs = initial_guess
+                fit_metrics_single = [0, 0, 0, -2]
+
+            return fit_coefs, fit_metrics_single
+        else:
+            return np.zeros_like(initial_guess), [0, 0, 0, 0]
+
+    def _fit_distributed(
+        self,
+        fit_opts: dict,
+        fit_data: np.ndarray,
+        fit_metrics: np.ndarray,
+        real_space_mask: np.ndarray,
+        resume=False,
+        num_jobs=None,
+        threads_per_job=1,
+    ):
+        """
+        Run fitting using multiprocessing to fit several patterns in parallel
+        """
+        from mpire import WorkerPool, cpu_count
+        from threadpoolctl import threadpool_limits
+
+        # prevent oversubscription when using multiple threads per job
+        num_jobs = num_jobs or cpu_count() // threads_per_job
+
+        def f(shared_data, args):
+            with threadpool_limits(limits=threads_per_job):
+                return self._fit_single_pattern(**args, fit_opts=shared_data)
+
+        # hopefully the data entries remain as views until
dispatch time... + fit_inputs = [ + ( + { + "data": self.datacube[rx, ry], + "initial_guess": self.fit_data[rx, ry] if resume else self.x0, + "mask": real_space_mask[rx, ry], + }, + ) + for rx in range(self.datacube.R_Nx) + for ry in range(self.datacube.R_Ny) + ] + + with WorkerPool( + n_jobs=num_jobs, + shared_objects=fit_opts, + ) as pool: + results = pool.map( + f, + fit_inputs, + progress_bar=True, + ) + + for (rx, ry), res in zip( + np.ndindex((self.datacube.R_Nx, self.datacube.R_Ny)), results + ): + fit_data[:, rx, ry] = res[0] + fit_metrics[:, rx, ry] = res[1] + + def __getstate__(self): + # Prevent pickling from copying the datacube, so that distributed + # evaluation does not balloon memory usage. + # Copy the object's state from self.__dict__ which contains + # all our instance attributes. Always use the dict.copy() + # method to avoid modifying the original state. + state = self.__dict__.copy() + # Remove the unpicklable entries. + del state["datacube"] + return state diff --git a/py4DSTEM/process/wholepatternfit/wpf_viz.py b/py4DSTEM/process/wholepatternfit/wpf_viz.py index da972ca33..436ae40a2 100644 --- a/py4DSTEM/process/wholepatternfit/wpf_viz.py +++ b/py4DSTEM/process/wholepatternfit/wpf_viz.py @@ -5,26 +5,15 @@ import matplotlib.colors as mpl_c from matplotlib.gridspec import GridSpec +from py4DSTEM.process.wholepatternfit.wp_models import WPFModelType + + def show_model_grid(self, x=None, **plot_kwargs): - if x is None: - x = self.mean_CBED_fit.x - - shared_data = self.static_data.copy() - shared_data["global_x0"] = x[0] - shared_data["global_y0"] = x[1] - shared_data["global_r"] = np.hypot( - (shared_data["xArray"] - x[0]), - (shared_data["yArray"] - x[1]), - ) + x = self.mean_CBED_fit.x if x is None else x - shared_data["global_x0"] = x[0] - shared_data["global_y0"] = x[1] - shared_data["global_r"] = np.hypot( - (shared_data["xArray"] - x[0]), - (shared_data["yArray"] - x[1]), - ) + model = [m for m in self.model if WPFModelType.DUMMY not in m.model_type] - N = len(self.model) + N = len(model) cols = int(np.ceil(np.sqrt(N))) rows = (N + 1) // cols @@ -32,30 +21,80 @@ def show_model_grid(self, x=None, **plot_kwargs): kwargs.update(plot_kwargs) fig, ax = plt.subplots(rows, cols, **kwargs) - for i, (a, m) in enumerate(zip(ax.flat, self.model)): + for a, m in zip(ax.flat, model): DP = np.zeros((self.datacube.Q_Nx, self.datacube.Q_Ny)) - ind = self.model_param_inds[i] + 2 - m.func(DP, *x[ind : ind + m.nParams].tolist(), **shared_data) + m.func(DP, x, **self.static_data) a.matshow(DP, cmap="turbo") - a.text(0.5, 0.92, m.name, transform=a.transAxes, ha="center", va="center") + + # Determine if text color should be white or black + int_range = np.array((np.min(DP), np.max(DP))) + if int_range[0] != int_range[1]: + r = (np.mean(DP[: DP.shape[0] // 10, :]) - int_range[0]) / ( + int_range[1] - int_range[0] + ) + if r < 0.5: + color = "w" + else: + color = "k" + else: + color = "w" + + a.text( + 0.5, + 0.92, + m.name, + transform=a.transAxes, + ha="center", + va="center", + color=color, + ) for a in ax.flat: a.axis("off") plt.show() + def show_lattice_points( - self, - im = None, - vmin = None, - vmax = None, - power = None, - returnfig=False, - *args, - **kwargs - ): + self, + im=None, + vmin=None, + vmax=None, + power=None, + show_vectors=True, + crop_to_pattern=False, + returnfig=False, + moire_origin_idx=[0, 0, 0, 0], + *args, + **kwargs, +): """ Plotting utility to show the initial lattice points. 
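+
+    A minimal usage sketch (assuming, as the `self` argument implies, that these
+    helpers are bound as methods of a WholePatternFit instance `wpf`):
+        wpf.show_lattice_points(crop_to_pattern=True)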
+ + Parameters + ---------- + im: np.ndarray + Optional: Image to show, defaults to mean CBED + vmin, vmax: float + Intensity ranges for plotting im + power: float + Gamma level for showing im + show_vectors: bool + Flag to plot the lattice vectors + crop_to_pattern: bool + Flag to limit the field of view to the pattern area. If False, + spots outside the pattern are shown + returnfig: bool + If True, (fig,ax) are returned and plt.show() is not called + moire_origin_idx: list of length 4 + Indices of peak on which to draw Moire vectors, written as + [a_u, a_v, b_u, b_v] + args, kwargs + Passed to plt.subplots + + Returns + ------- + fig,ax: If returnfig=True """ if im is None: @@ -66,41 +105,111 @@ def show_lattice_points( fig, ax = plt.subplots(*args, **kwargs) if vmin is None and vmax is None: ax.matshow( - im**power, + im**power, cmap="gray", - ) + ) else: ax.matshow( im**power, - vmin = vmin, - vmax = vmax, + vmin=vmin, + vmax=vmax, cmap="gray", + ) + + lattices = [m for m in self.model if WPFModelType.LATTICE in m.model_type] + + for m in lattices: + ux, uy = m.params["ux"].initial_value, m.params["uy"].initial_value + vx, vy = m.params["vx"].initial_value, m.params["vy"].initial_value + + lat = np.array([[ux, uy], [vx, vy]]) + inds = np.stack([m.u_inds, m.v_inds], axis=1) + + spots = inds @ lat + spots[:, 0] += m.params["x center"].initial_value + spots[:, 1] += m.params["y center"].initial_value + + axpts = ax.scatter( + spots[:, 1], + spots[:, 0], + s=100, + marker="x", + label=m.name, + ) + + if show_vectors: + ax.arrow( + m.params["y center"].initial_value, + m.params["x center"].initial_value, + m.params["uy"].initial_value, + m.params["ux"].initial_value, + length_includes_head=True, + color=axpts.get_facecolor(), + width=1.0, + ) + + ax.arrow( + m.params["y center"].initial_value, + m.params["x center"].initial_value, + m.params["vy"].initial_value, + m.params["vx"].initial_value, + length_includes_head=True, + color=axpts.get_facecolor(), + width=1.0, ) - for m in self.model: - if "Lattice" in m.name: - ux, uy = m.params["ux"].initial_value, m.params["uy"].initial_value - vx, vy = m.params["vx"].initial_value, m.params["vy"].initial_value + moires = [m for m in self.model if WPFModelType.MOIRE in m.model_type] + + for m in moires: + lat_ab = m._get_parent_lattices(m.lattice_a, m.lattice_b) + lat_abm = np.vstack((lat_ab, m.moire_matrix @ lat_ab)) - lat = np.array([[ux, uy], [vx, vy]]) - inds = np.stack([m.u_inds, m.v_inds], axis=1) + spots = m.moire_indices_uvm @ lat_abm + spots[:, 0] += m.params["x center"].initial_value + spots[:, 1] += m.params["y center"].initial_value - spots = inds @ lat - spots[:, 0] += self.static_data["global_x0"] - spots[:, 1] += self.static_data["global_y0"] + axpts = ax.scatter( + spots[:, 1], + spots[:, 0], + s=100, + marker="+", + label=m.name, + ) + + if show_vectors: + arrow_origin = np.array(moire_origin_idx) @ lat_ab + arrow_origin[0] += m.params["x center"].initial_value + arrow_origin[1] += m.params["y center"].initial_value + + ax.arrow( + arrow_origin[1], + arrow_origin[0], + lat_abm[4, 1], + lat_abm[4, 0], + length_includes_head=True, + color=axpts.get_facecolor(), + width=1.0, + ) - ax.scatter( - spots[:, 1], - spots[:, 0], - s = 100, - marker="x", - label=m.name, - ) + ax.arrow( + arrow_origin[1], + arrow_origin[0], + lat_abm[5, 1], + lat_abm[5, 0], + length_includes_head=True, + color=axpts.get_facecolor(), + width=1.0, + ) ax.legend() + if crop_to_pattern: + ax.set_xlim(0, im.shape[1] - 1) + ax.set_ylim(im.shape[0] - 1, 0) + return 
(fig, ax) if returnfig else plt.show()
+
 
 def show_fit_metrics(self, returnfig=False, **subplots_kwargs):
     assert hasattr(self, "fit_metrics"), "Please run fitting first!"
@@ -113,6 +222,7 @@ def show_fit_metrics(self, returnfig=False, **subplots_kwargs):
 
     opt_cmap = mpl_c.ListedColormap(
         (
+            (0.6, 0.05, 0.05),
             (0.8941176470588236, 0.10196078431372549, 0.10980392156862745),
             (0.21568627450980393, 0.49411764705882355, 0.7215686274509804),
             (0.30196078431372547, 0.6862745098039216, 0.2901960784313726),
@@ -122,11 +232,12 @@ def show_fit_metrics(self, returnfig=False, **subplots_kwargs):
         )
     )
     im = ax[0, 1].matshow(
-        self.fit_metrics["status"].data, cmap=opt_cmap, vmin=-1.5, vmax=4.5
+        self.fit_metrics["status"].data, cmap=opt_cmap, vmin=-2.5, vmax=4.5
     )
-    cbar = fig.colorbar(im, ax=ax[0, 1], ticks=[-1, 0, 1, 2, 3, 4])
+    cbar = fig.colorbar(im, ax=ax[0, 1], ticks=[-2, -1, 0, 1, 2, 3, 4])
     cbar.ax.set_yticklabels(
         [
+            "Unknown Error",
             "MINPACK Error",
             "Max f evals exceeded",
             "$gtol$ satisfied",
@@ -148,4 +259,4 @@ def show_fit_metrics(self, returnfig=False, **subplots_kwargs):
 
     fig.set_facecolor("w")
 
-    return (fig, ax) if returnfig else plt.show()
\ No newline at end of file
+    return (fig, ax) if returnfig else plt.show()
diff --git a/py4DSTEM/utils/__init__.py b/py4DSTEM/utils/__init__.py
index 06489e7f9..b0c484e80 100644
--- a/py4DSTEM/utils/__init__.py
+++ b/py4DSTEM/utils/__init__.py
@@ -1,2 +1 @@
 from py4DSTEM.utils.configuration_checker import check_config
-
diff --git a/py4DSTEM/utils/configuration_checker.py b/py4DSTEM/utils/configuration_checker.py
index 0ccf633fc..26b0b89d5 100644
--- a/py4DSTEM/utils/configuration_checker.py
+++ b/py4DSTEM/utils/configuration_checker.py
@@ -1,74 +1,72 @@
-#### this file contains a function/s that will check if various
+#### this file contains a function/s that will check if various
 # libaries/compute options are available
 import importlib
 from operator import mod
 
 # list of modules we expect/may expect to be installed
-# as part of a standard py4DSTEM installation
+# as part of a standard py4DSTEM installation
 # this needs to be the import name e.g. import mp_api not mp-api
 modules = [
-    'crystal4D',
-    'cupy',
-    'dask',
-    'dill',
-    'distributed',
-    'gdown',
-    'h5py',
-    'ipyparallel',
-    'jax',
-    'matplotlib',
-    'mp_api',
-    'ncempy',
-    'numba',
-    'numpy',
-    'pymatgen',
-    'skimage',
-    'sklearn',
-    'scipy',
-    'tensorflow',
-    'tensorflow-addons',
-    'tqdm'
+    "crystal4D",
+    "cupy",
+    "dask",
+    "dill",
+    "distributed",
+    "gdown",
+    "h5py",
+    "ipyparallel",
+    "jax",
+    "matplotlib",
+    "mp_api",
+    "ncempy",
+    "numba",
+    "numpy",
+    "pymatgen",
+    "skimage",
+    "sklearn",
+    "scipy",
+    "tensorflow",
+    "tensorflow-addons",
+    "tqdm",
 ]
 
-# currently this was copy and pasted from setup.py,
-# hopefully there's a programatic way to do this.
+# currently this was copied and pasted from setup.py;
+# hopefully there's a programmatic way to do this.
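+# (one possible programmatic route, suggested but not tested here: parse the
+#  requirement strings returned by importlib.metadata.requires("py4DSTEM"))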
module_depenencies = { - 'base' : [ - 'numpy', - 'scipy', - 'h5py', - 'ncempy', - 'matplotlib', - 'skimage', - 'sklearn', - 'tqdm', - 'dill', - 'gdown', - 'dask', - 'distributed' - ], - 'ipyparallel': ['ipyparallel', 'dill'], - 'cuda': ['cupy'], - 'acom': ['pymatgen', 'mp_api'], - 'aiml': ['tensorflow','tensorflow-addons','crystal4D'], - 'aiml-cuda': ['tensorflow','tensorflow-addons','crystal4D','cupy'], - 'numba': ['numba'] - } - - - + "base": [ + "numpy", + "scipy", + "h5py", + "ncempy", + "matplotlib", + "skimage", + "sklearn", + "tqdm", + "dill", + "gdown", + "dask", + "distributed", + ], + "ipyparallel": ["ipyparallel", "dill"], + "cuda": ["cupy"], + "acom": ["pymatgen", "mp_api"], + "aiml": ["tensorflow", "tensorflow-addons", "crystal4D"], + "aiml-cuda": ["tensorflow", "tensorflow-addons", "crystal4D", "cupy"], + "numba": ["numba"], +} #### Class and Functions to Create Coloured Strings #### class colours: - CEND = '\x1b[0m' - WARNING = '\x1b[7;93m' - SUCCESS = '\x1b[7;92m' - FAIL = '\x1b[7;91m' - BOLD = '\033[1m' - UNDERLINE = '\033[4m' - -def create_warning(s:str)->str: + CEND = "\x1b[0m" + WARNING = "\x1b[7;93m" + SUCCESS = "\x1b[7;92m" + FAIL = "\x1b[7;91m" + BOLD = "\033[1m" + UNDERLINE = "\033[4m" + + +def create_warning(s: str) -> str: """ Creates a yellow shaded with white font version of string s @@ -80,7 +78,9 @@ def create_warning(s:str)->str: """ s = colours.WARNING + s + colours.CEND return s -def create_success(s:str)-> str: + + +def create_success(s: str) -> str: """ Creates a yellow shaded with white font version of string s @@ -92,7 +92,9 @@ def create_success(s:str)-> str: """ s = colours.SUCCESS + s + colours.CEND return s -def create_failure(s:str)->str: + + +def create_failure(s: str) -> str: """ Creates a yellow shaded with white font version of string s @@ -104,7 +106,9 @@ def create_failure(s:str)->str: """ s = colours.FAIL + s + colours.CEND return s -def create_bold(s:str)->str: + + +def create_bold(s: str) -> str: """ Creates a yellow shaded with white font version of string s @@ -116,7 +120,9 @@ def create_bold(s:str)->str: """ s = colours.BOLD + s + colours.CEND return s -def create_underline(s:str)->str: + + +def create_underline(s: str) -> str: """ Creates an underlined version of string s @@ -130,10 +136,11 @@ def create_underline(s:str)->str: return s -#### Functions to check imports etc. +#### Functions to check imports etc. ### here I use the term state to define a boolean condition as to whether a libary/module was sucessfully imported/can be used -def get_import_states(modules:list = modules)->dict: + +def get_import_states(modules: list = modules) -> dict: """ Check the ability to import modules and store the results as a boolean value. Returns as a dict. e.g. 
import_states_dict['numpy'] == True/False @@ -147,8 +154,8 @@ def get_import_states(modules:list = modules)->dict: # Create the import states dict import_states_dict = {} - # check if the modules import - # and update the states dict to reflect this + # check if the modules import + # and update the states dict to reflect this for m in modules: state = import_tester(m) import_states_dict[m] = state @@ -156,7 +163,7 @@ def get_import_states(modules:list = modules)->dict: return import_states_dict -def get_module_states(state_dict:dict)->dict: +def get_module_states(state_dict: dict) -> dict: """_summary_ Args: @@ -166,15 +173,13 @@ def get_module_states(state_dict:dict)->dict: dict: _description_ """ - # create an empty dict to put module states into: module_states = {} # key is the name of the module e.g. ACOM - # val is a list of its dependencies + # val is a list of its dependencies # module_dependencies comes from the namespace for key, val in module_depenencies.items(): - # create a list to store the status of the depencies temp_lst = [] @@ -184,13 +189,13 @@ def get_module_states(state_dict:dict)->dict: temp_lst.append(state_dict[depend]) # check that all the depencies could be imported i.e. state == True - # and set the state of the module to that + # and set the state of the module to that module_states[key] = all(temp_lst) == True return module_states -def print_import_states(import_states:dict)->None: +def print_import_states(import_states: dict) -> None: """_summary_ Args: @@ -208,7 +213,7 @@ def print_import_states(import_states:dict)->None: s = create_success(s) s = f"{s: <80}" print(s) - #if unable to import i.e. state == False + # if unable to import i.e. state == False else: s = f" Module {m.capitalize()} Import Failed " s = create_failure(s) @@ -217,9 +222,7 @@ def print_import_states(import_states:dict)->None: return None - - -def print_module_states(module_states:dict)->None: +def print_module_states(module_states: dict) -> None: """_summary_ Args: @@ -244,7 +247,10 @@ def print_module_states(module_states:dict)->None: print(s) return None -def perfrom_extra_checks(import_states:dict, verbose:bool, gratuitously_verbose:bool, **kwargs)->None: + +def perfrom_extra_checks( + import_states: dict, verbose: bool, gratuitously_verbose: bool, **kwargs +) -> None: """_summary_ Args: @@ -256,7 +262,7 @@ def perfrom_extra_checks(import_states:dict, verbose:bool, gratuitously_verbose: _type_: _description_ """ - # print a output module + # print a output module extra_checks_message = "Running Extra Checks" extra_checks_message = create_bold(extra_checks_message) print(f"{extra_checks_message}") @@ -279,11 +285,10 @@ def perfrom_extra_checks(import_states:dict, verbose:bool, gratuitously_verbose: else: pass - return None -def import_tester(m:str)->bool: +def import_tester(m: str) -> bool: """ This function will try and import the module, m, it returns the success as boolean and prints a message. @@ -305,9 +310,7 @@ def import_tester(m:str)->bool: return state - - -def check_module_functionality(state_dict:dict)->None: +def check_module_functionality(state_dict: dict) -> None: """ This function checks all the py4DSTEM modules, e.g. acom, ml-ai, and whether all the required dependencies are importable @@ -323,9 +326,8 @@ def check_module_functionality(state_dict:dict)->None: module_states = {} # key is the name of the module e.g. 
ACOM - # val is a list of its dependencies + # val is a list of its dependencies for key, val in module_depenencies.items(): - # create a list to store the status of the depencies temp_lst = [] @@ -335,7 +337,7 @@ def check_module_functionality(state_dict:dict)->None: temp_lst.append(state_dict[depend]) # check that all the depencies could be imported i.e. state == True - # and set the state of the module to that + # and set the state of the module to that module_states[key] = all(temp_lst) == True # Print out the state of all the modules in colour code @@ -351,14 +353,13 @@ def check_module_functionality(state_dict:dict)->None: s = create_failure(s) print(s) - return None # module_states + return None # module_states + #### ADDTIONAL CHECKS #### -def check_cupy_gpu( - gratuitously_verbose:bool, - **kwargs - ): + +def check_cupy_gpu(gratuitously_verbose: bool, **kwargs): """ This function performs some additional tests which may be useful in diagnosing Cupy GPU performance @@ -385,11 +386,11 @@ def check_cupy_gpu( print(s) # Count how many GPUs Cupy can detect - # probably should change this to a while loop ... + # probably should change this to a while loop ... for i in range(24): try: d = cp.cuda.Device(i) - hasattr(d, 'attributes') + hasattr(d, "attributes") except: num_gpus_detected = i break @@ -416,7 +417,7 @@ def check_cupy_gpu( cupy_version = cp.__version__ print(f"Cupy Version:\t\t{cupy_version}") - # if verbose print extra information + # if verbose print extra information if gratuitously_verbose: for i in range(num_gpus_detected): d = cp.cuda.Device(i) @@ -427,7 +428,7 @@ def check_cupy_gpu( return None -def print_no_extra_checks(m:str): +def print_no_extra_checks(m: str): """ This function prints a warning style message that the module m currently has no extra checks. @@ -445,21 +446,18 @@ def print_no_extra_checks(m:str): return None -# dict of extra check functions -funcs_dict = { - "cupy" : check_cupy_gpu -} - +# dict of extra check functions +funcs_dict = {"cupy": check_cupy_gpu} #### main function used to check the configuration of the installation def check_config( - #modules:list = modules, # removed to not be user editable as this will often break. Could make it append to modules... but for now just removing - verbose:bool = False, - gratuitously_verbose:bool = False, + # modules:list = modules, # removed to not be user editable as this will often break. Could make it append to modules... but for now just removing + verbose: bool = False, + gratuitously_verbose: bool = False, # egregiously_verbose:bool = False - )->None: +) -> None: """ This function checks the state of required imports to run py4DSTEM. 
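+
+    Example (a minimal sketch; check_config is re-exported from py4DSTEM.utils):
+
+    >>> from py4DSTEM.utils import check_config
+    >>> check_config(verbose=True)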
@@ -473,14 +471,14 @@ def check_config(
         None
     """
 
-    # get the states of all imports
+    # get the states of all imports
     states_dict = get_import_states(modules)
 
     # get the states of all modules dependencies
     modules_dict = get_module_states(states_dict)
 
-    # print the modules compatiabiltiy
-    # prepare a message
+    # print the modules compatibility
+    # prepare a message
     modules_checks_message = "Checking Module Dependencies"
     modules_checks_message = create_bold(modules_checks_message)
     print(modules_checks_message)
@@ -495,8 +493,10 @@ def check_config(
 
     print_import_states(states_dict)
 
-    perfrom_extra_checks(import_states=states_dict, verbose=verbose, gratuitously_verbose=gratuitously_verbose)
-
-
+    perfrom_extra_checks(
+        import_states=states_dict,
+        verbose=verbose,
+        gratuitously_verbose=gratuitously_verbose,
+    )
 
     return None
diff --git a/py4DSTEM/version.py b/py4DSTEM/version.py
index 9df5075b8..224f1fb74 100644
--- a/py4DSTEM/version.py
+++ b/py4DSTEM/version.py
@@ -1,2 +1 @@
-__version__='0.14.3'
-
+__version__ = "0.14.4"
diff --git a/py4DSTEM/visualize/__init__.py b/py4DSTEM/visualize/__init__.py
index d5c183ab5..d9e5b4c68 100644
--- a/py4DSTEM/visualize/__init__.py
+++ b/py4DSTEM/visualize/__init__.py
@@ -3,4 +3,3 @@
 from py4DSTEM.visualize.vis_RQ import *
 from py4DSTEM.visualize.vis_grid import *
 from py4DSTEM.visualize.vis_special import *
-
diff --git a/py4DSTEM/visualize/overlay.py b/py4DSTEM/visualize/overlay.py
index 7e7147a15..996bb89b3 100644
--- a/py4DSTEM/visualize/overlay.py
+++ b/py4DSTEM/visualize/overlay.py
@@ -1,5 +1,5 @@
 import numpy as np
-from matplotlib.patches import Rectangle,Circle,Wedge,Ellipse
+from matplotlib.patches import Rectangle, Circle, Wedge, Ellipse
 from matplotlib.axes import Axes
 from matplotlib.colors import is_color_like
 from numbers import Number
@@ -9,219 +9,277 @@
 from emdfile import PointList
 
 
-
-def add_rectangles(ax,d):
+def add_rectangles(ax, d):
     """
     Adds one or more rectangles to Axis ax using the parameters in dictionary d.
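+
+    A minimal sketch (hypothetical values; `lims` is (x0, x1, y0, y1) in pixels):
+        d = {"lims": (10, 20, 30, 40), "color": "y", "fill": False}
+        add_rectangles(ax, d)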
""" # Handle inputs - assert isinstance(ax,Axes) + assert isinstance(ax, Axes) # lims - assert('lims' in d.keys()) - lims = d['lims'] - if isinstance(lims,tuple): - assert(len(lims)==4) + assert "lims" in d.keys() + lims = d["lims"] + if isinstance(lims, tuple): + assert len(lims) == 4 lims = [lims] - assert(isinstance(lims,list)) + assert isinstance(lims, list) N = len(lims) - assert(all([isinstance(t,tuple) for t in lims])) - assert(all([len(t)==4 for t in lims])) + assert all([isinstance(t, tuple) for t in lims]) + assert all([len(t) == 4 for t in lims]) # color - color = d['color'] if 'color' in d.keys() else 'r' - if isinstance(color,list): - assert(len(color)==N) - assert(all([is_color_like(c) for c in color])) + color = d["color"] if "color" in d.keys() else "r" + if isinstance(color, list): + assert len(color) == N + assert all([is_color_like(c) for c in color]) else: assert is_color_like(color) color = [color for i in range(N)] # fill - fill = d['fill'] if 'fill' in d.keys() else False - if isinstance(fill,bool): + fill = d["fill"] if "fill" in d.keys() else False + if isinstance(fill, bool): fill = [fill for i in range(N)] else: - assert(isinstance(fill,list)) - assert(len(fill)==N) - assert(all([isinstance(f,bool) for f in fill])) + assert isinstance(fill, list) + assert len(fill) == N + assert all([isinstance(f, bool) for f in fill]) # alpha - alpha = d['alpha'] if 'alpha' in d.keys() else 1 - if isinstance(alpha,(float,int,np.float64)): + alpha = d["alpha"] if "alpha" in d.keys() else 1 + if isinstance(alpha, (float, int, np.float64)): alpha = [alpha for i in range(N)] else: - assert(isinstance(alpha,list)) - assert(len(alpha)==N) - assert(all([isinstance(a,(float,int,np.float64)) for a in alpha])) + assert isinstance(alpha, list) + assert len(alpha) == N + assert all([isinstance(a, (float, int, np.float64)) for a in alpha]) # linewidth - linewidth = d['linewidth'] if 'linewidth' in d.keys() else 2 - if isinstance(linewidth,(float,int,np.float64)): + linewidth = d["linewidth"] if "linewidth" in d.keys() else 2 + if isinstance(linewidth, (float, int, np.float64)): linewidth = [linewidth for i in range(N)] else: - assert(isinstance(linewidth,list)) - assert(len(linewidth)==N) - assert(all([isinstance(lw,(float,int,np.float64)) for lw in linewidth])) + assert isinstance(linewidth, list) + assert len(linewidth) == N + assert all([isinstance(lw, (float, int, np.float64)) for lw in linewidth]) # additional parameters - kws = [k for k in d.keys() if k not in ('lims','color','fill','alpha','linewidth')] + kws = [ + k for k in d.keys() if k not in ("lims", "color", "fill", "alpha", "linewidth") + ] kwargs = dict() for k in kws: kwargs[k] = d[k] # add the rectangles for i in range(N): - l,c,f,a,lw = lims[i],color[i],fill[i],alpha[i],linewidth[i] - rect = Rectangle((l[2]-0.5,l[0]-0.5),l[3]-l[2],l[1]-l[0],color=c,fill=f, - alpha=a,linewidth=lw,**kwargs) + l, c, f, a, lw = lims[i], color[i], fill[i], alpha[i], linewidth[i] + rect = Rectangle( + (l[2] - 0.5, l[0] - 0.5), + l[3] - l[2], + l[1] - l[0], + color=c, + fill=f, + alpha=a, + linewidth=lw, + **kwargs, + ) ax.add_patch(rect) return -def add_circles(ax,d): + +def add_circles(ax, d): """ adds one or more circles to axis ax using the parameters in dictionary d. 
""" # handle inputs - assert isinstance(ax,Axes) + assert isinstance(ax, Axes) # center - assert('center' in d.keys()) - center = d['center'] - if isinstance(center,tuple): - assert(len(center)==2) + assert "center" in d.keys() + center = d["center"] + if isinstance(center, tuple): + assert len(center) == 2 center = [center] - assert(isinstance(center,list)) + assert isinstance(center, list) N = len(center) - assert(all([isinstance(x,tuple) for x in center])) - assert(all([len(x)==2 for x in center])) + assert all([isinstance(x, tuple) for x in center]) + assert all([len(x) == 2 for x in center]) # radius - assert('R' in d.keys()) - R = d['R'] - if isinstance(R,Number): + assert "R" in d.keys() + R = d["R"] + if isinstance(R, Number): R = [R for i in range(N)] - assert(isinstance(R,list)) - assert(len(R)==N) - assert(all([isinstance(i,Number) for i in R])) + assert isinstance(R, list) + assert len(R) == N + assert all([isinstance(i, Number) for i in R]) # color - color = d['color'] if 'color' in d.keys() else 'r' - if isinstance(color,list): - assert(len(color)==N) - assert(all([is_color_like(c) for c in color])) + color = d["color"] if "color" in d.keys() else "r" + if isinstance(color, list): + assert len(color) == N + assert all([is_color_like(c) for c in color]) else: assert is_color_like(color) color = [color for i in range(N)] # fill - fill = d['fill'] if 'fill' in d.keys() else False - if isinstance(fill,bool): + fill = d["fill"] if "fill" in d.keys() else False + if isinstance(fill, bool): fill = [fill for i in range(N)] else: - assert(isinstance(fill,list)) - assert(len(fill)==N) - assert(all([isinstance(f,bool) for f in fill])) + assert isinstance(fill, list) + assert len(fill) == N + assert all([isinstance(f, bool) for f in fill]) # alpha - alpha = d['alpha'] if 'alpha' in d.keys() else 1 - if isinstance(alpha,(float,int,np.float64)): + alpha = d["alpha"] if "alpha" in d.keys() else 1 + if isinstance(alpha, (float, int, np.float64)): alpha = [alpha for i in range(N)] else: - assert(isinstance(alpha,list)) - assert(len(alpha)==N) - assert(all([isinstance(a,(float,int,np.float64)) for a in alpha])) + assert isinstance(alpha, list) + assert len(alpha) == N + assert all([isinstance(a, (float, int, np.float64)) for a in alpha]) # linewidth - linewidth = d['linewidth'] if 'linewidth' in d.keys() else 2 - if isinstance(linewidth,(float,int,np.float64)): + linewidth = d["linewidth"] if "linewidth" in d.keys() else 2 + if isinstance(linewidth, (float, int, np.float64)): linewidth = [linewidth for i in range(N)] else: - assert(isinstance(linewidth,list)) - assert(len(linewidth)==N) - assert(all([isinstance(lw,(float,int,np.float64)) for lw in linewidth])) + assert isinstance(linewidth, list) + assert len(linewidth) == N + assert all([isinstance(lw, (float, int, np.float64)) for lw in linewidth]) # additional parameters - kws = [k for k in d.keys() if k not in ('center','R','color','fill','alpha','linewidth')] + kws = [ + k + for k in d.keys() + if k not in ("center", "R", "color", "fill", "alpha", "linewidth") + ] kwargs = dict() for k in kws: kwargs[k] = d[k] # add the circles for i in range(N): - cent,r,col,f,a,lw = center[i],R[i],color[i],fill[i],alpha[i],linewidth[i] - circ = Circle((cent[1],cent[0]),r,color=col,fill=f,alpha=a,linewidth=lw,**kwargs) + cent, r, col, f, a, lw = ( + center[i], + R[i], + color[i], + fill[i], + alpha[i], + linewidth[i], + ) + circ = Circle( + (cent[1], cent[0]), r, color=col, fill=f, alpha=a, linewidth=lw, **kwargs + ) ax.add_patch(circ) return -def 
add_annuli(ax,d): + +def add_annuli(ax, d): """ Adds one or more annuli to Axis ax using the parameters in dictionary d. """ - # Handle inputs - assert isinstance(ax,Axes) + + # Check that all required inputs are present + assert isinstance(ax, Axes) + assert "center" in d.keys() + assert "radii" in d.keys() + + # Get user-provided center and radii + center = d["center"] + radii = d["radii"] + + # Determine number of annuli being plotted + if isinstance(center, list): + N = len(center) + elif isinstance(radii, list): + N = len(radii) + else: + N = 1 + # center - assert('center' in d.keys()) - center = d['center'] - if isinstance(center,tuple): - assert(len(center)==2) - center = [center] - assert(isinstance(center,list)) - N = len(center) - assert(all([isinstance(x,tuple) for x in center])) - assert(all([len(x)==2 for x in center])) + if isinstance(center, tuple): + assert len(center) == 2 + center = [center] * N + # assert(isinstance(center,list)) + assert all([isinstance(x, tuple) for x in center]) + assert all([len(x) == 2 for x in center]) # radii - assert('radii' in d.keys()) - radii = d['radii'] - if isinstance(radii,tuple): - assert(len(radii)==2) + if isinstance(radii, tuple): + assert len(radii) == 2 ri = [radii[0] for i in range(N)] ro = [radii[1] for i in range(N)] else: - assert(isinstance(radii,list)) - assert(all([isinstance(x,tuple) for x in radii])) - assert(len(radii)==N) + assert isinstance(radii, list) + assert all([isinstance(x, tuple) for x in radii]) + assert len(radii) == N ri = [radii[i][0] for i in range(N)] ro = [radii[i][1] for i in range(N)] - assert(all([isinstance(i,Number) for i in ri])) - assert(all([isinstance(i,Number) for i in ro])) + assert all([isinstance(i, Number) for i in ri]) + assert all([isinstance(i, Number) for i in ro]) # color - color = d['color'] if 'color' in d.keys() else 'r' - if isinstance(color,list): - assert(len(color)==N) - assert(all([is_color_like(c) for c in color])) + color = d["color"] if "color" in d.keys() else "r" + if isinstance(color, list): + assert len(color) == N + assert all([is_color_like(c) for c in color]) else: assert is_color_like(color) color = [color for i in range(N)] # fill - fill = d['fill'] if 'fill' in d.keys() else False - if isinstance(fill,bool): + fill = d["fill"] if "fill" in d.keys() else True + if isinstance(fill, bool): fill = [fill for i in range(N)] else: - assert(isinstance(fill,list)) - assert(len(fill)==N) - assert(all([isinstance(f,bool) for f in fill])) + assert isinstance(fill, list) + assert len(fill) == N + assert all([isinstance(f, bool) for f in fill]) # alpha - alpha = d['alpha'] if 'alpha' in d.keys() else 1 - if isinstance(alpha,(float,int,np.float64)): + alpha = d["alpha"] if "alpha" in d.keys() else 1 + if isinstance(alpha, (float, int, np.float64)): alpha = [alpha for i in range(N)] else: - assert(isinstance(alpha,list)) - assert(len(alpha)==N) - assert(all([isinstance(a,(float,int,np.float64)) for a in alpha])) + assert isinstance(alpha, list) + assert len(alpha) == N + assert all([isinstance(a, (float, int, np.float64)) for a in alpha]) # linewidth - linewidth = d['linewidth'] if 'linewidth' in d.keys() else 2 - if isinstance(linewidth,(float,int,np.float64)): + linewidth = d["linewidth"] if "linewidth" in d.keys() else 2 + if isinstance(linewidth, (float, int, np.float64)): linewidth = [linewidth for i in range(N)] else: - assert(isinstance(linewidth,list)) - assert(len(linewidth)==N) - assert(all([isinstance(lw,(float,int,np.float64)) for lw in linewidth])) + assert 
isinstance(linewidth, list) + assert len(linewidth) == N + assert all([isinstance(lw, (float, int, np.float64)) for lw in linewidth]) # additional parameters - kws = [k for k in d.keys() if k not in ('center','radii','color','fill','alpha','linewidth')] + kws = [ + k + for k in d.keys() + if k not in ("center", "radii", "color", "fill", "alpha", "linewidth") + ] kwargs = dict() for k in kws: kwargs[k] = d[k] # add the annuli for i in range(N): - cent,Ri,Ro,col,f,a,lw = center[i],ri[i],ro[i],color[i],fill[i],alpha[i],linewidth[i] - annulus = Wedge((cent[1],cent[0]),Ro,0,360,width=Ro-Ri,color=col,fill=f,alpha=a, - linewidth=lw,**kwargs) + cent, Ri, Ro, col, f, a, lw = ( + center[i], + ri[i], + ro[i], + color[i], + fill[i], + alpha[i], + linewidth[i], + ) + annulus = Wedge( + (cent[1], cent[0]), + Ro, + 0, + 360, + width=Ro - Ri, + color=col, + fill=f, + alpha=a, + linewidth=lw, + **kwargs, + ) ax.add_patch(annulus) return -def add_ellipses(ax,d): + +def add_ellipses(ax, d): """ Adds one or more ellipses to axis ax using the parameters in dictionary d. @@ -237,202 +295,244 @@ def add_ellipses(ax,d): linestyle """ # handle inputs - assert isinstance(ax,Axes) + assert isinstance(ax, Axes) # semimajor axis length - assert('a' in d.keys()) - a = d['a'] - if isinstance(a,Number): + assert "a" in d.keys() + a = d["a"] + if isinstance(a, Number): a = [a] - assert(isinstance(a,list)) + assert isinstance(a, list) N = len(a) - assert(all([isinstance(i,Number) for i in a])) + assert all([isinstance(i, Number) for i in a]) # semiminor axis length - assert('b' in d.keys()) - b = d['b'] - if isinstance(b,Number): + assert "b" in d.keys() + b = d["b"] + if isinstance(b, Number): b = [b] - assert(isinstance(b,list)) - assert(len(b)==N) - assert(all([isinstance(i,Number) for i in b])) + assert isinstance(b, list) + assert len(b) == N + assert all([isinstance(i, Number) for i in b]) # center - assert('center' in d.keys()) - center = d['center'] - if isinstance(center,tuple): - assert(len(center)==2) + assert "center" in d.keys() + center = d["center"] + if isinstance(center, tuple): + assert len(center) == 2 center = [center for i in range(N)] - assert(isinstance(center,list)) - assert(len(center)==N) - assert(all([isinstance(x,tuple) for x in center])) - assert(all([len(x)==2 for x in center])) + assert isinstance(center, list) + assert len(center) == N + assert all([isinstance(x, tuple) for x in center]) + assert all([len(x) == 2 for x in center]) # theta - assert('theta' in d.keys()) - theta = d['theta'] - if isinstance(theta,Number): + assert "theta" in d.keys() + theta = d["theta"] + if isinstance(theta, Number): theta = [theta for i in range(N)] - assert(isinstance(theta,list)) - assert(len(theta)==N) - assert(all([isinstance(i,Number) for i in theta])) + assert isinstance(theta, list) + assert len(theta) == N + assert all([isinstance(i, Number) for i in theta]) # color - color = d['color'] if 'color' in d.keys() else 'r' - if isinstance(color,list): - assert(len(color)==N) - assert(all([is_color_like(c) for c in color])) + color = d["color"] if "color" in d.keys() else "r" + if isinstance(color, list): + assert len(color) == N + assert all([is_color_like(c) for c in color]) else: assert is_color_like(color) color = [color for i in range(N)] # fill - fill = d['fill'] if 'fill' in d.keys() else False - if isinstance(fill,bool): + fill = d["fill"] if "fill" in d.keys() else False + if isinstance(fill, bool): fill = [fill for i in range(N)] else: - assert(isinstance(fill,list)) - assert(len(fill)==N) - 
assert(all([isinstance(f,bool) for f in fill])) + assert isinstance(fill, list) + assert len(fill) == N + assert all([isinstance(f, bool) for f in fill]) # alpha - alpha = d['alpha'] if 'alpha' in d.keys() else 1 - if isinstance(alpha,(float,int,np.float64)): + alpha = d["alpha"] if "alpha" in d.keys() else 1 + if isinstance(alpha, (float, int, np.float64)): alpha = [alpha for i in range(N)] else: - assert(isinstance(alpha,list)) - assert(len(alpha)==N) - assert(all([isinstance(alp,(float,int,np.float64)) for alp in alpha])) + assert isinstance(alpha, list) + assert len(alpha) == N + assert all([isinstance(alp, (float, int, np.float64)) for alp in alpha]) # linewidth - linewidth = d['linewidth'] if 'linewidth' in d.keys() else 2 - if isinstance(linewidth,(float,int,np.float64)): + linewidth = d["linewidth"] if "linewidth" in d.keys() else 2 + if isinstance(linewidth, (float, int, np.float64)): linewidth = [linewidth for i in range(N)] else: - assert(isinstance(linewidth,list)) - assert(len(linewidth)==N) - assert(all([isinstance(lw,(float,int,np.float64)) for lw in linewidth])) + assert isinstance(linewidth, list) + assert len(linewidth) == N + assert all([isinstance(lw, (float, int, np.float64)) for lw in linewidth]) # linestyle - linestyle = d['linestyle'] if 'linestyle' in d.keys() else '-' - if isinstance(linestyle,(str)): + linestyle = d["linestyle"] if "linestyle" in d.keys() else "-" + if isinstance(linestyle, (str)): linestyle = [linestyle for i in range(N)] else: - assert(isinstance(linestyle,list)) - assert(len(linestyle)==N) - assert(all([isinstance(lw,(str)) for lw in linestyle])) + assert isinstance(linestyle, list) + assert len(linestyle) == N + assert all([isinstance(lw, (str)) for lw in linestyle]) # additional parameters - kws = [k for k in d.keys() if k not in ('center','a','b','theta','color', - 'fill','alpha','linewidth','linestyle')] + kws = [ + k + for k in d.keys() + if k + not in ( + "center", + "a", + "b", + "theta", + "color", + "fill", + "alpha", + "linewidth", + "linestyle", + ) + ] kwargs = dict() for k in kws: kwargs[k] = d[k] # add the ellipses for i in range(N): - cent,_a,_b,_theta,col,f,_alpha,lw,ls = (center[i],a[i],b[i],theta[i],color[i],fill[i], - alpha[i],linewidth[i],linestyle[i]) - ellipse = Ellipse((cent[1],cent[0]),2*_b,2*_a,-np.degrees(_theta),color=col,fill=f, - alpha=_alpha,linewidth=lw,linestyle=ls,**kwargs) + cent, _a, _b, _theta, col, f, _alpha, lw, ls = ( + center[i], + a[i], + b[i], + theta[i], + color[i], + fill[i], + alpha[i], + linewidth[i], + linestyle[i], + ) + ellipse = Ellipse( + (cent[1], cent[0]), + 2 * _b, + 2 * _a, + -np.degrees(_theta), + color=col, + fill=f, + alpha=_alpha, + linewidth=lw, + linestyle=ls, + **kwargs, + ) ax.add_patch(ellipse) return -def add_points(ax,d): + +def add_points(ax, d): """ adds one or more points to axis ax using the parameters in dictionary d. 
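+
+    A minimal sketch (hypothetical values; `x` and `y` may be scalars or arrays):
+        d = {"x": [10, 12], "y": [20, 24], "pointcolor": "b", "scale": 50}
+        add_points(ax, d)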
""" # handle inputs - assert isinstance(ax,Axes) + assert isinstance(ax, Axes) # x - assert('x' in d.keys()) - x = d['x'] - if isinstance(x,Number): + assert "x" in d.keys() + x = d["x"] + if isinstance(x, Number): x = [x] x = np.array(x) N = len(x) # y - assert('y' in d.keys()) - y = d['y'] - if isinstance(y,Number): + assert "y" in d.keys() + y = d["y"] + if isinstance(y, Number): y = [y] y = np.array(y) - assert(len(y)==N) + assert len(y) == N # s - s = d['s'] if 's' in d.keys() else np.ones(N) - if isinstance(s,Number): - s = np.ones_like(x)*s - assert(len(s)==N) - s = np.where(s>0,s,0) + s = d["s"] if "s" in d.keys() else np.ones(N) + if isinstance(s, Number): + s = np.ones_like(x) * s + assert len(s) == N + s = np.where(s > 0, s, 0) # scale - scale = d['scale'] if 'scale' in d.keys() else 25 - assert isinstance(scale,Number) + scale = d["scale"] if "scale" in d.keys() else 25 + assert isinstance(scale, Number) # point color - color = d['pointcolor'] if 'pointcolor' in d.keys() else 'r' - if isinstance(color,(list,np.ndarray)): - assert(len(color)==N) - assert(all([is_color_like(c) for c in color])) + color = d["pointcolor"] if "pointcolor" in d.keys() else "r" + if isinstance(color, (list, np.ndarray)): + assert len(color) == N + assert all([is_color_like(c) for c in color]) else: assert is_color_like(color) color = [color for i in range(N)] # alpha - alpha = d['alpha'] if 'alpha' in d.keys() else 1. - assert isinstance(alpha,Number) + alpha = d["alpha"] if "alpha" in d.keys() else 1.0 + assert isinstance(alpha, Number) # open_circles - open_circles = d['open_circles'] if 'open_circles' in d.keys() else False - assert isinstance(open_circles,bool) + open_circles = d["open_circles"] if "open_circles" in d.keys() else False + assert isinstance(open_circles, bool) # additional parameters - kws = [k for k in d.keys() if k not in ('x','y','s','scale','pointcolor','alpha', - 'open_circles')] + kws = [ + k + for k in d.keys() + if k not in ("x", "y", "s", "scale", "pointcolor", "alpha", "open_circles") + ] kwargs = dict() for k in kws: kwargs[k] = d[k] # add the points if open_circles: - ax.scatter(y,x,s=scale,edgecolor=color,facecolor='none',alpha=alpha,**kwargs) + ax.scatter( + y, x, s=scale, edgecolor=color, facecolor="none", alpha=alpha, **kwargs + ) else: - ax.scatter(y,x,s=s*scale/np.max(s),color=color,alpha=alpha,**kwargs) + ax.scatter(y, x, s=s * scale / np.max(s), color=color, alpha=alpha, **kwargs) return -def add_pointlabels(ax,d): +def add_pointlabels(ax, d): """ adds number indices for a set of points to axis ax using the parameters in dictionary d. """ # handle inputs - assert isinstance(ax,Axes) + assert isinstance(ax, Axes) # x - assert('x' in d.keys()) - x = d['x'] - if isinstance(x,Number): + assert "x" in d.keys() + x = d["x"] + if isinstance(x, Number): x = [x] x = np.array(x) N = len(x) # y - assert('y' in d.keys()) - y = d['y'] - if isinstance(y,Number): + assert "y" in d.keys() + y = d["y"] + if isinstance(y, Number): y = [y] y = np.array(y) - assert(len(y)==N) + assert len(y) == N # size - size = d['size'] if 'size' in d.keys() else 20 - assert isinstance(size,Number) + size = d["size"] if "size" in d.keys() else 20 + assert isinstance(size, Number) # color - color = d['color'] if 'color' in d.keys() else 'r' + color = d["color"] if "color" in d.keys() else "r" assert is_color_like(color) # alpha - alpha = d['alpha'] if 'alpha' in d.keys() else 1. 
- assert isinstance(alpha,Number) + alpha = d["alpha"] if "alpha" in d.keys() else 1.0 + assert isinstance(alpha, Number) # labels - labels = d['labels'] if 'labels' in d.keys() else np.arange(N).astype(str) - assert len(labels)==N + labels = d["labels"] if "labels" in d.keys() else np.arange(N).astype(str) + assert len(labels) == N # additional parameters - kws = [k for k in d.keys() if k not in ('x','y','size','color','alpha','labels')] + kws = [ + k for k in d.keys() if k not in ("x", "y", "size", "color", "alpha", "labels") + ] kwargs = dict() for k in kws: kwargs[k] = d[k] # add the point labels for i in range(N): - ax.text(y[i],x[i],s=labels[i],color=color,size=size,alpha=alpha,**kwargs) + ax.text(y[i], x[i], s=labels[i], color=color, size=size, alpha=alpha, **kwargs) return -def add_bragg_index_labels(ax,d): + +def add_bragg_index_labels(ax, d): """ Adds labels for indexed bragg directions to a plot, using the parameters in dict d. @@ -448,54 +548,58 @@ def add_bragg_index_labels(ax,d): pointcolor (color) """ # handle inputs - assert isinstance(ax,Axes) + assert isinstance(ax, Axes) # bragg directions - assert('bragg_directions' in d.keys()) - bragg_directions = d['bragg_directions'] - assert isinstance(bragg_directions,PointList) - for k in ('qx','qy','h','k'): + assert "bragg_directions" in d.keys() + bragg_directions = d["bragg_directions"] + assert isinstance(bragg_directions, PointList) + for k in ("qx", "qy", "h", "k"): assert k in bragg_directions.data.dtype.fields - include_l = True if 'l' in bragg_directions.data.dtype.fields else False + include_l = True if "l" in bragg_directions.data.dtype.fields else False # offsets - hoffset = d['hoffset'] if 'hoffset' in d.keys() else 0 - voffset = d['voffset'] if 'voffset' in d.keys() else 5 + hoffset = d["hoffset"] if "hoffset" in d.keys() else 0 + voffset = d["voffset"] if "voffset" in d.keys() else 5 # size, color - size = d['size'] if 'size' in d.keys() else 20 - assert isinstance(size,Number) - color = d['color'] if 'color' in d.keys() else 'w' + size = d["size"] if "size" in d.keys() else 20 + assert isinstance(size, Number) + color = d["color"] if "color" in d.keys() else "w" assert is_color_like(color) # points - points = d['points'] if 'points' in d.keys() else True - pointsize = d['pointsize'] if 'pointsize' in d.keys() else 50 - pointcolor = d['pointcolor'] if 'pointcolor' in d.keys() else 'r' - assert isinstance(points,bool) - assert isinstance(pointsize,Number) + points = d["points"] if "points" in d.keys() else True + pointsize = d["pointsize"] if "pointsize" in d.keys() else 50 + pointcolor = d["pointcolor"] if "pointcolor" in d.keys() else "r" + assert isinstance(points, bool) + assert isinstance(pointsize, Number) assert is_color_like(pointcolor) # add the points if points: - ax.scatter(bragg_directions.data['qy'],bragg_directions.data['qx'], - color=pointcolor,s=pointsize) + ax.scatter( + bragg_directions.data["qy"], + bragg_directions.data["qx"], + color=pointcolor, + s=pointsize, + ) # add index labels for i in range(bragg_directions.length): - x,y = bragg_directions.data['qx'][i],bragg_directions.data['qy'][i] + x, y = bragg_directions.data["qx"][i], bragg_directions.data["qy"][i] x -= voffset y += hoffset - h,k = bragg_directions.data['h'][i],bragg_directions.data['k'][i] - h = str(h) if h>=0 else r'$\overline{{{}}}$'.format(np.abs(h)) - k = str(k) if k>=0 else r'$\overline{{{}}}$'.format(np.abs(k)) - s = h+','+k + h, k = bragg_directions.data["h"][i], bragg_directions.data["k"][i] + h = str(h) if h >= 0 else 
r"$\overline{{{}}}$".format(np.abs(h)) + k = str(k) if k >= 0 else r"$\overline{{{}}}$".format(np.abs(k)) + s = h + "," + k if include_l: - l = bragg_directions.data['l'][i] - l = str(l) if l>=0 else r'$\overline{{{}}}$'.format(np.abs(l)) + l = bragg_directions.data["l"][i] + l = str(l) if l >= 0 else r"$\overline{{{}}}$".format(np.abs(l)) s += l - ax.text(y,x,s,color=color,size=size,ha='center',va='bottom') + ax.text(y, x, s, color=color, size=size, ha="center", va="bottom") return -def add_vector(ax,d): +def add_vector(ax, d): """ Adds a vector to an image, using the parameters in dict d. @@ -509,46 +613,60 @@ def add_vector(ax,d): labelcolor (color) """ # handle inputs - assert isinstance(ax,Axes) + assert isinstance(ax, Axes) # head and tail positions - assert('x0' in d.keys()) - assert('y0' in d.keys()) - assert('vx' in d.keys()) - assert('vy' in d.keys()) - x0,y0,vx,vy = d['x0'],d['y0'],d['vx'],d['vy'] + assert "x0" in d.keys() + assert "y0" in d.keys() + assert "vx" in d.keys() + assert "vy" in d.keys() + x0, y0, vx, vy = d["x0"], d["y0"], d["vx"], d["vy"] # width - width = d['width'] if 'width' in d.keys() else 1 + width = d["width"] if "width" in d.keys() else 1 # color - color = d['color'] if 'color' in d.keys() else 'r' + color = d["color"] if "color" in d.keys() else "r" assert is_color_like(color) # label - label = d['label'] if 'label' in d.keys() else False - labelsize = d['labelsize'] if 'labelsize' in d.keys() else 20 - labelcolor = d['labelcolor'] if 'labelcolor' in d.keys() else 'w' - assert isinstance(label,(str,bool)) - assert isinstance(labelsize,Number) + label = d["label"] if "label" in d.keys() else False + labelsize = d["labelsize"] if "labelsize" in d.keys() else 20 + labelcolor = d["labelcolor"] if "labelcolor" in d.keys() else "w" + assert isinstance(label, (str, bool)) + assert isinstance(labelsize, Number) assert is_color_like(labelcolor) # additional parameters - kws = [k for k in d.keys() if k not in ('x0','y0','vx','vy', - 'width','color','label','labelsize','labelcolor')] + kws = [ + k + for k in d.keys() + if k + not in ( + "x0", + "y0", + "vx", + "vy", + "width", + "color", + "label", + "labelsize", + "labelcolor", + ) + ] kwargs = dict() for k in kws: kwargs[k] = d[k] # Add the vector - ax.arrow(y0,x0,vy,vx,color=color,width=width,length_includes_head=True,**kwargs) + ax.arrow( + y0, x0, vy, vx, color=color, width=width, length_includes_head=True, **kwargs + ) # Add label if label: - x,y = x0+0.5*vx,y0+0.5*vy - ax.text(y,x,label,size=labelsize, - color=labelcolor,ha='center',va='center') + x, y = x0 + 0.5 * vx, y0 + 0.5 * vy + ax.text(y, x, label, size=labelsize, color=labelcolor, ha="center", va="center") return - -def add_grid_overlay(ax,d): +def add_grid_overlay(ax, d): """ adds an overlaid grid over some subset of pixels in an image using the parameters in dictionary d. 
@@ -561,41 +679,51 @@ def add_grid_overlay(ax,d): alpha (number) """ # handle inputs - assert isinstance(ax,Axes) + assert isinstance(ax, Axes) # corner, extent - lims = [0,0,0,0] - for i,k in enumerate(('x0','y0','xL','yL')): - assert(k in d.keys()), "Error: add_grid_overlay expects keys 'x0','y0','xL','yL'" + lims = [0, 0, 0, 0] + for i, k in enumerate(("x0", "y0", "xL", "yL")): + assert k in d.keys(), "Error: add_grid_overlay expects keys 'x0','y0','xL','yL'" lims[i] = d[k] - x0,y0,xL,yL = lims + x0, y0, xL, yL = lims # color - color = d['color'] if 'color' in d.keys() else 'k' + color = d["color"] if "color" in d.keys() else "k" assert is_color_like(color) # alpha - alpha = d['alpha'] if 'alpha' in d.keys() else 1 - assert isinstance(alpha,(Number)) + alpha = d["alpha"] if "alpha" in d.keys() else 1 + assert isinstance(alpha, (Number)) # linewidth - linewidth = d['linewidth'] if 'linewidth' in d.keys() else 1 - assert isinstance(linewidth,(Number)) + linewidth = d["linewidth"] if "linewidth" in d.keys() else 1 + assert isinstance(linewidth, (Number)) # additional parameters - kws = [k for k in d.keys() if k not in ('x0','y0','xL','yL','color', - 'alpha','linewidth')] + kws = [ + k + for k in d.keys() + if k not in ("x0", "y0", "xL", "yL", "color", "alpha", "linewidth") + ] kwargs = dict() for k in kws: kwargs[k] = d[k] # add the grid - yy,xx = np.meshgrid(np.arange(y0,y0+yL),np.arange(x0,x0+xL)) + yy, xx = np.meshgrid(np.arange(y0, y0 + yL), np.arange(x0, x0 + xL)) for xi in range(xL): for yi in range(yL): - x,y = xx[xi,yi],yy[xi,yi] - rect = Rectangle((y-0.5,x-0.5),1,1,lw=linewidth,color=color, - alpha=alpha,fill=False) + x, y = xx[xi, yi], yy[xi, yi] + rect = Rectangle( + (y - 0.5, x - 0.5), + 1, + 1, + lw=linewidth, + color=color, + alpha=alpha, + fill=False, + ) ax.add_patch(rect) return -def add_scalebar(ax,d): +def add_scalebar(ax, d): """ Adds an overlaid scalebar to an image, using the parameters in dict d. 
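
For reference, a sketch of add_grid_overlay as reformatted above; x0, y0, xL, and yL are required, while color, alpha, and linewidth are optional. Values here are illustrative only:

import numpy as np
import matplotlib.pyplot as plt
from py4DSTEM.visualize.overlay import add_grid_overlay

im = np.random.rand(64, 64)  # placeholder image
fig, ax = plt.subplots()
ax.matshow(im, cmap="gray")

# outline the 8x8 block of pixels whose corner is (x0,y0)=(16,16);
# one unfilled Rectangle patch is drawn per pixel
add_grid_overlay(ax, {"x0": 16, "y0": 16, "xL": 8, "yL": 8, "color": "r", "linewidth": 0.5})
plt.show()
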
@@ -616,86 +744,113 @@ def add_scalebar(ax,d): ticks (bool) if False, turns off image border ticks """ # handle inputs - assert isinstance(ax,Axes) + assert isinstance(ax, Axes) # image extent - assert('Nx' in d.keys()) - assert('Ny' in d.keys()) - Nx,Ny = d['Nx'],d['Ny'] + assert "Nx" in d.keys() + assert "Ny" in d.keys() + Nx, Ny = d["Nx"], d["Ny"] # real or diffraction - space = d['space'] if 'space' in d.keys() else 'Q' - assert(space in ('Q','R')) + space = d["space"] if "space" in d.keys() else "Q" + assert space in ("Q", "R") # length,width - length = d['length'] if 'length' in d.keys() else None - width = d['width'] if 'width' in d.keys() else 6 + length = d["length"] if "length" in d.keys() else None + width = d["width"] if "width" in d.keys() else 6 # pixelsize, pixelunits - pixelsize = d['pixelsize'] if 'pixelsize' in d.keys() else 1 - pixelunits = d['pixelunits'] if 'pixelunits' in d.keys() else 'pixels' + pixelsize = d["pixelsize"] if "pixelsize" in d.keys() else 1 + pixelunits = d["pixelunits"] if "pixelunits" in d.keys() else "pixels" # color - color = d['color'] if 'color' in d.keys() else 'w' + color = d["color"] if "color" in d.keys() else "w" assert is_color_like(color) # labels - label = d['label'] if 'label' in d.keys() else True - labelsize = d['labelsize'] if 'labelsize' in d.keys() else 16 - labelcolor = d['labelcolor'] if 'labelcolor' in d.keys() else color - assert isinstance(label,bool) - assert isinstance(labelsize,Number) + label = d["label"] if "label" in d.keys() else True + labelsize = d["labelsize"] if "labelsize" in d.keys() else 16 + labelcolor = d["labelcolor"] if "labelcolor" in d.keys() else color + assert isinstance(label, bool) + assert isinstance(labelsize, Number) assert is_color_like(labelcolor) # alpha - alpha = d['alpha'] if 'alpha' in d.keys() else 1 - assert isinstance(alpha,(Number)) + alpha = d["alpha"] if "alpha" in d.keys() else 1 + assert isinstance(alpha, (Number)) # position - position = d['position'] if 'position' in d.keys() else 'br' - assert position in ('ul','ur','bl','br') + position = d["position"] if "position" in d.keys() else "br" + assert position in ("ul", "ur", "bl", "br") # ticks - ticks = d['ticks'] if 'ticks' in d.keys() else False - assert isinstance(ticks,bool) + ticks = d["ticks"] if "ticks" in d.keys() else False + assert isinstance(ticks, bool) # additional parameters - kws = [k for k in d.keys() if k not in ('Nx','Ny','length', - 'width','pixelsize','pixelunits','color','label', - 'labelsize','labelcolor','alpha','position','ticks')] + kws = [ + k + for k in d.keys() + if k + not in ( + "Nx", + "Ny", + "length", + "width", + "pixelsize", + "pixelunits", + "color", + "label", + "labelsize", + "labelcolor", + "alpha", + "position", + "ticks", + ) + ] kwargs = dict() for k in kws: kwargs[k] = d[k] # Get length if length is None: - length_units,length_pixels,_ = get_nice_spacing(Nx,Ny,pixelsize) + length_units, length_pixels, _ = get_nice_spacing(Nx, Ny, pixelsize) else: - length_units,length_pixels = length,length/pixelsize + length_units, length_pixels = length, length / pixelsize # Get position - if position == 'ul': - x0,y0 = 0,0 - xshiftdir,yshiftdir = 1,1 - elif position == 'ur': - x0,y0 = 0,Ny-1 - xshiftdir,yshiftdir = 1,-1 - elif position == 'bl': - x0,y0 = Nx-1,0 - xshiftdir,yshiftdir = -1,1 + if position == "ul": + x0, y0 = 0, 0 + xshiftdir, yshiftdir = 1, 1 + elif position == "ur": + x0, y0 = 0, Ny - 1 + xshiftdir, yshiftdir = 1, -1 + elif position == "bl": + x0, y0 = Nx - 1, 0 + xshiftdir, yshiftdir = -1, 1 
else: - x0,y0 = Nx-1,Ny-1 - xshiftdir,yshiftdir = -1,-1 - pad = 0.2*length_pixels + x0, y0 = Nx - 1, Ny - 1 + xshiftdir, yshiftdir = -1, -1 + pad = 0.2 * length_pixels xshift = xshiftdir * pad - yshift = yshiftdir * (length_pixels/2.+pad) + yshift = yshiftdir * (length_pixels / 2.0 + pad) x0 = x0 + xshift y0 = y0 + yshift - xi,yi = x0,y0-length_pixels/2. - xf,yf = x0,y0+length_pixels/2. - labelpos_x = x0 + pad*xshiftdir/2. + xi, yi = x0, y0 - length_pixels / 2.0 + xf, yf = x0, y0 + length_pixels / 2.0 + labelpos_x = x0 + pad * xshiftdir / 2.0 labelpos_y = y0 # Add line - ax.plot((yi,yf),(xi,xf),lw=width,color=color,alpha=alpha) + ax.plot((yi, yf), (xi, xf), lw=width, color=color, alpha=alpha) # Add label if label: - labeltext = f'{np.round(length_units,3)}'+' '+pixelunits - if xshiftdir>0: va='top' - else: va='bottom' - ax.text(labelpos_y,labelpos_x,labeltext,size=labelsize, - color=labelcolor,alpha=alpha,ha='center',va=va) + labeltext = f"{np.round(length_units,3)}" + " " + pixelunits + if xshiftdir > 0: + va = "top" + else: + va = "bottom" + ax.text( + labelpos_y, + labelpos_x, + labeltext, + size=labelsize, + color=labelcolor, + alpha=alpha, + ha="center", + va=va, + ) # if not ticks: # ax.set_xticks([]) @@ -703,7 +858,7 @@ def add_scalebar(ax,d): return -def add_cartesian_grid(ax,d): +def add_cartesian_grid(ax, d): """ Adds an overlaid cartesian coordinate grid over an image using the parameters in dictionary d. @@ -724,77 +879,100 @@ def add_cartesian_grid(ax,d): alpha (number) """ # handle inputs - assert isinstance(ax,Axes) + assert isinstance(ax, Axes) # origin - assert('x0' in d.keys()) - assert('y0' in d.keys()) - x0,y0 = d['x0'],d['y0'] + assert "x0" in d.keys() + assert "y0" in d.keys() + x0, y0 = d["x0"], d["y0"] # image extent - assert('Nx' in d.keys()) - assert('Ny' in d.keys()) - Nx,Ny = d['Nx'],d['Ny'] - assert x0labelsize/2: - ax.text(y0,rlabelpositions[i],rticklabels[i],size=labelsize, - color=labelcolor,alpha=alpha,ha='center',va='center') + if xpos > labelsize / 2: + ax.text( + y0, + rlabelpositions[i], + rticklabels[i], + size=labelsize, + color=labelcolor, + alpha=alpha, + ha="center", + va="center", + ) # Add theta gridlines - def add_line(ax,x0,y0,theta,Nx,Ny): - """ adds a line through (x0,y0) at an angle theta which terminates at the image edges - returns the termination points (xi,yi),(xf,xy) + def add_line(ax, x0, y0, theta, Nx, Ny): + """adds a line through (x0,y0) at an angle theta which terminates at the image edges + returns the termination points (xi,yi),(xf,xy) """ - theta = np.mod(np.pi/2-theta,np.pi) - if theta==0: - xs,ys = [0,Nx-1],[y0,y0] - elif theta==np.pi/2: - xs,ys = [x0,x0],[0,Ny-1] + theta = np.mod(np.pi / 2 - theta, np.pi) + if theta == 0: + xs, ys = [0, Nx - 1], [y0, y0] + elif theta == np.pi / 2: + xs, ys = [x0, x0], [0, Ny - 1] else: # Get line params m = np.tan(theta) - b = y0-m*x0 + b = y0 - m * x0 # Get intersections with x=0,x=Nx-1,y=0,y=Ny-1 - x1,y1 = 0,b - x2,y2 = Nx-1,m*(Nx-1)+b - x3,y3 = -b/m,0 - x4,y4 = (Ny-1 -b)/m,Ny-1 + x1, y1 = 0, b + x2, y2 = Nx - 1, m * (Nx - 1) + b + x3, y3 = -b / m, 0 + x4, y4 = (Ny - 1 - b) / m, Ny - 1 # Determine which points are on the image bounding box - xs,ys = [],[] - if 0<=y1=1 and base<2.1: - _spacing=0.5 - elif base>=2.1 and base<4.6: - _spacing=1 - elif base>=4.6 and base<=10: - _spacing=2 + D = np.mean((Nx * pixelsize, Ny * pixelsize)) / 2.0 + exp = int(log(D, 10)) + if np.sign(log(D, 10)) < 0: + exp -= 1 + base = D / (10**exp) + if base >= 1 and base < 2.1: + _spacing = 0.5 + elif base >= 2.1 
and base < 4.6: + _spacing = 1 + elif base >= 4.6 and base <= 10: + _spacing = 2 # if base>=1 and base<1.25: # _spacing=0.4 # elif base>=1.25 and base<1.75: @@ -1053,7 +1283,4 @@ def get_nice_spacing(Nx,Ny,pixelsize): else: raise Exception("how did this happen?? base={}".format(base)) spacing = _spacing * 10**exp - return spacing,spacing/pixelsize,_spacing - - - + return spacing, spacing / pixelsize, _spacing diff --git a/py4DSTEM/visualize/show.py b/py4DSTEM/visualize/show.py index 3b9d99e43..fb99de5ae 100644 --- a/py4DSTEM/visualize/show.py +++ b/py4DSTEM/visualize/show.py @@ -1,38 +1,34 @@ -import numpy as np -import matplotlib.pyplot as plt import warnings -from matplotlib.figure import Figure -from matplotlib.axes import Axes -from matplotlib.colors import is_color_like,ListedColormap -from numpy.ma import MaskedArray -from numbers import Number -from math import log from copy import copy +from math import log +from numbers import Number -from py4DSTEM.data import ( - Calibration, - DiffractionSlice, - RealSlice -) +import matplotlib.pyplot as plt +import numpy as np +from matplotlib.axes import Axes +from matplotlib.colors import is_color_like +from matplotlib.figure import Figure +from py4DSTEM.data import Calibration, DiffractionSlice, RealSlice from py4DSTEM.visualize.overlay import ( - add_rectangles, - add_circles, add_annuli, + add_cartesian_grid, + add_circles, add_ellipses, - add_points, add_grid_overlay, - add_cartesian_grid, + add_points, add_polarelliptical_grid, - add_rtheta_grid,add_scalebar + add_rectangles, + add_rtheta_grid, + add_scalebar, ) def show( ar, - figsize=(8,8), - cmap='gray', - scaling='none', - intensity_range='ordered', + figsize=(8, 8), + cmap="gray", + scaling="none", + intensity_range="ordered", clipvals=None, vmin=None, vmax=None, @@ -40,7 +36,7 @@ def show( max=None, power=None, power_offset=True, - combine_images = False, + combine_images=False, ticks=True, bordercolor=None, borderwidth=5, @@ -53,8 +49,8 @@ def show( hist=False, n_bins=256, mask=None, - mask_color='k', - mask_alpha=0., + mask_color="k", + mask_alpha=0.0, masked_intensity_range=False, rectangle=None, circle=None, @@ -69,7 +65,7 @@ def show( calibration=None, rx=None, ry=None, - space='Q', + space="Q", pixelsize=None, pixelunits=None, x0=None, @@ -78,7 +74,9 @@ def show( e=None, theta=None, title=None, - **kwargs): + show_fft=False, + **kwargs +): """ General visualization function for 2D arrays. @@ -300,32 +298,44 @@ def show( pixels will be used scalebar (None or dict or Bool): if None, and a DiffractionSlice or RealSlice with calibrations is passed, adds a scalebar. If scalebar is not displaying the proper - calibration, check .calibration pixel_size and pixel_units. If None and an array is passed, - does not add a scalebar. If a dict is passed, it is propagated to the add_scalebar function - which will attempt to use it to overlay a scalebar. If True, uses calibraiton or pixelsize/pixelunits + calibration, check .calibration pixel_size and pixel_units. If None and an array is passed, + does not add a scalebar. If a dict is passed, it is propagated to the add_scalebar function + which will attempt to use it to overlay a scalebar. If True, uses calibration or pixelsize/pixelunits for scalebar. If False, no scalebar is added. + show_fft (Bool): if True, plots the 2D FFT of the array **kwargs: any keywords accepted by matplotlib's ax.matshow() Returns: if returnfig==False (default), the figure is plotted and nothing is returned. if returnfig==True, return the figure and the axis.
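
The new show_fft flag and the scalebar=True shorthand (normalized to an empty dict at the top of the function body, below) can be exercised as in this sketch; the array and calibration values are placeholders:

import numpy as np
from py4DSTEM.visualize import show

im = np.random.rand(256, 256)  # placeholder image

# plot the modulus of the 2D FFT instead of the image itself
show(im, show_fft=True, scaling="log")

# scalebar=True now acts as shorthand for scalebar={}; for a plain
# array the pixel calibration must be supplied explicitly
show(im, scalebar=True, pixelsize=0.1, pixelunits="nm", space="R")
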
""" + if scalebar == True: + scalebar = {} + # Alias dep - if min is not None: vmin=min - if max is not None: vmax=max + if min is not None: + vmin = min + if max is not None: + vmax = max if min is not None or max is not None: - warnings.warn("Warning, min/max are deprecated and will not be supported in a future version. Use vmin/vmax instead.") + warnings.warn( + "Warning, min/max are deprecated and will not be supported in a future version. Use vmin/vmax instead." + ) if clipvals is not None: - warnings.warn("Warning, clipvals is deprecated and will not be supported in a future version. Use intensity_range instead.") + warnings.warn( + "Warning, clipvals is deprecated and will not be supported in a future version. Use intensity_range instead." + ) if intensity_range is None: intensity_range = clipvals - # plot a grid if `ar` is a list, or use multichannel functionality to make an RGBa image - ar = ar[0] if (isinstance(ar,list) and len(ar) == 1) else ar - if isinstance(ar,list): + # check if list is of length 1 + ar = ar[0] if (isinstance(ar, list) and len(ar) == 1) else ar + + # plot a grid if `ar` is a list, or use multichannel functionality to make an RGBA image + if isinstance(ar, list): args = locals() - if 'kwargs' in args.keys(): - del args['kwargs'] + if "kwargs" in args.keys(): + del args["kwargs"] rm = [] for k in args.keys(): if args[k] is None: @@ -336,41 +346,41 @@ def show( if combine_images is False: # use show_grid to plot grid of images from py4DSTEM.visualize.show_extention import _show_grid + if returnfig: - return _show_grid( - **args, - **kwargs) + return _show_grid(**args, **kwargs) else: - _show_grid( - **args, - **kwargs) + _show_grid(**args, **kwargs) return else: # generate a multichannel combined RGB image - + # init num_images = len(ar) - hue_angles = np.linspace(0.0,2.0*np.pi,num_images,endpoint=False) + hue_angles = np.linspace(0.0, 2.0 * np.pi, num_images, endpoint=False) cos_total = np.zeros(ar[0].shape) sin_total = np.zeros(ar[0].shape) val_total = np.zeros(ar[0].shape) # loop over images from py4DSTEM.visualize import show + + if show_fft: + ar = np.abs(np.fft.fftshift(np.fft.fft2(ar.copy()))) for a0 in range(num_images): im = show( - ar[a0], - scaling='none', - intensity_range=intensity_range, - clipvals=clipvals, - vmin=vmin, - vmax=vmax, - power=power, - power_offset=power_offset, - return_ar_scaled = True, - show_image=False, - **kwargs, - ) + ar[a0], + scaling="none", + intensity_range=intensity_range, + clipvals=clipvals, + vmin=vmin, + vmax=vmax, + power=power, + power_offset=power_offset, + return_ar_scaled=True, + show_image=False, + **kwargs, + ) cos_total += np.cos(hue_angles[a0]) * im sin_total += np.sin(hue_angles[a0]) * im # val_max = np.maximum(val_max, im) @@ -378,101 +388,118 @@ def show( # Assemble final image sat_change = np.maximum(val_total - 1.0, 0.0) - ar_rgb = np.zeros((ar[0].shape[0],ar[0].shape[1],3)) - ar_rgb[:,:,0] = np.mod(np.arctan2(sin_total,cos_total) / (2*np.pi), 1.0) - ar_rgb[:,:,1] = 1 - sat_change - ar_rgb[:,:,2] = val_total# np.sqrt(cos_total**2 + sin_total**2) - ar_rgb = np.clip(ar_rgb,0.0,1.0) + ar_hsv = np.zeros((ar[0].shape[0], ar[0].shape[1], 3)) + ar_hsv[:, :, 0] = np.mod( + np.arctan2(sin_total, cos_total) / (2 * np.pi), 1.0 + ) + ar_hsv[:, :, 1] = 1 - sat_change + ar_hsv[:, :, 2] = val_total # np.sqrt(cos_total**2 + sin_total**2) + ar_hsv = np.clip(ar_hsv, 0.0, 1.0) # Convert to RGB from matplotlib.colors import hsv_to_rgb - ar_rgb = hsv_to_rgb(ar_rgb) + + ar_rgb = hsv_to_rgb(ar_hsv) # Output image for 
plotting ar = ar_rgb - if scalebar == True: - scalebar = {} - # support for native data types - elif not isinstance(ar,np.ndarray): + elif not isinstance(ar, np.ndarray): # support for calibration/auto-scalebars - if hasattr(ar, 'calibration') and (ar.calibration is not None) \ - and (scalebar != False): + if ( + hasattr(ar, "calibration") + and (ar.calibration is not None) + and (scalebar != False) + ): cal = ar.calibration er = ".calibration attribute must be a Calibration instance" assert isinstance(cal, Calibration), er if isinstance(ar, DiffractionSlice): scalebar = { - 'Nx':ar.data.shape[0], - 'Ny':ar.data.shape, - 'pixelsize':cal.get_Q_pixel_size(), - 'pixelunits':cal.get_Q_pixel_units(), - 'space':'Q', - 'position':'br' + "Nx": ar.data.shape[0], + "Ny": ar.data.shape[1], + "pixelsize": cal.get_Q_pixel_size(), + "pixelunits": cal.get_Q_pixel_units(), + "space": "Q", + "position": "br", } pixelsize = cal.get_Q_pixel_size() pixelunits = cal.get_Q_pixel_units() elif isinstance(ar, RealSlice): scalebar = { - 'Nx':ar.data.shape[0], - 'Ny':ar.data.shape, - 'pixelsize':cal.get_R_pixel_size(), - 'pixelunits':cal.get_R_pixel_units(), - 'space':'Q', - 'position':'br' + "Nx": ar.data.shape[0], + "Ny": ar.data.shape[1], + "pixelsize": cal.get_R_pixel_size(), + "pixelunits": cal.get_R_pixel_units(), + "space": "Q", + "position": "br", } pixelsize = cal.get_R_pixel_size() pixelunits = cal.get_R_pixel_units() # get the data - if hasattr(ar, 'data'): + if hasattr(ar, "data"): if ar.data.ndim == 2: ar = ar.data else: raise Exception('input argument "ar" has unsupported type ' + str(type(ar))) - # Otherwise, plot one image + if show_fft: + if combine_images is False: + ar = np.abs(np.fft.fftshift(np.fft.fft2(ar.copy()))) # get image from a masked array if mask is not None: assert mask.shape == ar.shape - assert is_color_like(mask_color) or mask_color=='empty' - if isinstance(ar,np.ma.masked_array): - ar = np.ma.array(data=ar.data,mask=np.logical_or(ar.mask,~mask)) + assert is_color_like(mask_color) or mask_color == "empty" + if isinstance(ar, np.ma.masked_array): + ar = np.ma.array(data=ar.data, mask=np.logical_or(ar.mask, ~mask)) else: - ar = np.ma.array(data=ar,mask=np.logical_not(mask)) - elif isinstance(ar,np.ma.masked_array): + ar = np.ma.array(data=ar, mask=np.logical_not(mask)) + elif isinstance(ar, np.ma.masked_array): pass else: - mask = np.zeros_like(ar,dtype=bool) - ar = np.ma.array(data=ar,mask=mask) + mask = np.zeros_like(ar, dtype=bool) + ar = np.ma.array(data=ar, mask=mask) # New intensity scaling logic - assert scaling in ('none','full','log','power','hist') - assert intensity_range in ('ordered','absolute','manual','minmax','std','centered') + assert scaling in ("none", "full", "log", "power", "hist") + assert intensity_range in ( + "ordered", + "absolute", + "manual", + "minmax", + "std", + "centered", + ) if power is not None: - scaling = 'power' - if scaling == 'none': + scaling = "power" + if scaling == "none": _ar = ar.copy() - _mask = np.ones_like(_ar.data,dtype=bool) - elif scaling == 'full': - _ar = np.reshape(ar.ravel().argsort().argsort(),ar.shape) / (ar.size-1) - _mask = np.ones_like(_ar.data,dtype=bool) - elif scaling == 'log': - _mask = ar.data>0.0 - _ar = np.zeros_like(ar.data,dtype=float) + _mask = np.ones_like(_ar.data, dtype=bool) + elif scaling == "full": + _ar = np.reshape(ar.ravel().argsort().argsort(), ar.shape) / (ar.size - 1) + _mask = np.ones_like(_ar.data, dtype=bool) + elif scaling == "log": + _mask = ar.data > 0.0 + _ar = np.zeros_like(ar.data, dtype=float) 
_ar[_mask] = np.log(ar.data[_mask]) _ar[~_mask] = np.nan - if clipvals == 'absolute': + if np.all(np.isnan(_ar)): + _ar[:, :] = 0 + if intensity_range == "absolute": if vmin != None: - if vmin > 0.0: vmin = np.log(vmin) - else: vmin = np.min(_ar[_mask]) - if vmax != None: vmax = np.log(vmax) - elif scaling == 'power': + if vmin > 0.0: + vmin = np.log(vmin) + else: + vmin = np.min(_ar[_mask]) + if vmax != None: + vmax = np.log(vmax) + elif scaling == "power": if power_offset is False: - _mask = ar.data>0.0 - _ar = np.zeros_like(ar.data,dtype=float) - _ar[_mask] = np.power(ar.data[_mask],power) + _mask = ar.data > 0.0 + _ar = np.zeros_like(ar.data, dtype=float) + _ar[_mask] = np.power(ar.data[_mask], power) _ar[~_mask] = np.nan else: ar_min = np.min(ar) @@ -480,99 +507,113 @@ def show( _ar = np.power(ar.copy() - np.min(ar), power) else: _ar = np.power(ar.copy(), power) - _mask = np.ones_like(_ar.data,dtype=bool) - if intensity_range == 'absolute': - if vmin != None: vmin = np.power(vmin,power) - if vmax != None: vmax = np.power(vmax,power) + _mask = np.ones_like(_ar.data, dtype=bool) + if intensity_range == "absolute": + if vmin != None: + vmin = np.power(vmin, power) + if vmax != None: + vmax = np.power(vmax, power) else: raise Exception - # Create the masked array applying the user mask (this is done before the + # Create the masked array applying the user mask (this is done before the # vmin and vmax are determined so the mask affects those) - _ar = np.ma.array(data=_ar.data,mask=np.logical_or(~_mask, ar.mask)) + _ar = np.ma.array(data=_ar.data, mask=np.logical_or(~_mask, ar.mask)) - #set scaling for boolean arrays - if _ar.dtype == 'bool': - intensity_range = 'absolute' + # set scaling for boolean arrays + if _ar.dtype == "bool": + intensity_range = "absolute" vmin = 0 vmax = 1 # Set the clipvalues - if intensity_range == 'manual': - warnings.warn("Warning - intensity_range='manual' is deprecated, use 'absolute' instead") - intensity_range = 'absolute' - if intensity_range == 'ordered': - if vmin is None: vmin = 0.02 - if vmax is None: vmax = 0.98 + if intensity_range == "manual": + warnings.warn( + "Warning - intensity_range='manual' is deprecated, use 'absolute' instead" + ) + intensity_range = "absolute" + if intensity_range == "ordered": + if vmin is None: + vmin = 0.02 + if vmax is None: + vmax = 0.98 if masked_intensity_range: - vals = np.sort(_ar[np.logical_and(~np.isnan(_ar), _ar.mask==False)]) + vals = np.sort( + _ar[np.logical_and(~np.isnan(_ar), np.logical_not(_ar.mask))] + ) else: vals = np.sort(_ar.data[~np.isnan(_ar)]) - ind_vmin = np.round((vals.shape[0]-1)*vmin).astype('int') - ind_vmax = np.round((vals.shape[0]-1)*vmax).astype('int') - ind_vmin = np.max([0,ind_vmin]) - ind_vmax = np.min([len(vals)-1,ind_vmax]) + ind_vmin = np.round((vals.shape[0] - 1) * vmin).astype("int") + ind_vmax = np.round((vals.shape[0] - 1) * vmax).astype("int") + ind_vmin = np.max([0, ind_vmin]) + ind_vmax = np.min([len(vals) - 1, ind_vmax]) vmin = vals[ind_vmin] vmax = vals[ind_vmax] # check if vmin and vmax are the same, defaulting to minmax scaling if needed if vmax == vmin: vmin = vals[0] vmax = vals[-1] - elif intensity_range == 'minmax': - vmin,vmax = np.nanmin(_ar),np.nanmax(_ar) - elif intensity_range == 'absolute': + elif intensity_range == "minmax": + vmin, vmax = np.nanmin(_ar), np.nanmax(_ar) + elif intensity_range == "absolute": if vmin is None: vmin = np.min(_ar) - print("Warning, vmin not provided, setting minimum intensity = " + str(vmin)) + print( + "Warning, vmin not provided, 
setting minimum intensity = " + str(vmin) + ) if vmax is None: vmax = np.max(_ar) - print("Warning, vmax not provided, setting maximum intensity = " + str(vmax)) + print( + "Warning, vmax not provided, setting maximum intensity = " + str(vmax) + ) # assert vmin is not None and vmax is not None # vmin,vmax = vmin,vmax - elif intensity_range == 'std': + elif intensity_range == "std": assert vmin is not None and vmax is not None - m,s = np.nanmedian(_ar),np.nanstd(_ar) - vmin = m + vmin*s - vmax = m + vmax*s - elif intensity_range == 'centered': + m, s = np.nanmedian(_ar), np.nanstd(_ar) + vmin = m + vmin * s + vmax = m + vmax * s + elif intensity_range == "centered": c = np.nanmean(_ar) if vmin is None else vmin - m = np.nanmax(np.ma.abs(c-_ar)) if vmax is None else vmax - vmin = c-m - vmax = c+m + m = np.nanmax(np.ma.abs(c - _ar)) if vmax is None else vmax + vmin = c - m + vmax = c + m else: raise Exception if show_image: # Create or attach to the appropriate Figure and Axis if figax is None: - fig,ax = plt.subplots(1,1,figsize=figsize) + fig, ax = plt.subplots(1, 1, figsize=figsize) else: - fig,ax = figax - assert(isinstance(fig,Figure)) - assert(isinstance(ax,Axes)) - + fig, ax = figax + assert isinstance(fig, Figure) + assert isinstance(ax, Axes) # Create colormap with mask_color for bad values cm = copy(plt.get_cmap(cmap)) - if mask_color=='empty': + if mask_color == "empty": cm.set_bad(alpha=0) else: cm.set_bad(color=mask_color) # Plot the image if not hist: - cax = ax.matshow(_ar,vmin=vmin,vmax=vmax,cmap=cm,**kwargs) - if np.any(_ar.mask): - mask_display = np.ma.array(data=_ar.data,mask=~_ar.mask) - ax.matshow(mask_display,cmap=cmap,alpha=mask_alpha,vmin=vmin,vmax=vmax) + cax = ax.matshow(_ar, vmin=vmin, vmax=vmax, cmap=cm, **kwargs) + if np.any(_ar.mask): + mask_display = np.ma.array(data=_ar.data, mask=~_ar.mask) + ax.matshow( + mask_display, cmap=cmap, alpha=mask_alpha, vmin=vmin, vmax=vmax + ) # ...or, plot its histogram else: - hist,bin_edges = np.histogram(_ar,bins=np.linspace(np.min(_ar), - np.max(_ar),num=n_bins)) - w = bin_edges[1]-bin_edges[0] - x = bin_edges[:-1]+w/2. 
- ax.bar(x,hist,width=w) - ax.vlines((vmin,vmax),0,ax.get_ylim()[1],color='k',ls='--') + hist, bin_edges = np.histogram( + _ar, bins=np.linspace(np.min(_ar), np.max(_ar), num=n_bins) + ) + w = bin_edges[1] - bin_edges[0] + x = bin_edges[:-1] + w / 2.0 + ax.bar(x, hist, width=w) + ax.vlines((vmin, vmax), 0, ax.get_ylim()[1], color="k", ls="--") # add a title if title is not None: @@ -580,7 +621,7 @@ def show( # Add a border if bordercolor is not None: - for s in ['bottom','top','left','right']: + for s in ["bottom", "top", "left", "right"]: ax.spines[s].set_color(bordercolor) ax.spines[s].set_linewidth(borderwidth) ax.set_xticks([]) @@ -588,132 +629,156 @@ def show( # Add shape/point overlays if rectangle is not None: - add_rectangles(ax,rectangle) + add_rectangles(ax, rectangle) if circle is not None: - add_circles(ax,circle) + add_circles(ax, circle) if annulus is not None: - add_annuli(ax,annulus) + add_annuli(ax, annulus) if ellipse is not None: - add_ellipses(ax,ellipse) + add_ellipses(ax, ellipse) if points is not None: - add_points(ax,points) + add_points(ax, points) if grid_overlay is not None: - add_grid_overlay(ax,grid_overlay) - + add_grid_overlay(ax, grid_overlay) # Parse arguments for scale/coordinate overlays if calibration is not None: - assert isinstance(calibration,Calibration) - assert space in ('Q','R') + assert isinstance(calibration, Calibration) + assert space in ("Q", "R") # pixel size/units if pixelsize is None and calibration is None: pixelsize = 1 if pixelsize is not None: pass else: - if space == 'Q': + if space == "Q": pixelsize = calibration.get_Q_pixel_size() else: pixelsize = calibration.get_R_pixel_size() if pixelunits is None and calibration is None: - pixelunits = 'pixels' + pixelunits = "pixels" if pixelunits is not None: pass else: - if space == 'Q': + if space == "Q": pixelunits = calibration.get_Q_pixel_units() else: pixelunits = calibration.get_R_pixel_units() # origin - if space == 'Q': + if space == "Q": if x0 is not None: pass elif calibration is not None: try: - x0 = calibration.get_origin(rx,ry)[0] + x0 = calibration.get_origin(rx, ry)[0] except AttributeError: - raise Exception('The Calibration instance passed does not contain a value for qx0') + raise Exception( + "The Calibration instance passed does not contain a value for qx0" + ) else: x0 = 0 if y0 is not None: pass elif calibration is not None: try: - y0 = calibration.get_origin(rx,ry)[1] + y0 = calibration.get_origin(rx, ry)[1] except AttributeError: - raise Exception('The Calibration instance passed does not contain a value for qy0') + raise Exception( + "The Calibration instance passed does not contain a value for qy0" + ) else: y0 = 0 else: x0 = x0 if x0 is not None else 0 y0 = y0 if y0 is not None else 0 # ellipticity - if space == 'Q': + if space == "Q": if a is not None: pass elif calibration is not None: try: - a = calibration.get_a(rx,ry) + a = calibration.get_a(rx, ry) except AttributeError: - raise Exception('The Calibration instance passed does not contain a value for a') + raise Exception( + "The Calibration instance passed does not contain a value for a" + ) else: a = 1 if theta is not None: pass elif calibration is not None: try: - theta = calibration.get_theta(rx,ry) + theta = calibration.get_theta(rx, ry) except AttributeError: - raise Exception('The Calibration instance passed does not contain a value for theta') + raise Exception( + "The Calibration instance passed does not contain a value for theta" + ) else: theta = 0 else: a = a if a is not None else 1 theta = theta if 
theta is not None else 0 - # Add a scalebar if scalebar is not None and scalebar is not False: # Add the grid - scalebar['Nx'],scalebar['Ny']=ar.shape - scalebar['pixelsize'] = pixelsize - scalebar['pixelunits'] = pixelunits - scalebar['space'] = space - add_scalebar(ax,scalebar) - + scalebar["Nx"] = ar.shape[0] + scalebar["Ny"] = ar.shape[1] + scalebar["pixelsize"] = pixelsize + scalebar["pixelunits"] = pixelunits + scalebar["space"] = space + # determine good default scale bar fontsize + if figax is not None: + bbox = figax[1].get_window_extent() + dpi = figax[0].dpi + size = (bbox.width / dpi, bbox.height / dpi) + scalebar["labelsize"] = np.min(np.array(size)) * 3.0 + if "labelsize" not in scalebar.keys(): + scalebar["labelsize"] = np.min(np.array(figsize)) * 2.0 + add_scalebar(ax, scalebar) # Add cartesian grid if cartesian_grid is not None: - Nx,Ny = ar.shape - assert isinstance(x0,Number), "Error: x0 must be a number. If a Coordinate system was passed, try passing a position (rx,ry)." - assert isinstance(y0,Number), "Error: y0 must be a number. If a Coordinate system was passed, try passing a position (rx,ry)." - cartesian_grid['x0'],cartesian_grid['y0']=x0,y0 - cartesian_grid['Nx'],cartesian_grid['Ny']=Nx,Ny - cartesian_grid['pixelsize'] = pixelsize - cartesian_grid['pixelunits'] = pixelunits - cartesian_grid['space'] = space - add_cartesian_grid(ax,cartesian_grid) - + Nx, Ny = ar.shape + assert isinstance( + x0, Number + ), "Error: x0 must be a number. If a Coordinate system was passed, try passing a position (rx,ry)." + assert isinstance( + y0, Number + ), "Error: y0 must be a number. If a Coordinate system was passed, try passing a position (rx,ry)." + cartesian_grid["x0"], cartesian_grid["y0"] = x0, y0 + cartesian_grid["Nx"], cartesian_grid["Ny"] = Nx, Ny + cartesian_grid["pixelsize"] = pixelsize + cartesian_grid["pixelunits"] = pixelunits + cartesian_grid["space"] = space + add_cartesian_grid(ax, cartesian_grid) # Add polarelliptical grid if polarelliptical_grid is not None: - Nx,Ny = ar.shape - assert isinstance(x0,Number), "Error: x0 must be a number. If a Coordinate system was passed, try passing a position (rx,ry)." - assert isinstance(y0,Number), "Error: y0 must be a number. If a Coordinate system was passed, try passing a position (rx,ry)." - assert isinstance(e,Number), "Error: e must be a number. If a Coordinate system was passed, try passing a position (rx,ry)." - assert isinstance(theta,Number), "Error: theta must be a number. If a Coordinate system was passed, try passing a position (rx,ry)." - polarelliptical_grid['x0'],polarelliptical_grid['y0']=x0,y0 - polarelliptical_grid['e'],polarelliptical_grid['theta']=e,theta - polarelliptical_grid['Nx'],polarelliptical_grid['Ny']=Nx,Ny - polarelliptical_grid['pixelsize'] = pixelsize - polarelliptical_grid['pixelunits'] = pixelunits - polarelliptical_grid['space'] = space - add_polarelliptical_grid(ax,polarelliptical_grid) - + Nx, Ny = ar.shape + assert isinstance( + x0, Number + ), "Error: x0 must be a number. If a Coordinate system was passed, try passing a position (rx,ry)." + assert isinstance( + y0, Number + ), "Error: y0 must be a number. If a Coordinate system was passed, try passing a position (rx,ry)." + assert isinstance( + e, Number + ), "Error: e must be a number. If a Coordinate system was passed, try passing a position (rx,ry)." + assert isinstance( + theta, Number + ), "Error: theta must be a number. If a Coordinate system was passed, try passing a position (rx,ry)." 
+ polarelliptical_grid["x0"], polarelliptical_grid["y0"] = x0, y0 + polarelliptical_grid["e"], polarelliptical_grid["theta"] = e, theta + polarelliptical_grid["Nx"], polarelliptical_grid["Ny"] = Nx, Ny + polarelliptical_grid["pixelsize"] = pixelsize + polarelliptical_grid["pixelunits"] = pixelunits + polarelliptical_grid["space"] = space + add_polarelliptical_grid(ax, polarelliptical_grid) # Add r-theta grid if rtheta_grid is not None: - add_rtheta_grid(ax,rtheta_grid) + add_rtheta_grid(ax, rtheta_grid) # tick marks if ticks is False: @@ -722,28 +787,38 @@ def show( # Show or return returnval = [] - if returnfig: returnval.append((fig,ax)) + if returnfig: + returnval.append((fig, ax)) if return_ar_scaled: - ar_scaled = np.clip((ar - vmin)/(vmax - vmin),0.0,1.0) - returnval.append(ar_scaled) + ar_scaled = np.clip((ar - vmin) / (vmax - vmin), 0.0, 1.0) + returnval.append(ar_scaled) if return_intensity_range: - if scaling == 'log': - vmin,vmax = np.power(np.e,vmin),np.power(np.e,vmax) - elif scaling == 'power': - vmin,vmax = np.power(vmin,1/power),np.power(vmax,1/power) - returnval.append((vmin,vmax)) - if returncax: returnval.append(cax) - if len(returnval)==0: + if scaling == "log": + vmin, vmax = np.power(np.e, vmin), np.power(np.e, vmax) + elif scaling == "power": + vmin, vmax = np.power(vmin, 1 / power), np.power(vmax, 1 / power) + returnval.append((vmin, vmax)) + if returncax: + returnval.append(cax) + if len(returnval) == 0: if figax is None: plt.show() return - elif(len(returnval))==1: + elif (len(returnval)) == 1: return returnval[0] else: return tuple(returnval) -def show_hist(arr, bins=200, vlines=None, vlinecolor='k', vlinestyle='--', - returnhist=False, returnfig=False): + +def show_hist( + arr, + bins=200, + vlines=None, + vlinecolor="k", + vlinestyle="--", + returnhist=False, + returnfig=False, +): """ Visualization function to show histogram from any ndarray (arr). 
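
For reference, the scaling and intensity_range modes implemented in show() above combine as in this sketch (synthetic data, illustrative clip values):

import numpy as np
from py4DSTEM.visualize import show

dp = np.abs(np.random.randn(128, 128)) ** 2  # stand-in diffraction pattern

# 'ordered' (the default): vmin/vmax are fractional ranks, here 2%-98%
show(dp, intensity_range="ordered", vmin=0.02, vmax=0.98)

# 'absolute': vmin/vmax are raw intensity values
show(dp, intensity_range="absolute", vmin=0.0, vmax=5.0)

# 'std': clip at median + vmin*std and median + vmax*std
show(dp, intensity_range="std", vmin=-1, vmax=3)

# 'log' scaling masks nonpositive pixels before taking np.log
show(dp, scaling="log")
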
@@ -766,39 +841,62 @@ def show_hist(arr, bins=200, vlines=None, vlinecolor='k', vlinestyle='--', """ counts, bin_edges = np.histogram(arr, bins=bins, range=(np.min(arr), np.max(arr))) bin_width = bin_edges[1] - bin_edges[0] - bin_centers = bin_edges[:-1] + bin_width/2 + bin_centers = bin_edges[:-1] + bin_width / 2 - fig, ax = plt.subplots(1,1) - ax.bar(bin_centers, counts, width = bin_width, align = 'center') - plt.ylabel('Counts') - plt.xlabel('Intensity') + fig, ax = plt.subplots(1, 1) + ax.bar(bin_centers, counts, width=bin_width, align="center") + plt.ylabel("Counts") + plt.xlabel("Intensity") if vlines is not None: - ax.vlines(vlines,0,np.max(counts),color=vlinecolor,ls=vlinestyle) + ax.vlines(vlines, 0, np.max(counts), color=vlinecolor, ls=vlinestyle) if not returnhist and not returnfig: plt.show() return elif returnhist and not returnfig: - return counts,bin_edges + return counts, bin_edges elif not returnhist and returnfig: return fig, ax else: - return (counts,bin_edges),(fig,ax) + return (counts, bin_edges), (fig, ax) + # Show functions with overlaid scalebars and/or coordinate system gridlines -def show_Q(ar,scalebar=True,grid=False,polargrid=False, - Q_pixel_size=None,Q_pixel_units=None, - calibration=None,rx=None,ry=None, - qx0=None,qy0=None, - e=None,theta=None, - scalebarloc=0,scalebarsize=None,scalebarwidth=None, - scalebartext=None,scalebartextloc='above',scalebartextsize=12, - gridspacing=None,gridcolor='w', - majorgridlines=True,majorgridlw=1,majorgridls=':', - minorgridlines=True,minorgridlw=0.5,minorgridls=':', - gridlabels=False,gridlabelsize=12,gridlabelcolor='k', - alpha=0.35, - **kwargs): + +def show_Q( + ar, + scalebar=True, + grid=False, + polargrid=False, + Q_pixel_size=None, + Q_pixel_units=None, + calibration=None, + rx=None, + ry=None, + qx0=None, + qy0=None, + e=None, + theta=None, + scalebarloc=0, + scalebarsize=None, + scalebarwidth=None, + scalebartext=None, + scalebartextloc="above", + scalebartextsize=12, + gridspacing=None, + gridcolor="w", + majorgridlines=True, + majorgridlw=1, + majorgridls=":", + minorgridlines=True, + minorgridlw=0.5, + minorgridls=":", + gridlabels=False, + gridlabelsize=12, + gridlabelcolor="k", + alpha=0.35, + **kwargs +): """ Shows a diffraction space image with options for several overlays to define the scale, including a scalebar, a cartesian grid, or a polar / polar-elliptical grid. @@ -815,38 +913,58 @@ def show_Q(ar,scalebar=True,grid=False,polargrid=False, may be passed to this function as kwargs. 
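
A short usage sketch for show_Q as refactored here. Without a Calibration instance, Q_pixel_size and Q_pixel_units must be passed absolutely; all values below are invented:

import numpy as np
from py4DSTEM.visualize.show import show_Q

dp = np.random.rand(256, 256)  # placeholder diffraction pattern

# scalebar only, with absolute calibration values
show_Q(dp, Q_pixel_size=0.01, Q_pixel_units="A^-1")

# cartesian grid with labels, about an explicitly supplied origin
show_Q(dp, grid=True, qx0=128, qy0=128, Q_pixel_size=0.01, Q_pixel_units="A^-1", gridlabels=True)
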
""" # Check inputs - assert(isinstance(ar,np.ndarray) and len(ar.shape)==2) + assert isinstance(ar, np.ndarray) and len(ar.shape) == 2 if calibration is not None: - assert isinstance(calibration,Calibration) + assert isinstance(calibration, Calibration) try: - Q_pixel_size = Q_pixel_size if Q_pixel_size is not None else \ - calibration.get_Q_pixel_size() + Q_pixel_size = ( + Q_pixel_size if Q_pixel_size is not None else calibration.get_Q_pixel_size() + ) except AttributeError: - raise Exception("Q_pixel_size must be specified, either in calibration or absolutely") + raise Exception( + "Q_pixel_size must be specified, either in calibration or absolutely" + ) try: - Q_pixel_units = Q_pixel_units if Q_pixel_units is not None else \ - calibration.get_Q_pixel_units() + Q_pixel_units = ( + Q_pixel_units + if Q_pixel_units is not None + else calibration.get_Q_pixel_units() + ) except AttributeError: - raise Exception("Q_pixel_size must be specified, either in calibration or absolutely") + raise Exception( + "Q_pixel_size must be specified, either in calibration or absolutely" + ) if grid or polargrid: try: - qx0 = qx0 if qx0 is not None else calibration.get_qx0(rx,ry) + qx0 = qx0 if qx0 is not None else calibration.get_qx0(rx, ry) except AttributeError: - raise Exception("qx0 must be specified, either in calibration or absolutely") + raise Exception( + "qx0 must be specified, either in calibration or absolutely" + ) try: - qy0 = qy0 if qy0 is not None else calibration.get_qy0(rx,ry) + qy0 = qy0 if qy0 is not None else calibration.get_qy0(rx, ry) except AttributeError: - raise Exception("qy0 must be specified, either in calibration or absolutely") - assert isinstance(qx0,Number), "Error: qx0 must be a number. If a Coordinate system was passed, try passing a position (rx,ry)." - assert isinstance(qy0,Number), "Error: qy0 must be a number. If a Coordinate system was passed, try passing a position (rx,ry)." + raise Exception( + "qy0 must be specified, either in calibration or absolutely" + ) + assert isinstance( + qx0, Number + ), "Error: qx0 must be a number. If a Coordinate system was passed, try passing a position (rx,ry)." + assert isinstance( + qy0, Number + ), "Error: qy0 must be a number. If a Coordinate system was passed, try passing a position (rx,ry)." if polargrid: - e = e if e is not None else calibration.get_e(rx,ry) - theta = theta if theta is not None else calibration.get_theta(rx,ry) - assert isinstance(e,Number), "Error: e must be a number. If a Coordinate system was passed, try passing a position (rx,ry)." - assert isinstance(theta,Number), "Error: theta must be a number. If a Coordinate system was passed, try passing a position (rx,ry)." + e = e if e is not None else calibration.get_e(rx, ry) + theta = theta if theta is not None else calibration.get_theta(rx, ry) + assert isinstance( + e, Number + ), "Error: e must be a number. If a Coordinate system was passed, try passing a position (rx,ry)." + assert isinstance( + theta, Number + ), "Error: theta must be a number. If a Coordinate system was passed, try passing a position (rx,ry)." 
# Make the plot - fig,ax = show(ar,returnfig=True,**kwargs) + fig, ax = show(ar, returnfig=True, **kwargs) # Add a scalebar if scalebar: @@ -855,75 +973,82 @@ def show_Q(ar,scalebar=True,grid=False,polargrid=False, # Add a cartesian grid if grid: # parse arguments - assert isinstance(majorgridlines,bool) + assert isinstance(majorgridlines, bool) majorgridlw = majorgridlw if majorgridlines else 0 - assert isinstance(majorgridlw,Number) - assert isinstance(majorgridls,str) - assert isinstance(minorgridlines,bool) + assert isinstance(majorgridlw, Number) + assert isinstance(majorgridls, str) + assert isinstance(minorgridlines, bool) minorgridlw = minorgridlw if minorgridlines else 0 - assert isinstance(minorgridlw,Number) - assert isinstance(minorgridls,str) + assert isinstance(minorgridlw, Number) + assert isinstance(minorgridls, str) assert is_color_like(gridcolor) - assert isinstance(gridlabels,bool) - assert isinstance(gridlabelsize,Number) + assert isinstance(gridlabels, bool) + assert isinstance(gridlabelsize, Number) assert is_color_like(gridlabelcolor) if gridspacing is not None: - assert isinstance(gridspacing,Number) + assert isinstance(gridspacing, Number) - Q_Nx,Q_Ny = ar.shape - assert qx0=1 and base<1.25: - _gridspacing=0.4 - elif base>=1.25 and base<1.75: - _gridspacing=0.5 - elif base>=1.75 and base<2.5: - _gridspacing=0.75 - elif base>=2.5 and base<3.25: - _gridspacing=1 - elif base>=3.25 and base<4.75: - _gridspacing=1.5 - elif base>=4.75 and base<6: - _gridspacing=2 - elif base>=6 and base<8: - _gridspacing=2.5 - elif base>=8 and base<10: - _gridspacing=3 + D = np.mean((Q_Nx * Q_pixel_size, Q_Ny * Q_pixel_size)) / 2.0 + exp = int(log(D, 10)) + if np.sign(log(D, 10)) < 0: + exp -= 1 + base = D / (10**exp) + if base >= 1 and base < 1.25: + _gridspacing = 0.4 + elif base >= 1.25 and base < 1.75: + _gridspacing = 0.5 + elif base >= 1.75 and base < 2.5: + _gridspacing = 0.75 + elif base >= 2.5 and base < 3.25: + _gridspacing = 1 + elif base >= 3.25 and base < 4.75: + _gridspacing = 1.5 + elif base >= 4.75 and base < 6: + _gridspacing = 2 + elif base >= 6 and base < 8: + _gridspacing = 2.5 + elif base >= 8 and base < 10: + _gridspacing = 3 else: raise Exception("how did this happen?? 
base={}".format(base)) gridspacing = _gridspacing * 10**exp # Get the positions and label for the major gridlines - xmin = (-qx0)*Q_pixel_size - xmax = (Q_Nx-1-qx0)*Q_pixel_size - ymin = (-qy0)*Q_pixel_size - ymax = (Q_Ny-1-qy0)*Q_pixel_size - xticksmajor = np.concatenate((-1*np.arange(0,np.abs(xmin),gridspacing)[1:][::-1], - np.arange(0,xmax,gridspacing))) - yticksmajor = np.concatenate((-1*np.arange(0,np.abs(ymin),gridspacing)[1:][::-1], - np.arange(0,ymax,gridspacing))) + xmin = (-qx0) * Q_pixel_size + xmax = (Q_Nx - 1 - qx0) * Q_pixel_size + ymin = (-qy0) * Q_pixel_size + ymax = (Q_Ny - 1 - qy0) * Q_pixel_size + xticksmajor = np.concatenate( + ( + -1 * np.arange(0, np.abs(xmin), gridspacing)[1:][::-1], + np.arange(0, xmax, gridspacing), + ) + ) + yticksmajor = np.concatenate( + ( + -1 * np.arange(0, np.abs(ymin), gridspacing)[1:][::-1], + np.arange(0, ymax, gridspacing), + ) + ) xticklabels = xticksmajor.copy() yticklabels = yticksmajor.copy() - xticksmajor = (xticksmajor-xmin)/Q_pixel_size - yticksmajor = (yticksmajor-ymin)/Q_pixel_size + xticksmajor = (xticksmajor - xmin) / Q_pixel_size + yticksmajor = (yticksmajor - ymin) / Q_pixel_size # Labels - exp_spacing = int(np.round(log(gridspacing,10),6)) - if np.sign(log(gridspacing,10))<0: - exp_spacing-=1 - base_spacing = gridspacing/(10**exp_spacing) - xticklabels = xticklabels/(10**exp_spacing) - yticklabels = yticklabels/(10**exp_spacing) + exp_spacing = int(np.round(log(gridspacing, 10), 6)) + if np.sign(log(gridspacing, 10)) < 0: + exp_spacing -= 1 + xticklabels = xticklabels / (10**exp_spacing) + yticklabels = yticklabels / (10**exp_spacing) if exp_spacing == 1: xticklabels *= 10 yticklabels *= 10 - if _gridspacing in (0.4,0.75,1.5,2.5) and exp_spacing!=1: + if _gridspacing in (0.4, 0.75, 1.5, 2.5) and exp_spacing != 1: xticklabels = ["{:.1f}".format(n) for n in xticklabels] yticklabels = ["{:.1f}".format(n) for n in yticklabels] else: @@ -933,51 +1058,64 @@ def show_Q(ar,scalebar=True,grid=False,polargrid=False, # Add the grid ax.set_xticks(yticksmajor) ax.set_yticks(xticksmajor) - ax.xaxis.set_ticks_position('bottom') + ax.xaxis.set_ticks_position("bottom") if gridlabels: - ax.set_xticklabels(yticklabels,size=gridlabelsize,color=gridlabelcolor) - ax.set_yticklabels(xticklabels,size=gridlabelsize,color=gridlabelcolor) - if exp_spacing in (0,1): - ax.set_xlabel(r"$q_y$ ("+Q_pixel_units+")") - ax.set_ylabel(r"$q_x$ ("+Q_pixel_units+")") + ax.set_xticklabels(yticklabels, size=gridlabelsize, color=gridlabelcolor) + ax.set_yticklabels(xticklabels, size=gridlabelsize, color=gridlabelcolor) + if exp_spacing in (0, 1): + ax.set_xlabel(r"$q_y$ (" + Q_pixel_units + ")") + ax.set_ylabel(r"$q_x$ (" + Q_pixel_units + ")") else: - ax.set_xlabel(r"$q_y$ ("+Q_pixel_units+" e"+str(exp_spacing)+")") - ax.set_ylabel(r"$q_x$ ("+Q_pixel_units+" e"+str(exp_spacing)+")") + ax.set_xlabel( + r"$q_y$ (" + Q_pixel_units + " e" + str(exp_spacing) + ")" + ) + ax.set_ylabel( + r"$q_x$ (" + Q_pixel_units + " e" + str(exp_spacing) + ")" + ) else: ax.set_xticklabels([]) ax.set_yticklabels([]) - ax.grid(linestyle=majorgridls,linewidth=majorgridlw,color=gridcolor,alpha=alpha) - - + ax.grid( + linestyle=majorgridls, linewidth=majorgridlw, color=gridcolor, alpha=alpha + ) # Add the grid if majorgridlines: - add_cartesian_grid(ax,d={ - 'x0':qx0,'y0':qy0, - 'spacing':gridspacing, - 'majorlw':majorgridlw, - 'majorls':majorgridls, - 'minorlw':minorgridlw, - 'minorls':minorgridls, - 'color':gridcolor, - 'label':gridlabels, - 'labelsize':gridlabelsize, - 
'labelcolor':gridlabelcolor, - 'alpha':alpha}) + add_cartesian_grid( + ax, + d={ + "x0": qx0, + "y0": qy0, + "spacing": gridspacing, + "majorlw": majorgridlw, + "majorls": majorgridls, + "minorlw": minorgridlw, + "minorls": minorgridls, + "color": gridcolor, + "label": gridlabels, + "labelsize": gridlabelsize, + "labelcolor": gridlabelcolor, + "alpha": alpha, + }, + ) if minorgridlines: - add_cartesian_grid(ax,d={ - 'x0':qx0,'y0':qy0, - 'spacing':gridspacing, - 'majorlw':majorgridlw, - 'majorls':majorgridls, - 'minorlw':minorgridlw, - 'minorls':minorgridls, - 'color':gridcolor, - 'label':gridlabels, - 'labelsize':gridlabelsize, - 'labelcolor':gridlabelcolor, - 'alpha':alpha}) - + add_cartesian_grid( + ax, + d={ + "x0": qx0, + "y0": qy0, + "spacing": gridspacing, + "majorlw": majorgridlw, + "majorls": majorgridls, + "minorlw": minorgridlw, + "minorls": minorgridls, + "color": gridcolor, + "label": gridlabels, + "labelsize": gridlabelsize, + "labelcolor": gridlabelcolor, + "alpha": alpha, + }, + ) # Add a polar-elliptical grid if polargrid: @@ -988,8 +1126,17 @@ def show_Q(ar,scalebar=True,grid=False,polargrid=False, # Shape overlays -def show_rectangles(ar,lims=(0,1,0,1),color='r',fill=True,alpha=0.25,linewidth=2,returnfig=False, - **kwargs): + +def show_rectangles( + ar, + lims=(0, 1, 0, 1), + color="r", + fill=True, + alpha=0.25, + linewidth=2, + returnfig=False, + **kwargs +): """ Visualization function which plots a 2D array with one or more overlayed rectangles. lims is specified in the order (x0,xf,y0,yf). The rectangle bounds begin at the upper @@ -1016,25 +1163,33 @@ def show_rectangles(ar,lims=(0,1,0,1),color='r',fill=True,alpha=0.25,linewidth=2 If returnfig==False, the figure and its one axis are returned, and can be further edited. """ - fig,ax = show(ar,returnfig=True,**kwargs) - d = {'lims':lims,'color':color,'fill':fill,'alpha':alpha,'linewidth':linewidth} - add_rectangles(ax,d) + fig, ax = show(ar, returnfig=True, **kwargs) + d = { + "lims": lims, + "color": color, + "fill": fill, + "alpha": alpha, + "linewidth": linewidth, + } + add_rectangles(ax, d) if not returnfig: return else: - return fig,ax + return fig, ax + def show_circles( ar, center, R, - color='r', + color="r", fill=True, alpha=0.3, linewidth=2, returnfig=False, - **kwargs): + **kwargs +): """ Visualization function which plots a 2D array with one or more overlayed circles. To overlay one circle, center must be a single 2-tuple. To overlay N circles, @@ -1059,22 +1214,37 @@ def show_circles( further edited. """ - fig,ax = show( - ar, - returnfig=True, - **kwargs - ) + fig, ax = show(ar, returnfig=True, **kwargs) - d = {'center':center,'R':R,'color':color,'fill':fill,'alpha':alpha,'linewidth':linewidth} - add_circles(ax,d) + d = { + "center": center, + "R": R, + "color": color, + "fill": fill, + "alpha": alpha, + "linewidth": linewidth, + } + add_circles(ax, d) if not returnfig: return else: - return fig,ax + return fig, ax + -def show_ellipses(ar,center,a,b,theta,color='r',fill=True,alpha=0.3,linewidth=2, - returnfig=False,**kwargs): +def show_ellipses( + ar, + center, + a, + b, + theta, + color="r", + fill=True, + alpha=0.3, + linewidth=2, + returnfig=False, + **kwargs +): """ Visualization function which plots a 2D array with one or more overlayed ellipses. To overlay one ellipse, center must be a single 2-tuple. 
To overlay N circles, @@ -1101,18 +1271,36 @@ def show_ellipses(ar,center,a,b,theta,color='r',fill=True,alpha=0.3,linewidth=2, If returnfig==False, the figure and its one axis are returned, and can be further edited. """ - fig,ax = show(ar,returnfig=True,**kwargs) - d = {'center':center,'a':a,'b':b,'theta':theta,'color':color,'fill':fill, - 'alpha':alpha,'linewidth':linewidth} - add_ellipses(ax,d) + fig, ax = show(ar, returnfig=True, **kwargs) + d = { + "center": center, + "a": a, + "b": b, + "theta": theta, + "color": color, + "fill": fill, + "alpha": alpha, + "linewidth": linewidth, + } + add_ellipses(ax, d) if not returnfig: return else: - return fig,ax + return fig, ax + -def show_annuli(ar,center,radii,color='r',fill=True,alpha=0.3,linewidth=2,returnfig=False, - **kwargs): +def show_annuli( + ar, + center, + radii, + color="r", + fill=True, + alpha=0.3, + linewidth=2, + returnfig=False, + **kwargs +): """ Visualization function which plots a 2D array with one or more overlayed annuli. To overlay one annulus, center must be a single 2-tuple. To overlay N annuli, @@ -1135,18 +1323,36 @@ def show_annuli(ar,center,radii,color='r',fill=True,alpha=0.3,linewidth=2,return If returnfig==False, the figure and its one axis are returned, and can be further edited. """ - fig,ax = show(ar,returnfig=True,**kwargs) - d = {'center':center,'radii':radii,'color':color,'fill':fill,'alpha':alpha, - 'linewidth':linewidth} - add_annuli(ax,d) + fig, ax = show(ar, returnfig=True, **kwargs) + d = { + "center": center, + "radii": radii, + "color": color, + "fill": fill, + "alpha": alpha, + "linewidth": linewidth, + } + add_annuli(ax, d) if not returnfig: return else: - return fig,ax + return fig, ax -def show_points(ar,x,y,s=1,scale=50,alpha=1,pointcolor='r',open_circles=False, - title = None, returnfig=False,**kwargs): + +def show_points( + ar, + x, + y, + s=1, + scale=50, + alpha=1, + pointcolor="r", + open_circles=False, + title=None, + returnfig=False, + **kwargs +): """ Plots a 2D array with one or more points. x and y are the point centers and must have the same length, N. @@ -1168,15 +1374,19 @@ def show_points(ar,x,y,s=1,scale=50,alpha=1,pointcolor='r',open_circles=False, If returnfig==False, the figure and its one axis are returned, and can be further edited. 
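
The show_* shape helpers collected here are thin wrappers which call show(..., returnfig=True) and then the matching add_* overlay; a hedged sketch of two of them, with a synthetic image and invented geometry:

import numpy as np
from py4DSTEM.visualize.show import show_circles, show_annuli

dp = np.random.rand(128, 128)  # placeholder image

# a single circle: center is one (x,y) 2-tuple
show_circles(dp, center=(64, 64), R=10)

# a single annulus: its center plus (inner, outer) radii
show_annuli(dp, center=(64, 64), radii=(12, 20), color="y", alpha=0.4)
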
""" - fig,ax = show(ar,title = title, returnfig=True,**kwargs) - d = {'x':x,'y':y,'s':s,'scale':scale,'pointcolor':pointcolor,'alpha':alpha, - 'open_circles':open_circles} - add_points(ax,d) + fig, ax = show(ar, title=title, returnfig=True, **kwargs) + d = { + "x": x, + "y": y, + "s": s, + "scale": scale, + "pointcolor": pointcolor, + "alpha": alpha, + "open_circles": open_circles, + } + add_points(ax, d) if not returnfig: return else: - return fig,ax - - - + return fig, ax diff --git a/py4DSTEM/visualize/show_extention.py b/py4DSTEM/visualize/show_extention.py index f1d59e439..8fdf522a2 100644 --- a/py4DSTEM/visualize/show_extention.py +++ b/py4DSTEM/visualize/show_extention.py @@ -1,34 +1,34 @@ from py4DSTEM.visualize.vis_grid import show_image_grid + def _show_grid(**kwargs): - """ - """ - assert 'ar' in kwargs.keys() - ar = kwargs['ar'] - del kwargs['ar'] + """ """ + assert "ar" in kwargs.keys() + ar = kwargs["ar"] + del kwargs["ar"] # parse grid of images - if isinstance(ar[0],list): - assert(all([isinstance(ar[i],list) for i in range(len(ar))])) + if isinstance(ar[0], list): + assert all([isinstance(ar[i], list) for i in range(len(ar))]) W = len(ar[0]) H = len(ar) + def get_ar(i): - h = i//W - w = i%W + h = i // W + w = i % W try: return ar[h][w] except IndexError: return + else: W = len(ar) H = 1 + def get_ar(i): return ar[i] - if kwargs['returnfig']: - return show_image_grid(get_ar,H,W,**kwargs) + if kwargs["returnfig"]: + return show_image_grid(get_ar, H, W, **kwargs) else: - show_image_grid(get_ar,H,W,**kwargs) - - - + show_image_grid(get_ar, H, W, **kwargs) diff --git a/py4DSTEM/visualize/vis_RQ.py b/py4DSTEM/visualize/vis_RQ.py index 72c50a396..6c2fbff3c 100644 --- a/py4DSTEM/visualize/vis_RQ.py +++ b/py4DSTEM/visualize/vis_RQ.py @@ -2,27 +2,49 @@ import matplotlib.pyplot as plt from matplotlib.axes import Axes -from py4DSTEM.visualize.show import show,show_points +from py4DSTEM.visualize.show import show, show_points - -def show_selected_dp(datacube,image,rx,ry,figsize=(12,6),returnfig=False, - pointsize=50,pointcolor='r',scaling='log',**kwargs): - """ - """ - dp = datacube.data[rx,ry,:,:] - fig,(ax1,ax2) = plt.subplots(1,2,figsize=figsize) - _,_=show_points(image,rx,ry,scale=pointsize,pointcolor=pointcolor,figax=(fig,ax1),returnfig=True) - _,_=show(dp,figax=(fig,ax2),scaling=scaling,returnfig=True) +def show_selected_dp( + datacube, + image, + rx, + ry, + figsize=(12, 6), + returnfig=False, + pointsize=50, + pointcolor="r", + scaling="log", + **kwargs +): + """ """ + dp = datacube.data[rx, ry, :, :] + fig, (ax1, ax2) = plt.subplots(1, 2, figsize=figsize) + _, _ = show_points( + image, + rx, + ry, + scale=pointsize, + pointcolor=pointcolor, + figax=(fig, ax1), + returnfig=True, + ) + _, _ = show(dp, figax=(fig, ax2), scaling=scaling, returnfig=True) if not returnfig: plt.show() return else: - return fig,(ax1,ax2) + return fig, (ax1, ax2) -def show_RQ(realspace_image, diffractionspace_image, - realspace_pdict={}, diffractionspace_pdict={'scaling':'log'}, - figsize=(12,6),returnfig=False): + +def show_RQ( + realspace_image, + diffractionspace_image, + realspace_pdict={}, + diffractionspace_pdict={"scaling": "log"}, + figsize=(12, 6), + returnfig=False, +): """ Shows side-by-side real/reciprocal space images. 
@@ -33,16 +55,17 @@ def show_RQ(realspace_image, diffractionspace_image, to the show() fn for the real space image diffractionspace_pdict (dictionary) """ - fig,(ax1,ax2) = plt.subplots(1,2,figsize=figsize) - show(realspace_image,figax=(fig,ax1),**realspace_pdict) - show(diffractionspace_image,figax=(fig,ax2),**diffractionspace_pdict) + fig, (ax1, ax2) = plt.subplots(1, 2, figsize=figsize) + show(realspace_image, figax=(fig, ax1), **realspace_pdict) + show(diffractionspace_image, figax=(fig, ax2), **diffractionspace_pdict) if not returnfig: plt.show() return else: - return fig,(ax1,ax2) + return fig, (ax1, ax2) + -def ax_addvector(ax,vx,vy,vlength,x0,y0,width=1,color='r'): +def ax_addvector(ax, vx, vy, vlength, x0, y0, width=1, color="r"): """ Adds a vector to the subplot at ax. @@ -54,11 +77,12 @@ def ax_addvector(ax,vx,vy,vlength,x0,y0,width=1,color='r'): vlength (number) the vector length x0,y0 (numbers) the origin / vector tail position """ - vL = np.hypot(vx,vy) - vx,vy = vlength*vx/vL,vlength*vy/vL - ax.arrow(y0,x0,vy,vx,color=color,width=width,length_includes_head=True) + vL = np.hypot(vx, vy) + vx, vy = vlength * vx / vL, vlength * vy / vL + ax.arrow(y0, x0, vy, vx, color=color, width=width, length_includes_head=True) + -def ax_addvector_RtoQ(ax,vx,vy,vlength,x0,y0,QR_rotation,width=1,color='r'): +def ax_addvector_RtoQ(ax, vx, vy, vlength, x0, y0, QR_rotation, width=1, color="r"): """ Adds a vector to the subplot at ax, where the vector (vx,vy) passed to the function is in real space and the plotted vector is transformed @@ -79,11 +103,13 @@ def ax_addvector_RtoQ(ax,vx,vy,vlength,x0,y0,QR_rotation,width=1,color='r'): with respect to diffraction space. In degrees. """ from py4DSTEM.process.calibration.rotation import get_Qvector_from_Rvector - _,_,vx,vy = get_Qvector_from_Rvector(vx,vy,QR_rotation) - vx,vy = vx*vlength,vy*vlength - ax.arrow(y0,x0,vy,vx,color=color,width=width,length_includes_head=True) -def ax_addvector_QtoR(ax,vx,vy,vlength,x0,y0,QR_rotation,width=1,color='r'): + _, _, vx, vy = get_Qvector_from_Rvector(vx, vy, QR_rotation) + vx, vy = vx * vlength, vy * vlength + ax.arrow(y0, x0, vy, vx, color=color, width=width, length_includes_head=True) + + +def ax_addvector_QtoR(ax, vx, vy, vlength, x0, y0, QR_rotation, width=1, color="r"): """ Adds a vector to the subplot at ax, where the vector (vx,vy) passed to the function is in reciprocal space and the plotted vector is @@ -103,17 +129,34 @@ def ax_addvector_QtoR(ax,vx,vy,vlength,x0,y0,QR_rotation,width=1,color='r'): with respect to diffraction space. In degrees. 
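
Note from the np.hypot normalization above that ax_addvector uses (vx,vy) only for direction; the drawn arrow always has length vlength. A sketch with invented values:

import numpy as np
import matplotlib.pyplot as plt
from py4DSTEM.visualize.vis_RQ import ax_addvector

im = np.zeros((100, 100))  # blank placeholder image
fig, ax = plt.subplots()
ax.matshow(im, cmap="gray")

# (vx,vy)=(3,4) fixes the direction; the arrow is drawn 25 pixels long
ax_addvector(ax, vx=3, vy=4, vlength=25, x0=50, y0=50, color="c")
plt.show()
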
""" from py4DSTEM.process.calibration.rotation import get_Rvector_from_Qvector - vx,vy,_,_ = get_Rvector_from_Qvector(vx,vy,QR_rotation) - vx,vy = vx*vlength,vy*vlength - ax.arrow(y0,x0,vy,vx,color=color,width=width,length_includes_head=True) - -def show_RQ_vector(realspace_image, diffractionspace_image, - realspace_pdict, diffractionspace_pdict, - vx,vy,vlength_R,vlength_Q,x0_R,y0_R,x0_Q,y0_Q, - QR_rotation,vector_space='R', - width_R=1,color_R='r', - width_Q=1,color_Q='r', - figsize=(12,6),returnfig=False): + + vx, vy, _, _ = get_Rvector_from_Qvector(vx, vy, QR_rotation) + vx, vy = vx * vlength, vy * vlength + ax.arrow(y0, x0, vy, vx, color=color, width=width, length_includes_head=True) + + +def show_RQ_vector( + realspace_image, + diffractionspace_image, + realspace_pdict, + diffractionspace_pdict, + vx, + vy, + vlength_R, + vlength_Q, + x0_R, + y0_R, + x0_Q, + y0_Q, + QR_rotation, + vector_space="R", + width_R=1, + color_R="r", + width_Q=1, + color_Q="r", + figsize=(12, 6), + returnfig=False, +): """ Shows side-by-side real/reciprocal space images with a vector overlaid in each showing corresponding directions. @@ -142,29 +185,70 @@ def show_RQ_vector(realspace_image, diffractionspace_image, function describes a real or diffracation space vector. """ - assert(vector_space in ('R','Q')) - fig,(ax1,ax2) = show_RQ(realspace_image, diffractionspace_image, - realspace_pdict, diffractionspace_pdict, - figsize=figsize,returnfig=True) - if vector_space=='R': - ax_addvector(ax1,vx,vy,vlength_R,x0_R,y0_R,width=width_R,color=color_R) - ax_addvector_RtoQ(ax2,vx,vy,vlength_Q,x0_Q,y0_Q,QR_rotation,width=width_Q,color=color_Q) + assert vector_space in ("R", "Q") + fig, (ax1, ax2) = show_RQ( + realspace_image, + diffractionspace_image, + realspace_pdict, + diffractionspace_pdict, + figsize=figsize, + returnfig=True, + ) + if vector_space == "R": + ax_addvector(ax1, vx, vy, vlength_R, x0_R, y0_R, width=width_R, color=color_R) + ax_addvector_RtoQ( + ax2, + vx, + vy, + vlength_Q, + x0_Q, + y0_Q, + QR_rotation, + width=width_Q, + color=color_Q, + ) else: - ax_addvector(ax2,vx,vy,vlength_Q,x0_Q,y0_Q,width=width_Q,color=color_Q) - ax_addvector_QtoR(ax1,vx,vy,vlength_R,x0_R,y0_R,QR_rotation,width=width_R,color=color_R) + ax_addvector(ax2, vx, vy, vlength_Q, x0_Q, y0_Q, width=width_Q, color=color_Q) + ax_addvector_QtoR( + ax1, + vx, + vy, + vlength_R, + x0_R, + y0_R, + QR_rotation, + width=width_R, + color=color_R, + ) if not returnfig: plt.show() return else: - return fig,(ax1,ax2) - -def show_RQ_vectors(realspace_image, diffractionspace_image, - realspace_pdict, diffractionspace_pdict, - vx,vy,vlength_R,vlength_Q,x0_R,y0_R,x0_Q,y0_Q, - QR_rotation,vector_space='R', - width_R=1,color_R='r', - width_Q=1,color_Q='r', - figsize=(12,6),returnfig=False): + return fig, (ax1, ax2) + + +def show_RQ_vectors( + realspace_image, + diffractionspace_image, + realspace_pdict, + diffractionspace_pdict, + vx, + vy, + vlength_R, + vlength_Q, + x0_R, + y0_R, + x0_Q, + y0_Q, + QR_rotation, + vector_space="R", + width_R=1, + color_R="r", + width_Q=1, + color_Q="r", + figsize=(12, 6), + returnfig=False, +): """ Shows side-by-side real/reciprocal space images with several vectors overlaid in each showing corresponding directions. @@ -193,35 +277,57 @@ def show_RQ_vectors(realspace_image, diffractionspace_image, function describes a real or diffracation space vector. 
""" - assert(vector_space in ('R','Q')) - assert(len(vx)==len(vy)) - if isinstance(color_R,tuple) or isinstance(color_R,list): - assert(len(vx)==len(color_R)) + assert vector_space in ("R", "Q") + assert len(vx) == len(vy) + if isinstance(color_R, tuple) or isinstance(color_R, list): + assert len(vx) == len(color_R) else: - color_R =[color_R for i in range(len(vx))] - if isinstance(color_Q,tuple) or isinstance(color_Q,list): - assert(len(vx)==len(color_Q)) + color_R = [color_R for i in range(len(vx))] + if isinstance(color_Q, tuple) or isinstance(color_Q, list): + assert len(vx) == len(color_Q) else: - color_Q =[color_Q for i in range(len(vx))] - - fig,(ax1,ax2) = show_RQ(realspace_image, diffractionspace_image, - realspace_pdict, diffractionspace_pdict, - figsize=figsize,returnfig=True) - for x,y,cR,cQ in zip(vx,vy,color_R,color_Q): - if vector_space=='R': - ax_addvector(ax1,x,y,vlength_R,x0_R,y0_R,width=width_R,color=cR) - ax_addvector_RtoQ(ax2,x,y,vlength_Q,x0_Q,y0_Q,QR_rotation,width=width_Q,color=cQ) + color_Q = [color_Q for i in range(len(vx))] + + fig, (ax1, ax2) = show_RQ( + realspace_image, + diffractionspace_image, + realspace_pdict, + diffractionspace_pdict, + figsize=figsize, + returnfig=True, + ) + for x, y, cR, cQ in zip(vx, vy, color_R, color_Q): + if vector_space == "R": + ax_addvector(ax1, x, y, vlength_R, x0_R, y0_R, width=width_R, color=cR) + ax_addvector_RtoQ( + ax2, x, y, vlength_Q, x0_Q, y0_Q, QR_rotation, width=width_Q, color=cQ + ) else: - ax_addvector(ax2,x,y,vlength_Q,x0_Q,y0_Q,width=width_Q,color=cQ) - ax_addvector_QtoR(ax1,x,y,vlength_R,x0_R,y0_R,QR_rotation,width=width_R,color=cR) + ax_addvector(ax2, x, y, vlength_Q, x0_Q, y0_Q, width=width_Q, color=cQ) + ax_addvector_QtoR( + ax1, x, y, vlength_R, x0_R, y0_R, QR_rotation, width=width_R, color=cR + ) if not returnfig: plt.show() return else: - return fig,(ax1,ax2) + return fig, (ax1, ax2) + -def ax_addaxes(ax,vx,vy,vlength,x0,y0,width=1,color='r',labelaxes=True, - labelsize=12,labelcolor='r',righthandedcoords=True): +def ax_addaxes( + ax, + vx, + vy, + vlength, + x0, + y0, + width=1, + color="r", + labelaxes=True, + labelsize=12, + labelcolor="r", + righthandedcoords=True, +): """ Adds a pair of x/y axes to the matplotlib subplot ax. 
The user supplies the x-axis direction with (vx,vy), and the y-axis is then chosen @@ -239,26 +345,39 @@ def ax_addaxes(ax,vx,vy,vlength,x0,y0,width=1,color='r',labelaxes=True, with respect to x-axis """ # Get the x-axis - vL = np.hypot(vx,vy) - xaxis_x,xaxis_y = vlength*vx/vL,vlength*vy/vL + vL = np.hypot(vx, vy) + xaxis_x, xaxis_y = vlength * vx / vL, vlength * vy / vL # Get the y-axes if righthandedcoords: - yaxis_x,yaxis_y = -xaxis_y,xaxis_x + yaxis_x, yaxis_y = -xaxis_y, xaxis_x else: - yaxis_x,yaxis_y = xaxis_y,-xaxis_x - ax_addvector(ax,xaxis_x,xaxis_y,vlength,x0,y0,width=width,color=color) - ax_addvector(ax,yaxis_x,yaxis_y,vlength,x0,y0,width=width,color=color) + yaxis_x, yaxis_y = xaxis_y, -xaxis_x + ax_addvector(ax, xaxis_x, xaxis_y, vlength, x0, y0, width=width, color=color) + ax_addvector(ax, yaxis_x, yaxis_y, vlength, x0, y0, width=width, color=color) # Label axes: if labelaxes: - xaxislabel_x = x0 + 1.1*xaxis_x + xaxislabel_x = x0 + 1.1 * xaxis_x xaxislabel_y = y0 + xaxis_y yaxislabel_x = x0 + yaxis_x - yaxislabel_y = y0 + 1.1*yaxis_y - ax.text(xaxislabel_y,xaxislabel_x,'x',color=labelcolor,size=labelsize) - ax.text(yaxislabel_y,yaxislabel_x,'y',color=labelcolor,size=labelsize) + yaxislabel_y = y0 + 1.1 * yaxis_y + ax.text(xaxislabel_y, xaxislabel_x, "x", color=labelcolor, size=labelsize) + ax.text(yaxislabel_y, yaxislabel_x, "y", color=labelcolor, size=labelsize) -def ax_addaxes_QtoR(ax,vx,vy,vlength,x0,y0,QR_rotation,width=1,color='r', - labelaxes=True,labelsize=12,labelcolor='r'): + +def ax_addaxes_QtoR( + ax, + vx, + vy, + vlength, + x0, + y0, + QR_rotation, + width=1, + color="r", + labelaxes=True, + labelsize=12, + labelcolor="r", +): """ Adds a pair of x/y axes to the matplotlib subplot ax. The user supplies the x-axis direction with (vx,vy) in reciprocal space coordinates, and @@ -280,12 +399,38 @@ def ax_addaxes_QtoR(ax,vx,vy,vlength,x0,y0,QR_rotation,width=1,color='r', with respect to diffraction space. In degrees. """ from py4DSTEM.process.calibration.rotation import get_Rvector_from_Qvector - vx,vy,_,_ = get_Rvector_from_Qvector(vx,vy,QR_rotation) - ax_addaxes(ax,vx,vy,vlength,x0,y0,width=width,color=color,labelaxes=labelaxes, - labelsize=labelsize,labelcolor=labelcolor,righthandedcoords=True) -def ax_addaxes_RtoQ(ax,vx,vy,vlength,x0,y0,QR_rotation,width=1,color='r', - labelaxes=True,labelsize=12,labelcolor='r'): + vx, vy, _, _ = get_Rvector_from_Qvector(vx, vy, QR_rotation) + ax_addaxes( + ax, + vx, + vy, + vlength, + x0, + y0, + width=width, + color=color, + labelaxes=labelaxes, + labelsize=labelsize, + labelcolor=labelcolor, + righthandedcoords=True, + ) + + +def ax_addaxes_RtoQ( + ax, + vx, + vy, + vlength, + x0, + y0, + QR_rotation, + width=1, + color="r", + labelaxes=True, + labelsize=12, + labelcolor="r", +): """ Adds a pair of x/y axes to the matplotlib subplot ax. The user supplies the x-axis direction with (vx,vy) in real space coordinates, and the function @@ -307,17 +452,51 @@ def ax_addaxes_RtoQ(ax,vx,vy,vlength,x0,y0,QR_rotation,width=1,color='r', with respect to diffraction space. In degrees. 
""" from py4DSTEM.process.calibration.rotation import get_Qvector_from_Rvector - _,_,vx,vy = get_Qvector_from_Rvector(vx,vy,QR_rotation) - ax_addaxes(ax,vx,vy,vlength,x0,y0,width=width,color=color,labelaxes=labelaxes, - labelsize=labelsize,labelcolor=labelcolor,righthandedcoords=True) - -def show_RQ_axes(realspace_image,diffractionspace_image, - realspace_pdict, diffractionspace_pdict, - vx,vy,vlength_R,vlength_Q,x0_R,y0_R,x0_Q,y0_Q, - QR_rotation,vector_space='R', - width_R=1,color_R='r',width_Q=1,color_Q='r', - labelaxes=True,labelcolor_R='r',labelcolor_Q='r', - labelsize_R=12,labelsize_Q=12,figsize=(12,6),returnfig=False): + + _, _, vx, vy = get_Qvector_from_Rvector(vx, vy, QR_rotation) + ax_addaxes( + ax, + vx, + vy, + vlength, + x0, + y0, + width=width, + color=color, + labelaxes=labelaxes, + labelsize=labelsize, + labelcolor=labelcolor, + righthandedcoords=True, + ) + + +def show_RQ_axes( + realspace_image, + diffractionspace_image, + realspace_pdict, + diffractionspace_pdict, + vx, + vy, + vlength_R, + vlength_Q, + x0_R, + y0_R, + x0_Q, + y0_Q, + QR_rotation, + vector_space="R", + width_R=1, + color_R="r", + width_Q=1, + color_Q="r", + labelaxes=True, + labelcolor_R="r", + labelcolor_Q="r", + labelsize_R=12, + labelsize_Q=12, + figsize=(12, 6), + returnfig=False, +): """ Shows side-by-side real/reciprocal space images with a set of corresponding coordinate axes overlaid in each. (vx,vy) specifies the x-axis, and the y-axis @@ -348,24 +527,73 @@ def show_RQ_axes(realspace_image,diffractionspace_image, function describes a real or diffracation space vector. """ - assert(vector_space in ('R','Q')) - fig,(ax1,ax2) = show_RQ(realspace_image, diffractionspace_image, - realspace_pdict, diffractionspace_pdict, - figsize=figsize,returnfig=True) - if vector_space=='R': - ax_addaxes(ax1,vx,vy,vlength_R,x0_R,y0_R,width=width_R,color=color_R, - labelaxes=labelaxes,labelsize=labelsize_R,labelcolor=labelcolor_R) - ax_addaxes_RtoQ(ax2,vx,vy,vlength_Q,x0_Q,y0_Q,QR_rotation,width=width_Q,color=color_Q, - labelaxes=labelaxes,labelsize=labelsize_Q,labelcolor=labelcolor_Q) + assert vector_space in ("R", "Q") + fig, (ax1, ax2) = show_RQ( + realspace_image, + diffractionspace_image, + realspace_pdict, + diffractionspace_pdict, + figsize=figsize, + returnfig=True, + ) + if vector_space == "R": + ax_addaxes( + ax1, + vx, + vy, + vlength_R, + x0_R, + y0_R, + width=width_R, + color=color_R, + labelaxes=labelaxes, + labelsize=labelsize_R, + labelcolor=labelcolor_R, + ) + ax_addaxes_RtoQ( + ax2, + vx, + vy, + vlength_Q, + x0_Q, + y0_Q, + QR_rotation, + width=width_Q, + color=color_Q, + labelaxes=labelaxes, + labelsize=labelsize_Q, + labelcolor=labelcolor_Q, + ) else: - ax_addaxes(ax2,vx,vy,vlength_Q,x0_Q,y0_Q,width=width_Q,color=color_Q, - labelaxes=labelaxes,labelsize=labelsize_Q,labelcolor=labelcolor_Q) - ax_addaxes_QtoR(ax1,vx,vy,vlength_R,x0_R,y0_R,QR_rotation,width=width_R,color=color_R, - labelaxes=labelaxes,labelsize=labelsize_R,labelcolor=labelcolor_R) + ax_addaxes( + ax2, + vx, + vy, + vlength_Q, + x0_Q, + y0_Q, + width=width_Q, + color=color_Q, + labelaxes=labelaxes, + labelsize=labelsize_Q, + labelcolor=labelcolor_Q, + ) + ax_addaxes_QtoR( + ax1, + vx, + vy, + vlength_R, + x0_R, + y0_R, + QR_rotation, + width=width_R, + color=color_R, + labelaxes=labelaxes, + labelsize=labelsize_R, + labelcolor=labelcolor_R, + ) if not returnfig: plt.show() return else: - return fig,(ax1,ax2) - - + return fig, (ax1, ax2) diff --git a/py4DSTEM/visualize/vis_grid.py b/py4DSTEM/visualize/vis_grid.py index 
48b5b158a..d24b0b8d8 100644 --- a/py4DSTEM/visualize/vis_grid.py +++ b/py4DSTEM/visualize/vis_grid.py @@ -2,12 +2,13 @@ import matplotlib.pyplot as plt from matplotlib.patches import Rectangle -from py4DSTEM.visualize.show import show,show_points +from py4DSTEM.visualize.show import show, show_points from py4DSTEM.visualize.overlay import add_grid_overlay - -def show_DP_grid(datacube,x0,y0,xL,yL,axsize=(6,6),returnfig=False,space=0,**kwargs): +def show_DP_grid( + datacube, x0, y0, xL, yL, axsize=(6, 6), returnfig=False, space=0, **kwargs +): """ Shows a grid of diffraction patterns from DataCube datacube, starting from scan position (x0,y0) and extending xL,yL. @@ -24,26 +25,28 @@ def show_DP_grid(datacube,x0,y0,xL,yL,axsize=(6,6),returnfig=False,space=0,**kwa if returnfig==false, the figure and its one axis are returned, and can be further edited. """ - yy,xx = np.meshgrid(np.arange(y0,y0+yL),np.arange(x0,x0+xL)) + yy, xx = np.meshgrid(np.arange(y0, y0 + yL), np.arange(x0, x0 + xL)) - fig,axs = plt.subplots(xL,yL,figsize=(yL*axsize[0],xL*axsize[1])) + fig, axs = plt.subplots(xL, yL, figsize=(yL * axsize[0], xL * axsize[1])) for xi in range(xL): for yi in range(yL): - ax = axs[xi,yi] - x,y = xx[xi,yi],yy[xi,yi] - dp = datacube.data[x,y,:,:] - _,_ = show(dp,figax=(fig,ax),returnfig=True,**kwargs) + ax = axs[xi, yi] + x, y = xx[xi, yi], yy[xi, yi] + dp = datacube.data[x, y, :, :] + _, _ = show(dp, figax=(fig, ax), returnfig=True, **kwargs) plt.tight_layout() - plt.subplots_adjust(wspace=space,hspace=space) + plt.subplots_adjust(wspace=space, hspace=space) if not returnfig: plt.show() return else: - return fig,axs + return fig, axs + -def show_grid_overlay(image,x0,y0,xL,yL,color='k',linewidth=1,alpha=1, - returnfig=False,**kwargs): +def show_grid_overlay( + image, x0, y0, xL, yL, color="k", linewidth=1, alpha=1, returnfig=False, **kwargs +): """ Shows the image with an overlaid boxgrid outline about the pixels beginning at (x0,y0) and with extent xL,yL in the two directions. @@ -53,19 +56,31 @@ def show_grid_overlay(image,x0,y0,xL,yL,color='k',linewidth=1,alpha=1, x0,y0 the corner of the grid xL,xL the extent of the grid """ - fig,ax = show(image,returnfig=True,**kwargs) - add_grid_overlay(ax,d={'x0':x0,'y0':y0,'xL':xL,'yL':yL, - 'color':color,'linewidth':linewidth,'alpha':alpha}) + fig, ax = show(image, returnfig=True, **kwargs) + add_grid_overlay( + ax, + d={ + "x0": x0, + "y0": y0, + "xL": xL, + "yL": yL, + "color": color, + "linewidth": linewidth, + "alpha": alpha, + }, + ) plt.tight_layout() if not returnfig: plt.show() return else: - return fig,ax + return fig, ax + -def _show_grid_overlay(image,x0,y0,xL,yL,color='k',linewidth=1,alpha=1, - returnfig=False,**kwargs): +def _show_grid_overlay( + image, x0, y0, xL, yL, color="k", linewidth=1, alpha=1, returnfig=False, **kwargs +): """ Shows the image with an overlaid boxgrid outline about the pixels beginning at (x0,y0) and with extent xL,yL in the two directions. 
@@ -75,14 +90,21 @@ def _show_grid_overlay(image,x0,y0,xL,yL,color='k',linewidth=1,alpha=1, x0,y0 the corner of the grid xL,xL the extent of the grid """ - yy,xx = np.meshgrid(np.arange(y0,y0+yL),np.arange(x0,x0+xL)) + yy, xx = np.meshgrid(np.arange(y0, y0 + yL), np.arange(x0, x0 + xL)) - fig,ax = show(image,returnfig=True,**kwargs) + fig, ax = show(image, returnfig=True, **kwargs) for xi in range(xL): for yi in range(yL): - x,y = xx[xi,yi],yy[xi,yi] - rect = Rectangle((y-0.5,x-0.5),1,1,lw=linewidth,color=color, - alpha=alpha,fill=False) + x, y = xx[xi, yi], yy[xi, yi] + rect = Rectangle( + (y - 0.5, x - 0.5), + 1, + 1, + lw=linewidth, + color=color, + alpha=alpha, + fill=False, + ) ax.add_patch(rect) plt.tight_layout() @@ -90,24 +112,27 @@ def _show_grid_overlay(image,x0,y0,xL,yL,color='k',linewidth=1,alpha=1, plt.show() return else: - return fig,ax + return fig, ax + def show_image_grid( get_ar, - H,W, - axsize=(6,6), + H, + W, + axsize=(6, 6), returnfig=False, - figax = None, - title = None, - title_index = False, - suptitle = None, + figax=None, + title=None, + title_index=False, + suptitle=None, get_bordercolor=None, get_x=None, get_y=None, get_pointcolors=None, get_s=None, open_circles=False, - **kwargs): + **kwargs, +): """ Displays a set of images in a grid. @@ -131,14 +156,14 @@ def show_image_grid( H,W integers, the dimensions of the grid axsize the size of each image figax controls which matplotlib Axes object draws the image. - If None, generates a new figure with a single Axes instance. - Otherwise, ax must be a 2-tuple containing the matplotlib class instances + If None, generates a new figure with a single Axes instance. + Otherwise, ax must be a 2-tuple containing the matplotlib class instances (Figure,Axes), with ar then plotted in the specified Axes instance. - title if title is sting, then prints title as suptitle. If a suptitle is also provided, + title if title is sting, then prints title as suptitle. If a suptitle is also provided, the suptitle is printed insead. - if title is a list of strings (ex: ['title 1','title 2']), each array has + if title is a list of strings (ex: ['title 1','title 2']), each array has corresponding title in list. - title_index if True, prints the index i passed to get_ar over each image + title_index if True, prints the index i passed to get_ar over each image suptitle string, suptitle on plot get_bordercolor if not None, should be a function defined over @@ -166,90 +191,107 @@ def show_image_grid( _get_points = (get_x is not None) and (get_y is not None) _get_colors = get_pointcolors is not None _get_s = get_s is not None - + if figax is None: - fig,axs = plt.subplots(H,W,figsize=(W*axsize[0],H*axsize[1])) + fig, axs = plt.subplots(H, W, figsize=(W * axsize[0], H * axsize[1])) else: - fig,axs = figax - if H==1: - axs = axs[np.newaxis,:] - elif W==1: - axs = axs[:,np.newaxis] + fig, axs = figax + if H == 1: + axs = axs[np.newaxis, :] + elif W == 1: + axs = axs[:, np.newaxis] for i in range(H): for j in range(W): - ax = axs[i,j] - N = i*W+j - #make titles - if type(title) == list: + ax = axs[i, j] + N = i * W + j + # make titles + if type(title) == list: print_title = title[N] else: print_title = None - if title_index: - if print_title is not None: + if title_index: + if print_title is not None: print_title = f"{N}. " + print_title else: - print_title = f"{N}." - #make figures + print_title = f"{N}." 
+ # make figures try: ar = get_ar(N) if _get_bordercolor and _get_points: bc = get_bordercolor(N) - x,y = get_x(N),get_y(N) + x, y = get_x(N), get_y(N) if _get_colors: pointcolors = get_pointcolors(N) else: - pointcolors='r' + pointcolors = "r" if _get_s: s = get_s(N) - _,_ = show_points( - ar,figax=(fig,ax), + _, _ = show_points( + ar, + figax=(fig, ax), returnfig=True, bordercolor=bc, - x=x,y=y,s=s, + x=x, + y=y, + s=s, pointcolor=pointcolors, open_circles=open_circles, - title = print_title, - **kwargs) + title=print_title, + **kwargs, + ) else: - _,_ = show_points( - ar,figax=(fig,ax), + _, _ = show_points( + ar, + figax=(fig, ax), returnfig=True, bordercolor=bc, - x=x,y=y, + x=x, + y=y, pointcolor=pointcolors, open_circles=open_circles, - title = print_title, - **kwargs) + title=print_title, + **kwargs, + ) elif _get_bordercolor: bc = get_bordercolor(N) - _,_ = show(ar,figax=(fig,ax),returnfig=True, - bordercolor=bc,title = print_title, **kwargs) + _, _ = show( + ar, + figax=(fig, ax), + returnfig=True, + bordercolor=bc, + title=print_title, + **kwargs, + ) elif _get_points: - x,y = get_x(N),get_y(N) + x, y = get_x(N), get_y(N) if _get_colors: pointcolors = get_pointcolors(N) else: - pointcolors='r' - _,_ = show_points( - ar,figax=(fig,ax),x=x,y=y, + pointcolors = "r" + _, _ = show_points( + ar, + figax=(fig, ax), + x=x, + y=y, returnfig=True, pointcolor=pointcolors, open_circles=open_circles, - title = print_title, - **kwargs) + title=print_title, + **kwargs, + ) else: - _,_ = show(ar,figax=(fig,ax),returnfig=True,title = print_title,**kwargs) + _, _ = show( + ar, figax=(fig, ax), returnfig=True, title=print_title, **kwargs + ) except IndexError: - ax.axis('off') + ax.axis("off") if type(title) == str: fig.suptitle(title) if suptitle: fig.suptitle(suptitle) plt.tight_layout() - + if not returnfig: return else: - return fig,axs - - + return fig, axs diff --git a/py4DSTEM/visualize/vis_special.py b/py4DSTEM/visualize/vis_special.py index 43cf7fff8..89f09606a 100644 --- a/py4DSTEM/visualize/vis_special.py +++ b/py4DSTEM/visualize/vis_special.py @@ -16,15 +16,23 @@ add_scalebar, ) from py4DSTEM.visualize.vis_grid import show_image_grid -from py4DSTEM.visualize.vis_RQ import ax_addaxes,ax_addaxes_QtoR - - - - - -def show_elliptical_fit(ar,fitradii,p_ellipse,fill=True, - color_ann='y',color_ell='r',alpha_ann=0.2,alpha_ell=0.7, - linewidth_ann=2,linewidth_ell=2,returnfig=False,**kwargs): +from py4DSTEM.visualize.vis_RQ import ax_addaxes, ax_addaxes_QtoR + + +def show_elliptical_fit( + ar, + fitradii, + p_ellipse, + fill=True, + color_ann="y", + color_ell="r", + alpha_ann=0.2, + alpha_ell=0.7, + linewidth_ann=2, + linewidth_ell=2, + returnfig=False, + **kwargs +): """ Plots an elliptical curve over its annular fit region. 
@@ -42,35 +50,55 @@ def show_elliptical_fit(ar,fitradii,p_ellipse,fill=True, linewidth_ann: linewidth_ell: """ - Ri,Ro = fitradii - qx0,qy0,a,b,theta = p_ellipse - fig,ax = show(ar, - annulus={'center':(qx0,qy0), - 'radii':(Ri,Ro), - 'fill':fill, - 'color':color_ann, - 'alpha':alpha_ann, - 'linewidth':linewidth_ann}, - ellipse={'center':(qx0,qy0), - 'a':a, - 'b':b, - 'theta':theta, - 'color':color_ell, - 'alpha':alpha_ell, - 'linewidth':linewidth_ell}, - returnfig=True,**kwargs) + Ri, Ro = fitradii + qx0, qy0, a, b, theta = p_ellipse + fig, ax = show( + ar, + annulus={ + "center": (qx0, qy0), + "radii": (Ri, Ro), + "fill": fill, + "color": color_ann, + "alpha": alpha_ann, + "linewidth": linewidth_ann, + }, + ellipse={ + "center": (qx0, qy0), + "a": a, + "b": b, + "theta": theta, + "color": color_ell, + "alpha": alpha_ell, + "linewidth": linewidth_ell, + }, + returnfig=True, + **kwargs, + ) if not returnfig: plt.show() return else: - return fig,ax + return fig, ax -def show_amorphous_ring_fit(dp,fitradii,p_dsg,N=12,cmap=('gray','gray'), - fitborder=True,fitbordercolor='k',fitborderlw=0.5, - scaling='log',ellipse=False,ellipse_color='r', - ellipse_alpha=0.7,ellipse_lw=2,returnfig=False,**kwargs): +def show_amorphous_ring_fit( + dp, + fitradii, + p_dsg, + N=12, + cmap=("gray", "gray"), + fitborder=True, + fitbordercolor="k", + fitborderlw=0.5, + scaling="log", + ellipse=False, + ellipse_color="r", + ellipse_alpha=0.7, + ellipse_lw=2, + returnfig=False, + **kwargs +): """ Display a diffraction pattern with a fit to its amorphous ring, interleaving the data and the fit in a pinwheel pattern. @@ -93,75 +121,112 @@ def show_amorphous_ring_fit(dp,fitradii,p_dsg,N=12,cmap=('gray','gray'), """ from py4DSTEM.process.calibration import double_sided_gaussian from py4DSTEM.process.utils import convert_ellipse_params - assert(len(p_dsg)==11) - assert(isinstance(N,(int,np.integer))) - if isinstance(cmap,tuple): - cmap_data,cmap_fit = cmap[0],cmap[1] + + assert len(p_dsg) == 11 + assert isinstance(N, (int, np.integer)) + if isinstance(cmap, tuple): + cmap_data, cmap_fit = cmap[0], cmap[1] else: - cmap_data,cmap_fit = cmap,cmap - Q_Nx,Q_Ny = dp.shape - qmin,qmax = fitradii + cmap_data, cmap_fit = cmap, cmap + Q_Nx, Q_Ny = dp.shape + qmin, qmax = fitradii # Make coords - qx0,qy0 = p_dsg[6],p_dsg[7] - qyy,qxx = np.meshgrid(np.arange(Q_Ny),np.arange(Q_Nx)) - qx,qy = qxx-qx0,qyy-qy0 - q = np.hypot(qx,qy) - theta = np.arctan2(qy,qx) + qx0, qy0 = p_dsg[6], p_dsg[7] + qyy, qxx = np.meshgrid(np.arange(Q_Ny), np.arange(Q_Nx)) + qx, qy = qxx - qx0, qyy - qy0 + q = np.hypot(qx, qy) + theta = np.arctan2(qy, qx) # Make mask - thetas = np.linspace(-np.pi,np.pi,2*N+1) - pinwheel = np.zeros((Q_Nx,Q_Ny),dtype=bool) + thetas = np.linspace(-np.pi, np.pi, 2 * N + 1) + pinwheel = np.zeros((Q_Nx, Q_Ny), dtype=bool) for i in range(N): - pinwheel += (theta>thetas[2*i]) * (theta<=thetas[2*i+1]) - mask = pinwheel * (q>qmin) * (q<=qmax) + pinwheel += (theta > thetas[2 * i]) * (theta <= thetas[2 * i + 1]) + mask = pinwheel * (q > qmin) * (q <= qmax) # Get fit data fit = double_sided_gaussian(p_dsg, qxx, qyy) # Show - (fig,ax),(vmin,vmax) = show(dp,scaling=scaling,cmap=cmap_data, - mask=np.logical_not(mask),mask_color='empty', - returnfig=True,returnclipvals=True,**kwargs) - show(fit,scaling=scaling,figax=(fig,ax),clipvals='manual',min=vmin,max=vmax, - cmap=cmap_fit,mask=mask,mask_color='empty',**kwargs) + (fig, ax), (vmin, vmax) = show( + dp, + scaling=scaling, + cmap=cmap_data, + mask=np.logical_not(mask), + mask_color="empty", + 
returnfig=True, + returnclipvals=True, + **kwargs, + ) + show( + fit, + scaling=scaling, + figax=(fig, ax), + clipvals="manual", + min=vmin, + max=vmax, + cmap=cmap_fit, + mask=mask, + mask_color="empty", + **kwargs, + ) if fitborder: - if N%2==1: thetas += (thetas[1]-thetas[0])/2 - if (N//2%2)==0: thetas = np.roll(thetas,-1) + if N % 2 == 1: + thetas += (thetas[1] - thetas[0]) / 2 + if (N // 2 % 2) == 0: + thetas = np.roll(thetas, -1) for i in range(N): - ax.add_patch(Wedge((qy0,qx0),qmax,np.degrees(thetas[2*i]), - np.degrees(thetas[2*i+1]),width=qmax-qmin,fill=None, - color=fitbordercolor,lw=fitborderlw)) + ax.add_patch( + Wedge( + (qy0, qx0), + qmax, + np.degrees(thetas[2 * i]), + np.degrees(thetas[2 * i + 1]), + width=qmax - qmin, + fill=None, + color=fitbordercolor, + lw=fitborderlw, + ) + ) # Add ellipse overlay if ellipse: - A,B,C = p_dsg[8],p_dsg[9],p_dsg[10] - a,b,theta = convert_ellipse_params(A,B,C) - ellipse={'center':(qx0,qy0),'a':a,'b':b,'theta':theta, - 'color':ellipse_color,'alpha':ellipse_alpha,'linewidth':ellipse_lw} - add_ellipses(ax,ellipse) + A, B, C = p_dsg[8], p_dsg[9], p_dsg[10] + a, b, theta = convert_ellipse_params(A, B, C) + ellipse = { + "center": (qx0, qy0), + "a": a, + "b": b, + "theta": theta, + "color": ellipse_color, + "alpha": ellipse_alpha, + "linewidth": ellipse_lw, + } + add_ellipses(ax, ellipse) if not returnfig: plt.show() return else: - return fig,ax + return fig, ax def show_qprofile( q, intensity, ymax=None, - figsize=(12,4), + figsize=(12, 4), returnfig=False, - color='k', - xlabel='q (pixels)', - ylabel='Intensity (A.U.)', + color="k", + xlabel="q (pixels)", + ylabel="Intensity (A.U.)", labelsize=16, ticklabelsize=14, grid=True, label=None, - **kwargs): + **kwargs +): """ Plots a diffraction space radial profile. Params: @@ -177,148 +242,167 @@ def show_qprofile( label a legend label for the plotted curve """ if ymax is None: - ymax = np.max(intensity)*1.05 + ymax = np.max(intensity) * 1.05 - fig,ax = plt.subplots(figsize=figsize) - ax.plot(q,intensity,color=color,label=label) + fig, ax = plt.subplots(figsize=figsize) + ax.plot(q, intensity, color=color, label=label) ax.grid(grid) - ax.set_ylim(0,ymax) - ax.tick_params(axis='x',labelsize=ticklabelsize) + ax.set_ylim(0, ymax) + ax.tick_params(axis="x", labelsize=ticklabelsize) ax.set_yticklabels([]) - ax.set_xlabel(xlabel,size=labelsize) - ax.set_ylabel(ylabel,size=labelsize) + ax.set_xlabel(xlabel, size=labelsize) + ax.set_ylabel(ylabel, size=labelsize) if not returnfig: plt.show() return else: - return fig,ax + return fig, ax -def show_kernel( - kernel, - R, - L, - W, - figsize=(12,6), - returnfig=False, - **kwargs): + +def show_kernel(kernel, R, L, W, figsize=(12, 6), returnfig=False, **kwargs): """ Plots, side by side, the probe kernel and its line profile. R is the kernel plot's window size. L and W are the length and width of the lineprofile. 
""" - lineprofile_1 = np.concatenate([ - np.sum(kernel[-L:,:W],axis=1), - np.sum(kernel[:L,:W],axis=1) - ]) - lineprofile_2 = np.concatenate([ - np.sum(kernel[:W,-L:],axis=0), - np.sum(kernel[:W,:L],axis=0) - ]) - - im_kernel = np.vstack([ - np.hstack([ - kernel[-int(R):,-int(R):], - kernel[-int(R):,:int(R)] - ]), - np.hstack([ - kernel[:int(R),-int(R):], - kernel[:int(R),:int(R)] - ]), - ]) - - fig,axs = plt.subplots(1,2,figsize=figsize) - axs[0].matshow(im_kernel,cmap='gray') - axs[0].plot( - np.ones(2*R)*R, - np.arange(2*R), - c='r') - axs[0].plot( - np.arange(2*R), - np.ones(2*R)*R, - c='c') - - - axs[1].plot( - np.arange(len(lineprofile_1)), - lineprofile_1, - c='r') - axs[1].plot( - np.arange(len(lineprofile_2)), - lineprofile_2, - c='c') + lineprofile_1 = np.concatenate( + [np.sum(kernel[-L:, :W], axis=1), np.sum(kernel[:L, :W], axis=1)] + ) + lineprofile_2 = np.concatenate( + [np.sum(kernel[:W, -L:], axis=0), np.sum(kernel[:W, :L], axis=0)] + ) + + im_kernel = np.vstack( + [ + np.hstack([kernel[-int(R) :, -int(R) :], kernel[-int(R) :, : int(R)]]), + np.hstack([kernel[: int(R), -int(R) :], kernel[: int(R), : int(R)]]), + ] + ) + + fig, axs = plt.subplots(1, 2, figsize=figsize) + axs[0].matshow(im_kernel, cmap="gray") + axs[0].plot(np.ones(2 * R) * R, np.arange(2 * R), c="r") + axs[0].plot(np.arange(2 * R), np.ones(2 * R) * R, c="c") + + axs[1].plot(np.arange(len(lineprofile_1)), lineprofile_1, c="r") + axs[1].plot(np.arange(len(lineprofile_2)), lineprofile_2, c="c") if not returnfig: plt.show() return else: - return fig,axs + return fig, axs + -def show_voronoi(ar,x,y,color_points='r',color_lines='w',max_dist=None, - returnfig=False,**kwargs): +def show_voronoi( + ar, + x, + y, + color_points="r", + color_lines="w", + max_dist=None, + returnfig=False, + **kwargs +): """ words """ from py4DSTEM.process.utils import get_voronoi_vertices - Nx,Ny = ar.shape - points = np.vstack((x,y)).T + + Nx, Ny = ar.shape + points = np.vstack((x, y)).T voronoi = Voronoi(points) - vertices = get_voronoi_vertices(voronoi,Nx,Ny) + vertices = get_voronoi_vertices(voronoi, Nx, Ny) if max_dist is None: - fig,ax = show(ar,returnfig=True,**kwargs) + fig, ax = show(ar, returnfig=True, **kwargs) else: - centers = [(x[i],y[i]) for i in range(len(x))] - fig,ax = show(ar,returnfig=True,**kwargs, - circle={'center':centers,'R':max_dist,'fill':False,'color':color_points}) + centers = [(x[i], y[i]) for i in range(len(x))] + fig, ax = show( + ar, + returnfig=True, + **kwargs, + circle={ + "center": centers, + "R": max_dist, + "fill": False, + "color": color_points, + }, + ) - ax.scatter(voronoi.points[:,1],voronoi.points[:,0],color=color_points) + ax.scatter(voronoi.points[:, 1], voronoi.points[:, 0], color=color_points) for region in range(len(vertices)): vertices_curr = vertices[region] for i in range(len(vertices_curr)): - x0,y0 = vertices_curr[i,:] - xf,yf = vertices_curr[(i+1)%len(vertices_curr),:] - ax.plot((y0,yf),(x0,xf),color=color_lines) - ax.set_xlim([0,Ny]) - ax.set_ylim([0,Nx]) + x0, y0 = vertices_curr[i, :] + xf, yf = vertices_curr[(i + 1) % len(vertices_curr), :] + ax.plot((y0, yf), (x0, xf), color=color_lines) + ax.set_xlim([0, Ny]) + ax.set_ylim([0, Nx]) plt.gca().invert_yaxis() if not returnfig: plt.show() return else: - return fig,ax + return fig, ax + -def show_class_BPs(ar,x,y,s,s2,color='r',color2='y',**kwargs): +def show_class_BPs(ar, x, y, s, s2, color="r", color2="y", **kwargs): """ words """ N = len(x) - assert(N==len(y)==len(s)) + assert N == len(y) == len(s) - fig,ax = 
show(ar,returnfig=True,**kwargs) - ax.scatter(y,x,s=s2,color=color2) - ax.scatter(y,x,s=s,color=color) + fig, ax = show(ar, returnfig=True, **kwargs) + ax.scatter(y, x, s=s2, color=color2) + ax.scatter(y, x, s=s, color=color) plt.show() return -def show_class_BPs_grid(ar,H,W,x,y,get_s,s2,color='r',color2='y',returnfig=False, - axsize=(6,6),titlesize=0,get_bordercolor=None,**kwargs): + +def show_class_BPs_grid( + ar, + H, + W, + x, + y, + get_s, + s2, + color="r", + color2="y", + returnfig=False, + axsize=(6, 6), + titlesize=0, + get_bordercolor=None, + **kwargs +): """ words """ - fig,axs = show_image_grid(lambda i:ar,H,W,axsize=axsize,titlesize=titlesize, - get_bordercolor=get_bordercolor,returnfig=True,**kwargs) + fig, axs = show_image_grid( + lambda i: ar, + H, + W, + axsize=axsize, + titlesize=titlesize, + get_bordercolor=get_bordercolor, + returnfig=True, + **kwargs, + ) for i in range(H): for j in range(W): - ax = axs[i,j] - N = i*W+j + ax = axs[i, j] + N = i * W + j s = get_s(N) - ax.scatter(y,x,s=s2,color=color2) - ax.scatter(y,x,s=s,color=color) + ax.scatter(y, x, s=s2, color=color2) + ax.scatter(y, x, s=s, color=color) if not returnfig: plt.show() return else: - return fig,axs + return fig, axs + def show_strain( strainmap, @@ -326,10 +410,10 @@ def show_strain( vrange_theta, vrange_exy=None, vrange_eyy=None, - flip_theta = False, + flip_theta=False, bkgrd=True, - show_cbars=('exx','eyy','exy','theta'), - bordercolor='k', + show_cbars=("exx", "eyy", "exy", "theta"), + bordercolor="k", borderwidth=1, titlesize=24, ticklabelsize=16, @@ -342,20 +426,21 @@ def show_strain( xaxis_y=0, axes_length=10, axes_width=1, - axes_color='r', - xaxis_space='Q', + axes_color="r", + xaxis_space="Q", labelaxes=True, QR_rotation=0, axes_labelsize=12, - axes_labelcolor='r', - axes_plots=('exx'), - cmap='RdBu_r', + axes_labelcolor="r", + axes_plots=("exx"), + cmap="RdBu_r", layout=0, - figsize=(12,12), - returnfig=False): + figsize=(12, 12), + returnfig=False, +): """ Display a strain map, showing the 4 strain components (e_xx,e_yy,e_xy,theta), and - masking each image with strainmap.get_slice('mask') + masking each image with strainmap.get_slice('mask') Args: strainmap (RealSlice): @@ -363,7 +448,7 @@ def show_strain( vrange_theta (length 2 list or tuple): vrange_exy (length 2 list or tuple): vrange_eyy (length 2 list or tuple): - flip_theta (bool): if True, take negative of angle + flip_theta (bool): if True, take negative of angle bkgrd (bool): show_cbars (tuple of strings): Show colorbars for the specified axes. Must be a tuple containing any, all, or none of ('exx','eyy','exy','theta'). @@ -397,11 +482,11 @@ def show_strain( returnfig (bool): """ # Lookup table for different layouts - assert(layout in (0,1,2)) + assert layout in (0, 1, 2) layout_lookup = { - 0:['left','right','left','right'], - 1:['bottom','bottom','bottom','bottom'], - 2:['right','right','right','right'], + 0: ["left", "right", "left", "right"], + 1: ["bottom", "bottom", "bottom", "bottom"], + 2: ["right", "right", "right", "right"], } layout_p = layout_lookup[layout] @@ -410,141 +495,204 @@ def show_strain( vrange_exy = vrange_exx if vrange_eyy is None: vrange_eyy = vrange_exx - for vrange in (vrange_exx,vrange_eyy,vrange_exy,vrange_theta): - assert(len(vrange)==2), 'vranges must have length 2' - vmin_exx,vmax_exx = vrange_exx[0]/100.,vrange_exx[1]/100. - vmin_eyy,vmax_eyy = vrange_eyy[0]/100.,vrange_eyy[1]/100. - vmin_exy,vmax_exy = vrange_exy[0]/100.,vrange_exy[1]/100. 
+ for vrange in (vrange_exx, vrange_eyy, vrange_exy, vrange_theta): + assert len(vrange) == 2, "vranges must have length 2" + vmin_exx, vmax_exx = vrange_exx[0] / 100.0, vrange_exx[1] / 100.0 + vmin_eyy, vmax_eyy = vrange_eyy[0] / 100.0, vrange_eyy[1] / 100.0 + vmin_exy, vmax_exy = vrange_exy[0] / 100.0, vrange_exy[1] / 100.0 # theta is plotted in units of degrees - vmin_theta,vmax_theta = vrange_theta[0]/(180.0/np.pi),vrange_theta[1]/(180.0/np.pi) + vmin_theta, vmax_theta = vrange_theta[0] / (180.0 / np.pi), vrange_theta[1] / ( + 180.0 / np.pi + ) # Get images - e_xx = np.ma.array(strainmap.get_slice('e_xx').data,mask=strainmap.get_slice('mask').data==False) - e_yy = np.ma.array(strainmap.get_slice('e_yy').data,mask=strainmap.get_slice('mask').data==False) - e_xy = np.ma.array(strainmap.get_slice('e_xy').data,mask=strainmap.get_slice('mask').data==False) - theta = np.ma.array(strainmap.get_slice('theta').data,mask=strainmap.get_slice('mask').data==False) - if flip_theta == True: - theta = - theta + e_xx = np.ma.array( + strainmap.get_slice("e_xx").data, mask=strainmap.get_slice("mask").data == False + ) + e_yy = np.ma.array( + strainmap.get_slice("e_yy").data, mask=strainmap.get_slice("mask").data == False + ) + e_xy = np.ma.array( + strainmap.get_slice("e_xy").data, mask=strainmap.get_slice("mask").data == False + ) + theta = np.ma.array( + strainmap.get_slice("theta").data, + mask=strainmap.get_slice("mask").data == False, + ) + if flip_theta == True: + theta = -theta # Plot - if layout==0: - fig,((ax11,ax12),(ax21,ax22)) = plt.subplots(2,2,figsize=figsize) - elif layout==1: - fig,(ax11,ax12,ax21,ax22) = plt.subplots(1,4,figsize=figsize) + if layout == 0: + fig, ((ax11, ax12), (ax21, ax22)) = plt.subplots(2, 2, figsize=figsize) + elif layout == 1: + fig, (ax11, ax12, ax21, ax22) = plt.subplots(1, 4, figsize=figsize) else: - fig,(ax11,ax12,ax21,ax22) = plt.subplots(4,1,figsize=figsize) + fig, (ax11, ax12, ax21, ax22) = plt.subplots(4, 1, figsize=figsize) cax11 = show( e_xx, - figax=(fig,ax11), + figax=(fig, ax11), vmin=vmin_exx, vmax=vmax_exx, - intensity_range='absolute', + intensity_range="absolute", cmap=cmap, - returncax=True) + returncax=True, + ) cax12 = show( e_yy, - figax=(fig,ax12), + figax=(fig, ax12), vmin=vmin_eyy, vmax=vmax_eyy, - intensity_range='absolute', + intensity_range="absolute", cmap=cmap, - returncax=True) + returncax=True, + ) cax21 = show( e_xy, - figax=(fig,ax21), + figax=(fig, ax21), vmin=vmin_exy, vmax=vmax_exy, - intensity_range='absolute', + intensity_range="absolute", cmap=cmap, - returncax=True) + returncax=True, + ) cax22 = show( theta, - figax=(fig,ax22), + figax=(fig, ax22), vmin=vmin_theta, vmax=vmax_theta, - intensity_range='absolute', + intensity_range="absolute", cmap=cmap, - returncax=True) - ax11.set_title(r'$\epsilon_{xx}$',size=titlesize) - ax12.set_title(r'$\epsilon_{yy}$',size=titlesize) - ax21.set_title(r'$\epsilon_{xy}$',size=titlesize) - ax22.set_title(r'$\theta$',size=titlesize) + returncax=True, + ) + ax11.set_title(r"$\epsilon_{xx}$", size=titlesize) + ax12.set_title(r"$\epsilon_{yy}$", size=titlesize) + ax21.set_title(r"$\epsilon_{xy}$", size=titlesize) + ax22.set_title(r"$\theta$", size=titlesize) # Add black background if bkgrd: mask = np.ma.masked_where( - strainmap.get_slice('mask').data.astype(bool), - np.zeros_like(strainmap.get_slice('mask').data)) - ax11.matshow(mask,cmap='gray') - ax12.matshow(mask,cmap='gray') - ax21.matshow(mask,cmap='gray') - ax22.matshow(mask,cmap='gray') + strainmap.get_slice("mask").data.astype(bool), + 
np.zeros_like(strainmap.get_slice("mask").data), + ) + ax11.matshow(mask, cmap="gray") + ax12.matshow(mask, cmap="gray") + ax21.matshow(mask, cmap="gray") + ax22.matshow(mask, cmap="gray") # Colorbars - show_cbars = np.array(['exx' in show_cbars,'eyy' in show_cbars, - 'exy' in show_cbars,'theta' in show_cbars]) + show_cbars = np.array( + [ + "exx" in show_cbars, + "eyy" in show_cbars, + "exy" in show_cbars, + "theta" in show_cbars, + ] + ) if np.any(show_cbars): divider11 = make_axes_locatable(ax11) divider12 = make_axes_locatable(ax12) divider21 = make_axes_locatable(ax21) divider22 = make_axes_locatable(ax22) - cbax11 = divider11.append_axes(layout_p[0],size="4%",pad=0.15) - cbax12 = divider12.append_axes(layout_p[1],size="4%",pad=0.15) - cbax21 = divider21.append_axes(layout_p[2],size="4%",pad=0.15) - cbax22 = divider22.append_axes(layout_p[3],size="4%",pad=0.15) - for (ind,show_cbar,cax,cbax,vmin,vmax,tickside,tickunits) in zip( + cbax11 = divider11.append_axes(layout_p[0], size="4%", pad=0.15) + cbax12 = divider12.append_axes(layout_p[1], size="4%", pad=0.15) + cbax21 = divider21.append_axes(layout_p[2], size="4%", pad=0.15) + cbax22 = divider22.append_axes(layout_p[3], size="4%", pad=0.15) + for ind, show_cbar, cax, cbax, vmin, vmax, tickside, tickunits in zip( range(4), show_cbars, - (cax11,cax12,cax21,cax22), - (cbax11,cbax12,cbax21,cbax22), - (vmin_exx,vmin_eyy,vmin_exy,vmin_theta), - (vmax_exx,vmax_eyy,vmax_exy,vmax_theta), - (layout_p[0],layout_p[1],layout_p[2],layout_p[3]), - ('% ',' %','% ',r' $^\circ$')): + (cax11, cax12, cax21, cax22), + (cbax11, cbax12, cbax21, cbax22), + (vmin_exx, vmin_eyy, vmin_exy, vmin_theta), + (vmax_exx, vmax_eyy, vmax_exy, vmax_theta), + (layout_p[0], layout_p[1], layout_p[2], layout_p[3]), + ("% ", " %", "% ", r" $^\circ$"), + ): if show_cbar: - ticks = np.linspace(vmin,vmax,ticknumber,endpoint=True) + ticks = np.linspace(vmin, vmax, ticknumber, endpoint=True) if ind < 3: - ticklabels = np.round(np.linspace( - 100*vmin,100*vmax,ticknumber,endpoint=True),decimals=2).astype(str) + ticklabels = np.round( + np.linspace(100 * vmin, 100 * vmax, ticknumber, endpoint=True), + decimals=2, + ).astype(str) else: - ticklabels = np.round(np.linspace( - (180/np.pi)*vmin,(180/np.pi)*vmax,ticknumber,endpoint=True),decimals=2).astype(str) - - if tickside in ('left','right'): - cb = plt.colorbar(cax,cax=cbax,ticks=ticks,orientation='vertical') - cb.ax.set_yticklabels(ticklabels,size=ticklabelsize) + ticklabels = np.round( + np.linspace( + (180 / np.pi) * vmin, + (180 / np.pi) * vmax, + ticknumber, + endpoint=True, + ), + decimals=2, + ).astype(str) + + if tickside in ("left", "right"): + cb = plt.colorbar( + cax, cax=cbax, ticks=ticks, orientation="vertical" + ) + cb.ax.set_yticklabels(ticklabels, size=ticklabelsize) cbax.yaxis.set_ticks_position(tickside) - cbax.set_ylabel(tickunits,size=unitlabelsize,rotation=0) + cbax.set_ylabel(tickunits, size=unitlabelsize, rotation=0) cbax.yaxis.set_label_position(tickside) else: - cb = plt.colorbar(cax,cax=cbax,ticks=ticks,orientation='horizontal') - cb.ax.set_xticklabels(ticklabels,size=ticklabelsize) + cb = plt.colorbar( + cax, cax=cbax, ticks=ticks, orientation="horizontal" + ) + cb.ax.set_xticklabels(ticklabels, size=ticklabelsize) cbax.xaxis.set_ticks_position(tickside) - cbax.set_xlabel(tickunits,size=unitlabelsize,rotation=0) + cbax.set_xlabel(tickunits, size=unitlabelsize, rotation=0) cbax.xaxis.set_label_position(tickside) else: - cbax.axis('off') + cbax.axis("off") # Add coordinate axes if show_axes: - 
assert(xaxis_space in ('R','Q')), "xaxis_space must be 'R' or 'Q'" - show_which_axes = np.array(['exx' in axes_plots,'eyy' in axes_plots, - 'exy' in axes_plots,'theta' in axes_plots]) - for _show,_ax in zip(show_which_axes,(ax11,ax12,ax21,ax22)): + assert xaxis_space in ("R", "Q"), "xaxis_space must be 'R' or 'Q'" + show_which_axes = np.array( + [ + "exx" in axes_plots, + "eyy" in axes_plots, + "exy" in axes_plots, + "theta" in axes_plots, + ] + ) + for _show, _ax in zip(show_which_axes, (ax11, ax12, ax21, ax22)): if _show: - if xaxis_space=='R': - ax_addaxes(_ax,xaxis_x,xaxis_y,axes_length,axes_x0,axes_y0, - width=axes_width,color=axes_color,labelaxes=labelaxes, - labelsize=axes_labelsize,labelcolor=axes_labelcolor) + if xaxis_space == "R": + ax_addaxes( + _ax, + xaxis_x, + xaxis_y, + axes_length, + axes_x0, + axes_y0, + width=axes_width, + color=axes_color, + labelaxes=labelaxes, + labelsize=axes_labelsize, + labelcolor=axes_labelcolor, + ) else: - ax_addaxes_QtoR(_ax,xaxis_x,xaxis_y,axes_length,axes_x0,axes_y0,QR_rotation, - width=axes_width,color=axes_color,labelaxes=labelaxes, - labelsize=axes_labelsize,labelcolor=axes_labelcolor) + ax_addaxes_QtoR( + _ax, + xaxis_x, + xaxis_y, + axes_length, + axes_x0, + axes_y0, + QR_rotation, + width=axes_width, + color=axes_color, + labelaxes=labelaxes, + labelsize=axes_labelsize, + labelcolor=axes_labelcolor, + ) # Add borders if bordercolor is not None: - for ax in (ax11,ax12,ax21,ax22): - for s in ['bottom','top','left','right']: + for ax in (ax11, ax12, ax21, ax22): + for s in ["bottom", "top", "left", "right"]: ax.spines[s].set_color(bordercolor) ax.spines[s].set_linewidth(borderwidth) ax.set_xticks([]) @@ -554,54 +702,87 @@ def show_strain( plt.show() return else: - axs = ((ax11,ax12),(ax21,ax22)) - return fig,axs + axs = ((ax11, ax12), (ax21, ax22)) + return fig, axs -def show_pointlabels(ar,x,y,color='lightblue',size=20,alpha=1,returnfig=False,**kwargs): +def show_pointlabels( + ar, x, y, color="lightblue", size=20, alpha=1, returnfig=False, **kwargs +): """ Show enumerated index labels for a set of points """ - fig,ax = show(ar,returnfig=True,**kwargs) - d = {'x':x,'y':y,'size':size,'color':color,'alpha':alpha} - add_pointlabels(ax,d) + fig, ax = show(ar, returnfig=True, **kwargs) + d = {"x": x, "y": y, "size": size, "color": color, "alpha": alpha} + add_pointlabels(ax, d) if returnfig: - return fig,ax + return fig, ax else: plt.show() return -def select_point(ar,x,y,i,color='lightblue',color_selected='r',size=20,returnfig=False,**kwargs): +def select_point( + ar, + x, + y, + i, + color="lightblue", + color_selected="r", + size=20, + returnfig=False, + **kwargs +): """ Show enumerated index labels for a set of points, with one selected point highlighted """ - fig,ax = show(ar,returnfig=True,**kwargs) - d1 = {'x':x,'y':y,'size':size,'color':color} - d2 = {'x':x[i],'y':y[i],'size':size,'color':color_selected,'fontweight':'bold'} - add_pointlabels(ax,d1) - add_pointlabels(ax,d2) + fig, ax = show(ar, returnfig=True, **kwargs) + d1 = {"x": x, "y": y, "size": size, "color": color} + d2 = { + "x": x[i], + "y": y[i], + "size": size, + "color": color_selected, + "fontweight": "bold", + } + add_pointlabels(ax, d1) + add_pointlabels(ax, d2) if returnfig: - return fig,ax + return fig, ax else: plt.show() return -def show_max_peak_spacing(ar,spacing,braggdirections,color='g',lw=2,returnfig=False,**kwargs): - """ Show a circle of radius `spacing` about each Bragg direction - """ - centers = [(braggdirections.data['qx'][i],braggdirections.data['qy'][i]) 
for i in range(braggdirections.length)] - fig,ax = show(ar,circle={'center':centers,'R':spacing,'color':color,'fill':False,'lw':lw}, - returnfig=True,**kwargs) +def show_max_peak_spacing( + ar, spacing, braggdirections, color="g", lw=2, returnfig=False, **kwargs +): + """Show a circle of radius `spacing` about each Bragg direction""" + centers = [ + (braggdirections.data["qx"][i], braggdirections.data["qy"][i]) + for i in range(braggdirections.length) + ] + fig, ax = show( + ar, + circle={ + "center": centers, + "R": spacing, + "color": color, + "fill": False, + "lw": lw, + }, + returnfig=True, + **kwargs, + ) if returnfig: - return fig,ax + return fig, ax else: plt.show() return + def show_origin_meas(data): """ Show the measured positions of the origin. @@ -611,17 +792,19 @@ def show_origin_meas(data): """ from py4DSTEM.data import Calibration from py4DSTEM.datacube import DataCube - if isinstance(data,tuple): - assert len(data)==2 - qx,qy = data - elif isinstance(data,DataCube): - qx,qy = data.calibration.get_origin_meas() - elif isinstance(data,Calibration): - qx,qy = data.get_origin_meas() + + if isinstance(data, tuple): + assert len(data) == 2 + qx, qy = data + elif isinstance(data, DataCube): + qx, qy = data.calibration.get_origin_meas() + elif isinstance(data, Calibration): + qx, qy = data.get_origin_meas() else: raise Exception("data must be of type Datacube or Calibration or tuple") - show_image_grid(get_ar = lambda i:[qx,qy][i],H=1,W=2,cmap='RdBu') + show_image_grid(get_ar=lambda i: [qx, qy][i], H=1, W=2, cmap="RdBu") + def show_origin_fit(data): """ @@ -633,29 +816,49 @@ def show_origin_fit(data): """ from py4DSTEM.data import Calibration from py4DSTEM.datacube import DataCube - if isinstance(data,tuple): - assert len(data)==3 - qx0_meas,qy_meas = data[0] - qx0_fit,qy0_fit = data[1] - qx0_residuals,qy0_residuals = data[2] - elif isinstance(data,DataCube): - qx0_meas,qy0_meas = data.calibration.get_origin_meas() - qx0_fit,qy0_fit = data.calibration.get_origin() - qx0_residuals,qy0_residuals = data.calibration.get_origin_residuals() - elif isinstance(data,Calibration): - qx0_meas,qy0_meas = data.get_origin_meas() - qx0_fit,qy0_fit = data.get_origin() - qx0_residuals,qy0_residuals = data.get_origin_residuals() + + if isinstance(data, tuple): + assert len(data) == 3 + qx0_meas, qy_meas = data[0] + qx0_fit, qy0_fit = data[1] + qx0_residuals, qy0_residuals = data[2] + elif isinstance(data, DataCube): + qx0_meas, qy0_meas = data.calibration.get_origin_meas() + qx0_fit, qy0_fit = data.calibration.get_origin() + qx0_residuals, qy0_residuals = data.calibration.get_origin_residuals() + elif isinstance(data, Calibration): + qx0_meas, qy0_meas = data.get_origin_meas() + qx0_fit, qy0_fit = data.get_origin() + qx0_residuals, qy0_residuals = data.get_origin_residuals() else: raise Exception("data must be of type Datacube or Calibration or tuple") - show_image_grid(get_ar = lambda i:[qx0_meas,qx0_fit,qx0_residuals, - qy0_meas,qy0_fit,qy0_residuals][i], - H=2,W=3,cmap='RdBu') + show_image_grid( + get_ar=lambda i: [ + qx0_meas, + qx0_fit, + qx0_residuals, + qy0_meas, + qy0_fit, + qy0_residuals, + ][i], + H=2, + W=3, + cmap="RdBu", + ) + -def show_selected_dps(datacube,positions,im,bragg_pos=None, - colors=None,HW=None,figsize_im=(6,6),figsize_dp=(4,4), - **kwargs): +def show_selected_dps( + datacube, + positions, + im, + bragg_pos=None, + colors=None, + HW=None, + figsize_im=(6, 6), + figsize_dp=(4, 4), + **kwargs +): """ Shows two plots: first, a real space image overlaid with colored dots at 
the specified positions; second, a grid of diffraction patterns @@ -676,57 +879,74 @@ def show_selected_dps(datacube,positions,im,bragg_pos=None, *diffraction patterns*. Default is `scaling='log'` """ from py4DSTEM.datacube import DataCube - assert isinstance(datacube,DataCube) + + assert isinstance(datacube, DataCube) N = len(positions) - assert(all([len(x)==2 for x in positions])), "Improperly formated argument `positions`" + assert all( + [len(x) == 2 for x in positions] + ), "Improperly formated argument `positions`" if bragg_pos is not None: show_disk_pos = True - assert(len(bragg_pos)==N) + assert len(bragg_pos) == N else: show_disk_pos = False if colors is None: from matplotlib.cm import gist_ncar - linsp = np.linspace(0,1,N,endpoint=False) + + linsp = np.linspace(0, 1, N, endpoint=False) colors = [gist_ncar(i) for i in linsp] - assert(len(colors)==N), "Number of positions and colors don't match" + assert len(colors) == N, "Number of positions and colors don't match" from matplotlib.colors import is_color_like - assert([is_color_like(i) for i in colors]) + + assert [is_color_like(i) for i in colors] if HW is None: W = int(np.ceil(np.sqrt(N))) - if W<3: W=3 - H = int(np.ceil(N/W)) + if W < 3: + W = 3 + H = int(np.ceil(N / W)) else: - H,W = HW - assert(all([isinstance(x,(int,np.integer)) for x in (H,W)])) + H, W = HW + assert all([isinstance(x, (int, np.integer)) for x in (H, W)]) x = [i[0] for i in positions] y = [i[1] for i in positions] - if 'scaling' not in kwargs.keys(): - kwargs['scaling'] = 'log' + if "scaling" not in kwargs.keys(): + kwargs["scaling"] = "log" if not show_disk_pos: - fig,ax = show(im,figsize=figsize_im,returnfig=True) - add_points(ax,d = {'x':x,'y':y,'pointcolor':colors}) - show_image_grid(get_ar=lambda i:datacube.data[x[i],y[i],:,:],H=H,W=W, - get_bordercolor=lambda i:colors[i],axsize=figsize_dp, - **kwargs) + fig, ax = show(im, figsize=figsize_im, returnfig=True) + add_points(ax, d={"x": x, "y": y, "pointcolor": colors}) + show_image_grid( + get_ar=lambda i: datacube.data[x[i], y[i], :, :], + H=H, + W=W, + get_bordercolor=lambda i: colors[i], + axsize=figsize_dp, + **kwargs, + ) else: - show_image_grid(get_ar=lambda i:datacube.data[x[i],y[i],:,:],H=H,W=W, - get_bordercolor=lambda i:colors[i],axsize=figsize_dp, - get_x=lambda i:bragg_pos[i].data['qx'], - get_y=lambda i:bragg_pos[i].data['qy'], - get_pointcolors=lambda i:colors[i], - **kwargs) + show_image_grid( + get_ar=lambda i: datacube.data[x[i], y[i], :, :], + H=H, + W=W, + get_bordercolor=lambda i: colors[i], + axsize=figsize_dp, + get_x=lambda i: bragg_pos[i].data["qx"], + get_y=lambda i: bragg_pos[i].data["qy"], + get_pointcolors=lambda i: colors[i], + **kwargs, + ) -def Complex2RGB(complex_data, vmin=None, vmax = None, hue_start = 0, invert=False): + +def Complex2RGB(complex_data, vmin=None, vmax=None, hue_start=0, invert=False): """ complex_data (array): complex array to plot - vmin (float) : minimum absolute value - vmax (float) : maximum absolute value + vmin (float) : minimum absolute value + vmax (float) : maximum absolute value hue_start (float) : rotational offset for colormap (degrees) inverse (bool) : if True, uses light color scheme """ amp = np.abs(complex_data) - if np.isclose(np.max(amp),np.min(amp)): + if np.isclose(np.max(amp), np.min(amp)): if vmin is None: vmin = 0 if vmax is None: @@ -749,18 +969,19 @@ def Complex2RGB(complex_data, vmin=None, vmax = None, hue_start = 0, invert=Fals phase = np.angle(complex_data) + np.deg2rad(hue_start) amp /= np.max(amp) - rgb = np.zeros(phase.shape 
+(3,)) - rgb[...,0] = 0.5*(np.sin(phase)+1)*amp - rgb[...,1] = 0.5*(np.sin(phase+np.pi/2)+1)*amp - rgb[...,2] = 0.5*(-np.sin(phase)+1)*amp - - return 1-rgb if invert else rgb + rgb = np.zeros(phase.shape + (3,)) + rgb[..., 0] = 0.5 * (np.sin(phase) + 1) * amp + rgb[..., 1] = 0.5 * (np.sin(phase + np.pi / 2) + 1) * amp + rgb[..., 2] = 0.5 * (-np.sin(phase) + 1) * amp + + return 1 - rgb if invert else rgb + -def add_colorbar_arg(cax, vmin = None, vmax = None, hue_start = 0, invert = False): +def add_colorbar_arg(cax, vmin=None, vmax=None, hue_start=0, invert=False): """ cax : axis to add cbar too - vmin (float) : minimum absolute value - vmax (float) : maximum absolute value + vmin (float) : minimum absolute value + vmax (float) : maximum absolute value hue_start (float) : rotational offset for colormap (degrees) inverse (bool) : if True, uses light color scheme """ @@ -778,6 +999,7 @@ def add_colorbar_arg(cax, vmin = None, vmax = None, hue_start = 0, invert = Fals [r"$-\pi$", r"$-\dfrac{\pi}{2}$", "$0$", r"$\dfrac{\pi}{2}$", r"$\pi$"] ) + def show_complex( ar_complex, vmin=None, @@ -787,7 +1009,7 @@ def show_complex( pixelunits="pixels", pixelsize=1, returnfig=False, - hue_start = 0, + hue_start=0, invert=False, **kwargs ): @@ -799,7 +1021,7 @@ def show_complex( such as [array1, array2], then arrays are horizonally plotted in one figure vmin (float, optional) : minimum absolute value vmax (float, optional) : maximum absolute value - if None, vmin/vmax are set to fractions of the distribution of pixel values in the array, + if None, vmin/vmax are set to fractions of the distribution of pixel values in the array, e.g. vmin=0.02 will set the minumum display value to saturate the lower 2% of pixels cbar (bool, optional) : if True, include color wheel scalebar (bool, optional) : if True, adds scale bar @@ -808,21 +1030,32 @@ def show_complex( returnfig (bool, optional) : if True, the function returns the tuple (figure,axis) hue_start (float, optional) : rotational offset for colormap (degrees) inverse (bool) : if True, uses light color scheme - + Returns: if returnfig==False (default), the figure is plotted and nothing is returned. if returnfig==True, return the figure and the axis. 
""" # convert to complex colors - ar_complex = ar_complex[0] if (isinstance(ar_complex,list) and len(ar_complex) == 1) else ar_complex + ar_complex = ( + ar_complex[0] + if (isinstance(ar_complex, list) and len(ar_complex) == 1) + else ar_complex + ) if isinstance(ar_complex, list): if isinstance(ar_complex[0], list): - rgb = [Complex2RGB(ar, vmin, vmax, hue_start = hue_start, invert=invert) for sublist in ar_complex for ar in sublist] + rgb = [ + Complex2RGB(ar, vmin, vmax, hue_start=hue_start, invert=invert) + for sublist in ar_complex + for ar in sublist + ] H = len(ar_complex) W = len(ar_complex[0]) else: - rgb = [Complex2RGB(ar, vmin, vmax, hue_start=hue_start, invert=invert) for ar in ar_complex] + rgb = [ + Complex2RGB(ar, vmin, vmax, hue_start=hue_start, invert=invert) + for ar in ar_complex + ] if len(rgb[0].shape) == 4: H = len(ar_complex) W = rgb[0].shape[0] @@ -840,7 +1073,7 @@ def show_complex( is_grid = True H = rgb.shape[0] W = rgb.shape[1] - rgb = rgb.reshape((-1,)+rgb.shape[-3:]) + rgb = rgb.reshape((-1,) + rgb.shape[-3:]) else: is_grid = False # plot @@ -895,7 +1128,7 @@ def show_complex( ktheta = kra * np.exp(1j * ktheta) # convert to hsv - rgb = Complex2RGB(ktheta, 0, 0.4, hue_start = hue_start, invert=invert) + rgb = Complex2RGB(ktheta, 0, 0.4, hue_start=hue_start, invert=invert) ind = kra > 0.4 rgb[ind] = [1, 1, 1] diff --git a/setup.py b/setup.py index b0c7fa081..069bf1600 100644 --- a/setup.py +++ b/setup.py @@ -1,55 +1,64 @@ from setuptools import setup, find_packages from distutils.util import convert_path -with open("README.md","r") as f: +with open("README.md", "r") as f: long_description = f.read() version_ns = {} -vpath = convert_path('py4DSTEM/version.py') +vpath = convert_path("py4DSTEM/version.py") with open(vpath) as version_file: exec(version_file.read(), version_ns) setup( - name='py4DSTEM', - version=version_ns['__version__'], + name="py4DSTEM", + version=version_ns["__version__"], packages=find_packages(), - description='An open source python package for processing and analysis of 4D STEM data.', + description="An open source python package for processing and analysis of 4D STEM data.", long_description=long_description, long_description_content_type="text/markdown", - url='https://github.com/py4dstem/py4DSTEM/', - author='Benjamin H. Savitzky', - author_email='ben.savitzky@gmail.com', - license='GNU GPLv3', + url="https://github.com/py4dstem/py4DSTEM/", + author="Benjamin H. 
Savitzky", + author_email="ben.savitzky@gmail.com", + license="GNU GPLv3", keywords="STEM 4DSTEM", - python_requires='>=3.9,<3.12', + python_requires=">=3.9,<3.12", install_requires=[ - 'numpy >= 1.19', - 'scipy >= 1.5.2', - 'h5py >= 3.2.0', - 'hdf5plugin >= 4.1.3', - 'ncempy >= 1.8.1', - 'matplotlib >= 3.2.2', - 'scikit-image >= 0.17.2', - 'scikit-learn >= 0.23.2', - 'scikit-optimize >= 0.9.0', - 'tqdm >= 4.46.1', - 'dill >= 0.3.3', - 'gdown >= 4.4.0', - 'dask >= 2.3.0', - 'distributed >= 2.3.0', - 'emdfile >= 0.0.10', - ], + "numpy >= 1.19", + "scipy >= 1.5.2", + "h5py >= 3.2.0", + "hdf5plugin >= 4.1.3", + "ncempy >= 1.8.1", + "matplotlib >= 3.2.2", + "scikit-image >= 0.17.2", + "scikit-learn >= 0.23.2", + "scikit-optimize >= 0.9.0", + "tqdm >= 4.46.1", + "dill >= 0.3.3", + "gdown >= 4.7.1", + "dask >= 2.3.0", + "distributed >= 2.3.0", + "emdfile >= 0.0.13", + "mpire >= 2.7.1", + "threadpoolctl >= 3.1.0", + ], extras_require={ - 'ipyparallel': ['ipyparallel >= 6.2.4', 'dill >= 0.3.3'], - 'cuda': ['cupy >= 10.0.0'], - 'acom': ['pymatgen >= 2022', 'mp-api == 0.24.1'], - 'aiml': ['tensorflow == 2.4.1','tensorflow-addons <= 0.14.0','crystal4D'], - 'aiml-cuda': ['tensorflow == 2.4.1','tensorflow-addons <= 0.14.0','crystal4D','cupy'], - 'numba': ['numba >= 0.49.1'] - }, + "ipyparallel": ["ipyparallel >= 6.2.4", "dill >= 0.3.3"], + "cuda": ["cupy >= 10.0.0"], + "acom": ["pymatgen >= 2022", "mp-api == 0.24.1"], + "aiml": ["tensorflow == 2.4.1", "tensorflow-addons <= 0.14.0", "crystal4D"], + "aiml-cuda": [ + "tensorflow == 2.4.1", + "tensorflow-addons <= 0.14.0", + "crystal4D", + "cupy >= 10.0.0", + ], + "numba": ["numba >= 0.49.1"], + }, package_data={ - 'py4DSTEM':['process/utils/scattering_factors.txt', - 'process/diskdetection/multicorr_row_kernel.cu', - 'process/diskdetection/multicorr_col_kernel.cu'] + "py4DSTEM": [ + "process/utils/scattering_factors.txt", + "process/diskdetection/multicorr_row_kernel.cu", + "process/diskdetection/multicorr_col_kernel.cu", + ] }, ) diff --git a/test/download_test_data.py b/test/download_test_data.py index 199258557..913dc73a6 100644 --- a/test/download_test_data.py +++ b/test/download_test_data.py @@ -5,17 +5,11 @@ from py4DSTEM import _TESTPATH -filepath = _TESTPATH +filepath = _TESTPATH -if __name__ == '__main__': +if __name__ == "__main__": from py4DSTEM.io import download_file_from_google_drive as download - download( - id_ = 'unit_test_data', - destination = filepath, - overwrite = True - ) - - + download(id_="unit_test_data", destination=filepath, overwrite=True) diff --git a/test/gettestdata.py b/test/gettestdata.py index a84e5b9b3..0cb6cb964 100644 --- a/test/gettestdata.py +++ b/test/gettestdata.py @@ -9,39 +9,36 @@ from py4DSTEM.io import gdrive_download as download - # Make the argument parser parser = argparse.ArgumentParser( - description = "A command line tool for downloading data to run the py4DSTEM test suite" + description="A command line tool for downloading data to run the py4DSTEM test suite" ) # Set up data download options data_options = [ - 'tutorials', - 'io', - 'basic', - 'strain', + "tutorials", + "io", + "basic", + "strain", ] # Add arguments parser.add_argument( "data", - help = "which data to download.", - choices = data_options, + help="which data to download.", + choices=data_options, ) parser.add_argument( - "-o", "--overwrite", - help = "if turned on, overwrite files that are already present. 
Otherwise, skips these files.", - action = "store_true" + "-o", + "--overwrite", + help="if turned on, overwrite files that are already present. Otherwise, skips these files.", + action="store_true", ) parser.add_argument( - "-v", "--verbose", - help = "turn on verbose output", - action = "store_true" + "-v", "--verbose", help="turn on verbose output", action="store_true" ) - # Get the command line arguments args = parser.parse_args() @@ -52,34 +49,26 @@ # Set data collection key -if args.data == 'tutorials': - data = ['tutorials'] -elif args.data == 'io': - data = ['test_io','test_arina'] -elif args.data == 'basic': - data = ['small_datacube'] -elif args.data == 'strain': - data = ['strain'] +if args.data == "tutorials": + data = ["tutorials"] +elif args.data == "io": + data = ["test_io", "test_arina"] +elif args.data == "basic": + data = ["small_datacube"] +elif args.data == "strain": + data = ["strain"] else: raise Exception(f"invalid data choice, {args.data}") # Download data for d in data: - download( - d, - destination = testpath, - overwrite = args.overwrite, - verbose = args.verbose - ) + download(d, destination=testpath, overwrite=args.overwrite, verbose=args.verbose) # Always download the basic datacube -if args.data != 'basic': +if args.data != "basic": download( - 'small_datacube', - destination = testpath, - overwrite = args.overwrite, - verbose = args.verbose + "small_datacube", + destination=testpath, + overwrite=args.overwrite, + verbose=args.verbose, ) - - - diff --git a/test/test_braggvectors.py b/test/test_braggvectors.py index 36ec138a0..1b7b6603a 100644 --- a/test/test_braggvectors.py +++ b/test/test_braggvectors.py @@ -3,93 +3,90 @@ from os.path import join # set filepath -path = join(py4DSTEM._TESTPATH,"test_io/legacy_v0.9_simAuNanoplatelet_bin.h5") - - +path = join(py4DSTEM._TESTPATH, "test_io/legacy_v0.9_simAuNanoplatelet_bin.h5") class TestDiskDetectionBasic: - # setup/teardown def setup_class(cls): - # Read sim Au datacube - datacube = py4DSTEM.io.read( - path, - data_id = 'polyAu_4DSTEM' - ) + datacube = py4DSTEM.io.read(path, data_id="polyAu_4DSTEM") cls.datacube = datacube # prepare a probe - mask = np.zeros(datacube.Rshape,dtype=bool) - mask[28:33,14:19] = 1 - probe = datacube.get_vacuum_probe( ROI=mask ) - alpha_pr,qx0_pr,qy0_pr = py4DSTEM.process.calibration.get_probe_size( probe.probe ) + mask = np.zeros(datacube.Rshape, dtype=bool) + mask[28:33, 14:19] = 1 + probe = datacube.get_vacuum_probe(ROI=mask) + alpha_pr, qx0_pr, qy0_pr = py4DSTEM.process.calibration.get_probe_size( + probe.probe + ) probe.get_kernel( - mode='sigmoid', - origin=(qx0_pr,qy0_pr), - radii=(alpha_pr,2*alpha_pr) + mode="sigmoid", origin=(qx0_pr, qy0_pr), radii=(alpha_pr, 2 * alpha_pr) ) cls.probe = probe # Set disk detection parameters cls.detect_params = { - 'corrPower': 1.0, - 'sigma': 0, - 'edgeBoundary': 2, - 'minRelativeIntensity': 0, - 'minAbsoluteIntensity': 8, - 'minPeakSpacing': 4, - 'subpixel' : 'poly', - 'maxNumPeaks': 1000, - # 'CUDA': True, + "corrPower": 1.0, + "sigma": 0, + "edgeBoundary": 2, + "minRelativeIntensity": 0, + "minAbsoluteIntensity": 8, + "minPeakSpacing": 4, + "subpixel": "poly", + "maxNumPeaks": 1000, + # 'CUDA': True, } # find disks cls.braggpeaks = datacube.find_Bragg_disks( - template = probe.kernel, + template=probe.kernel, **cls.detect_params, ) # set an arbitrary center for testing - cls.braggpeaks.calibration.set_origin((datacube.Qshape[0]/2,datacube.Qshape[1]/2)) - - + cls.braggpeaks.calibration.set_origin( + (datacube.Qshape[0] / 2, 
datacube.Qshape[1] / 2) + ) # tests - def test_BraggVectors_import(self): - from py4DSTEM.braggvectors import BraggVectors - pass + pass def test_disk_detection_selected_positions(self): - - rxs = 36,15,11,59,32,34 - rys = 9,15,31,39,20,68, + rxs = 36, 15, 11, 59, 32, 34 + rys = ( + 9, + 15, + 31, + 39, + 20, + 68, + ) disks_selected = self.datacube.find_Bragg_disks( - data = (rxs, rys), - template = self.probe.kernel, + data=(rxs, rys), + template=self.probe.kernel, **self.detect_params, ) def test_BraggVectors(self): - print(self.braggpeaks) print() - print(self.braggpeaks.raw[0,0]) + print(self.braggpeaks.raw[0, 0]) print() - print(self.braggpeaks.cal[0,0]) + print(self.braggpeaks.cal[0, 0]) print() - print(self.braggpeaks.get_vectors( - scan_x=5,scan_y=5, - center=True,ellipse=False,pixel=False,rotate=False - )) - - - - - + print( + self.braggpeaks.get_vectors( + scan_x=5, + scan_y=5, + center=True, + ellipse=False, + pixel=False, + rotate=False, + ) + ) diff --git a/test/test_calibration.py b/test/test_calibration.py index 80cb03493..78a24d800 100644 --- a/test/test_calibration.py +++ b/test/test_calibration.py @@ -13,7 +13,6 @@ class TestCalibration: - # setup def setup_class(cls): @@ -28,47 +27,33 @@ def teardown_method(self): if exists(path_out): remove(path_out) - # test def test_imported_datacube_calibration(self): - datacube = py4DSTEM.import_file(path_datacube) - assert(hasattr(datacube,'calibration')) - assert(isinstance(datacube.calibration,Calibration)) - assert(hasattr(datacube,'root')) - assert(isinstance(datacube.root,py4DSTEM.Root)) - + assert hasattr(datacube, "calibration") + assert isinstance(datacube.calibration, Calibration) + assert hasattr(datacube, "root") + assert isinstance(datacube.root, py4DSTEM.Root) def test_instantiated_datacube_calibration(self): + datacube = py4DSTEM.DataCube(data=np.ones((4, 8, 128, 128))) - datacube = py4DSTEM.DataCube( - data = np.ones((4,8,128,128)) - ) - - assert(hasattr(datacube,'calibration')) - assert(isinstance(datacube.calibration,Calibration)) - assert(hasattr(datacube,'root')) - assert(isinstance(datacube.root,py4DSTEM.Root)) + assert hasattr(datacube, "calibration") + assert isinstance(datacube.calibration, Calibration) + assert hasattr(datacube, "root") + assert isinstance(datacube.root, py4DSTEM.Root) datacube.calibration.set_Q_pixel_size(10) - py4DSTEM.save( - path_out, - datacube - ) + py4DSTEM.save(path_out, datacube) new_datacube = py4DSTEM.read(path_out) - assert(hasattr(new_datacube,'calibration')) - assert(isinstance(new_datacube.calibration,Calibration)) - assert(hasattr(new_datacube,'root')) - assert(isinstance(new_datacube.root,py4DSTEM.Root)) - - assert(new_datacube.calibration.get_Q_pixel_size() == 10) - - - - + assert hasattr(new_datacube, "calibration") + assert isinstance(new_datacube.calibration, Calibration) + assert hasattr(new_datacube, "root") + assert isinstance(new_datacube.root, py4DSTEM.Root) + assert new_datacube.calibration.get_Q_pixel_size() == 10 diff --git a/test/test_crystal.py b/test/test_crystal.py index 2480655f3..d5938628f 100644 --- a/test/test_crystal.py +++ b/test/test_crystal.py @@ -1,13 +1,11 @@ import numpy as np -#from py4DSTEM.classes import ( -# Crystal -#) - +# from py4DSTEM.classes import ( +# Crystal +# ) class TestCrystal: - def setup_cls(self): pass @@ -20,16 +18,8 @@ def setup_method(self): def teardown_method(self): pass - - def test_Crystal(self): - - #crystal = Crystal( **args ) - #assert(isinstance(crystal,Crystal)) + # crystal = Crystal( **args ) + # 
assert(isinstance(crystal,Crystal)) pass - - - - - diff --git a/test/test_datacube.py b/test/test_datacube.py index 7e0f29188..e50590fbf 100644 --- a/test/test_datacube.py +++ b/test/test_datacube.py @@ -6,12 +6,9 @@ path = py4DSTEM._TESTPATH + "/small_datacube.dm4" - class TestDataCube: - # setup/teardown def setup_class(cls): - # Read datacube datacube = py4DSTEM.import_file(path) cls.datacube = datacube @@ -19,19 +16,17 @@ def setup_class(cls): # tests def test_binning_default_dtype(self): - dtype = self.datacube.data.dtype - assert(dtype == np.uint16) + assert dtype == np.uint16 self.datacube.bin_Q(2) - assert(self.datacube.data.dtype == dtype) + assert self.datacube.data.dtype == dtype new_dtype = np.uint32 self.datacube.bin_Q(2, dtype=new_dtype) - assert(self.datacube.data.dtype == new_dtype) - assert(self.datacube.data.dtype != dtype) + assert self.datacube.data.dtype == new_dtype + assert self.datacube.data.dtype != dtype pass - diff --git a/test/test_import.py b/test/test_import.py index 6e1b25d7e..fbaa4285d 100644 --- a/test/test_import.py +++ b/test/test_import.py @@ -1,7 +1,7 @@ # test import + def test_import(): import py4DSTEM - py4DSTEM.__version__ - + py4DSTEM.__version__ diff --git a/test/test_misc.py b/test/test_misc.py index 275eb767b..aafb2de22 100644 --- a/test/test_misc.py +++ b/test/test_misc.py @@ -3,30 +3,22 @@ def test_attach(): - """ tests to make sure Data.attach handles metadata merging correctly - """ - - x = py4DSTEM.DiffractionSlice(np.ones((5,5)), name='x') - y = py4DSTEM.DiffractionSlice(np.ones((5,5)), name='y') + """tests to make sure Data.attach handles metadata merging correctly""" + x = py4DSTEM.DiffractionSlice(np.ones((5, 5)), name="x") + y = py4DSTEM.DiffractionSlice(np.ones((5, 5)), name="y") x.calibration.set_Q_pixel_size(50) y.calibration.set_Q_pixel_size(2) x.attach(y) - assert('y' in x.treekeys) - assert(x.calibration.get_Q_pixel_size() == 50) + assert "y" in x.treekeys + assert x.calibration.get_Q_pixel_size() == 50 def test_datacube_copy(): - """ tests datacube.copy() - """ - x = py4DSTEM.DataCube(data=np.zeros((3,3,4,4))) + """tests datacube.copy()""" + x = py4DSTEM.DataCube(data=np.zeros((3, 3, 4, 4))) y = x.copy() - assert(isinstance(y,py4DSTEM.DataCube)) - - - - - + assert isinstance(y, py4DSTEM.DataCube) diff --git a/test/test_native_io/test_calibration_io.py b/test/test_native_io/test_calibration_io.py index a753c7c81..c0f9cd4ab 100644 --- a/test/test_native_io/test_calibration_io.py +++ b/test/test_native_io/test_calibration_io.py @@ -3,14 +3,13 @@ from os.path import join # set filepath -#path = join(py4DSTEM._TESTPATH, "filename") - +# path = join(py4DSTEM._TESTPATH, "filename") # class TestCalibrationIO: -# -# -# +# +# +# # def test_datacube_cal_io(self): # # TODO # # make a datacube @@ -20,8 +19,8 @@ # # check its calibration # assert 0 # pass -# -# +# +# # def test_datacube_child_node(self): # # TODO # # make a datacube @@ -35,10 +34,3 @@ # # check its calibration # assert 0 # pass - - - - - - - diff --git a/test/test_native_io/test_listwrite.py b/test/test_native_io/test_listwrite.py new file mode 100644 index 000000000..e90d2ccd8 --- /dev/null +++ b/test/test_native_io/test_listwrite.py @@ -0,0 +1,29 @@ +import py4DSTEM +import numpy as np + +# filepath +from os import getcwd, remove +from os.path import join, exists + +path = join(getcwd(), "test.h5") + + +def test_listwrite(): + # make two arrays + ar1 = py4DSTEM.RealSlice(data=np.arange(24).reshape((2, 3, 4)), name="array1") + ar2 = 
py4DSTEM.RealSlice(data=np.arange(48).reshape((4, 3, 4)), name="array2") + + # save them + py4DSTEM.save(filepath=path, data=[ar1, ar2], mode="o") + + # read them + data1 = py4DSTEM.read(path, datapath="array1_root") + data2 = py4DSTEM.read(path, datapath="array2_root") + + # check + assert np.array_equal(data1.data, ar1.data) + assert np.array_equal(data2.data, ar2.data) + + # delete the file + if exists(path): + remove(path) diff --git a/test/test_native_io/test_realslice_read.py b/test/test_native_io/test_realslice_read.py index 8fb577ad8..eeee5217d 100644 --- a/test/test_native_io/test_realslice_read.py +++ b/test/test_native_io/test_realslice_read.py @@ -8,10 +8,6 @@ # Set filepaths filepath = join(py4DSTEM._TESTPATH, "test_io/test_realslice_io.h5") -def test_read_realslice(): - realslice = py4DSTEM.read(filepath, datapath='4DSTEM/Fit Data') - - - - +def test_read_realslice(): + realslice = py4DSTEM.read(filepath, datapath="4DSTEM/Fit Data") diff --git a/test/test_native_io/test_single_object_io.py b/test/test_native_io/test_single_object_io.py index d1a826c5f..4d2e98d28 100644 --- a/test/test_native_io/test_single_object_io.py +++ b/test/test_native_io/test_single_object_io.py @@ -1,10 +1,10 @@ import numpy as np -from os.path import join,exists +from os.path import join, exists from os import remove from numpy import array_equal import py4DSTEM -from py4DSTEM import save,read +from py4DSTEM import save, read import emdfile as emd from py4DSTEM import ( @@ -16,24 +16,21 @@ VirtualImage, VirtualDiffraction, BraggVectors, - Probe + Probe, ) # Set paths dirpath = py4DSTEM._TESTPATH -path_dm3 = join(dirpath,"test_io/small_dm3_3Dstack.dm3") -path_h5 = join(dirpath,"test.h5") +path_dm3 = join(dirpath, "test_io/small_dm3_3Dstack.dm3") +path_h5 = join(dirpath, "test.h5") -class TestDataCubeIO(): - +class TestDataCubeIO: def test_datacube_instantiation(self): """ Instantiate a datacube and apply basic calibrations """ - datacube = DataCube( - data = np.arange(np.prod((4,5,6,7))).reshape((4,5,6,7)) - ) + datacube = DataCube(data=np.arange(np.prod((4, 5, 6, 7))).reshape((4, 5, 6, 7))) # calibration datacube.calibration.set_Q_pixel_size(0.062) datacube.calibration.set_Q_pixel_units("A^-1") @@ -49,208 +46,180 @@ def test_datacube_io(self): """ datacube = self.test_datacube_instantiation() - assert(isinstance(datacube,DataCube)) + assert isinstance(datacube, DataCube) # test dim vectors - assert(datacube.dim_names[0] == 'Rx') - assert(datacube.dim_names[1] == 'Ry') - assert(datacube.dim_names[2] == 'Qx') - assert(datacube.dim_names[3] == 'Qy') - assert(datacube.dim_units[0] == 'nm') - assert(datacube.dim_units[1] == 'nm') - assert(datacube.dim_units[2] == 'A^-1') - assert(datacube.dim_units[3] == 'A^-1') - assert(datacube.dims[0][1] == 2.8) - assert(datacube.dims[2][1] == 0.062) + assert datacube.dim_names[0] == "Rx" + assert datacube.dim_names[1] == "Ry" + assert datacube.dim_names[2] == "Qx" + assert datacube.dim_names[3] == "Qy" + assert datacube.dim_units[0] == "nm" + assert datacube.dim_units[1] == "nm" + assert datacube.dim_units[2] == "A^-1" + assert datacube.dim_units[3] == "A^-1" + assert datacube.dims[0][1] == 2.8 + assert datacube.dims[2][1] == 0.062 # check the calibrations - assert(datacube.calibration.get_Q_pixel_size() == 0.062) - assert(datacube.calibration.get_Q_pixel_units() == "A^-1") + assert datacube.calibration.get_Q_pixel_size() == 0.062 + assert datacube.calibration.get_Q_pixel_units() == "A^-1" # save and read - save(path_h5,datacube,mode='o') + save(path_h5, datacube, 
mode="o") new_datacube = read(path_h5) # check it's the same - assert(isinstance(new_datacube,DataCube)) - assert(array_equal(datacube.data,new_datacube.data)) - assert(new_datacube.calibration.get_Q_pixel_size() == 0.062) - assert(new_datacube.calibration.get_Q_pixel_units() == "A^-1") - assert(new_datacube.dims[0][1] == 2.8) - assert(new_datacube.dims[2][1] == 0.062) - - + assert isinstance(new_datacube, DataCube) + assert array_equal(datacube.data, new_datacube.data) + assert new_datacube.calibration.get_Q_pixel_size() == 0.062 + assert new_datacube.calibration.get_Q_pixel_units() == "A^-1" + assert new_datacube.dims[0][1] == 2.8 + assert new_datacube.dims[2][1] == 0.062 -class TestBraggVectorsIO(): +class TestBraggVectorsIO: def test_braggvectors_instantiation(self): """ Instantiate a braggvectors instance """ - braggvectors = BraggVectors( - Rshape = (5,6), - Qshape = (7,8) - ) + braggvectors = BraggVectors(Rshape=(5, 6), Qshape=(7, 8)) for x in range(braggvectors.Rshape[0]): for y in range(braggvectors.Rshape[1]): - L = int(4 * (np.sin(x*y)+1)) - braggvectors._v_uncal[x,y].add( - np.ones(L,dtype=braggvectors._v_uncal.dtype) + L = int(4 * (np.sin(x * y) + 1)) + braggvectors._v_uncal[x, y].add( + np.ones(L, dtype=braggvectors._v_uncal.dtype) ) return braggvectors - def test_braggvectors_io(self): - """ Save then read a BraggVectors instance, and compare contents before/after - """ + """Save then read a BraggVectors instance, and compare contents before/after""" braggvectors = self.test_braggvectors_instantiation() - assert(isinstance(braggvectors,BraggVectors)) + assert isinstance(braggvectors, BraggVectors) # save then read - save(path_h5,braggvectors,mode='o') + save(path_h5, braggvectors, mode="o") new_braggvectors = read(path_h5) # check it's the same - assert(isinstance(new_braggvectors,BraggVectors)) - assert(new_braggvectors is not braggvectors) + assert isinstance(new_braggvectors, BraggVectors) + assert new_braggvectors is not braggvectors for x in range(new_braggvectors.shape[0]): for y in range(new_braggvectors.shape[1]): - assert(array_equal( - new_braggvectors._v_uncal[x,y].data, - braggvectors._v_uncal[x,y].data)) + assert array_equal( + new_braggvectors._v_uncal[x, y].data, + braggvectors._v_uncal[x, y].data, + ) class TestSlices: - - # test instantiation def test_diffractionslice_instantiation(self): diffractionslice = DiffractionSlice( - data = np.arange(np.prod((4,8,2))).reshape((4,8,2)), - slicelabels = ['a','b'] + data=np.arange(np.prod((4, 8, 2))).reshape((4, 8, 2)), + slicelabels=["a", "b"], ) return diffractionslice def test_realslice_instantiation(self): realslice = RealSlice( - data = np.arange(np.prod((8,4,2))).reshape((8,4,2)), - slicelabels = ['x','y'] + data=np.arange(np.prod((8, 4, 2))).reshape((8, 4, 2)), + slicelabels=["x", "y"], ) return realslice def test_virtualdiffraction_instantiation(self): virtualdiffraction = VirtualDiffraction( - data = np.arange(np.prod((8,4,2))).reshape((8,4,2)), + data=np.arange(np.prod((8, 4, 2))).reshape((8, 4, 2)), ) return virtualdiffraction def test_virtualimage_instantiation(self): virtualimage = VirtualImage( - data = np.arange(np.prod((8,4,2))).reshape((8,4,2)), + data=np.arange(np.prod((8, 4, 2))).reshape((8, 4, 2)), ) return virtualimage def test_probe_instantiation(self): - probe = Probe( - data = np.arange(8*12).reshape((8,12)) - ) + probe = Probe(data=np.arange(8 * 12).reshape((8, 12))) # add a kernel probe.kernel = np.ones_like(probe.probe) # return return probe - # test io def test_diffractionslice_io(self): - 
""" test diffractionslice io - """ + """test diffractionslice io""" diffractionslice = self.test_diffractionslice_instantiation() - assert(isinstance(diffractionslice,DiffractionSlice)) + assert isinstance(diffractionslice, DiffractionSlice) # save and read - save(path_h5,diffractionslice,mode='o') + save(path_h5, diffractionslice, mode="o") new_diffractionslice = read(path_h5) # check it's the same - assert(isinstance(new_diffractionslice,DiffractionSlice)) - assert(array_equal(diffractionslice.data,new_diffractionslice.data)) - assert(diffractionslice.slicelabels == new_diffractionslice.slicelabels) - + assert isinstance(new_diffractionslice, DiffractionSlice) + assert array_equal(diffractionslice.data, new_diffractionslice.data) + assert diffractionslice.slicelabels == new_diffractionslice.slicelabels def test_realslice_io(self): - """ test realslice io - """ + """test realslice io""" realslice = self.test_realslice_instantiation() - assert(isinstance(realslice,RealSlice)) + assert isinstance(realslice, RealSlice) # save and read - save(path_h5,realslice,mode='o') + save(path_h5, realslice, mode="o") rs = read(path_h5) # check it's the same - assert(isinstance(rs,RealSlice)) - assert(array_equal(realslice.data,rs.data)) - assert(rs.slicelabels == realslice.slicelabels) + assert isinstance(rs, RealSlice) + assert array_equal(realslice.data, rs.data) + assert rs.slicelabels == realslice.slicelabels def test_virtualdiffraction_io(self): - """ test virtualdiffraction io - """ + """test virtualdiffraction io""" virtualdiffraction = self.test_virtualdiffraction_instantiation() - assert(isinstance(virtualdiffraction,VirtualDiffraction)) + assert isinstance(virtualdiffraction, VirtualDiffraction) # save and read - save(path_h5,virtualdiffraction,mode='o') + save(path_h5, virtualdiffraction, mode="o") vd = read(path_h5) # check it's the same - assert(isinstance(vd,VirtualDiffraction)) - assert(array_equal(vd.data,virtualdiffraction.data)) + assert isinstance(vd, VirtualDiffraction) + assert array_equal(vd.data, virtualdiffraction.data) pass def test_virtualimage_io(self): - """ test virtualimage io - """ + """test virtualimage io""" virtualimage = self.test_virtualimage_instantiation() - assert(isinstance(virtualimage,VirtualImage)) + assert isinstance(virtualimage, VirtualImage) # save and read - save(path_h5,virtualimage,mode='o') + save(path_h5, virtualimage, mode="o") virtIm = read(path_h5) # check it's the same - assert(isinstance(virtIm,VirtualImage)) - assert(array_equal(virtualimage.data,virtIm.data)) + assert isinstance(virtIm, VirtualImage) + assert array_equal(virtualimage.data, virtIm.data) pass - def test_probe1_io(self): - """ test probe io - """ + """test probe io""" probe0 = self.test_probe_instantiation() - assert(isinstance(probe0,Probe)) + assert isinstance(probe0, Probe) # save and read - save(path_h5,probe0,mode='o') + save(path_h5, probe0, mode="o") probe = read(path_h5) # check it's the same - assert(isinstance(probe,Probe)) - assert(array_equal(probe0.data,probe.data)) + assert isinstance(probe, Probe) + assert array_equal(probe0.data, probe.data) pass - - - class TestPoints: - def test_qpoints_instantiation(self): qpoints = QPoints( - data = np.ones(10, - dtype = [('qx',float),('qy',float),('intensity',float)] - ) + data=np.ones(10, dtype=[("qx", float), ("qy", float), ("intensity", float)]) ) return qpoints - def test_qpoints_io(self): - """ test qpoints io - """ + """test qpoints io""" qpoints0 = self.test_qpoints_instantiation() - assert(isinstance(qpoints0,QPoints)) 
+ assert isinstance(qpoints0, QPoints) # save and read - save(path_h5,qpoints0,mode='o') + save(path_h5, qpoints0, mode="o") qpoints = read(path_h5) # check it's the same - assert(isinstance(qpoints,QPoints)) - assert(array_equal(qpoints0.data,qpoints.data)) + assert isinstance(qpoints, QPoints) + assert array_equal(qpoints0.data, qpoints.data) pass - - diff --git a/test/test_native_io/test_v0_13.py b/test/test_native_io/test_v0_13.py index 92ab41ca2..e1d91bba4 100644 --- a/test/test_native_io/test_v0_13.py +++ b/test/test_native_io/test_v0_13.py @@ -6,35 +6,27 @@ filepath = join(_TESTPATH, "test_io/legacy_v0.13.h5") - - class TestV13: - # setup/teardown def setup_class(cls): cls.path = filepath pass + @classmethod def teardown_class(cls): pass + def setup_method(self, method): pass + def teardown_method(self, method): pass - - def test_print_tree(self): print_h5_tree(self.path) - def test_read(self): d = read( self.path, ) d - - - - - diff --git a/test/test_native_io/test_v0_14.py b/test/test_native_io/test_v0_14.py index 33230a816..c71638c8e 100644 --- a/test/test_native_io/test_v0_14.py +++ b/test/test_native_io/test_v0_14.py @@ -1,27 +1,21 @@ import py4DSTEM -from os.path import join,exists - - - -path = join(py4DSTEM._TESTPATH,'test_io/legacy_v0.14.h5') +from os.path import join, exists +path = join(py4DSTEM._TESTPATH, "test_io/legacy_v0.14.h5") def _make_v14_test_file(): - # enforce v14 - assert(py4DSTEM.__version__.split('.')[1]=='14'), 'no!' + assert py4DSTEM.__version__.split(".")[1] == "14", "no!" # Set filepaths - filepath_data = join(py4DSTEM._TESTPATH,"test_io/legacy_v0.9_simAuNanoplatelet_bin.h5") - - # Read sim Au datacube - datacube = py4DSTEM.io.read( - filepath_data, - data_id = 'polyAu_4DSTEM' + filepath_data = join( + py4DSTEM._TESTPATH, "test_io/legacy_v0.9_simAuNanoplatelet_bin.h5" ) + # Read sim Au datacube + datacube = py4DSTEM.io.read(filepath_data, data_id="polyAu_4DSTEM") # # Virtual diffraction @@ -29,121 +23,96 @@ def _make_v14_test_file(): datacube.get_dp_mean() datacube.get_dp_max() - # # Disk detection # find a vacuum region import numpy as np - mask = np.zeros(datacube.Rshape,dtype=bool) - mask[28:33,14:19] = 1 + + mask = np.zeros(datacube.Rshape, dtype=bool) + mask[28:33, 14:19] = 1 # generate a probe - probe = datacube.get_vacuum_probe( ROI=mask ) + probe = datacube.get_vacuum_probe(ROI=mask) # Find the center and semiangle - alpha,qx0,qy0 = py4DSTEM.process.probe.get_probe_size( probe.probe ) + alpha, qx0, qy0 = py4DSTEM.process.probe.get_probe_size(probe.probe) # prepare the probe kernel - kern = probe.get_kernel( - mode='sigmoid', - origin=(qx0,qy0), - radii=(alpha,2*alpha) - ) + kern = probe.get_kernel(mode="sigmoid", origin=(qx0, qy0), radii=(alpha, 2 * alpha)) # Set disk detection parameters detect_params = { - 'corrPower': 1.0, - 'sigma': 0, - 'edgeBoundary': 2, - 'minRelativeIntensity': 0, - 'minAbsoluteIntensity': 8, - 'minPeakSpacing': 4, - 'subpixel' : 'poly', - 'maxNumPeaks': 1000, - # 'CUDA': True, + "corrPower": 1.0, + "sigma": 0, + "edgeBoundary": 2, + "minRelativeIntensity": 0, + "minAbsoluteIntensity": 8, + "minPeakSpacing": 4, + "subpixel": "poly", + "maxNumPeaks": 1000, + # 'CUDA': True, } # compute braggpeaks = datacube.find_Bragg_disks( - template = probe.kernel, + template=probe.kernel, **detect_params, ) - # # Virtual Imaging # set geometries - geo_bf = ((qx0,qy0),alpha+6) - geo_df = ((qx0,qy0),(3*alpha,6*alpha)) + geo_bf = ((qx0, qy0), alpha + 6) + geo_df = ((qx0, qy0), (3 * alpha, 6 * alpha)) # bright field 
datacube.get_virtual_image( - mode = 'circle', - geometry = geo_bf, - name = 'bright_field', + mode="circle", + geometry=geo_bf, + name="bright_field", ) # dark field - datacube.get_virtual_image( - mode = 'annulus', - geometry = geo_df, - name = 'dark_field' - ) - + datacube.get_virtual_image(mode="annulus", geometry=geo_df, name="dark_field") # # Write - py4DSTEM.save( - path, - datacube, - tree=None, - mode = 'o' - ) - - - - - + py4DSTEM.save(path, datacube, tree=None, mode="o") class TestV14: - # setup/teardown def setup_class(cls): - if not(exists(path)): - print('no test file for v14 found') - if py4DSTEM.__version__.split('.')[1]=='14': - print('v14 detected. writing new test file...') + if not (exists(path)): + print("no test file for v14 found") + if py4DSTEM.__version__.split(".")[1] == "14": + print("v14 detected. writing new test file...") _make_v14_test_file() else: - raise Exception(f"No v14 testfile was found at path {path}, and a new one can't be written with this py4DSTEM version {py4DSTEM.__version__}") + raise Exception( + f"No v14 testfile was found at path {path}, and a new one can't be written with this py4DSTEM version {py4DSTEM.__version__}" + ) + @classmethod def teardown_class(cls): pass + def setup_method(self, method): pass + def teardown_method(self, method): pass - - def test_meowth(self): - - #py4DSTEM.print_h5_tree(path) + # py4DSTEM.print_h5_tree(path) data = py4DSTEM.read(path) data.tree() - assert(isinstance(data.tree('braggvectors'),py4DSTEM.BraggVectors)) - assert(isinstance(data.tree('bright_field'),py4DSTEM.VirtualImage)) - assert(isinstance(data.tree('dark_field'),py4DSTEM.VirtualImage)) - assert(isinstance(data.tree('dp_max'),py4DSTEM.VirtualDiffraction)) - assert(isinstance(data.tree('dp_mean'),py4DSTEM.VirtualDiffraction)) - assert(isinstance(data.tree('probe'),py4DSTEM.Probe)) + assert isinstance(data.tree("braggvectors"), py4DSTEM.BraggVectors) + assert isinstance(data.tree("bright_field"), py4DSTEM.VirtualImage) + assert isinstance(data.tree("dark_field"), py4DSTEM.VirtualImage) + assert isinstance(data.tree("dp_max"), py4DSTEM.VirtualDiffraction) + assert isinstance(data.tree("dp_mean"), py4DSTEM.VirtualDiffraction) + assert isinstance(data.tree("probe"), py4DSTEM.Probe) pass - - - - - - diff --git a/test/test_native_io/test_v0_9.py b/test/test_native_io/test_v0_9.py index 8024f646c..6ebea1661 100644 --- a/test/test_native_io/test_v0_9.py +++ b/test/test_native_io/test_v0_9.py @@ -1,15 +1,14 @@ from py4DSTEM import read, DataCube, _TESTPATH from os.path import join -path = join(_TESTPATH,"test_io/legacy_v0.9_simAuNanoplatelet_bin.h5") +path = join(_TESTPATH, "test_io/legacy_v0.9_simAuNanoplatelet_bin.h5") -def test_read_v0_9_noID(): +def test_read_v0_9_noID(): d = read(path) d -def test_read_v0_9_withID(): - - d = read(path, data_id="polyAu_4DSTEM") - assert(isinstance(d,DataCube)) +def test_read_v0_9_withID(): + d = read(path, data_id="polyAu_4DSTEM") + assert isinstance(d, DataCube) diff --git a/test/test_nonnative_io/test_arina.py b/test/test_nonnative_io/test_arina.py index c27cb8ef5..c02964cf8 100644 --- a/test/test_nonnative_io/test_arina.py +++ b/test/test_nonnative_io/test_arina.py @@ -8,12 +8,9 @@ def test_read_arina(): - # read - data = py4DSTEM.import_file( filepath ) + data = py4DSTEM.import_file(filepath) # check imported data assert isinstance(data, emdfile.Array) assert isinstance(data, py4DSTEM.DataCube) - - diff --git a/test/test_nonnative_io/test_dm.py b/test/test_nonnative_io/test_dm.py index a3d5aa7b0..ee6f1b2eb 100644 --- 
a/test/test_nonnative_io/test_dm.py +++ b/test/test_nonnative_io/test_dm.py @@ -9,16 +9,16 @@ def test_dmfile_datacube(): - data = py4DSTEM.import_file( filepath_dm4_datacube ) + data = py4DSTEM.import_file(filepath_dm4_datacube) assert isinstance(data, emdfile.Array) assert isinstance(data, py4DSTEM.DataCube) + def test_dmfile_3Darray(): - data = py4DSTEM.import_file( filepath_dm3_3Dstack ) + data = py4DSTEM.import_file(filepath_dm3_3Dstack) assert isinstance(data, emdfile.Array) # TODO # def test_dmfile_multiple_datablocks(): # def test_dmfile_2Darray - diff --git a/test/test_probe.py b/test/test_probe.py index ad10c3100..573a55f31 100644 --- a/test/test_probe.py +++ b/test/test_probe.py @@ -7,12 +7,9 @@ path = py4DSTEM._TESTPATH + "/small_datacube.dm4" - class TestProbe: - # setup/teardown def setup_class(cls): - # Read datacube datacube = py4DSTEM.import_file(path) cls.datacube = datacube @@ -20,46 +17,36 @@ def setup_class(cls): # tests def test_probe_gen_from_dp(self): - p = Probe.from_vacuum_data( - self.datacube[0,0] - ) - assert(isinstance(p,Probe)) + p = Probe.from_vacuum_data(self.datacube[0, 0]) + assert isinstance(p, Probe) pass def test_probe_gen_from_stack(self): # get a 3D stack - x,y = np.zeros(10).astype(int),np.arange(10).astype(int) - data = self.datacube.data[x,y,:,:] + x, y = np.zeros(10).astype(int), np.arange(10).astype(int) + data = self.datacube.data[x, y, :, :] # get the probe - p = Probe.from_vacuum_data( - data - ) - assert(isinstance(p,Probe)) + p = Probe.from_vacuum_data(data) + assert isinstance(p, Probe) pass def test_probe_gen_from_datacube_ROI_1(self): - ROI = np.zeros(self.datacube.Rshape,dtype=bool) - ROI[3:7,5:10] = True - p = self.datacube.get_vacuum_probe( ROI ) - assert(isinstance(p,Probe)) + ROI = np.zeros(self.datacube.Rshape, dtype=bool) + ROI[3:7, 5:10] = True + p = self.datacube.get_vacuum_probe(ROI) + assert isinstance(p, Probe) self.datacube.tree() self.datacube.tree(True) - _p = self.datacube.tree('probe') + _p = self.datacube.tree("probe") print(_p) - assert(p is self.datacube.tree('probe')) + assert p is self.datacube.tree("probe") pass def test_probe_gen_from_datacube_ROI_2(self): - ROI = (3,7,5,10) - p = self.datacube.get_vacuum_probe( ROI ) - assert(isinstance(p,Probe)) - assert(p is self.datacube.tree('probe')) + ROI = (3, 7, 5, 10) + p = self.datacube.get_vacuum_probe(ROI) + assert isinstance(p, Probe) + assert p is self.datacube.tree("probe") pass - - - - - - diff --git a/test/test_strain.py b/test/test_strain.py index bc9b8b58c..d309c5ffb 100644 --- a/test/test_strain.py +++ b/test/test_strain.py @@ -5,29 +5,23 @@ # set filepath -path = join(py4DSTEM._TESTPATH,"strain/downsample_Si_SiGe_analysis_braggdisks_cal.h5") +path = join(py4DSTEM._TESTPATH, "strain/downsample_Si_SiGe_analysis_braggdisks_cal.h5") class TestStrainMap: - # setup/teardown def setup_class(cls): - # Read braggpeaks # origin is calibrated - cls.braggpeaks = py4DSTEM.io.read( path ) - + cls.braggpeaks = py4DSTEM.io.read(path) # tests def test_strainmap_instantiation(self): - strainmap = StrainMap( - braggvectors = self.braggpeaks, + braggvectors=self.braggpeaks, ) - assert(isinstance(strainmap, StrainMap)) - assert(strainmap.calibration is not None) - assert(strainmap.calibration is strainmap.braggvectors.calibration) - - + assert isinstance(strainmap, StrainMap) + assert strainmap.calibration is not None + assert strainmap.calibration is strainmap.braggvectors.calibration diff --git a/test/test_workflow/test_basics.py b/test/test_workflow/test_basics.py index 
cb30f5d6c..00a0ed992 100644 --- a/test/test_workflow/test_basics.py +++ b/test/test_workflow/test_basics.py @@ -2,37 +2,30 @@ from os.path import join # set filepath -path = join(py4DSTEM._TESTPATH,"test_io/legacy_v0.9_simAuNanoplatelet_bin.h5") - +path = join(py4DSTEM._TESTPATH, "test_io/legacy_v0.9_simAuNanoplatelet_bin.h5") class TestBasics: - # setup/teardown def setup_class(cls): - # Read sim Au datacube - datacube = py4DSTEM.io.read( - path, - data_id = 'polyAu_4DSTEM' - ) + datacube = py4DSTEM.io.read(path, data_id="polyAu_4DSTEM") cls.datacube = datacube # get center and probe radius datacube.get_dp_mean() alpha, qx0, qy0 = datacube.get_probe_size() cls.alpha = alpha - cls.qx0,cls.qy0 = qx0,qy0 - + cls.qx0, cls.qy0 = qx0, qy0 # tests def test_get_dp(self): - dp = self.datacube[10,30] + dp = self.datacube[10, 30] dp def test_show(self): - dp = self.datacube[10,30] + dp = self.datacube[10, 30] py4DSTEM.visualize.show(dp) # virtual diffraction and imaging @@ -41,43 +34,34 @@ def test_virt_diffraction(self): dp_mean = self.datacube.get_dp_mean() self.datacube.get_dp_max() - def test_virt_imaging_bf(self): - - geo = ((self.qx0,self.qy0),self.alpha+3) + geo = ((self.qx0, self.qy0), self.alpha + 3) # position detector self.datacube.position_detector( - mode = 'circle', - geometry = geo, + mode="circle", + geometry=geo, ) # compute self.datacube.get_virtual_image( - mode = 'circle', - geometry = geo, - name = 'bright_field', + mode="circle", + geometry=geo, + name="bright_field", ) - - def test_virt_imaging_adf(self): - - geo = ((self.qx0,self.qy0),(3*self.alpha,6*self.alpha)) + geo = ((self.qx0, self.qy0), (3 * self.alpha, 6 * self.alpha)) # position detector self.datacube.position_detector( - mode = 'annulus', - geometry = geo, + mode="annulus", + geometry=geo, ) # compute self.datacube.get_virtual_image( - mode = 'annulus', - geometry = geo, - name = 'annular_dark_field', + mode="annulus", + geometry=geo, + name="annular_dark_field", ) - - - - diff --git a/test/test_workflow/test_disk_detection_basic.py b/test/test_workflow/test_disk_detection_basic.py index 60a5e2aa1..c707dfc40 100644 --- a/test/test_workflow/test_disk_detection_basic.py +++ b/test/test_workflow/test_disk_detection_basic.py @@ -3,66 +3,62 @@ from numpy import zeros # set filepath -path = join(py4DSTEM._TESTPATH,"test_io/legacy_v0.9_simAuNanoplatelet_bin.h5") - +path = join(py4DSTEM._TESTPATH, "test_io/legacy_v0.9_simAuNanoplatelet_bin.h5") class TestDiskDetectionBasic: - # setup/teardown def setup_class(cls): - # Read sim Au datacube - datacube = py4DSTEM.io.read( - path, - data_id = 'polyAu_4DSTEM' - ) + datacube = py4DSTEM.io.read(path, data_id="polyAu_4DSTEM") cls.datacube = datacube # prepare a probe - mask = zeros(datacube.Rshape,dtype=bool) - mask[28:33,14:19] = 1 - probe = datacube.get_vacuum_probe( ROI=mask ) - alpha_pr,qx0_pr,qy0_pr = py4DSTEM.process.calibration.get_probe_size( probe.probe ) + mask = zeros(datacube.Rshape, dtype=bool) + mask[28:33, 14:19] = 1 + probe = datacube.get_vacuum_probe(ROI=mask) + alpha_pr, qx0_pr, qy0_pr = py4DSTEM.process.calibration.get_probe_size( + probe.probe + ) probe.get_kernel( - mode='sigmoid', - origin=(qx0_pr,qy0_pr), - radii=(alpha_pr,2*alpha_pr) + mode="sigmoid", origin=(qx0_pr, qy0_pr), radii=(alpha_pr, 2 * alpha_pr) ) cls.probe = probe # Set disk detection parameters cls.detect_params = { - 'corrPower': 1.0, - 'sigma': 0, - 'edgeBoundary': 2, - 'minRelativeIntensity': 0, - 'minAbsoluteIntensity': 8, - 'minPeakSpacing': 4, - 'subpixel' : 'poly', - 'maxNumPeaks': 1000, 
- # 'CUDA': True, + "corrPower": 1.0, + "sigma": 0, + "edgeBoundary": 2, + "minRelativeIntensity": 0, + "minAbsoluteIntensity": 8, + "minPeakSpacing": 4, + "subpixel": "poly", + "maxNumPeaks": 1000, + # 'CUDA': True, } - # tests def test_disk_detection_selected_positions(self): - - rxs = 36,15,11,59,32,34 - rys = 9,15,31,39,20,68, + rxs = 36, 15, 11, 59, 32, 34 + rys = ( + 9, + 15, + 31, + 39, + 20, + 68, + ) disks_selected = self.datacube.find_Bragg_disks( - data = (rxs, rys), - template = self.probe.kernel, + data=(rxs, rys), + template=self.probe.kernel, **self.detect_params, ) def test_disk_detection(self): - braggpeaks = self.datacube.find_Bragg_disks( - template = self.probe.kernel, + template=self.probe.kernel, **self.detect_params, ) - - diff --git a/test/test_workflow/test_disk_detection_with_calibration.py b/test/test_workflow/test_disk_detection_with_calibration.py index b93e2d00b..bd9fec244 100644 --- a/test/test_workflow/test_disk_detection_with_calibration.py +++ b/test/test_workflow/test_disk_detection_with_calibration.py @@ -3,61 +3,51 @@ from numpy import zeros # set filepath -path = join(py4DSTEM._TESTPATH,"test_io/legacy_v0.9_simAuNanoplatelet_bin.h5") - +path = join(py4DSTEM._TESTPATH, "test_io/legacy_v0.9_simAuNanoplatelet_bin.h5") class TestDiskDetectionWithCalibration: - # setup/teardown def setup_class(cls): - # Read sim Au datacube - datacube = py4DSTEM.io.read( - path, - data_id = 'polyAu_4DSTEM' - ) + datacube = py4DSTEM.io.read(path, data_id="polyAu_4DSTEM") cls.datacube = datacube # prepare a probe - mask = zeros(datacube.Rshape,dtype=bool) - mask[28:33,14:19] = 1 - probe = datacube.get_vacuum_probe( ROI=mask ) - alpha_pr,qx0_pr,qy0_pr = py4DSTEM.process.calibration.get_probe_size( probe.probe ) + mask = zeros(datacube.Rshape, dtype=bool) + mask[28:33, 14:19] = 1 + probe = datacube.get_vacuum_probe(ROI=mask) + alpha_pr, qx0_pr, qy0_pr = py4DSTEM.process.calibration.get_probe_size( + probe.probe + ) probe.get_kernel( - mode='sigmoid', - origin=(qx0_pr,qy0_pr), - radii=(alpha_pr,2*alpha_pr) + mode="sigmoid", origin=(qx0_pr, qy0_pr), radii=(alpha_pr, 2 * alpha_pr) ) cls.probe = probe # Set disk detection parameters cls.detect_params = { - 'corrPower': 1.0, - 'sigma': 0, - 'edgeBoundary': 2, - 'minRelativeIntensity': 0, - 'minAbsoluteIntensity': 8, - 'minPeakSpacing': 4, - 'subpixel' : 'poly', - 'maxNumPeaks': 1000, - # 'CUDA': True, + "corrPower": 1.0, + "sigma": 0, + "edgeBoundary": 2, + "minRelativeIntensity": 0, + "minAbsoluteIntensity": 8, + "minPeakSpacing": 4, + "subpixel": "poly", + "maxNumPeaks": 1000, + # 'CUDA': True, } - # tests def test_disk_detection(self): - braggpeaks = self.datacube.find_Bragg_disks( - template = self.probe.kernel, + template=self.probe.kernel, **self.detect_params, ) - # calibrate center - # calibrate ellipse # calibrate pixel @@ -72,11 +62,3 @@ def test_disk_detection(self): # check loaded data # check loaded cali - - - - - - - -
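The reformatted tests above all exercise one core workflow: read a datacube, build a vacuum probe from a sample-free ROI, measure the probe size, form a sigmoid-edged correlation kernel, detect Bragg disks, and form virtual images. The following condensed sketch uses only calls that appear in these tests; the filename and data_id mirror the test fixtures and are otherwise placeholders.

import numpy as np
import py4DSTEM

# Load a 4D-STEM scan (path and data_id mirror the test fixtures; substitute your own)
datacube = py4DSTEM.io.read(
    "legacy_v0.9_simAuNanoplatelet_bin.h5",
    data_id="polyAu_4DSTEM",
)

# Build a vacuum probe from a real-space ROI known to contain no sample
mask = np.zeros(datacube.Rshape, dtype=bool)
mask[28:33, 14:19] = True
probe = datacube.get_vacuum_probe(ROI=mask)

# Measure the probe size, then build a sigmoid-edged correlation kernel
alpha, qx0, qy0 = py4DSTEM.process.calibration.get_probe_size(probe.probe)
probe.get_kernel(mode="sigmoid", origin=(qx0, qy0), radii=(alpha, 2 * alpha))

# Detect Bragg disks by cross-correlating each pattern with the kernel
braggpeaks = datacube.find_Bragg_disks(
    template=probe.kernel,
    corrPower=1.0,
    sigma=0,
    edgeBoundary=2,
    minRelativeIntensity=0,
    minAbsoluteIntensity=8,
    minPeakSpacing=4,
    subpixel="poly",
    maxNumPeaks=1000,
)

# Form virtual bright-field and annular dark-field images about the measured center
datacube.get_virtual_image(
    mode="circle", geometry=((qx0, qy0), alpha + 6), name="bright_field"
)
datacube.get_virtual_image(
    mode="annulus", geometry=((qx0, qy0), (3 * alpha, 6 * alpha)), name="dark_field"
)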
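Likewise, the native-io tests all follow a single save/read round-trip pattern. A minimal sketch, assuming write access to a test.h5 in the working directory (names mirror test_listwrite.py):

import numpy as np
import py4DSTEM

# Build a small EMD-style object, write it, read it back, and compare
ar = py4DSTEM.RealSlice(data=np.arange(24).reshape((2, 3, 4)), name="array1")
py4DSTEM.save(filepath="test.h5", data=ar, mode="o")  # mode="o" overwrites
data = py4DSTEM.read("test.h5", datapath="array1_root")
assert np.array_equal(data.data, ar.data)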