diff --git a/.github/scripts/update_version.py b/.github/scripts/update_version.py
index a9250757e..2aaaa07af 100644
--- a/.github/scripts/update_version.py
+++ b/.github/scripts/update_version.py
@@ -1,6 +1,5 @@
 """
 Script to update the patch version number of the py4DSTEM package.
-Author: Tara Mishra (Quantumstud)
 """
 
 version_file_path = "py4DSTEM/version.py"
diff --git a/.github/workflows/check_install_dev.yml b/.github/workflows/check_install_dev.yml
index 6987c9274..82701d50d 100644
--- a/.github/workflows/check_install_dev.yml
+++ b/.github/workflows/check_install_dev.yml
@@ -17,6 +17,10 @@ jobs:
         runs-on: [ubuntu-latest]
         architecture: [x86_64]
         python-version: ["3.9", "3.10", "3.11",]
+        # include:
+        #   - python-version: "3.12.0-beta.4"
+        #     runs-on: ubuntu-latest
+        #     allow_failure: true
         # Currently no public runners available for this but this or arm64 should work next time
         # include:
         #   - python-version: "3.10"
diff --git a/.github/workflows/check_install_main.yml b/.github/workflows/check_install_main.yml
index 63db352e4..2d1c8ed2a 100644
--- a/.github/workflows/check_install_main.yml
+++ b/.github/workflows/check_install_main.yml
@@ -17,10 +17,10 @@ jobs:
         runs-on: [ubuntu-latest, windows-latest, macos-latest]
         architecture: [x86_64]
         python-version: ["3.9", "3.10", "3.11",]
-        include:
-          - python-version: "3.12.0-beta.4"
-            runs-on: ubuntu-latest
-            allow_failure: true
+        #include:
+        #  - python-version: "3.12.0-beta.4"
+        #    runs-on: ubuntu-latest
+        #    allow_failure: true
         # Currently no public runners available for this but this or arm64 should work next time
         # include:
         #   - python-version: "3.10"
diff --git a/.github/workflows/pypi_upload.yml b/.github/workflows/pypi_upload.yml
index 07a95273d..264c69030 100644
--- a/.github/workflows/pypi_upload.yml
+++ b/.github/workflows/pypi_upload.yml
@@ -1,6 +1,5 @@
 # Action to check the version of the package and upload it to PyPI
 # if the version is higher than the one on PyPI
-# Author: @quantumstud
 name: PyPI Upload
 
 on:
@@ -22,10 +21,15 @@ jobs:
           token: ${{ secrets.GH_ACTION_VERSION_UPDATE }}
       - name: Get changed files
         id: changed-files-specific
-        uses: tj-actions/changed-files@v32
+        uses: tj-actions/changed-files@v39
         with:
           files: |
            py4DSTEM/version.py
+      - name: Debug version file change checker
+        run: |
+          echo "Checking variable..."
+          echo ${{ steps.changed-files-specific.outputs.any_changed }}
+          echo "Done"
       - name: Running if py4DSTEM/version.py file is not changed
         if: steps.changed-files-specific.outputs.any_changed == 'false'
         run: |
diff --git a/README.md b/README.md
index 0561f098a..aa102542a 100644
--- a/README.md
+++ b/README.md
@@ -46,42 +46,50 @@
 First, download and install Anaconda: www.anaconda.com/download.
 If you prefer a more lightweight conda client, you can instead install Miniconda: https://docs.conda.io/en/latest/miniconda.html.
 Then open a conda terminal and run one of the following sets of commands to ensure everything is up-to-date and create a new environment for your py4DSTEM installation:
-
 ```
 conda update conda
 conda create -n py4dstem
 conda activate py4dstem
+conda install -c conda-forge py4dstem pymatgen jupyterlab
 ```
 
-Next, install py4DSTEM. To simultaneously install py4DSTEM with `pymatgen` (used in some crystal structure workflows) and `jupyterlab` (providing an interface for running Python notebooks like those provided in the [py4DSTEM tutorials repository](https://github.com/py4dstem/py4DSTEM_tutorials)) run:
+In order, these commands
+- ensure your installation of anaconda is up-to-date
+- make a virtual environment (see below)
+- enter the environment
+- install py4DSTEM, as well as pymatgen (used for crystal structure calculations) and JupyterLab (an interface for running Python notebooks like those in the [py4DSTEM tutorials repository](https://github.com/py4dstem/py4DSTEM_tutorials))
+
+
+We've had some recent reports of `conda` getting stuck trying to solve the environment using the above installation. If you run into this problem, you can install py4DSTEM using `pip` instead of `conda` by running:
 
 ```
-conda install -c conda-forge py4dstem pymatgen jupyterlab
+conda update conda
+conda create -n py4dstem python=3.10
+conda activate py4dstem
+pip install py4dstem pymatgen
 ```
 
-Or if you would prefer to install only the base modules of **py4DSTEM**, you can instead run:
+Both `conda` and `pip` are programs which manage package installations, i.e. they make sure the different codes you're installing, which depend on one another, use mutually compatible versions. Each has advantages and disadvantages; `pip` is a little more bare-bones, and we've seen this install work when `conda` doesn't. If you also want to use JupyterLab, you can then run either `pip install jupyterlab` or `conda install jupyterlab`.
+
+If you would prefer to install only the base modules of **py4DSTEM**, and skip pymatgen and JupyterLab, you can instead run:
 
 ```
 conda install -c conda-forge py4dstem
 ```
 
-In Windows you should then also run:
+Finally, regardless of which of the above approaches you used, in Windows you should then also run:
 
 ```
 conda install pywin32
 ```
 
-In order, these commands
-- ensure your installation of anaconda is up-to-date
-- make a virtual environment (see below)
-- enter the environment
-- install py4DSTEM, and optionally also pymatgen and JupyterLab
-- on Windows, enable python to talk to the windows API
+which enables Python to talk to the Windows API.
 
 Please note that virtual environments are used in the instructions above in order to make sure packages that have different dependencies don't conflict with one another.
 Because these directions install py4DSTEM to its own virtual environment, each time you want to use py4DSTEM you'll need to activate this environment.
 You can do this in the command line by running `conda activate py4dstem`, or, if you're using the Anaconda Navigator, by clicking on the Environments tab and then clicking on `py4dstem`.
 
+Lastly: as of the version 0.14.4 update, we've had a few reports of problems upgrading to the newest version. We're not sure what's causing the issue yet, but have found the new version can be installed successfully in these cases using a fresh Anaconda installation.
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 43dbc0817..03ecc7e26 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,2 +1,3 @@
 emdfile
-# py4dstem
\ No newline at end of file
+sphinx_rtd_theme
+# py4dstem
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 30ee084fe..6da66611e 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -36,7 +36,12 @@
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
-extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon", "sphinx.ext.intersphinx"]
+extensions = [
+    "sphinx.ext.autodoc",
+    "sphinx.ext.napoleon",
+    "sphinx.ext.intersphinx",
+    "sphinx_rtd_theme",
+]
 
 # Other useful extensions
 # sphinx_copybutton
diff --git a/py4DSTEM/braggvectors/braggvector_methods.py b/py4DSTEM/braggvectors/braggvector_methods.py
index 69445f324..267f81e5f 100644
--- a/py4DSTEM/braggvectors/braggvector_methods.py
+++ b/py4DSTEM/braggvectors/braggvector_methods.py
@@ -421,7 +421,7 @@ def measure_origin(
         return qx0, qy0, mask
 
     def measure_origin_beamstop(
-        self, center_guess, radii, max_dist=2, max_iter=1, **kwargs
+        self, center_guess, radii, max_dist=None, max_iter=1, **kwargs
    ):
        """
        Find the origin from a set of braggpeaks assuming there is a beamstop, by identifying
@@ -440,6 +440,9 @@
         R_Nx, R_Ny = self.Rshape
         braggpeaks = self._v_uncal
 
+        if max_dist is None:
+            max_dist = radii[1]
+
         # remove peaks outside the annulus
         braggpeaks_masked = braggpeaks.copy()
         for rx in range(R_Nx):
@@ -470,7 +473,7 @@
             x_r = -x + 2 * center_curr[0]
             y_r = -y + 2 * center_curr[1]
             dists = np.hypot(x_r - pl.data["qx"], y_r - pl.data["qy"])
-            dists[is_paired] = 2 * max_dist
+            dists[is_paired] = max_dist
             matched = dists <= max_dist
             if any(matched):
                 match = np.argmin(dists)
diff --git a/py4DSTEM/braggvectors/diskdetection.py b/py4DSTEM/braggvectors/diskdetection.py
index e726755ba..e23b10a15 100644
--- a/py4DSTEM/braggvectors/diskdetection.py
+++ b/py4DSTEM/braggvectors/diskdetection.py
@@ -555,6 +555,8 @@ def _find_Bragg_disks_CUDA_unbatched(
     # Populate a BraggVectors instance and return
     braggvectors = BraggVectors(datacube.Rshape, datacube.Qshape)
     braggvectors._v_uncal = peaks
+    braggvectors._set_raw_vector_getter()
+    braggvectors._set_cal_vector_getter()
     return braggvectors
 
 
@@ -600,6 +602,8 @@ def _find_Bragg_disks_CUDA_batched(
     # Populate a BraggVectors instance and return
     braggvectors = BraggVectors(datacube.Rshape, datacube.Qshape)
     braggvectors._v_uncal = peaks
+    braggvectors._set_raw_vector_getter()
+    braggvectors._set_cal_vector_getter()
     return braggvectors
 
 
@@ -650,6 +654,8 @@ def _find_Bragg_disks_ipp(
     # Populate a BraggVectors instance and return
     braggvectors = BraggVectors(datacube.Rshape, datacube.Qshape)
     braggvectors._v_uncal = peaks
+    braggvectors._set_raw_vector_getter()
+    braggvectors._set_cal_vector_getter()
     return braggvectors
 
 
@@ -700,6 +706,8 @@ def _find_Bragg_disks_dask(
     # Populate a BraggVectors instance and return
     braggvectors = BraggVectors(datacube.Rshape, datacube.Qshape)
     braggvectors._v_uncal = peaks
+    braggvectors._set_raw_vector_getter()
+    braggvectors._set_cal_vector_getter()
     return braggvectors
 
 
diff --git a/py4DSTEM/braggvectors/diskdetection_aiml_cuda.py b/py4DSTEM/braggvectors/diskdetection_aiml_cuda.py
index d0f550dcc..c5f89b9fd 100644
--- a/py4DSTEM/braggvectors/diskdetection_aiml_cuda.py
+++ b/py4DSTEM/braggvectors/diskdetection_aiml_cuda.py
@@ -17,8 +17,8 @@
 
 try:
     import cupy as cp
-except:
-    raise ImportError("Import Error: Please install cupy before proceeding")
+except ModuleNotFoundError:
+    raise ImportError("AIML CUDA requires cupy")
 
 try:
     import tensorflow as tf
diff --git a/py4DSTEM/datacube/virtualdiffraction.py b/py4DSTEM/datacube/virtualdiffraction.py
index e31b984f2..65665728d 100644
--- a/py4DSTEM/datacube/virtualdiffraction.py
+++ b/py4DSTEM/datacube/virtualdiffraction.py
@@ -3,13 +3,12 @@
 #  * DataCubeVirtualDiffraction - methods inherited by DataCube for virt diffraction
 
 import numpy as np
-import dask.array as da
 from typing import Optional
 import inspect
 
 from emdfile import tqdmnd, Metadata
-from py4DSTEM.data import Calibration, DiffractionSlice, Data
-from py4DSTEM.visualize.show import show
+from py4DSTEM.data import DiffractionSlice, Data
+from py4DSTEM.preprocess import get_shifted_ar
 
 
 # Virtual diffraction container class
@@ -184,22 +183,33 @@ def get_virtual_diffraction(
             qx_shift = x0_mean - x0
             qy_shift = y0_mean - y0
 
-            # ...for integer shifts
-            if not subpixel:
+            if subpixel is False:
                 # round shifts -> int
                 qx_shift = qx_shift.round().astype(int)
                 qy_shift = qy_shift.round().astype(int)
 
-        # ...for boolean masks and unmasked
-        if mask is None or mask.dtype == bool:
-            # get scan points
-            mask = np.ones(self.Rshape, dtype=bool) if mask is None else mask
-            mask_indices = np.nonzero(mask)
-            # allocate space
-            virtual_diffraction = np.zeros(self.Qshape)
-            # loop
-            for rx, ry in zip(mask_indices[0], mask_indices[1]):
-                # get shifted DP
+            # ...for boolean masks and unmasked
+            if mask is None or mask.dtype == bool:
+                # get scan points
+                mask = np.ones(self.Rshape, dtype=bool) if mask is None else mask
+                mask_indices = np.nonzero(mask)
+                # allocate space
+                virtual_diffraction = np.zeros(self.Qshape)
+                # loop
+                for rx, ry in zip(mask_indices[0], mask_indices[1]):
+                    # get shifted DP
+                    if subpixel:
+                        DP = get_shifted_ar(
+                            self.data[
+                                rx,
+                                ry,
+                                :,
+                                :,
+                            ],
+                            qx_shift[rx, ry],
+                            qy_shift[rx, ry],
+                        )
+                    else:
                         DP = np.roll(
                             self.data[
                                 rx,
@@ -210,29 +220,41 @@
                             (qx_shift[rx, ry], qy_shift[rx, ry]),
                             axis=(0, 1),
                         )
-                # compute
-                if method == "mean":
-                    virtual_diffraction += DP
-                elif method == "max":
-                    virtual_diffraction = np.maximum(virtual_diffraction, DP)
-            # normalize means
+                    # compute
                     if method == "mean":
-                virtual_diffraction /= len(mask_indices[0])
+                        virtual_diffraction += DP
+                    elif method == "max":
+                        virtual_diffraction = np.maximum(virtual_diffraction, DP)
+                # normalize means
+                if method == "mean":
+                    virtual_diffraction /= len(mask_indices[0])
 
-        # ...for floating point and complex masks
+            # ...for floating point and complex masks
+            else:
+                # allocate space
+                if mask.dtype == "complex":
+                    virtual_diffraction = np.zeros(self.Qshape, dtype="complex")
                 else:
-            # allocate space
-            if mask.dtype == "complex":
-                virtual_diffraction = np.zeros(self.Qshape, dtype="complex")
+                    virtual_diffraction = np.zeros(self.Qshape)
+                # loop
+                for rx, ry in tqdmnd(
+                    self.R_Nx,
+                    self.R_Ny,
+                    disable=not verbose,
+                ):
+                    # get shifted DP
+                    if subpixel:
+                        DP = get_shifted_ar(
+                            self.data[
+                                rx,
+                                ry,
+                                :,
+                                :,
+                            ],
+                            qx_shift[rx, ry],
+                            qy_shift[rx, ry],
+                        )
                     else:
-            virtual_diffraction = np.zeros(self.Qshape)
-            # loop
-            for rx, ry in tqdmnd(
-                self.R_Nx,
-                self.R_Ny,
-                disable=not verbose,
-            ):
-                # get shifted DP
                         DP = np.roll(
                             self.data[
                                 rx,
@@ -243,21 +265,15 @@
                             (qx_shift[rx, ry], qy_shift[rx, ry]),
                             axis=(0, 1),
                         )
-                # compute
-                w = mask[rx, ry]
-                if method == "mean":
-                    virtual_diffraction += DP * w
-                elif method == "max":
-                    virtual_diffraction = np.maximum(
-                        virtual_diffraction, DP * w
-                    )
-            if method == "mean":
-                virtual_diffraction /= np.sum(mask)
-        # TODO subpixel shifting
-        else:
-            raise Exception("subpixel shifting has not been implemented yet!")
-            pass
+                    # compute
+                    w = mask[rx, ry]
+                    if method == "mean":
+                        virtual_diffraction += DP * w
+                    elif method == "max":
+                        virtual_diffraction = np.maximum(virtual_diffraction, DP * w)
+                if method == "mean":
"mean": + virtual_diffraction /= np.sum(mask) # wrap, add to tree, and return diff --git a/py4DSTEM/datacube/virtualimage.py b/py4DSTEM/datacube/virtualimage.py index 4b2eeed39..50a297914 100644 --- a/py4DSTEM/datacube/virtualimage.py +++ b/py4DSTEM/datacube/virtualimage.py @@ -11,7 +11,8 @@ from emdfile import tqdmnd, Metadata from py4DSTEM.data import Calibration, RealSlice, Data, DiffractionSlice -from py4DSTEM.visualize.show import show +from py4DSTEM.preprocess import get_shifted_ar +from py4DSTEM.visualize import show # Virtual image container class @@ -72,6 +73,7 @@ def get_virtual_image( centered=False, calibrated=False, shift_center=False, + subpixel=False, verbose=True, dask=False, return_mask=False, @@ -138,6 +140,8 @@ def get_virtual_image( position and the mean origin position over all patterns, rounded to the nearest integer for speed. Default is False. If `shift_center` is True, `centered` is automatically set to True. + subpixel : bool + if True, applies subpixel shifts to virtual image verbose : bool toggles a progress bar dask : bool @@ -177,8 +181,7 @@ def get_virtual_image( "rectangular", "mask", ), "check doc strings for supported modes" - if shift_center == True: - centered = True + if test_config: for x, y in zip( ["centered", "calibrated", "shift_center"], @@ -242,8 +245,9 @@ def _apply_mask_dask(self, mask): self.calibration.get_origin_shift() is not None ), "origin need to be calibrated" qx_shift, qy_shift = self.calibration.get_origin_shift() - qx_shift = qx_shift.round().astype(int) - qy_shift = qy_shift.round().astype(int) + if subpixel is False: + qx_shift = qx_shift.round().astype(int) + qy_shift = qy_shift.round().astype(int) # if return_mask is True, get+return the mask and skip the computation if return_mask is not False: @@ -251,9 +255,17 @@ def _apply_mask_dask(self, mask): rx, ry = return_mask except TypeError: raise Exception( - f"if `shift_center=True`, return_mask must be a 2-tuple of ints or False, but revieced inpute value of {return_mask}" + f"if `shift_center=True`, return_mask must be a 2-tuple of \ + ints or False, but revieced inpute value of {return_mask}" + ) + if subpixel: + _mask = get_shifted_ar( + mask, qx_shift[rx, ry], qy_shift[rx, ry], bilinear=True + ) + else: + _mask = np.roll( + mask, (qx_shift[rx, ry], qy_shift[rx, ry]), axis=(0, 1) ) - _mask = np.roll(mask, (qx_shift[rx, ry], qy_shift[rx, ry]), axis=(0, 1)) return _mask # allocate space @@ -269,7 +281,14 @@ def _apply_mask_dask(self, mask): disable=not verbose, ): # get shifted mask - _mask = np.roll(mask, (qx_shift[rx, ry], qy_shift[rx, ry]), axis=(0, 1)) + if subpixel: + _mask = get_shifted_ar( + mask, qx_shift[rx, ry], qy_shift[rx, ry], bilinear=True + ) + else: + _mask = np.roll( + mask, (qx_shift[rx, ry], qy_shift[rx, ry]), axis=(0, 1) + ) # add to output array virtual_image[rx, ry] = np.sum(self.data[rx, ry] * _mask) @@ -292,6 +311,7 @@ def _apply_mask_dask(self, mask): "centered": centered, "calibrated": calibrated, "shift_center": shift_center, + "subpixel": subpixel, "verbose": verbose, "dask": dask, "return_mask": return_mask, @@ -318,6 +338,7 @@ def position_detector( centered=None, calibrated=None, shift_center=False, + subpixel=True, scan_position=None, invert=False, color="r", @@ -358,6 +379,8 @@ def position_detector( regardless of the value of `data` (enabling e.g. overlaying the mask for a specific scan position on a max or mean diffraction image.) 
+        subpixel : bool
+            if True, applies subpixel shifts to the detector mask
         invert : bool
             if True, invert the masked pixel (i.e. pixels *outside* the detector
             are overlaid with a mask)
@@ -398,19 +421,24 @@
         elif isinstance(data, np.ndarray):
             assert (
                 data.shape == self.Qshape
-            ), f"Can't position a detector over an image with a shape that is different from diffraction space. Diffraction space in this dataset has shape {self.Qshape} but the image passed has shape {data.shape}"
+            ), f"Can't position a detector over an image with a shape that is different \
+                from diffraction space. Diffraction space in this dataset has shape {self.Qshape} \
+                but the image passed has shape {data.shape}"
             image = data
         elif isinstance(data, DiffractionSlice):
             assert (
                 data.shape == self.Qshape
-            ), f"Can't position a detector over an image with a shape that is different from diffraction space. Diffraction space in this dataset has shape {self.Qshape} but the image passed has shape {data.shape}"
+            ), f"Can't position a detector over an image with a shape that is different \
+                from diffraction space. Diffraction space in this dataset has shape {self.Qshape} \
+                but the image passed has shape {data.shape}"
             image = data.data
         elif isinstance(data, tuple):
             rx, ry = data[:2]
             image = self[rx, ry]
         else:
             raise Exception(
-                f"Invalid argument passed to `data`. Expected None or np.ndarray or tuple, not type {type(data)}"
+                f"Invalid argument passed to `data`. Expected None or np.ndarray or \
+                tuple, not type {type(data)}"
             )
 
         # shift center
@@ -419,7 +447,9 @@
         elif shift_center == True:
             assert isinstance(
                 data, tuple
-            ), "If shift_center is set to True, `data` should be a 2-tuple (rx,ry). To shift the detector mask while using some other input for `data`, set `shift_center` to a 2-tuple (rx,ry)"
+            ), "If shift_center is set to True, `data` should be a 2-tuple (rx,ry). \
+                To shift the detector mask while using some other input for `data`, \
+                set `shift_center` to a 2-tuple (rx,ry)"
         elif isinstance(shift_center, tuple):
             rx, ry = shift_center[:2]
             shift_center = True
@@ -454,10 +484,15 @@
             assert (
                 self.calibration.get_origin_shift() is not None
             ), "origin shifts need to be calibrated"
-            qx_shift, qy_shift = self.calibration.cal.get_origin_shift()
-            qx_shift = int(np.round(qx_shift[rx, ry]))
-            qy_shift = int(np.round(qy_shift[rx, ry]))
-            mask = np.roll(mask, (qx_shift, qy_shift), axis=(0, 1))
+            qx_shift, qy_shift = self.calibration.get_origin_shift()
+            if subpixel:
+                mask = get_shifted_ar(
+                    mask, qx_shift[rx, ry], qy_shift[rx, ry], bilinear=True
+                )
+            else:
+                qx_shift = int(np.round(qx_shift[rx, ry]))
+                qy_shift = int(np.round(qy_shift[rx, ry]))
+                mask = np.roll(mask, (qx_shift, qy_shift), axis=(0, 1))
 
         # Show
         show(image, mask=mask, mask_color=color, mask_alpha=alpha, **kwargs)
diff --git a/py4DSTEM/preprocess/utils.py b/py4DSTEM/preprocess/utils.py
index 0c76f35a7..752e2f81c 100644
--- a/py4DSTEM/preprocess/utils.py
+++ b/py4DSTEM/preprocess/utils.py
@@ -5,8 +5,8 @@
 
 try:
     import cupy as cp
-except ImportError:
-    cp = None
+except ModuleNotFoundError:
+    cp = np
 
 
 def bin2D(array, factor, dtype=np.float64):
diff --git a/py4DSTEM/process/diffraction/crystal.py b/py4DSTEM/process/diffraction/crystal.py
index a797bd166..d3e3cebd6 100644
--- a/py4DSTEM/process/diffraction/crystal.py
+++ b/py4DSTEM/process/diffraction/crystal.py
@@ -37,6 +37,8 @@ class Crystal:
         orientation_plan,
         match_orientations,
         match_single_pattern,
+        cluster_grains,
+        cluster_orientation_map,
         calculate_strain,
         save_ang_file,
         symmetry_reduce_directions,
@@ -52,6 +54,8 @@
         plot_orientation_plan,
         plot_orientation_maps,
         plot_fiber_orientation_maps,
+        plot_clusters,
+        plot_cluster_size,
     )
 
     from py4DSTEM.process.diffraction.crystal_calibrate import (
diff --git a/py4DSTEM/process/diffraction/crystal_ACOM.py b/py4DSTEM/process/diffraction/crystal_ACOM.py
index 2346db598..da553456f 100644
--- a/py4DSTEM/process/diffraction/crystal_ACOM.py
+++ b/py4DSTEM/process/diffraction/crystal_ACOM.py
@@ -2,6 +2,8 @@
 import matplotlib.pyplot as plt
 import os
 from typing import Union, Optional
+import time, sys
+from tqdm import tqdm
 
 from emdfile import tqdmnd, PointList, PointListArray
 from py4DSTEM.data import RealSlice
@@ -14,7 +16,7 @@
 
 try:
     import cupy as cp
-except:
+except ModuleNotFoundError:
     cp = None
 
 
@@ -762,14 +764,34 @@ def match_orientations(
     self,
     bragg_peaks_array: PointListArray,
     num_matches_return: int = 1,
-    min_number_peaks=3,
-    inversion_symmetry=True,
-    multiple_corr_reset=True,
-    progress_bar: bool = True,
+    min_angle_between_matches_deg=None,
+    min_number_peaks: int = 3,
+    inversion_symmetry: bool = True,
+    multiple_corr_reset: bool = True,
     return_orientation: bool = True,
+    progress_bar: bool = True,
 ):
     """
-    This function computes the orientation of any number of PointLists stored in a PointListArray, and returns an OrienationMap.
+    Computes the orientation of any number of PointLists stored in a
+    PointListArray, and returns an OrientationMap.
+
+    Parameters
+    --------
+    bragg_peaks_array: PointListArray
+        PointListArray containing the Bragg peaks and intensities, with calibrations applied
+    num_matches_return: int
+        return these many matches as 3rd dim of orient (matrix)
+    min_angle_between_matches_deg: int
+        Minimum angle between zone axes of multiple matches, in degrees.
+        Note that I haven't thought how to handle in-plane rotations, since multiple matches are possible.
+    min_number_peaks: int
+        Minimum number of peaks required to perform ACOM matching
+    inversion_symmetry: bool
+        check for inversion symmetry in the matches
+    multiple_corr_reset: bool
+        keep original correlation score for multiple matches
+    return_orientation: bool
+        Return orientation map from function for inspection.
+        The map is always stored in the Crystal object.
+    progress_bar: bool
+        Show or hide the progress bar
     """
 
     orientation_map = OrientationMap(
@@ -808,6 +830,7 @@
             orientation = self.match_single_pattern(
                 bragg_peaks=vectors,
                 num_matches_return=num_matches_return,
+                min_angle_between_matches_deg=min_angle_between_matches_deg,
                 min_number_peaks=min_number_peaks,
                 inversion_symmetry=inversion_symmetry,
                 multiple_corr_reset=multiple_corr_reset,
@@ -816,6 +839,8 @@
             )
 
             orientation_map.set_orientation(orientation, rx, ry)
+
+    # assign and return
     self.orientation_map = orientation_map
 
     if return_orientation:
@@ -828,6 +853,7 @@ def match_single_pattern(
     self,
     bragg_peaks: PointList,
     num_matches_return: int = 1,
+    min_angle_between_matches_deg=None,
     min_number_peaks=3,
     inversion_symmetry=True,
     multiple_corr_reset=True,
@@ -841,23 +867,42 @@
     """
     Solve for the best fit orientation of a single diffraction pattern.
 
-    Args:
-        bragg_peaks (PointList): numpy array containing the Bragg positions and intensities ('qx', 'qy', 'intensity')
-        num_matches_return (int): return these many matches as 3th dim of orient (matrix)
-        min_number_peaks (int): Minimum number of peaks required to perform ACOM matching
-        inversion_symmetry (bool): check for inversion symmetry in the matches
-        multiple_corr_reset (bool): keep original correlation score for multiple matches
-        subpixel_tilt (bool): set to false for faster matching, returning the nearest corr point
-        plot_polar (bool): set to true to plot the polar transform of the diffraction pattern
-        plot_corr (bool): set to true to plot the resulting correlogram
-        returnfig (bool): Return figure handles
-        figsize (list): size of figure
-        verbose (bool): Print the fitted zone axes, correlation scores
-        CUDA (bool): Enable CUDA for the FFT steps
-
-    Returns:
-        orientation (Orientation): Orientation class containing all outputs
-        fig, ax (handles): Figure handles for the plotting output
+    Parameters
+    --------
+    bragg_peaks: PointList
+        numpy array containing the Bragg positions and intensities ('qx', 'qy', 'intensity')
+    num_matches_return: int
+        return these many matches as 3rd dim of orient (matrix)
+    min_angle_between_matches_deg: int
+        Minimum angle between zone axes of multiple matches, in degrees.
+        Note that I haven't thought how to handle in-plane rotations, since multiple matches are possible.
+    min_number_peaks: int
+        Minimum number of peaks required to perform ACOM matching
+    inversion_symmetry: bool
+        check for inversion symmetry in the matches
+    multiple_corr_reset: bool
+        keep original correlation score for multiple matches
+    subpixel_tilt: bool
+        set to false for faster matching, returning the nearest corr point
+    plot_polar: bool
+        set to true to plot the polar transform of the diffraction pattern
+    plot_corr: bool
+        set to true to plot the resulting correlogram
+    returnfig: bool
+        return figure handles
+    figsize: list
+        size of figure
+    verbose: bool
+        Print the fitted zone axes, correlation scores
+    CUDA: bool
+        Enable CUDA for the FFT steps
+
+    Returns
+    --------
+    orientation: Orientation
+        Orientation class containing all outputs
+    fig, ax: handles
+        Figure handles for the plotting output
     """
 
     # init orientation output
@@ -1028,6 +1073,25 @@
                 0,
             )
 
+        # If minimum angle is specified and we're on a match later than the first,
+        # we zero correlation values within the given range.
+        if min_angle_between_matches_deg is not None:
+            if match_ind > 0:
+                inds_previous = orientation.inds[:match_ind, 0]
+                for a0 in range(inds_previous.size):
+                    mask_zero = np.arccos(
+                        np.clip(
+                            np.sum(
+                                self.orientation_vecs
+                                * self.orientation_vecs[inds_previous[a0], :],
+                                axis=1,
+                            ),
+                            -1,
+                            1,
+                        )
+                    ) < np.deg2rad(min_angle_between_matches_deg)
+                    corr_full[mask_zero, :] = 0.0
+
         # Get maximum (non inverted) correlation value
         ind_phi = np.argmax(corr_full, axis=1)
 
@@ -1095,6 +1159,26 @@
                 ),
                 0,
             )
+
+        # If minimum angle is specified and we're on a match later than the first,
+        # we zero correlation values within the given range.
+        if min_angle_between_matches_deg is not None:
+            if match_ind > 0:
+                inds_previous = orientation.inds[:match_ind, 0]
+                for a0 in range(inds_previous.size):
+                    mask_zero = np.arccos(
+                        np.clip(
+                            np.sum(
+                                self.orientation_vecs
+                                * self.orientation_vecs[inds_previous[a0], :],
+                                axis=1,
+                            ),
+                            -1,
+                            1,
+                        )
+                    ) < np.deg2rad(min_angle_between_matches_deg)
+                    corr_full_inv[mask_zero, :] = 0.0
+
         ind_phi_inv = np.argmax(corr_full_inv, axis=1)
         corr_inv = np.zeros(self.orientation_num_zones, dtype="bool")
 
@@ -1682,6 +1766,250 @@
     return orientation
 
 
+def cluster_grains(
+    self,
+    threshold_add=1.0,
+    threshold_grow=0.1,
+    angle_tolerance_deg=5.0,
+    progress_bar=True,
+):
+    """
+    Cluster grains using a rotation criterion and correlation values.
+
+    Parameters
+    --------
+    threshold_add: float
+        Minimum signal required for a probe position to initialize a cluster.
+    threshold_grow: float
+        Minimum signal required for a probe position to be added to a cluster.
+    angle_tolerance_deg: float
+        Rotation tolerance for clustering grains.
+    progress_bar: bool
+        Turns on the progress bar for the grain clustering
+
+    """
+
+    # symmetry operators
+    sym = self.symmetry_operators
+
+    # Get data
+    # Correlation data = signal to cluster with
+    sig = self.orientation_map.corr.copy()
+    sig_init = sig.copy()
+    mark = sig >= threshold_grow
+    sig[np.logical_not(mark)] = 0
+    # orientation matrix used for angle tolerance
+    matrix = self.orientation_map.matrix.copy()
+
+    # init
+    self.cluster_sizes = np.array((), dtype="int")
+    self.cluster_sig = np.array(())
+    self.cluster_inds = []
+    self.cluster_orientation = []
+    inds_all = np.zeros_like(sig, dtype="int")
+    inds_all.ravel()[:] = np.arange(inds_all.size)
+
+    # Tolerance
+    tol = np.deg2rad(angle_tolerance_deg)
+
+    # Main loop
+    search = True
+    comp = 0.0
+    mark_total = np.sum(np.max(mark, axis=2))
+    pbar = tqdm(total=mark_total, disable=not progress_bar)
+    while search is True:
+        inds_grain = np.argmax(sig)
+
+        val = sig.ravel()[inds_grain]
+
+        if val < threshold_add:
+            search = False
+
+        else:
+            # Start cluster
+            x, y, z = np.unravel_index(inds_grain, sig.shape)
+            mark[x, y, z] = False
+            sig[x, y, z] = 0
+            matrix_cluster = matrix[x, y, z]
+            orientation_cluster = self.orientation_map.get_orientation_single(x, y, z)
+
+            # Neighbors to search
+            xr = np.clip(x + np.arange(-1, 2, dtype="int"), 0, sig.shape[0] - 1)
+            yr = np.clip(y + np.arange(-1, 2, dtype="int"), 0, sig.shape[1] - 1)
+            inds_cand = inds_all[xr[:, None], yr[None], :].ravel()
+            inds_cand = np.delete(inds_cand, mark.ravel()[inds_cand] == False)
+
+            if inds_cand.size == 0:
+                grow = False
+            else:
+                grow = True
+
+            # grow the cluster
+            while grow is True:
+                inds_new = np.array((), dtype="int")
+
+                keep = np.zeros(inds_cand.size, dtype="bool")
+                for a0 in range(inds_cand.size):
+                    xc, yc, zc = np.unravel_index(inds_cand[a0], sig.shape)
+
+                    # Angle test between orientation matrices
+                    dphi = np.min(
+                        np.arccos(
+                            np.clip(
+                                (
+                                    np.trace(
+                                        self.symmetry_operators
+                                        @ matrix[xc, yc, zc]
+                                        @ np.transpose(matrix_cluster),
+                                        axis1=1,
+                                        axis2=2,
+                                    )
+                                    - 1
+                                )
+                                / 2,
+                                -1,
+                                1,
+                            )
+                        )
+                    )
+
+                    if np.abs(dphi) < tol:
+                        keep[a0] = True
+
+                        sig[xc, yc, zc] = 0
+                        mark[xc, yc, zc] = False
+
+                        xr = np.clip(
+                            xc + np.arange(-1, 2, dtype="int"), 0, sig.shape[0] - 1
+                        )
+                        yr = np.clip(
+                            yc + np.arange(-1, 2, dtype="int"), 0, sig.shape[1] - 1
+                        )
+                        inds_add = inds_all[xr[:, None], yr[None], :].ravel()
+                        inds_new = np.append(inds_new, inds_add)
+
+                inds_grain = np.append(inds_grain, inds_cand[keep])
+                inds_cand = np.unique(
+                    np.delete(inds_new, mark.ravel()[inds_new] == False)
+                )
+
+                if inds_cand.size == 0:
+                    grow = False
+
+            # convert grain to x,y coordinates, add to lists
+            xg, yg, zg = np.unravel_index(inds_grain, sig.shape)
+            xyg = np.unique(np.vstack((xg, yg)), axis=1)
+            sig_mean = np.mean(sig_init.ravel()[inds_grain])
+            self.cluster_sizes = np.append(self.cluster_sizes, xyg.shape[1])
+            self.cluster_sig = np.append(self.cluster_sig, sig_mean)
+            self.cluster_orientation.append(orientation_cluster)
+            self.cluster_inds.append(xyg)
+
+        # update progressbar
+        new_marks = mark_total - np.sum(np.max(mark, axis=2))
+        pbar.update(new_marks)
+        mark_total -= new_marks
+
+    pbar.close()
+
+
+def cluster_orientation_map(
+    self,
+    stripe_width=(2, 2),
+    area_min=2,
+):
+    """
+    Produce a new orientation map from the clustered grains.
+    Use a stripe pattern for the overlapping grains.
+
+    Parameters
+    --------
+    stripe_width: (int,int)
+        Width of stripes for plotting maps with overlapping grains
+    area_min: (int)
+        Minimum size of grains to include
+
+    Returns
+    --------
+    orientation_map
+        The clustered orientation map
+
+    """
+
+    # init
+    orientation_map = OrientationMap(
+        num_x=self.orientation_map.num_x,
+        num_y=self.orientation_map.num_y,
+        num_matches=1,
+    )
+    im_grain = np.zeros(
+        (self.orientation_map.num_x, self.orientation_map.num_y), dtype="bool"
+    )
+    im_count = np.zeros((self.orientation_map.num_x, self.orientation_map.num_y))
+    im_mark = np.zeros((self.orientation_map.num_x, self.orientation_map.num_y))
+
+    # Loop over grains to determine number in each pixel
+    for a0 in range(self.cluster_sizes.shape[0]):
+        if self.cluster_sizes[a0] >= area_min:
+            im_grain[:] = False
+            im_grain[
+                self.cluster_inds[a0][0, :],
+                self.cluster_inds[a0][1, :],
+            ] = True
+            im_count += im_grain
+    im_stripe = im_count >= 2
+    im_single = np.logical_not(im_stripe)
+
+    # prefactor for stripes
+    if stripe_width[0] == 0:
+        dx = 0
+    else:
+        dx = 1 / stripe_width[0]
+    if stripe_width[1] == 0:
+        dy = 0
+    else:
+        dy = 1 / stripe_width[1]
+
+    # loop over grains
+    for a0 in range(self.cluster_sizes.shape[0]):
+        if self.cluster_sizes[a0] >= area_min:
+            im_grain[:] = False
+            im_grain[
+                self.cluster_inds[a0][0, :],
+                self.cluster_inds[a0][1, :],
+            ] = True
+
+            # non-overlapping grains
+            sub = np.logical_and(im_grain, im_single)
+            x, y = np.unravel_index(np.where(sub.ravel()), im_grain.shape)
+            x = np.atleast_1d(np.squeeze(x))
+            y = np.atleast_1d(np.squeeze(y))
+            for a1 in range(x.size):
+                orientation_map.set_orientation(
+                    self.cluster_orientation[a0], x[a1], y[a1]
+                )
+
+            # overlapping grains
+            sub = np.logical_and(im_grain, im_stripe)
+            x, y = np.unravel_index(np.where(sub.ravel()), im_grain.shape)
+            x = np.atleast_1d(np.squeeze(x))
+            y = np.atleast_1d(np.squeeze(y))
+            for a1 in range(x.size):
+                d = np.mod(
+                    x[a1] * dx + y[a1] * dy + im_mark[x[a1], y[a1]] + 0.5,
+                    im_count[x[a1], y[a1]],
+                )
+
+                if d < 1.0:
+                    orientation_map.set_orientation(
+                        self.cluster_orientation[a0], x[a1], y[a1]
+                    )
+                im_mark[x[a1], y[a1]] += 1
+
+    return orientation_map
+
+
 def calculate_strain(
     self,
     bragg_peaks_array: PointListArray,
diff --git a/py4DSTEM/process/diffraction/crystal_viz.py b/py4DSTEM/process/diffraction/crystal_viz.py
index 8ffd558e9..e17e87b93 100644
--- a/py4DSTEM/process/diffraction/crystal_viz.py
+++ b/py4DSTEM/process/diffraction/crystal_viz.py
@@ -5,6 +5,8 @@
 from mpl_toolkits.mplot3d import Axes3D, art3d
 from scipy.signal import medfilt
 from scipy.ndimage import gaussian_filter
+from scipy.ndimage import distance_transform_edt
+from skimage.morphology import dilation, erosion
 
 import warnings
 import numpy as np
@@ -989,7 +991,7 @@ def overline(x):
 
 def plot_orientation_maps(
     self,
-    orientation_map,
+    orientation_map=None,
     orientation_ind: int = 0,
     dir_in_plane_degrees: float = 0.0,
     corr_range: np.ndarray = np.array([0, 5]),
@@ -1010,6 +1012,7 @@
     Args:
         orientation_map (OrientationMap): Class containing orientation matrices, correlation values, etc.
+            Optional - can reference internally stored OrientationMap.
         orientation_ind (int): Which orientation match to plot if num_matches > 1
         dir_in_plane_degrees (float): In-plane angle to plot in degrees. Default is 0 / x-axis / vertical down.
         corr_range (np.ndarray): Correlation intensity range for the plot
@@ -1037,6 +1040,9 @@
     """
 
     # Inputs
+    if orientation_map is None:
+        orientation_map = self.orientation_map
+
     # Legend size
     leg_size = np.array([300, 300], dtype="int")
 
@@ -1720,6 +1726,205 @@
     return images_orientation
 
 
+def plot_clusters(
+    self,
+    area_min=2,
+    outline_grains=True,
+    outline_thickness=1,
+    fill_grains=0.25,
+    smooth_grains=1.0,
+    cmap="viridis",
+    figsize=(8, 8),
+    returnfig=False,
+):
+    """
+    Plot the clusters as an image.
+
+    Parameters
+    --------
+    area_min: int (optional)
+        Min cluster size to include, in units of probe positions.
+    outline_grains: bool (optional)
+        Set to True to draw grains with outlines
+    outline_thickness: int (optional)
+        Thickness of the grain outline
+    fill_grains: float (optional)
+        Outlined grains are filled with this value in pixels.
+    smooth_grains: float (optional)
+        Grain boundaries are smoothed by this value in pixels.
+    cmap: string (optional)
+        Matplotlib colormap for the image
+    figsize: tuple
+        Size of the figure panel
+    returnfig: bool
+        Setting this to true returns the figure and axis handles
+
+    Returns
+    --------
+    fig, ax (optional)
+        Figure and axes handles
+
+    """
+
+    # init
+    im_plot = np.zeros(
+        (
+            self.orientation_map.num_x,
+            self.orientation_map.num_y,
+        )
+    )
+    im_grain = np.zeros(
+        (
+            self.orientation_map.num_x,
+            self.orientation_map.num_y,
+        ),
+        dtype="bool",
+    )
+
+    # make plotting image
+    for a0 in range(self.cluster_sizes.shape[0]):
+        if self.cluster_sizes[a0] >= area_min:
+            if outline_grains:
+                im_grain[:] = False
+                im_grain[
+                    self.cluster_inds[a0][0, :],
+                    self.cluster_inds[a0][1, :],
+                ] = True
+
+                im_dist = distance_transform_edt(
+                    erosion(
+                        np.invert(im_grain), footprint=np.ones((3, 3), dtype="bool")
+                    )
+                ) - distance_transform_edt(im_grain)
+                im_dist = gaussian_filter(im_dist, sigma=smooth_grains, mode="nearest")
+                im_add = np.exp(im_dist**2 / (-0.5 * outline_thickness**2))
+
+                if fill_grains > 0:
+                    im_dist = distance_transform_edt(
+                        erosion(
+                            np.invert(im_grain), footprint=np.ones((3, 3), dtype="bool")
+                        )
+                    )
+                    im_dist = gaussian_filter(
+                        im_dist, sigma=smooth_grains, mode="nearest"
+                    )
+                    im_add += fill_grains * np.exp(
+                        im_dist**2 / (-0.5 * outline_thickness**2)
+                    )
+
+                # im_add = 1 - np.exp(
+                #     distance_transform_edt(im_grain)**2 \
+                #     / (-2*outline_thickness**2))
+                im_plot += im_add
+                # im_plot = np.minimum(im_plot, im_add)
+            else:
+                # xg,yg = np.unravel_index(self.cluster_inds[a0], im_plot.shape)
+                im_grain[:] = False
+                im_grain[
+                    self.cluster_inds[a0][0, :],
+                    self.cluster_inds[a0][1, :],
+                ] = True
+                im_plot += gaussian_filter(
+                    im_grain.astype("float"), sigma=smooth_grains, mode="nearest"
+                )
+
+                # im_plot[
+                #     self.cluster_inds[a0][0,:],
+                #     self.cluster_inds[a0][1,:],
+                # ] += 1
+
+    if outline_grains:
+        im_plot = np.clip(im_plot, 0, 2)
+
+    # plotting
+    fig, ax = plt.subplots(figsize=figsize)
+    ax.imshow(
+        im_plot,
+        # vmin = -3,
+        # vmax = 3,
+        cmap=cmap,
+    )
+
+    if returnfig:
+        return fig, ax
+
+
+def plot_cluster_size(
+    self,
+    area_min=None,
+    area_max=None,
+    area_step=1,
+    weight_intensity=False,
+    pixel_area=1.0,
+    pixel_area_units="px^2",
+    figsize=(8, 6),
+    returnfig=False,
+):
+    """
+    Plot the cluster sizes
+
+    Parameters
+    --------
+    area_min: int (optional)
+        Min area to include in pixels^2
+    area_max: int (optional)
+        Max area bin in pixels^2
+    area_step: int (optional)
+        Step size of the histogram bin in pixels^2
+    weight_intensity: bool
+        Weight histogram by the peak intensity.
+    pixel_area: float
+        Size of pixel area unit square
+    pixel_area_units: string
+        Units of the pixel area
+    figsize: tuple
+        Size of the figure panel
+    returnfig: bool
+        Setting this to true returns the figure and axis handles
+
+    Returns
+    --------
+    fig, ax (optional)
+        Figure and axes handles
+
+    """
+
+    if area_max is None:
+        area_max = np.max(self.cluster_sizes)
+    area = np.arange(0, area_max, area_step)
+    if area_min is None:
+        sub = self.cluster_sizes.astype("int") < area_max
+    else:
+        sub = np.logical_and(
+            self.cluster_sizes.astype("int") >= area_min,
+            self.cluster_sizes.astype("int") < area_max,
+        )
+    if weight_intensity:
+        hist = np.bincount(
+            self.cluster_sizes[sub] // area_step,
+            weights=self.cluster_sig[sub],
+            minlength=area.shape[0],
+        )
+    else:
+        hist = np.bincount(
+            self.cluster_sizes[sub] // area_step,
+            minlength=area.shape[0],
+        )
+
+    # plotting
+    fig, ax = plt.subplots(figsize=figsize)
+    ax.bar(
+        area * pixel_area,
+        hist,
+        width=0.8 * pixel_area * area_step,
+    )
+    ax.set_xlim((0, area_max * pixel_area))
+    ax.set_xlabel("Grain Area [" + pixel_area_units + "]")
+    if weight_intensity:
+        ax.set_ylabel("Total Signal [arb. units]")
+    else:
+        ax.set_ylabel("Number of Grains")
+
+    if returnfig:
+        return fig, ax
+
+
 def axisEqual3D(ax):
     extents = np.array([getattr(ax, "get_{}lim".format(dim))() for dim in "xyz"])
     sz = extents[:, 1] - extents[:, 0]
diff --git a/py4DSTEM/process/diffraction/utils.py b/py4DSTEM/process/diffraction/utils.py
index 09bd09f7c..cfb11f044 100644
--- a/py4DSTEM/process/diffraction/utils.py
+++ b/py4DSTEM/process/diffraction/utils.py
@@ -67,6 +67,16 @@ def get_orientation(self, ind_x, ind_y):
         orientation.angles = self.angles[ind_x, ind_y]
         return orientation
 
+    def get_orientation_single(self, ind_x, ind_y, ind_match):
+        orientation = Orientation(num_matches=1)
+        orientation.matrix = self.matrix[ind_x, ind_y, ind_match]
+        orientation.family = self.family[ind_x, ind_y, ind_match]
+        orientation.corr = self.corr[ind_x, ind_y, ind_match]
+        orientation.inds = self.inds[ind_x, ind_y, ind_match]
+        orientation.mirror = self.mirror[ind_x, ind_y, ind_match]
+        orientation.angles = self.angles[ind_x, ind_y, ind_match]
+        return orientation
+
     # def __copy__(self):
     #     return OrientationMap(self.name)
     # def __deepcopy__(self, memo):
diff --git a/py4DSTEM/process/phase/iterative_base_class.py b/py4DSTEM/process/phase/iterative_base_class.py
index ae4c92d4b..6d7967550 100644
--- a/py4DSTEM/process/phase/iterative_base_class.py
+++ b/py4DSTEM/process/phase/iterative_base_class.py
@@ -13,8 +13,8 @@
 
 try:
     import cupy as cp
-except ImportError:
-    cp = None
+except ModuleNotFoundError:
+    cp = np
 
 from emdfile import Array, Custom, Metadata, _read_metadata, tqdmnd
 from py4DSTEM.data import Calibration
diff --git a/py4DSTEM/process/phase/iterative_dpc.py b/py4DSTEM/process/phase/iterative_dpc.py
index 4c80ed177..02138d738 100644
--- a/py4DSTEM/process/phase/iterative_dpc.py
+++ b/py4DSTEM/process/phase/iterative_dpc.py
@@ -13,8 +13,8 @@
 
 try:
     import cupy as cp
-except ImportError:
-    cp = None
+except ModuleNotFoundError:
+    cp = np
 
 from emdfile import Array, Custom, Metadata, _read_metadata, tqdmnd
 from py4DSTEM.data import Calibration
diff --git a/py4DSTEM/process/phase/iterative_mixedstate_ptychography.py b/py4DSTEM/process/phase/iterative_mixedstate_ptychography.py
index 56fec1004..ceae66cd8 100644
--- a/py4DSTEM/process/phase/iterative_mixedstate_ptychography.py
+++ b/py4DSTEM/process/phase/iterative_mixedstate_ptychography.py
@@ -14,8 +14,8 @@
 
 try:
     import cupy as cp
-except ImportError:
-    cp = None
+except ModuleNotFoundError:
+    cp = np
 
 from emdfile import Custom, tqdmnd
 from py4DSTEM import DataCube
diff --git a/py4DSTEM/process/phase/iterative_multislice_ptychography.py b/py4DSTEM/process/phase/iterative_multislice_ptychography.py
index a352502d0..aee383675 100644
--- a/py4DSTEM/process/phase/iterative_multislice_ptychography.py
+++ b/py4DSTEM/process/phase/iterative_multislice_ptychography.py
@@ -14,8 +14,8 @@
 
 try:
     import cupy as cp
-except ImportError:
-    cp = None
+except ModuleNotFoundError:
+    cp = np
 
 from emdfile import Custom, tqdmnd
 from py4DSTEM import DataCube
diff --git a/py4DSTEM/process/phase/iterative_overlap_magnetic_tomography.py b/py4DSTEM/process/phase/iterative_overlap_magnetic_tomography.py
index 8691a121d..b09d18ca7 100644
--- a/py4DSTEM/process/phase/iterative_overlap_magnetic_tomography.py
+++ b/py4DSTEM/process/phase/iterative_overlap_magnetic_tomography.py
@@ -16,8 +16,8 @@
 
 try:
     import cupy as cp
-except ImportError:
-    cp = None
+except ModuleNotFoundError:
+    cp = np
 
 from emdfile import Custom, tqdmnd
 from py4DSTEM import DataCube
diff --git a/py4DSTEM/process/phase/iterative_overlap_tomography.py b/py4DSTEM/process/phase/iterative_overlap_tomography.py
index d6bee12fd..1f6be1c38 100644
--- a/py4DSTEM/process/phase/iterative_overlap_tomography.py
+++ b/py4DSTEM/process/phase/iterative_overlap_tomography.py
@@ -16,8 +16,8 @@
 
 try:
     import cupy as cp
-except ImportError:
-    cp = None
+except ModuleNotFoundError:
+    cp = np
 
 from emdfile import Custom, tqdmnd
 from py4DSTEM import DataCube
diff --git a/py4DSTEM/process/phase/iterative_parallax.py b/py4DSTEM/process/phase/iterative_parallax.py
index 80cdd8cd8..7c5896b6a 100644
--- a/py4DSTEM/process/phase/iterative_parallax.py
+++ b/py4DSTEM/process/phase/iterative_parallax.py
@@ -19,8 +19,8 @@
 
 try:
     import cupy as cp
-except ImportError:
-    cp = None
+except ModuleNotFoundError:
+    cp = np
 
 warnings.simplefilter(action="always", category=UserWarning)
 
diff --git a/py4DSTEM/process/phase/iterative_simultaneous_ptychography.py b/py4DSTEM/process/phase/iterative_simultaneous_ptychography.py
index 8881d021c..e3713cde1 100644
--- a/py4DSTEM/process/phase/iterative_simultaneous_ptychography.py
+++ b/py4DSTEM/process/phase/iterative_simultaneous_ptychography.py
@@ -14,8 +14,8 @@
 
 try:
     import cupy as cp
-except ImportError:
-    cp = None
+except ModuleNotFoundError:
+    cp = np
 
 from emdfile import Custom, tqdmnd
 from py4DSTEM import DataCube
diff --git a/py4DSTEM/process/phase/iterative_singleslice_ptychography.py b/py4DSTEM/process/phase/iterative_singleslice_ptychography.py
index 0480bae8a..df0ef5e1c 100644
--- a/py4DSTEM/process/phase/iterative_singleslice_ptychography.py
+++ b/py4DSTEM/process/phase/iterative_singleslice_ptychography.py
@@ -14,8 +14,8 @@
 
 try:
     import cupy as cp
-except ImportError:
-    cp = None
+except ModuleNotFoundError:
+    cp = np
 
 from emdfile import Custom, tqdmnd
 from py4DSTEM.datacube import DataCube
diff --git a/py4DSTEM/process/utils/cross_correlate.py b/py4DSTEM/process/utils/cross_correlate.py
index f9aac1312..50de91e33 100644
--- a/py4DSTEM/process/utils/cross_correlate.py
+++ b/py4DSTEM/process/utils/cross_correlate.py
@@ -6,8 +6,8 @@
 
 try:
     import cupy as cp
-except ImportError:
-    cp = None
+except ModuleNotFoundError:
+    cp = np
 
 
 def get_cross_correlation(ar, template, corrPower=1, _returnval="real"):
diff --git a/py4DSTEM/process/utils/multicorr.py b/py4DSTEM/process/utils/multicorr.py
index 8523c8e62..bc07390bb 100644
--- a/py4DSTEM/process/utils/multicorr.py
+++ b/py4DSTEM/process/utils/multicorr.py
@@ -15,8 +15,8 @@
 
 try:
     import cupy as cp
-except ImportError:
-    cp = None
+except ModuleNotFoundError:
+    cp = np
 
 
 def upsampled_correlation(imageCorr, upsampleFactor, xyShift, device="cpu"):
diff --git a/py4DSTEM/process/utils/utils.py b/py4DSTEM/process/utils/utils.py
index 03d3d07a0..4ef2e1d8a 100644
--- a/py4DSTEM/process/utils/utils.py
+++ b/py4DSTEM/process/utils/utils.py
@@ -24,8 +24,8 @@ def clear_output(wait=True):
 
 try:
     import cupy as cp
-except ImportError:
-    cp = None
+except ModuleNotFoundError:
+    cp = np
 
 
 def radial_reduction(ar, x0, y0, binsize=1, fn=np.mean, coords=None):
diff --git a/py4DSTEM/process/wholepatternfit/wp_models.py b/py4DSTEM/process/wholepatternfit/wp_models.py
index b69a74f93..3d53c1743 100644
--- a/py4DSTEM/process/wholepatternfit/wp_models.py
+++ b/py4DSTEM/process/wholepatternfit/wp_models.py
@@ -583,7 +583,9 @@ def jacobian(self, J: np.ndarray, x: np.ndarray, **static_data) -> None:
 
         mask = r_disk < (2 * disk_radius)
 
-        top_exp = mask * np.exp(4 * ((mask * r_disk) - disk_radius) / disk_width)
+        top_exp = mask * np.exp(
+            np.minimum(30, 4 * ((mask * r_disk) - disk_radius) / disk_width)
+        )
 
         # dF/d(x0)
         dx = (
@@ -1039,7 +1041,10 @@ def jacobian(self, J: np.ndarray, x: np.ndarray, **static_data):
 
         mask = r_disk < (2 * disk_radius)
 
-        top_exp = mask * np.exp(4 * ((mask * r_disk) - disk_radius) / disk_width)
+        # clamp the argument of the exponent at a very large finite value
+        top_exp = mask * np.exp(
+            np.minimum(30, 4 * ((mask * r_disk) - disk_radius) / disk_width)
+        )
 
         # dF/d(x0)
         dx = (
diff --git a/py4DSTEM/process/wholepatternfit/wpf.py b/py4DSTEM/process/wholepatternfit/wpf.py
index 3ee820060..f206004b4 100644
--- a/py4DSTEM/process/wholepatternfit/wpf.py
+++ b/py4DSTEM/process/wholepatternfit/wpf.py
@@ -405,9 +405,8 @@ def fit_all_patterns(
                         opt.status,
                     ]
                 except Exception as err:
-                    # print(err)
                     fit_data_single = x0
-                    fit_metrics_single = [0, 0, 0, 0]
+                    fit_metrics_single = [0, 0, 0, -2]
 
                 fit_data[:, rx, ry] = fit_data_single
                 fit_metrics[:, rx, ry] = fit_metrics_single
@@ -468,7 +467,8 @@ def get_lattice_maps(self) -> list[RealSlice]:
                     self.fit_data.data[lat.params["uy"].offset],
                     self.fit_data.data[lat.params["vx"].offset],
                     self.fit_data.data[lat.params["vy"].offset],
-                    np.ones(self.fit_data.data.shape[1:], dtype=np.bool_),
+                    self.fit_metrics["status"].data
+                    >= 0,  # negative status indicates fit error
                 ],
                 axis=0,
             )
@@ -640,7 +640,7 @@ def _fit_single_pattern(
         except Exception as err:
             # print(err)
             fit_coefs = initial_guess
-            fit_metrics_single = [0, 0, 0, 0]
+            fit_metrics_single = [0, 0, 0, -2]
 
             return fit_coefs, fit_metrics_single
     else:
diff --git a/py4DSTEM/process/wholepatternfit/wpf_viz.py b/py4DSTEM/process/wholepatternfit/wpf_viz.py
index 06c55edfb..436ae40a2 100644
--- a/py4DSTEM/process/wholepatternfit/wpf_viz.py
+++ b/py4DSTEM/process/wholepatternfit/wpf_viz.py
@@ -222,6 +222,7 @@ def show_fit_metrics(self, returnfig=False, **subplots_kwargs):
 
     opt_cmap = mpl_c.ListedColormap(
         (
+            (0.6, 0.05, 0.05),
             (0.8941176470588236, 0.10196078431372549, 0.10980392156862745),
             (0.21568627450980393, 0.49411764705882355, 0.7215686274509804),
             (0.30196078431372547, 0.6862745098039216, 0.2901960784313726),
@@ -231,11 +232,12 @@
         )
     )
     im = ax[0, 1].matshow(
-        self.fit_metrics["status"].data, cmap=opt_cmap, vmin=-1.5, vmax=4.5
+        self.fit_metrics["status"].data, cmap=opt_cmap, vmin=-2.5, vmax=4.5
     )
-    cbar = fig.colorbar(im, ax=ax[0, 1], ticks=[-1, 0, 1, 2, 3, 4])
+    cbar = fig.colorbar(im, ax=ax[0, 1], ticks=[-2, -1, 0, 1, 2, 3, 4])
     cbar.ax.set_yticklabels(
         [
+            "Unknown Error",
             "MINPACK Error",
             "Max f evals exceeded",
             "$gtol$ satisfied",
diff --git a/py4DSTEM/version.py b/py4DSTEM/version.py
index 23f00709c..224f1fb74 100644
--- a/py4DSTEM/version.py
+++ b/py4DSTEM/version.py
@@ -1 +1 @@
-__version__ = "0.14.3"
+__version__ = "0.14.4"
diff --git a/setup.py b/setup.py
index 069bf1600..c3cbbd151 100644
--- a/setup.py
+++ b/setup.py
@@ -57,8 +57,8 @@
     package_data={
         "py4DSTEM": [
             "process/utils/scattering_factors.txt",
-            "process/diskdetection/multicorr_row_kernel.cu",
-            "process/diskdetection/multicorr_col_kernel.cu",
+            "braggvectors/multicorr_row_kernel.cu",
+            "braggvectors/multicorr_col_kernel.cu",
        ]
    },
)
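
A note on the `except ModuleNotFoundError: cp = np` pattern adopted across the modules in this diff: aliasing `cp` to NumPy means the GPU-aware code paths still resolve on CPU-only machines, instead of crashing later on a `None` object. A minimal sketch of the idiom; the `normalize` function here is a hypothetical illustration, not a py4DSTEM API:

```python
import numpy as np

try:
    import cupy as cp
except ModuleNotFoundError:
    # No cupy/CUDA available: alias cp to numpy so `cp.*` calls still work on CPU
    cp = np

def normalize(arr):
    # Dispatch to the array's own module: cupy for GPU arrays, numpy otherwise
    xp = cp.get_array_module(arr) if cp is not np else np
    return arr / xp.max(arr)
```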
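For reference, a minimal sketch of how the new `subpixel` flag to `get_virtual_image` might be used, assuming a `DataCube` named `datacube` whose origin shifts have already been calibrated; the detector geometry values are placeholders:

```python
# Bright-field virtual image from a circular detector, recentering the detector
# on each pattern's measured origin using subpixel (rather than integer) shifts
bf_image = datacube.get_virtual_image(
    mode="circle",
    geometry=((64, 64), 12),  # ((qx0, qy0), radius) -- placeholder values
    shift_center=True,
    subpixel=True,
)
```

With `subpixel=False` the detector mask is moved with `np.roll` on integer-rounded shifts, which is faster; `subpixel=True` routes through `get_shifted_ar` with bilinear interpolation, as in the code above.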
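Similarly, a sketch of how the new grain clustering methods added to `Crystal` might chain together, assuming `crystal` holds a computed orientation plan and `bragg_peaks` is a calibrated PointListArray of disk positions; all parameter values are illustrative only:

```python
# Match orientations; the new min_angle_between_matches_deg argument suppresses
# extra matches whose zone axes lie within 5 degrees of an earlier match
crystal.match_orientations(
    bragg_peaks,
    num_matches_return=2,
    min_angle_between_matches_deg=5.0,
)

# Cluster probe positions into grains using the correlation signal and a
# misorientation tolerance, then inspect the results
crystal.cluster_grains(
    threshold_add=1.0,
    threshold_grow=0.1,
    angle_tolerance_deg=5.0,
)
crystal.plot_clusters(area_min=2)
crystal.plot_cluster_size(pixel_area=0.25, pixel_area_units="nm^2")

# Collapse the clusters back into an orientation map, striping overlapping grains
orientation_map_clustered = crystal.cluster_orientation_map(area_min=2)
crystal.plot_orientation_maps(orientation_map_clustered)
```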
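Finally, the `np.minimum(30, ...)` clamp added to the `wp_models.py` Jacobians guards against floating-point overflow: `np.exp` overflows float64 once its argument exceeds roughly 709, and a sharp disk edge (small `disk_width`) can easily push the sigmoid argument past that. A quick demonstration of the failure mode and the fix:

```python
import numpy as np

arg = np.array([4 * 50.0 / 0.1])  # a sharp disk edge can yield arguments ~2000
with np.errstate(over="ignore"):
    print(np.exp(arg))              # [inf] -- poisons the Jacobian
print(np.exp(np.minimum(30, arg)))  # [1.06864746e+13] -- large but finite
```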