diff --git a/README.md b/README.md index 6df64dbc7..3fe6cc745 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ > :warning: **py4DSTEM version 0.14 update** :warning: Warning: this is a major update and we expect some workflows to break. You can still install previous versions of py4DSTEM [as discussed here](#legacyinstall) - +> :warning: **Phase retrieval refactor version 0.14.9** :warning: Warning: The phase-retrieval modules in py4DSTEM (DPC, parallax, and ptychography) underwent a major refactor in version 0.14.9 and as such older tutorial notebooks will not work as expected. Notably, class names have been pruned to remove the trailing "Reconstruction" (`DPCReconstruction` -> `DPC` etc.), and regularization functions have dropped the `_iter` suffix (and are instead specified as boolean flags). We are working on updating the tutorial notebooks to reflect these changes. In the meantime, there's some more information in the relevant pull request [here](https://github.com/py4dstem/py4DSTEM/pull/597#issuecomment-1890325568). ![py4DSTEM logo](/images/py4DSTEM_logo.png) diff --git a/docs/papers.md b/docs/papers.md index 4b1bc6b8d..4539dd943 100644 --- a/docs/papers.md +++ b/docs/papers.md @@ -5,57 +5,76 @@ Please email clophus@lbl.gov if you have used py4DSTEM for analysis and your paper is not listed below! -### 2022 (9) +### 2023 (0) -[Correlative image learning of chemo-mechanics in phase-transforming solids](https://www.nature.com/articles/s41563-021-01191-0), Nature Materials (2022) -[Correlative analysis of structure and chemistry of LixFePO4 platelets using 4D-STEM and X-ray ptychography](https://doi.org/10.1016/j.mattod.2021.10.031), Materials Today 52, 102 (2022). -[Visualizing Grain Statistics in MOCVD WSe2 through Four-Dimensional Scanning Transmission Electron Microscopy](https://doi.org/10.1021/acs.nanolett.1c04315), Nano Letters 22, 2578 (2022). 
+### 2022 (16) -[Electric field control of chirality](https://doi.org/10.1126/sciadv.abj8030), Science Advances 8 (2022). -[Real-Time Interactive 4D-STEM Phase-Contrast Imaging From Electron Event Representation Data: Less computation with the right representation](https://doi.org/10.1109/MSP.2021.3120981), IEEE Signal Processing Magazine 39, 25 (2022). +[Disentangling multiple scattering with deep learning: application to strain mapping from electron diffraction patterns](https://doi.org/10.1038/s41524-022-00939-9), J Munshi*, A Rakowski*, et al., npj Computational Materials 8, 254 (2022) -[Microstructural dependence of defect formation in iron-oxide thin films](https://doi.org/10.1016/j.apsusc.2022.152844), Applied Surface Science 589, 152844 (2022). +[Flexible CO2 Sensor Architecture with Selective Nitrogen Functionalities by One-Step Laser-Induced Conversion of Versatile Organic Ink](https://doi.org/10.1002/adfm.202207406), H Wang et al., Advanced Functional Materials 32, 2207406 (2022) -[Chemical and Structural Alterations in the Amorphous Structure of Obsidian due to Nanolites](https://doi.org/10.1017/S1431927621013957), Microscopy and Microanalysis 28, 289 (2022). +[Defect Contrast with 4D-STEM: Understanding Crystalline Order with Virtual Detectors and Beam Modification](https://doi.org/10.1093/micmic/ozad045) SM Ribet et al., Microscopy and Microanalysis 29, 1087 (2023). -[Nanoscale characterization of crystalline and amorphous phases in silicon oxycarbide ceramics using 4D-STEM](https://doi.org/10.1016/j.matchar.2021.111512), Materials Characterization 181, 111512 (2021). +[Structural heterogeneity in non-crystalline TexSe1−x thin films](https://doi.org/10.1063/5.0094600), B Sari et al., Applied Physics Letters 121, 012101 (2022) -[Disentangling multiple scattering with deep learning: application to strain mapping from electron diffraction patterns](https://arxiv.org/abs/2202.00204), arXiv:2202.00204 (2022). 
+[Cryogenic 4D-STEM analysis of an amorphouscrystalline polymer blend: Combined nanocrystalline and amorphous phase mapping](https://doi.org/10.1016/j.isci.2022.103882), J Donohue et al., iScience 25, 103882 (2022) + +[Hydrogen-assisted decohesion associated with nanosized grain boundary κ-carbides in a high-Mn lightweight steel](https://doi.org/10.1016/j.actamat.2022.118392), MN Elkot et al., Acta Materialia + 241, 118392 (2022) + +[4D-STEM Ptychography for Electron-Beam-Sensitive Materials](https://doi.org/10.1021/acscentsci.2c01137), G Li et al., ACS Central Science 8, 1579 (2022) + +[Developing a Chemical and Structural Understanding of the Surface Oxide in a Niobium Superconducting Qubit](https://doi.org/10.1021/acsnano.2c07913), AA Murthy et al., ACS Nano 16, 17257 (2022) + +[Correlative image learning of chemo-mechanics in phase-transforming solids](https://www.nature.com/articles/s41563-021-01191-0), HD Deng et al., Nature Materials (2022) + +[Correlative analysis of structure and chemistry of LixFePO4 platelets using 4D-STEM and X-ray ptychography](https://doi.org/10.1016/j.mattod.2021.10.031), LA Hughes*, BH Savitzky, et al., Materials Today 52, 102 (2022) + +[Visualizing Grain Statistics in MOCVD WSe2 through Four-Dimensional Scanning Transmission Electron Microscopy](https://doi.org/10.1021/acs.nanolett.1c04315), A Londoño-Calderon et al., Nano Letters 22, 2578 (2022) + +[Electric field control of chirality](https://doi.org/10.1126/sciadv.abj8030), P Behera et al., Science Advances 8 (2022) + +[Real-Time Interactive 4D-STEM Phase-Contrast Imaging From Electron Event Representation Data: Less computation with the right representation](https://doi.org/10.1109/MSP.2021.3120981), P Pelz et al., IEEE Signal Processing Magazine 39, 25 (2022) + +[Microstructural dependence of defect formation in iron-oxide thin films](https://doi.org/10.1016/j.apsusc.2022.152844), BK Derby et al., Applied Surface Science 589, 152844 (2022) + +[Chemical and Structural 
Alterations in the Amorphous Structure of Obsidian due to Nanolites](https://doi.org/10.1017/S1431927621013957), E Kennedy et al., Microscopy and Microanalysis 28, 289 (2022) + +[Nanoscale characterization of crystalline and amorphous phases in silicon oxycarbide ceramics using 4D-STEM](https://doi.org/10.1016/j.matchar.2021.111512), Ni Yang et al., Materials Characterization 181, 111512 (2021) ### 2021 (10) -[Cryoforged nanotwinned titanium with ultrahigh strength and ductility](https://doi.org/10.1126/science.abe7252), Science 16, 373, 1363 (2021). +[Cryoforged nanotwinned titanium with ultrahigh strength and ductility](https://doi.org/10.1126/science.abe7252), Science 16, 373, 1363 (2021) -[Strain fields in twisted bilayer graphene](https://doi.org/10.1038/s41563-021-00973-w), Nature Materials 20, 956 (2021). +[Strain fields in twisted bilayer graphene](https://doi.org/10.1038/s41563-021-00973-w), Nature Materials 20, 956 (2021) -[Determination of Grain-Boundary Structure and Electrostatic Characteristics in a SrTiO3 Bicrystal by Four-Dimensional Electron Microscopy](https://doi.org/10.1021/acs.nanolett.1c02960), Nanoletters 21, 9138 (2021). +[Determination of Grain-Boundary Structure and Electrostatic Characteristics in a SrTiO3 Bicrystal by Four-Dimensional Electron Microscopy](https://doi.org/10.1021/acs.nanolett.1c02960), Nanoletters 21, 9138 (2021) [Local Lattice Deformation of Tellurene Grain Boundaries by Four-Dimensional Electron Microscopy](https://pubs.acs.org/doi/10.1021/acs.jpcc.1c00308), Journal of Physical Chemistry C 125, 3396 (2021). -[Extreme mixing in nanoscale transition metal alloys](https://doi.org/10.1016/j.matt.2021.04.014), Matter 4, 2340 (2021). +[Extreme mixing in nanoscale transition metal alloys](https://doi.org/10.1016/j.matt.2021.04.014), Matter 4, 2340 (2021) -[Multibeam Electron Diffraction](https://doi.org/10.1017/S1431927620024770), Microscopy and Microanalysis 27, 129 (2021). 
+[Multibeam Electron Diffraction](https://doi.org/10.1017/S1431927620024770), Microscopy and Microanalysis 27, 129 (2021) -[A Fast Algorithm for Scanning Transmission Electron Microscopy Imaging and 4D-STEM Diffraction Simulations](https://doi.org/10.1017/S1431927621012083), Microscopy and Microanalysis 27, 835 (2021). +[A Fast Algorithm for Scanning Transmission Electron Microscopy Imaging and 4D-STEM Diffraction Simulations](https://doi.org/10.1017/S1431927621012083), Microscopy and Microanalysis 27, 835 (2021) -[Fast Grain Mapping with Sub-Nanometer Resolution Using 4D-STEM with Grain Classification by Principal Component Analysis and Non-Negative Matrix Factorization](https://doi.org/10.1017/S1431927621011946), Microscopy and Microanalysis 27, 794 +[Fast Grain Mapping with Sub-Nanometer Resolution Using 4D-STEM with Grain Classification by Principal Component Analysis and Non-Negative Matrix Factorization](https://doi.org/10.1017/S1431927621011946), Microscopy and Microanalysis 27, 794 (2021) -[Prismatic 2.0 – Simulation software for scanning and high resolution transmission electron microscopy (STEM and HRTEM)](https://doi.org/10.1016/j.micron.2021.103141), Micron 151, 103141 (2021). +[Prismatic 2.0 – Simulation software for scanning and high resolution transmission electron microscopy (STEM and HRTEM)](https://doi.org/10.1016/j.micron.2021.103141), Micron 151, 103141 (2021) -[4D-STEM of Beam-Sensitive Materials](https://doi.org/10.1021/acs.accounts.1c00073), Accounts of Chemical Research 54, 2543 (2021). +[4D-STEM of Beam-Sensitive Materials](https://doi.org/10.1021/acs.accounts.1c00073), Accounts of Chemical Research 54, 2543 (2021) ### 2020 (3) -[Patterned probes for high precision 4D-STEM bragg measurements](https://doi.org/10.1063/5.0015532), Ultramicroscopy 209, 112890 (2020). 
- +[Patterned probes for high precision 4D-STEM bragg measurements](https://doi.org/10.1063/5.0015532), Ultramicroscopy 209, 112890 (2020) -[Tilted fluctuation electron microscopy](https://doi.org/10.1063/5.0015532), Applied Physics Letters 117, 091903 (2020). +[Tilted fluctuation electron microscopy](https://doi.org/10.1063/5.0015532), Applied Physics Letters 117, 091903 (2020) [4D-STEM elastic stress state characterisation of a TWIP steel nanotwin](https://arxiv.org/abs/2004.03982), arXiv:2004.03982 diff --git a/py4DSTEM/datacube/datacube.py b/py4DSTEM/datacube/datacube.py index 1db12c7cf..9d5da2562 100644 --- a/py4DSTEM/datacube/datacube.py +++ b/py4DSTEM/datacube/datacube.py @@ -314,3 +314,1032 @@ def add(self, data, name=""): + assert len(Rshape) == 2, "Rshape must have a length of 2" + d = set_scan_shape(self, Rshape[0], Rshape[1]) + return d + + def swap_RQ(self): + """ + Swaps the first and last two dimensions of the 4D datacube. + """ + from py4DSTEM.preprocess import swap_RQ + + d = swap_RQ(self) + return d + + def swap_Rxy(self): + """ + Swaps the real space x and y coordinates. + """ + from py4DSTEM.preprocess import swap_Rxy + + d = swap_Rxy(self) + return d + + def swap_Qxy(self): + """ + Swaps the diffraction space x and y coordinates. + """ + from py4DSTEM.preprocess import swap_Qxy + + d = swap_Qxy(self) + return d + + def crop_Q(self, ROI): + """ + Crops the data in diffraction space about the region specified by ROI. + + Accepts: + ROI (4-tuple): Specifies (Qx_min,Qx_max,Qy_min,Qy_max) + """ + from py4DSTEM.preprocess import crop_data_diffraction + + assert len(ROI) == 4, "Crop region `ROI` must have length 4" + d = crop_data_diffraction(self, ROI[0], ROI[1], ROI[2], ROI[3]) + return d + + def crop_R(self, ROI): + """ + Crops the data in real space about the region specified by ROI. 
+ + Accepts: + ROI (4-tuple): Specifies (Rx_min,Rx_max,Ry_min,Ry_max) + """ + from py4DSTEM.preprocess import crop_data_real + + assert len(ROI) == 4, "Crop region `ROI` must have length 4" + d = crop_data_real(self, ROI[0], ROI[1], ROI[2], ROI[3]) + return d + + def bin_Q(self, N, dtype=None): + """ + Bins the data in diffraction space by bin factor N + + Parameters + ---------- + N : int + The binning factor + dtype : a datatype (optional) + Specify the datatype for the output. If not passed, the datatype + is left unchanged + + Returns + ------ + datacube : DataCube + """ + from py4DSTEM.preprocess import bin_data_diffraction + + d = bin_data_diffraction(self, N, dtype) + return d + + def pad_Q(self, N=None, output_size=None): + """ + Pads the data in diffraction space by pad factor N, or to match output_size. + + Accepts: + N (float, or Sequence[float]): the padding factor + output_size ((int,int)): the padded output size + """ + from py4DSTEM.preprocess import pad_data_diffraction + + d = pad_data_diffraction(self, pad_factor=N, output_size=output_size) + return d + + def resample_Q(self, N=None, output_size=None, method="bilinear"): + """ + Resamples the data in diffraction space by resampling factor N, or to match output_size, + using either 'fourier' or 'bilinear' interpolation. 
+ + Accepts: + N (float, or Sequence[float]): the resampling factor + output_size ((int,int)): the resampled output size + method (str): 'fourier' or 'bilinear' (default) + """ + from py4DSTEM.preprocess import resample_data_diffraction + + d = resample_data_diffraction( + self, resampling_factor=N, output_size=output_size, method=method + ) + return d + + def bin_Q_mmap(self, N, dtype=np.float32): + """ + Bins the data in diffraction space by bin factor N for memory mapped data + + Accepts: + N (int): the binning factor + dtype: the data type + """ + from py4DSTEM.preprocess import bin_data_mmap + + d = bin_data_mmap(self, N) + return d + + def bin_R(self, N): + """ + Bins the data in real space by bin factor N + + Accepts: + N (int): the binning factor + """ + from py4DSTEM.preprocess import bin_data_real + + d = bin_data_real(self, N) + return d + + def thin_R(self, N): + """ + Reduces the data in real space by skipping every N patterns in the x and y directions. + + Accepts: + N (int): the thinning factor + """ + from py4DSTEM.preprocess import thin_data_real + + d = thin_data_real(self, N) + return d + + def filter_hot_pixels(self, thresh, ind_compare=1, return_mask=False): + """ + This function performs pixel filtering to remove hot / bright pixels. We first compute a moving local ordering filter, + applied to the mean diffraction image. This ordering filter will return a single value from the local sorted intensity + values, given by ind_compare. ind_compare=0 would be the highest intensity, =1 would be the second highest, etc. + Next, a mask is generated for all pixels which are at least a value thresh higher than the local ordering filter output. + Finally, we loop through all diffraction images, and any pixels defined by mask are replaced by their 3x3 local median. + + Args: + datacube (DataCube): + thresh (float): threshold for replacing hot pixels, if pixel value minus local ordering filter exceeds it. 
+ ind_compare (int): which median filter value to compare against. 0 = brightest pixel, 1 = next brightest, etc. + return_mask (bool): if True, returns the filter mask + + Returns: + datacube (DataCube) + mask (optional, boolean Array) the bad pixel mask + """ + from py4DSTEM.preprocess import filter_hot_pixels + + datacube = filter_hot_pixels( + self, + thresh, + ind_compare, + return_mask, + ) + return datacube + + def median_filter_masked_pixels(self, mask, kernel_width: int = 3): + """ + This function fixes a datacube where the same pixels are consistently + bad. It requires a mask that identifies all the bad pixels in the dataset. + Then for each diffraction pattern, a median kernel is applied around each + bad pixel with the specified width. + """ + from py4DSTEM.preprocess import median_filter_masked_pixels + + datacube = median_filter_masked_pixels( + self, + mask, + kernel_width, + ) + return datacube + + # Probe + + def get_vacuum_probe( + self, + ROI=None, + align=True, + mask=None, + threshold=0.0, + expansion=12, + opening=3, + verbose=False, + returncalc=True, + ): + """ + Computes a vacuum probe. + + Which diffraction patterns are included in the calculation is specified + by the `ROI` parameter. Diffraction patterns are aligned before averaging + if `align` is True (default). A global mask is applied to each diffraction + pattern before aligning/averaging if `mask` is specified. After averaging, + a final masking step is applied according to the parameters `threshold`, + `expansion`, and `opening`. + + Parameters + ---------- + ROI : optional, boolean array or len 4 list/tuple + If unspecified, uses the whole datacube. If a boolean array is + passed must be real-space shaped, and True pixels are used. 
If a + 4-tuple is passed, uses the region inside the limits + (rx_min,rx_max,ry_min,ry_max) + align : optional, bool + if True, aligns the probes before averaging + mask : optional, array + mask applied to each diffraction pattern before alignment and + averaging + threshold : float + in the final masking step, values less than max(probe)*threshold + are considered outside the probe + expansion : int + number of pixels by which the final mask is expanded after + thresholding + opening : int + size of binary opening applied to the final mask to eliminate stray + bright pixels + verbose : bool + toggles verbose output + returncalc : bool + if True, returns the answer + + Returns + ------- + probe : Probe, optional + the vacuum probe + """ + from py4DSTEM.process.utils import get_shifted_ar, get_shift + from py4DSTEM.braggvectors import Probe + + # parse region to use + if ROI is None: + ROI = np.ones(self.Rshape, dtype=bool) + elif isinstance(ROI, tuple): + assert len(ROI) == 4, "if ROI is a tuple must be length 4" + _ROI = np.ones(self.Rshape, dtype=bool) + ROI = _ROI[ROI[0] : ROI[1], ROI[2] : ROI[3]] + else: + assert isinstance(ROI, np.ndarray) + assert ROI.shape == self.Rshape + xy = np.vstack(np.nonzero(ROI)) + length = xy.shape[1] + + # setup global mask + if mask is None: + mask = 1 + else: + assert mask.shape == self.Qshape + + # compute average probe + probe = self.data[xy[0, 0], xy[1, 0], :, :] + for n in tqdmnd(range(1, length)): + curr_DP = self.data[xy[0, n], xy[1, n], :, :] * mask + if align: + xshift, yshift = get_shift(probe, curr_DP) + curr_DP = get_shifted_ar(curr_DP, xshift, yshift) + probe = probe * (n - 1) / n + curr_DP / n + + # mask + mask = probe > np.max(probe) * threshold + mask = binary_opening(mask, iterations=opening) + mask = binary_dilation(mask, iterations=1) + mask = ( + np.cos( + (np.pi / 2) + * np.minimum( + distance_transform_edt(np.logical_not(mask)) / expansion, 1 + ) + ) + ** 2 + ) + probe *= mask + + # make a probe, add to tree, 
and return + probe = Probe(probe) + self.attach(probe) + if returncalc: + return probe + + def get_probe_size( + self, + dp=None, + thresh_lower=0.01, + thresh_upper=0.99, + N=100, + plot=False, + returncal=True, + write_to_cal=True, + **kwargs, + ): + """ + Gets the center and radius of the probe in the diffraction plane. + + The algorithm is as follows: + First, create a series of N binary masks, by thresholding the diffraction + pattern DP with a linspace of N thresholds from thresh_lower to + thresh_upper, measured relative to the maximum intensity in DP. + Using the area of each binary mask, calculate the radius r of a circular + probe. Because the central disk is typically very intense relative to + the rest of the DP, r should change very little over a wide range of + intermediate values of the threshold. The range in which r is trustworthy + is found by taking the derivative of r(thresh) and identifying + where it is small. The radius is taken to be the mean of these r values. + Using the threshold corresponding to this r, a mask is created and the + CoM of the DP times this mask is taken. This is taken to be the origin + x0,y0. + + Args: + dp (str or array): specifies the diffraction pattern in which to + find the central disk. A position averaged, or shift-corrected + and averaged, DP works best. If mode is None, the diffraction + pattern stored in the tree from 'get_dp_mean' is used. If mode + is a string it specifies the name of another virtual diffraction + pattern in the tree. If mode is an array, the array is used to + calculate probe size. 
+ thresh_lower (float, 0 to 1): the lower limit of threshold values + thresh_upper (float, 0 to 1): the upper limit of threshold values + N (int): the number of thresholds / masks to use + plot (bool): if True plots results + plot_params(dict): dictionary to modify defaults in plot + return_calc (bool): if True returns 3-tuple described below + write_to_cal (bool): if True, looks for a Calibration instance + and writes the measured probe radius there + + Returns: + (3-tuple): A 3-tuple containing: + + * **r**: *(float)* the central disk radius, in pixels + * **x0**: *(float)* the x position of the central disk center + * **y0**: *(float)* the y position of the central disk center + """ + # perform computation + from py4DSTEM.process.calibration import get_probe_size + + if dp is None: + assert ( + "dp_mean" in self.treekeys + ), "calculate .get_dp_mean() or pass a `dp` arg" + DP = self.tree("dp_mean").data + elif type(dp) == str: + assert dp in self.treekeys, f"mode {dp} not found in the tree" + DP = self.tree(dp) + elif type(dp) == np.ndarray: + assert dp.shape == self.Qshape, "must be a diffraction space shape 2D array" + DP = dp + + x = get_probe_size( + DP, + thresh_lower=thresh_lower, + thresh_upper=thresh_upper, + N=N, + ) + + # try to add to calibration + if write_to_cal: + try: + self.calibration.set_probe_param(x) + except AttributeError: + raise Exception( + "writing to calibrations were requested, but could not be completed" + ) + + # plot results + if plot: + from py4DSTEM.visualize import show_circles + + show_circles(DP, (x[1], x[2]), x[0], vmin=0, vmax=1, **kwargs) + + # return + if returncal: + return x + + # Bragg disks + + def find_Bragg_disks( + self, + template, + data=None, + radial_bksb=False, + filter_function=None, + corrPower=1, + sigma=None, + sigma_dp=0, + sigma_cc=2, + subpixel="multicorr", + upsample_factor=16, + minAbsoluteIntensity=0, + minRelativeIntensity=0.005, + relativeToPeak=0, + minPeakSpacing=60, + edgeBoundary=20, + 
maxNumPeaks=70, + CUDA=False, + CUDA_batched=True, + distributed=None, + ML=False, + ml_model_path=None, + ml_num_attempts=1, + ml_batch_size=8, + name="braggvectors", + returncalc=True, + ): + """ + Finds the Bragg disks in the diffraction patterns represented by `data` by + cross/phase correlation with `template`. + + Behavior depends on `data`. If it is None (default), runs on the whole DataCube, + and stores the output in its tree. Otherwise, nothing is stored in tree, + but some value is returned. Valid entries are: + + - a 2-tuple of numbers (rx,ry): run on this diffraction image, + and return a QPoints instance + - a 2-tuple of arrays (rx,ry): run on these diffraction images, + and return a list of QPoints instances + - an Rspace shaped 2D boolean array: run on the diffraction images + specified by the True counts and return a list of QPoints + instances + + For disk detection on a full DataCube, the calculation can be performed + on the CPU, GPU or a cluster. By default the CPU is used. If `CUDA` is set + to True, tries to use the GPU. If `CUDA_batched` is also set to True, + batches the FFT/IFFT computations on the GPU. For distribution to a cluster, + distributed must be set to a dictionary, with contents describing how + distributed processing should be performed - see below for details. + + + For each diffraction pattern, the algorithm works in 4 steps: + + (1) any pre-processing is performed to the diffraction image. This is + accomplished by passing a callable function to the argument + `filter_function`, a bool to the argument `radial_bksb`, or a value >0 + to `sigma_dp`. If none of these are passed, this step is skipped. + (2) the diffraction image is cross correlated with the template. + Phase/hybrid correlations can be used instead by setting the + `corrPower` argument. 
Cross correlation can be skipped entirely, + and the subsequent steps performed directly on the diffraction + image instead of the cross correlation, by passing None to + `template`. + (3) the maxima of the cross correlation are located and their + positions and intensities stored. The cross correlation may be + passed through a gaussian filter first by passing the `sigma_cc` + argument. The method for maximum detection can be set with + the `subpixel` parameter. Options, from something like fastest/least + precise to slowest/most precise are 'pixel', 'poly', and 'multicorr'. + (4) filtering is applied to remove untrusted or undesired positive counts, + based on their intensity (`minRelativeIntensity`,`relativeToPeak`, + `minAbsoluteIntensity`) their proximity to one another or the + image edge (`minPeakSpacing`, `edgeBoundary`), and the total + number of peaks per pattern (`maxNumPeaks`). + + + Parameters + ---------- + template : 2D array + the vacuum probe template, in real space. For Probe instances, + this is `probe.kernel`. If None, does not perform a cross + correlation. + data : variable + see above + radial_bksb : bool + if True, computes a radial background given by the median of the + (circular) polar transform of each each diffraction pattern, and + subtracts this background from the pattern before applying any + filter function and computing the cross correlation. The origin + position must be set in the datacube's calibrations. Currently + only supported for full datacubes on the CPU. + filter_function : callable + filtering function to apply to each diffraction pattern before + peak finding. Must be a function of only one argument (the + diffraction pattern) and return the filtered diffraction pattern. + The shape of the returned DP must match the shape of the probe + kernel (but does not need to match the shape of the input + diffraction pattern, e.g. the filter can be used to bin the + diffraction pattern). 
If using distributed disk detection, the + function must be able to be pickled with by dill. + corrPower : float between 0 and 1, inclusive + the cross correlation power. A value of 1 corresponds to a cross + correlation, 0 corresponds to a phase correlation, and intermediate + values correspond to hybrid correlations. + sigma : float + alias for `sigma_cc` + sigma_dp : float + if >0, a gaussian smoothing filter with this standard deviation + is applied to the diffraction pattern before maxima are detected + sigma_cc : float + if >0, a gaussian smoothing filter with this standard deviation + is applied to the cross correlation before maxima are detected + subpixel : str + Whether to use subpixel fitting, and which algorithm to use. + Must be in ('none','poly','multicorr'). + * 'none': performs no subpixel fitting + * 'poly': polynomial interpolation of correlogram peaks (default) + * 'multicorr': uses the multicorr algorithm with DFT upsampling + upsample_factor : int + upsampling factor for subpixel fitting (only used when + subpixel='multicorr') + minAbsoluteIntensity : float + the minimum acceptable correlation peak intensity, on an absolute scale + minRelativeIntensity : float + the minimum acceptable correlation peak intensity, relative to the + intensity of the brightest peak + relativeToPeak : int + specifies the peak against which the minimum relative intensity is + measured -- 0=brightest maximum. 1=next brightest, etc. + minPeakSpacing : float + the minimum acceptable spacing between detected peaks + edgeBoundary (int): minimum acceptable distance for detected peaks from + the diffraction image edge, in pixels. + maxNumPeaks : int + the maximum number of peaks to return + CUDA : bool + If True, import cupy and use an NVIDIA GPU to perform disk detection + CUDA_batched : bool + If True, and CUDA is selected, the FFT and IFFT steps of disk detection + are performed in batches to better utilize GPU resources. 
+ distributed : dict + contains information for parallel processing using an IPyParallel or + Dask distributed cluster. Valid keys are: + * ipyparallel (dict): + * client_file (str): path to client json for connecting to your + existing IPyParallel cluster + * dask (dict): client (object): a dask client that connects to + your existing Dask cluster + * data_file (str): the absolute path to your original data + file containing the datacube + * cluster_path (str): defaults to the working directory during + processing + if distributed is None, which is the default, processing will be in + serial + name : str + name for the output BraggVectors + returncalc : bool + if True, returns the answer + + Returns + ------- + variable + See above. + """ + from py4DSTEM.braggvectors import find_Bragg_disks + + sigma_cc = sigma if sigma is not None else sigma_cc + + # parse args + if data is None: + x = self + elif isinstance(data, tuple): + x = self, data[0], data[1] + elif isinstance(data, np.ndarray): + assert data.dtype == bool, "array must be boolean" + assert data.shape == self.Rshape, "array must be Rspace shaped" + x = self.data[data, :, :] + else: + raise Exception(f"unexpected type for `data` {type(data)}") + + # compute + peaks = find_Bragg_disks( + data=x, + template=template, + radial_bksb=radial_bksb, + filter_function=filter_function, + corrPower=corrPower, + sigma_dp=sigma_dp, + sigma_cc=sigma_cc, + subpixel=subpixel, + upsample_factor=upsample_factor, + minAbsoluteIntensity=minAbsoluteIntensity, + minRelativeIntensity=minRelativeIntensity, + relativeToPeak=relativeToPeak, + minPeakSpacing=minPeakSpacing, + edgeBoundary=edgeBoundary, + maxNumPeaks=maxNumPeaks, + CUDA=CUDA, + CUDA_batched=CUDA_batched, + distributed=distributed, + ML=ML, + ml_model_path=ml_model_path, + ml_num_attempts=ml_num_attempts, + ml_batch_size=ml_batch_size, + ) + + if isinstance(peaks, Node): + # add metadata + peaks.name = name + peaks.metadata = Metadata( + name="gen_params", + data={ + 
#'gen_func' : + "template": template, + "filter_function": filter_function, + "corrPower": corrPower, + "sigma_dp": sigma_dp, + "sigma_cc": sigma_cc, + "subpixel": subpixel, + "upsample_factor": upsample_factor, + "minAbsoluteIntensity": minAbsoluteIntensity, + "minRelativeIntensity": minRelativeIntensity, + "relativeToPeak": relativeToPeak, + "minPeakSpacing": minPeakSpacing, + "edgeBoundary": edgeBoundary, + "maxNumPeaks": maxNumPeaks, + "CUDA": CUDA, + "CUDA_batched": CUDA_batched, + "distributed": distributed, + "ML": ML, + "ml_model_path": ml_model_path, + "ml_num_attempts": ml_num_attempts, + "ml_batch_size": ml_batch_size, + }, + ) + + # add to tree + if data is None: + self.attach(peaks) + + # return + if returncalc: + return peaks + + def get_beamstop_mask( + self, + threshold=0.25, + distance_edge=2.0, + include_edges=True, + sigma=0, + use_max_dp=False, + scale_radial=None, + name="mask_beamstop", + returncalc=True, + ): + """ + This function uses the mean diffraction pattern plus a threshold to + create a beamstop mask. + + Args: + threshold (float): Value from 0 to 1 defining initial threshold for + beamstop mask, taken from the sorted intensity values - 0 is the + dimmest pixel, while 1 uses the brightest pixels. + distance_edge (float): How many pixels to expand the mask. + include_edges (bool): If set to True, edge pixels will be included + in the mask. + sigma (float): + Gaussian blur std to apply to image before thresholding. + use_max_dp (bool): + Use the max DP instead of the mean DP. + scale_radial (float): + Scale from center of image by this factor (can help with edge) + name (string): Name of the output array. + returncalc (bool): Set to true to return the result. 
+ + Returns: + (Optional): if returncalc is True, returns the beamstop mask + + """ + + if scale_radial is not None: + x = np.arange(self.data.shape[2]) * 2.0 / self.data.shape[2] + y = np.arange(self.data.shape[3]) * 2.0 / self.data.shape[3] + ya, xa = np.meshgrid(y - np.mean(y), x - np.mean(x)) + im_scale = 1.0 + np.sqrt(xa**2 + ya**2) * scale_radial + + # Get image for beamstop mask + if use_max_dp: + # if not "dp_mean" in self.tree.keys(): + # self.get_dp_max(); + # im = self.tree["dp_max"].data.astype('float') + if not "dp_max" in self._branch.keys(): + self.get_dp_max() + im = self.tree("dp_max").data.copy().astype("float") + else: + if not "dp_mean" in self._branch.keys(): + self.get_dp_mean() + im = self.tree("dp_mean").data.copy() + + # if not "dp_mean" in self.tree.keys(): + # self.get_dp_mean(); + # im = self.tree["dp_mean"].data.astype('float') + + # smooth and scale if needed + if sigma > 0.0: + im = gaussian_filter(im, sigma, mode="nearest") + if scale_radial is not None: + im *= im_scale + + # Calculate beamstop mask + int_sort = np.sort(im.ravel()) + ind = np.round( + np.clip(int_sort.shape[0] * threshold, 0, int_sort.shape[0]) + ).astype("int") + intensity_threshold = int_sort[ind] + mask_beamstop = im >= intensity_threshold + + # clean up mask + mask_beamstop = np.logical_not(binary_fill_holes(np.logical_not(mask_beamstop))) + mask_beamstop = binary_fill_holes(mask_beamstop) + + # Edges + if include_edges: + mask_beamstop[0, :] = False + mask_beamstop[:, 0] = False + mask_beamstop[-1, :] = False + mask_beamstop[:, -1] = False + + # Expand mask + mask_beamstop = distance_transform_edt(mask_beamstop) < distance_edge + + # Wrap beamstop mask in a class + x = Array(data=mask_beamstop, name=name) + + # Add metadata + x.metadata = Metadata( + name="gen_params", + data={ + #'gen_func' : + "threshold": threshold, + "distance_edge": distance_edge, + "include_edges": include_edges, + "name": "mask_beamstop", + "returncalc": returncalc, + }, + ) + + # Add to 
tree + self.tree(x) + + # return + if returncalc: + return mask_beamstop + + def get_radial_bkgrnd(self, rx, ry, sigma=2): + """ + Computes and returns a background image for the diffraction + pattern at (rx,ry), populated by radial rings of constant intensity + about the origin, with the value of each ring given by the median + value of the diffraction pattern at that radial distance. + + Parameters + ---------- + rx : int + The x-coord of the beam position + ry : int + The y-coord of the beam position + sigma : number + If >0, applying a gaussian smoothing in the radial direction + before returning + + Returns + ------- + background : ndarray + The radial background + """ + # ensure a polar cube and origin exist + assert self.polar is not None, "No polar datacube found!" + assert self.calibration.get_origin() is not None, "No origin found!" + + # get the 1D median background + bkgrd_ma_1d = np.ma.median(self.polar.data[rx, ry], axis=0) + bkgrd_1d = bkgrd_ma_1d.data + bkgrd_1d[bkgrd_ma_1d.mask] = 0 + + # smooth + if sigma > 0: + bkgrd_1d = gaussian_filter1d(bkgrd_1d, sigma) + + # define the 2D cartesian coordinate system + origin = self.calibration.get_origin() + origin = origin[0][rx, ry], origin[1][rx, ry] + qxx, qyy = self.qxx_raw - origin[0], self.qyy_raw - origin[1] + + # get distance qr in polar-elliptical coords + ellipse = self.calibration.get_ellipse() + ellipse = (1, 1, 0) if ellipse is None else ellipse + a, b, theta = ellipse + + qrr = np.sqrt( + ((qxx * np.cos(theta)) + (qyy * np.sin(theta))) ** 2 + + ((qxx * np.sin(theta)) - (qyy * np.cos(theta))) ** 2 / (b / a) ** 2 + ) + + # make an interpolation function and get the 2D background + f = interp1d(self.polar.radial_bins, bkgrd_1d, fill_value="extrapolate") + background = f(qrr) + + # return + return background + + def get_radial_bksb_dp(self, rx, ry, sigma=2): + """ + Computes and returns the diffraction pattern at beam position (rx,ry) + with a radial background subtracted. 
See the docstring for + datacube.get_radial_background for more info. + + Parameters + ---------- + rx : int + The x-coord of the beam position + ry : int + The y-coord of the beam position + sigma : number + If >0, applying a gaussian smoothing in the radial direction + before returning + + Returns + ------- + data : ndarray + The radial background subtracted diffraction image + """ + # get 2D background + background = self.get_radial_bkgrnd(rx, ry, sigma) + + # subtract, zero negative values, return + ans = self.data[rx, ry] - background + ans[ans < 0] = 0 + return ans + + def get_local_ave_dp( + self, + rx, + ry, + radial_bksb=False, + sigma=2, + braggmask=False, + braggvectors=None, + braggmask_radius=None, + ): + """ + Computes and returns the diffraction pattern at beam position (rx,ry) + after weighted local averaging with its nearest-neighbor patterns, + using a 3x3 gaussian kernel for the weightings. + + Parameters + ---------- + rx : int + The x-coord of the beam position + ry : int + The y-coord of the beam position + radial_bksb : bool + It True, apply a radial background subtraction to each pattern + before averaging + sigma : number + If radial_bksb is True, use this sigma for radial smoothing of + the background + braggmask : bool + If True, masks bragg scattering at each scan position before + averaging. `braggvectors` and `braggmask_radius` must be + specified. 
+ braggvectors : BraggVectors + The Bragg vectors to use for masking + braggmask_radius : number + The radius about each Bragg point to mask + + Returns + ------- + data : ndarray + The radial background subtracted diffraction image + """ + # define the kernel + kernel = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]]) / 16.0 + + # get shape and check for valid inputs + nx, ny = self.data.shape[:2] + assert rx >= 0 and rx < nx, "rx outside of scan range" + assert ry >= 0 and ry < ny, "ry outside of scan range" + + # get the subcube, checking for edge patterns + # and modifying the kernel as needed + if rx != 0 and rx != (nx - 1) and ry != 0 and ry != (ny - 1): + subcube = self.data[rx - 1 : rx + 2, ry - 1 : ry + 2, :, :] + elif rx == 0 and ry == 0: + subcube = self.data[:2, :2, :, :] + kernel = kernel[1:, 1:] + elif rx == 0 and ry == (ny - 1): + subcube = self.data[:2, -2:, :, :] + kernel = kernel[1:, :-1] + elif rx == (nx - 1) and ry == 0: + subcube = self.data[-2:, :2, :, :] + kernel = kernel[:-1, 1:] + elif rx == (nx - 1) and ry == (ny - 1): + subcube = self.data[-2:, -2:, :, :] + kernel = kernel[:-1, :-1] + elif rx == 0: + subcube = self.data[:2, ry - 1 : ry + 2, :, :] + kernel = kernel[1:, :] + elif rx == (nx - 1): + subcube = self.data[-2:, ry - 1 : ry + 2, :, :] + kernel = kernel[:-1, :] + elif ry == 0: + subcube = self.data[rx - 1 : rx + 2, :2, :, :] + kernel = kernel[:, 1:] + elif ry == (ny - 1): + subcube = self.data[rx - 1 : rx + 2, -2:, :, :] + kernel = kernel[:, :-1] + else: + raise Exception(f"Invalid (rx,ry) = ({rx},{ry})...") + + # normalize the kernel + kernel /= np.sum(kernel) + + # compute... 
+ + # ...in the simple case + if not (radial_bksb) and not (braggmask): + ans = np.tensordot(subcube, kernel, axes=((0, 1), (0, 1))) + + # ...with radial background subtration + elif radial_bksb and not (braggmask): + # get position of (rx,ry) relative to kernel + _xs = 1 if rx != 0 else 0 + _ys = 1 if ry != 0 else 0 + x0 = rx - _xs + y0 = ry - _ys + # compute + ans = np.zeros(self.Qshape) + for (i, j), w in np.ndenumerate(kernel): + x = x0 + i + y = y0 + j + ans += self.get_radial_bksb_dp(x, y, sigma) * w + + # ...with bragg masking + elif not (radial_bksb) and braggmask: + assert ( + braggvectors is not None + ), "`braggvectors` must be specified or `braggmask` must be turned off!" + assert ( + braggmask_radius is not None + ), "`braggmask_radius` must be specified or `braggmask` must be turned off!" + # get position of (rx,ry) relative to kernel + _xs = 1 if rx != 0 else 0 + _ys = 1 if ry != 0 else 0 + x0 = rx - _xs + y0 = ry - _ys + # compute + ans = np.zeros(self.Qshape) + weights = np.zeros(self.Qshape) + for (i, j), w in np.ndenumerate(kernel): + x = x0 + i + y = y0 + j + mask = self.get_braggmask(braggvectors, x, y, braggmask_radius) + weights_curr = mask * w + ans += self.data[x, y] * weights_curr + weights += weights_curr + # normalize + out = np.full_like(ans, np.nan) + ans_mask = weights > 0 + ans = np.divide(ans, weights, out=out, where=ans_mask) + # make masked array + ans = np.ma.array(data=ans, mask=np.logical_not(ans_mask)) + pass + + # ...with both radial background subtraction and bragg masking + else: + assert ( + braggvectors is not None + ), "`braggvectors` must be specified or `braggmask` must be turned off!" + assert ( + braggmask_radius is not None + ), "`braggmask_radius` must be specified or `braggmask` must be turned off!" 
+ # get position of (rx,ry) relative to kernel + _xs = 1 if rx != 0 else 0 + _ys = 1 if ry != 0 else 0 + x0 = rx - _xs + y0 = ry - _ys + # compute + ans = np.zeros(self.Qshape) + weights = np.zeros(self.Qshape) + for (i, j), w in np.ndenumerate(kernel): + x = x0 + i + y = y0 + j + mask = self.get_braggmask(braggvectors, x, y, braggmask_radius) + weights_curr = mask * w + ans += self.get_radial_bksb_dp(x, y, sigma) * weights_curr + weights += weights_curr + # normalize + out = np.full_like(ans, np.nan) + ans_mask = weights > 0 + ans = np.divide(ans, weights, out=out, where=ans_mask) + # make masked array + ans = np.ma.array(data=ans, mask=np.logical_not(ans_mask)) + pass + + # return + return ans + + def get_braggmask(self, braggvectors, rx, ry, radius): + """ + Returns a boolean mask which is False in a radius of `radius` around + each bragg scattering vector at scan position (rx,ry). + + Parameters + ---------- + braggvectors : BraggVectors + The bragg vectors + rx : int + The x-coord of the beam position + ry : int + The y-coord of the beam position + radius : number + mask pixels about each bragg vector to this radial distance + + Returns + ------- + mask : boolean ndarray + """ + # allocate space + mask = np.ones(self.Qshape, dtype=bool) + # get the vectors + vects = braggvectors.raw[rx, ry] + # loop + for idx in range(len(vects.data)): + qr = np.hypot(self.qxx_raw - vects.qx[idx], self.qyy_raw - vects.qy[idx]) + mask = np.logical_and(mask, qr > radius) + return mask +>>>>>>> truth/dev diff --git a/py4DSTEM/datacube/diskdetection/diskdetection_cuda.py b/py4DSTEM/datacube/diskdetection/diskdetection_cuda.py index 870b8303d..ddea4d9ad 100644 --- a/py4DSTEM/datacube/diskdetection/diskdetection_cuda.py +++ b/py4DSTEM/datacube/diskdetection/diskdetection_cuda.py @@ -156,9 +156,11 @@ def find_Bragg_disks_CUDA( patt_idx = batch_idx * batch_size + subbatch_idx rx, ry = np.unravel_index(patt_idx, (datacube.R_Nx, datacube.R_Ny)) batched_subcube[subbatch_idx, :, :] = 
cp.array( - datacube.data[rx, ry, :, :] - if filter_function is None - else filter_function(datacube.data[rx, ry, :, :]), + ( + datacube.data[rx, ry, :, :] + if filter_function is None + else filter_function(datacube.data[rx, ry, :, :]) + ), dtype=cp.float32, ) diff --git a/py4DSTEM/io/importfile.py b/py4DSTEM/io/importfile.py index 20a3759a2..ff3d1c37c 100644 --- a/py4DSTEM/io/importfile.py +++ b/py4DSTEM/io/importfile.py @@ -75,7 +75,7 @@ def import_file( "gatan_K2_bin", "mib", "arina", - "abTEM" + "abTEM", # "kitware_counted", ], "Error: filetype not recognized" diff --git a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/io.py b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/io.py index e1b7ab241..ddbea9005 100644 --- a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/io.py +++ b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/io.py @@ -361,7 +361,7 @@ def Array_to_h5(array, group): data = grp.create_dataset( "data", shape=array.data.shape, - data=array.data + data=array.data, # dtype = type(array.data) ) data.attrs.create( diff --git a/py4DSTEM/process/calibration/origin.py b/py4DSTEM/process/calibration/origin.py index 91a08791e..957d1e40e 100644 --- a/py4DSTEM/process/calibration/origin.py +++ b/py4DSTEM/process/calibration/origin.py @@ -2,8 +2,10 @@ import functools import numpy as np +import matplotlib.pyplot as plt from scipy.ndimage import gaussian_filter from scipy.optimize import leastsq +import matplotlib.pyplot as plt from emdfile import tqdmnd, PointListArray from py4DSTEM.datacube import DataCube @@ -309,58 +311,122 @@ def get_origin( return qx0, qy0, mask -def get_origin_single_dp_beamstop(DP: np.ndarray, mask: np.ndarray, **kwargs): +def get_origin_friedel( + datacube: DataCube, + mask=None, + upsample_factor=1, + device="cpu", + return_cpu=True, +): """ - Find the origin for a single diffraction pattern, assuming there is a beam stop. 
- - Args: - DP (np array): diffraction pattern - mask (np array): boolean mask which is False under the beamstop and True - in the diffraction pattern. One approach to generating this mask - is to apply a suitable threshold on the average diffraction pattern - and use binary opening/closing to remove and holes - - Returns: - qx0, qy0 (tuple) measured center position of diffraction pattern + Fit the origin for each diffraction pattern, with or without a beam stop. + The method we have developed here is a heavily modified version of masked + cross correlation, where we use Friedel symmetry of the diffraction pattern + to find the common center. + + More details about how the correlation step can be found in: + https://doi.org/10.1109/TIP.2011.2181402 + + Parameters + ---------- + datacube: (DataCube) + The 4D dataset. + mask: (np array, optional) + Boolean mask which is False under the beamstop and True + in the diffraction pattern. One approach to generating this mask + is to apply a suitable threshold on the average diffraction pattern + and use binary opening/closing to remove any holes. + If no mask is provided, this method will likely not work with a beamstop. + upsample_factor: (int) + Upsample factor for subpixel fitting of the image shifts. + device: string + 'cpu' or 'gpu' to select device + return_cpu: bool + Return arrays on cpu. 
+ + + Returns + ------- + qx0, qy0 + (tuple of np arrays) measured center position of each diffraction pattern """ - imCorr = np.real( - np.fft.ifft2( - np.fft.fft2(DP * mask) - * np.conj(np.fft.fft2(np.rot90(DP, 2) * np.rot90(mask, 2))) + # Select device + if device == "cpu": + xp = np + elif device == "gpu": + xp = cp + + # init measurement arrays + qx0 = xp.zeros(datacube.data.shape[:2]) + qy0 = xp.zeros_like(qx0) + + # pad the mask + if mask is not None: + mask = xp.asarray(mask).astype("float") + mask_pad = xp.pad( + mask, + ((0, datacube.data.shape[2]), (0, datacube.data.shape[3])), + constant_values=(1.0, 1.0), ) - ) - - xp, yp = np.unravel_index(np.argmax(imCorr), imCorr.shape) - - dx = ((xp + DP.shape[0] / 2) % DP.shape[0]) - DP.shape[0] / 2 - dy = ((yp + DP.shape[1] / 2) % DP.shape[1]) - DP.shape[1] / 2 - - return (DP.shape[0] + dx) / 2, (DP.shape[1] + dy) / 2 + M = xp.fft.fft2(mask_pad) + # main loop over all probe positions + for rx, ry in tqdmnd(datacube.R_Nx, datacube.R_Ny): + if mask is None: + # pad image + im_xp = xp.asarray(datacube.data[rx, ry]) + im = xp.pad( + im_xp, + ((0, datacube.data.shape[2]), (0, datacube.data.shape[3])), + ) + G = xp.fft.fft2(im) -def get_origin_beamstop(datacube: DataCube, mask: np.ndarray, **kwargs): - """ - Find the origin for each diffraction pattern, assuming there is a beam stop. + # Cross correlation of masked image with its inverse + cc = xp.real(xp.fft.ifft2(G**2)) - Args: - datacube (DataCube) - mask (np array): boolean mask which is False under the beamstop and True - in the diffraction pattern. 
One approach to generating this mask - is to apply a suitable threshold on the average diffraction pattern - and use binary opening/closing to remove any holes + else: + im_xp = xp.asarray(datacube.data[rx, ry, :, :]) + im = xp.pad( + im_xp, + ((0, datacube.data.shape[2]), (0, datacube.data.shape[3])), + ) - Returns: - qx0, qy0 (tuple of np arrays) measured center position of each diffraction pattern - """ + # Masked cross correlation of masked image with its inverse + term1 = xp.real(xp.fft.ifft2(xp.fft.fft2(im) ** 2) * xp.fft.ifft2(M**2)) + term2 = xp.real(xp.fft.ifft2(xp.fft.fft2(im**2) * M)) + term3 = xp.real(xp.fft.ifft2(xp.fft.fft2(im * mask_pad))) + cc = (term1 - term3) / (term2 - term3) - qx0 = np.zeros(datacube.data.shape[:2]) - qy0 = np.zeros_like(qx0) + # get correlation peak + x, y = xp.unravel_index(xp.argmax(cc), im.shape) - for rx, ry in tqdmnd(datacube.R_Nx, datacube.R_Ny): - x, y = get_origin_single_dp_beamstop(datacube.data[rx, ry, :, :], mask) + # half pixel upsampling / parabola subpixel fitting + dx = (cc[x + 1, y] - cc[x - 1, y]) / ( + 4.0 * cc[x, y] - 2.0 * cc[x + 1, y] - 2.0 * cc[x - 1, y] + ) + dy = (cc[x, y + 1] - cc[x, y - 1]) / ( + 4.0 * cc[x, y] - 2.0 * cc[x, y + 1] - 2.0 * cc[x, y - 1] + ) + # xp += np.round(dx*2.0)/2.0 + # yp += np.round(dy*2.0)/2.0 + x = x.astype("float") + dx + y = y.astype("float") + dy + + # upsample peak if needed + if upsample_factor > 1: + x, y = upsampled_correlation( + xp.fft.fft2(cc), + upsampleFactor=upsample_factor, + xyShift=xp.array((x, y)), + device=device, + ) - qx0[rx, ry] = x - qy0[rx, ry] = y + # Correlation peak, moved to image center shift + qx0[rx, ry] = (x / 2) % datacube.data.shape[2] + qy0[rx, ry] = (y / 2) % datacube.data.shape[3] - return qx0, qy0 + if return_cpu: + return copy_to_device(qx0), copy_to_device(qy0) + else: + return qx0, qy0 diff --git a/py4DSTEM/process/diffraction/WK_scattering_factors.py b/py4DSTEM/process/diffraction/WK_scattering_factors.py index 4c39a44f2..a816c770e 
100644 --- a/py4DSTEM/process/diffraction/WK_scattering_factors.py +++ b/py4DSTEM/process/diffraction/WK_scattering_factors.py @@ -221,9 +221,7 @@ def RI1(BI, BJ, G): ri1[sub] = np.pi * (BI * np.log((BI + BJ) / BI) + BJ * np.log((BI + BJ) / BJ)) sub = np.logical_and(eps <= 0.1, G > 0.0) - temp = 0.5 * BI**2 * np.log(BI / (BI + BJ)) + 0.5 * BJ**2 * np.log( - BJ / (BI + BJ) - ) + temp = 0.5 * BI**2 * np.log(BI / (BI + BJ)) + 0.5 * BJ**2 * np.log(BJ / (BI + BJ)) temp += 0.75 * (BI**2 + BJ**2) - 0.25 * (BI + BJ) ** 2 temp -= 0.5 * (BI - BJ) ** 2 ri1[sub] += np.pi * G[sub] ** 2 * temp diff --git a/py4DSTEM/process/diffraction/crystal_viz.py b/py4DSTEM/process/diffraction/crystal_viz.py index da016b3ed..47df2e6ca 100644 --- a/py4DSTEM/process/diffraction/crystal_viz.py +++ b/py4DSTEM/process/diffraction/crystal_viz.py @@ -454,8 +454,7 @@ def plot_scattering_intensity( int_sf_plot = calc_1D_profile( k, self.g_vec_leng, - (self.struct_factors_int**int_power_scale) - * (self.g_vec_leng**k_power_scale), + (self.struct_factors_int**int_power_scale) * (self.g_vec_leng**k_power_scale), remove_origin=True, k_broadening=k_broadening, int_scale=int_scale, diff --git a/py4DSTEM/process/fit/fit.py b/py4DSTEM/process/fit/fit.py index 9973ff79f..5c2d56a3c 100644 --- a/py4DSTEM/process/fit/fit.py +++ b/py4DSTEM/process/fit/fit.py @@ -169,8 +169,7 @@ def polar_gaussian_2D( # t2 = np.min(np.vstack([t,1-t])) t2 = np.square(t - mu_t) return ( - I0 * np.exp(-(t2 / (2 * sigma_t**2) + (q - mu_q) ** 2 / (2 * sigma_q**2))) - + C + I0 * np.exp(-(t2 / (2 * sigma_t**2) + (q - mu_q) ** 2 / (2 * sigma_q**2))) + C ) diff --git a/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py b/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py index 7249a9064..60cec9742 100644 --- a/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py +++ b/py4DSTEM/process/phase/magnetic_ptychographic_tomography.py @@ -1201,20 +1201,20 @@ def reconstruct( # position correction if not fix_positions and a0 
> 0: - self._positions_px_all[ - batch_indices - ] = self._position_correction( - object_sliced, - vectorized_patch_indices_row, - vectorized_patch_indices_col, - shifted_probes, - overlap, - amplitudes_device, - positions_px, - positions_px_initial, - positions_step_size, - max_position_update_distance, - max_position_total_distance, + self._positions_px_all[batch_indices] = ( + self._position_correction( + object_sliced, + vectorized_patch_indices_row, + vectorized_patch_indices_col, + shifted_probes, + overlap, + amplitudes_device, + positions_px, + positions_px_initial, + positions_step_size, + max_position_update_distance, + max_position_total_distance, + ) ) measurement_error += batch_error @@ -1320,10 +1320,12 @@ def reconstruct( butterworth_order=butterworth_order, object_positivity=object_positivity, shrinkage_rad=shrinkage_rad, - object_mask=self._object_fov_mask_inverse - if fix_potential_baseline - and self._object_fov_mask_inverse.sum() > 0 - else None, + object_mask=( + self._object_fov_mask_inverse + if fix_potential_baseline + and self._object_fov_mask_inverse.sum() > 0 + else None + ), tv_denoise=tv_denoise and tv_denoise_weights is not None, tv_denoise_weights=tv_denoise_weights, tv_denoise_inner_iter=tv_denoise_inner_iter, @@ -1353,10 +1355,12 @@ def reconstruct( butterworth_order=butterworth_order, object_positivity=object_positivity, shrinkage_rad=shrinkage_rad, - object_mask=self._object_fov_mask_inverse - if fix_potential_baseline - and self._object_fov_mask_inverse.sum() > 0 - else None, + object_mask=( + self._object_fov_mask_inverse + if fix_potential_baseline + and self._object_fov_mask_inverse.sum() > 0 + else None + ), tv_denoise=tv_denoise and tv_denoise_weights is not None, tv_denoise_weights=tv_denoise_weights, tv_denoise_inner_iter=tv_denoise_inner_iter, diff --git a/py4DSTEM/process/phase/magnetic_ptychography.py b/py4DSTEM/process/phase/magnetic_ptychography.py index d718b1a9e..9be8144f4 100644 --- 
a/py4DSTEM/process/phase/magnetic_ptychography.py +++ b/py4DSTEM/process/phase/magnetic_ptychography.py @@ -1447,20 +1447,20 @@ def reconstruct( # position correction if not fix_positions and a0 > 0: - self._positions_px_all[ - batch_indices - ] = self._position_correction( - self._object, - vectorized_patch_indices_row, - vectorized_patch_indices_col, - shifted_probes, - overlap, - amplitudes_device, - positions_px, - positions_px_initial, - positions_step_size, - max_position_update_distance, - max_position_total_distance, + self._positions_px_all[batch_indices] = ( + self._position_correction( + self._object, + vectorized_patch_indices_row, + vectorized_patch_indices_col, + shifted_probes, + overlap, + amplitudes_device, + positions_px, + positions_px_initial, + positions_step_size, + max_position_update_distance, + max_position_total_distance, + ) ) measurement_error += batch_error @@ -1555,10 +1555,12 @@ def reconstruct( tv_denoise_inner_iter=tv_denoise_inner_iter, object_positivity=object_positivity, shrinkage_rad=shrinkage_rad, - object_mask=self._object_fov_mask_inverse - if fix_potential_baseline - and self._object_fov_mask_inverse.sum() > 0 - else None, + object_mask=( + self._object_fov_mask_inverse + if fix_potential_baseline + and self._object_fov_mask_inverse.sum() > 0 + else None + ), pure_phase_object=pure_phase_object and self._object_type == "complex", ) @@ -1588,10 +1590,12 @@ def reconstruct( tv_denoise_inner_iter=tv_denoise_inner_iter, object_positivity=object_positivity, shrinkage_rad=shrinkage_rad, - object_mask=self._object_fov_mask_inverse - if fix_potential_baseline - and self._object_fov_mask_inverse.sum() > 0 - else None, + object_mask=( + self._object_fov_mask_inverse + if fix_potential_baseline + and self._object_fov_mask_inverse.sum() > 0 + else None + ), pure_phase_object=pure_phase_object and self._object_type == "complex", ) diff --git a/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py 
b/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py index 47dd67dd3..0a856fe85 100644 --- a/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py +++ b/py4DSTEM/process/phase/mixedstate_multislice_ptychography.py @@ -1047,16 +1047,21 @@ def reconstruct( butterworth_order=butterworth_order, kz_regularization_filter=kz_regularization_filter and kz_regularization_gamma is not None, - kz_regularization_gamma=kz_regularization_gamma[a0] - if kz_regularization_gamma is not None - and isinstance(kz_regularization_gamma, np.ndarray) - else kz_regularization_gamma, + kz_regularization_gamma=( + kz_regularization_gamma[a0] + if kz_regularization_gamma is not None + and isinstance(kz_regularization_gamma, np.ndarray) + else kz_regularization_gamma + ), identical_slices=identical_slices, object_positivity=object_positivity, shrinkage_rad=shrinkage_rad, - object_mask=self._object_fov_mask_inverse - if fix_potential_baseline and self._object_fov_mask_inverse.sum() > 0 - else None, + object_mask=( + self._object_fov_mask_inverse + if fix_potential_baseline + and self._object_fov_mask_inverse.sum() > 0 + else None + ), pure_phase_object=pure_phase_object and self._object_type == "complex", tv_denoise_chambolle=tv_denoise_chambolle and tv_denoise_weight_chambolle is not None, diff --git a/py4DSTEM/process/phase/mixedstate_ptychography.py b/py4DSTEM/process/phase/mixedstate_ptychography.py index 6fbb72b5d..72912a13c 100644 --- a/py4DSTEM/process/phase/mixedstate_ptychography.py +++ b/py4DSTEM/process/phase/mixedstate_ptychography.py @@ -941,9 +941,12 @@ def reconstruct( tv_denoise_inner_iter=tv_denoise_inner_iter, object_positivity=object_positivity, shrinkage_rad=shrinkage_rad, - object_mask=self._object_fov_mask_inverse - if fix_potential_baseline and self._object_fov_mask_inverse.sum() > 0 - else None, + object_mask=( + self._object_fov_mask_inverse + if fix_potential_baseline + and self._object_fov_mask_inverse.sum() > 0 + else None + ), 
pure_phase_object=pure_phase_object and self._object_type == "complex", ) diff --git a/py4DSTEM/process/phase/multislice_ptychography.py b/py4DSTEM/process/phase/multislice_ptychography.py index 03e636f57..2b35d0673 100644 --- a/py4DSTEM/process/phase/multislice_ptychography.py +++ b/py4DSTEM/process/phase/multislice_ptychography.py @@ -1030,9 +1030,12 @@ def reconstruct( identical_slices=identical_slices, object_positivity=object_positivity, shrinkage_rad=shrinkage_rad, - object_mask=self._object_fov_mask_inverse - if fix_potential_baseline and self._object_fov_mask_inverse.sum() > 0 - else None, + object_mask=( + self._object_fov_mask_inverse + if fix_potential_baseline + and self._object_fov_mask_inverse.sum() > 0 + else None + ), pure_phase_object=pure_phase_object and self._object_type == "complex", tv_denoise_chambolle=tv_denoise_chambolle and tv_denoise_weight_chambolle is not None, diff --git a/py4DSTEM/process/phase/parallax.py b/py4DSTEM/process/phase/parallax.py index a668f285d..1061ee95a 100644 --- a/py4DSTEM/process/phase/parallax.py +++ b/py4DSTEM/process/phase/parallax.py @@ -2194,16 +2194,16 @@ def score_CTF(coefs): measured_shifts_sx = xp.zeros( self._region_of_interest_shape, dtype=xp.float32 ) - measured_shifts_sx[ - self._xy_inds[:, 0], self._xy_inds[:, 1] - ] = self._xy_shifts_Ang[:, 0] + measured_shifts_sx[self._xy_inds[:, 0], self._xy_inds[:, 1]] = ( + self._xy_shifts_Ang[:, 0] + ) measured_shifts_sy = xp.zeros( self._region_of_interest_shape, dtype=xp.float32 ) - measured_shifts_sy[ - self._xy_inds[:, 0], self._xy_inds[:, 1] - ] = self._xy_shifts_Ang[:, 1] + measured_shifts_sy[self._xy_inds[:, 0], self._xy_inds[:, 1]] = ( + self._xy_shifts_Ang[:, 1] + ) fitted_shifts = ( xp.tensordot(gradients, xp.array(self._aberrations_coefs), axes=1) @@ -2214,16 +2214,16 @@ def score_CTF(coefs): fitted_shifts_sx = xp.zeros( self._region_of_interest_shape, dtype=xp.float32 ) - fitted_shifts_sx[ - self._xy_inds[:, 0], self._xy_inds[:, 1] - ] = 
fitted_shifts[:, 0] + fitted_shifts_sx[self._xy_inds[:, 0], self._xy_inds[:, 1]] = ( + fitted_shifts[:, 0] + ) fitted_shifts_sy = xp.zeros( self._region_of_interest_shape, dtype=xp.float32 ) - fitted_shifts_sy[ - self._xy_inds[:, 0], self._xy_inds[:, 1] - ] = fitted_shifts[:, 1] + fitted_shifts_sy[self._xy_inds[:, 0], self._xy_inds[:, 1]] = ( + fitted_shifts[:, 1] + ) max_shift = xp.max( xp.array( @@ -2520,8 +2520,7 @@ def aberration_correct( SNR_inv = ( xp.sqrt( 1 - + (kra2**k_info_power) - / ((k_info_limit) ** (2 * k_info_power)) + + (kra2**k_info_power) / ((k_info_limit) ** (2 * k_info_power)) ) / Wiener_signal_noise_ratio ) diff --git a/py4DSTEM/process/phase/parameter_optimize.py b/py4DSTEM/process/phase/parameter_optimize.py index aa369872d..652e1046e 100644 --- a/py4DSTEM/process/phase/parameter_optimize.py +++ b/py4DSTEM/process/phase/parameter_optimize.py @@ -186,7 +186,7 @@ def evaluation_callback(ptycho): pbar.update(1) error_metric(ptycho) - self._grid_search_function = self._get_optimization_function( + grid_search_function = self._get_optimization_function( self._reconstruction_type, self._parameter_list, self._init_static_args, @@ -200,7 +200,7 @@ def evaluation_callback(ptycho): evaluation_callback, ) - grid_search_res = list(map(self._grid_search_function, params_grid)) + grid_search_res = list(map(grid_search_function, params_grid)) pbar.close() if plot_reconstructed_objects: @@ -290,7 +290,7 @@ def optimize( error_metric = self._get_error_metric(error_metric) - self._optimization_function = self._get_optimization_function( + optimization_function = self._get_optimization_function( self._reconstruction_type, self._parameter_list, self._init_static_args, @@ -312,22 +312,37 @@ def optimize( def callback(*args, **kwargs): pbar.update(1) - self._skopt_result = gp_minimize( - self._optimization_function, - self._parameter_list, - n_calls=n_calls, - n_initial_points=n_initial_points, - x0=self._x0, - callback=callback, - **skopt_kwargs, - ) + try: + 
self._skopt_result = gp_minimize( + optimization_function, + self._parameter_list, + n_calls=n_calls, + n_initial_points=n_initial_points, + x0=self._x0, + callback=callback, + **skopt_kwargs, + ) - print("Optimized parameters:") - for p, x in zip(self._parameter_list, self._skopt_result.x): - print(f"{p.name}: {x}") + # Remove the optimization result's reference to the function, as it potentially contains a + # copy of the ptycho object + del self._skopt_result["fun"] - # Finish the tqdm progressbar so subsequent things behave nicely - pbar.close() + # If using the GPU, free some cached stuff + if self._init_args.get("device", "cpu") == "gpu": + import cupy as cp + + cp.get_default_memory_pool().free_all_blocks() + cp.get_default_pinned_memory_pool().free_all_blocks() + from cupy.fft.config import get_plan_cache + + get_plan_cache().clear() + + print("Optimized parameters:") + for p, x in zip(self._parameter_list, self._skopt_result.x): + print(f"{p.name}: {x}") + finally: + # close the pbar gracefully on interrupt + pbar.close() return self @@ -644,6 +659,7 @@ def f(**kwargs): def _set_optimizer_defaults( self, verbose=False, + clear_fft_cache=False, plot_center_of_mass=False, plot_rotation=False, plot_probe_overlaps=False, @@ -655,6 +671,7 @@ def _set_optimizer_defaults( Set all of the verbose and plotting to False, allowing for user-overwrite. 
""" self._init_static_args["verbose"] = verbose + self._init_static_args["clear_fft_cache"] = clear_fft_cache self._preprocess_static_args["plot_center_of_mass"] = plot_center_of_mass self._preprocess_static_args["plot_rotation"] = plot_rotation diff --git a/py4DSTEM/process/phase/phase_base_class.py b/py4DSTEM/process/phase/phase_base_class.py index f5b2e3cdc..014e39074 100644 --- a/py4DSTEM/process/phase/phase_base_class.py +++ b/py4DSTEM/process/phase/phase_base_class.py @@ -997,17 +997,21 @@ def _solve_for_center_of_mass_relative_rotation( if _rotation_best_transpose: ax.plot( rotation_angles_deg, - asnumpy(rotation_div_transpose) - if maximize_divergence - else asnumpy(rotation_curl_transpose), + ( + asnumpy(rotation_div_transpose) + if maximize_divergence + else asnumpy(rotation_curl_transpose) + ), label="CoM after transpose", ) else: ax.plot( rotation_angles_deg, - asnumpy(rotation_div) - if maximize_divergence - else asnumpy(rotation_curl), + ( + asnumpy(rotation_div) + if maximize_divergence + else asnumpy(rotation_curl) + ), label="CoM", ) @@ -1159,16 +1163,20 @@ def _solve_for_center_of_mass_relative_rotation( ax.plot( rotation_angles_deg, - asnumpy(rotation_div) - if maximize_divergence - else asnumpy(rotation_curl), + ( + asnumpy(rotation_div) + if maximize_divergence + else asnumpy(rotation_curl) + ), label="CoM", ) ax.plot( rotation_angles_deg, - asnumpy(rotation_div_transpose) - if maximize_divergence - else asnumpy(rotation_curl_transpose), + ( + asnumpy(rotation_div_transpose) + if maximize_divergence + else asnumpy(rotation_curl_transpose) + ), label="CoM after transpose", ) y_r = ax.get_ylim() diff --git a/py4DSTEM/process/phase/ptychographic_methods.py b/py4DSTEM/process/phase/ptychographic_methods.py index 65f091f0f..77bd4ee14 100644 --- a/py4DSTEM/process/phase/ptychographic_methods.py +++ b/py4DSTEM/process/phase/ptychographic_methods.py @@ -345,9 +345,7 @@ def _precompute_propagator_arrays( propagators[i] = xp.exp( 1.0j * (-(kx**2)[:, 
None] * np.pi * wavelength * dz) ) - propagators[i] *= xp.exp( - 1.0j * (-(ky**2)[None] * np.pi * wavelength * dz) - ) + propagators[i] *= xp.exp(1.0j * (-(ky**2)[None] * np.pi * wavelength * dz)) if theta_x is not None: propagators[i] *= xp.exp( diff --git a/py4DSTEM/process/phase/ptychographic_tomography.py b/py4DSTEM/process/phase/ptychographic_tomography.py index b4a29fa5d..f5d825f55 100644 --- a/py4DSTEM/process/phase/ptychographic_tomography.py +++ b/py4DSTEM/process/phase/ptychographic_tomography.py @@ -1093,20 +1093,20 @@ def reconstruct( # position correction if not fix_positions: - self._positions_px_all[ - batch_indices - ] = self._position_correction( - object_sliced, - vectorized_patch_indices_row, - vectorized_patch_indices_col, - shifted_probes, - overlap, - amplitudes_device, - positions_px, - positions_px_initial, - positions_step_size, - max_position_update_distance, - max_position_total_distance, + self._positions_px_all[batch_indices] = ( + self._position_correction( + object_sliced, + vectorized_patch_indices_row, + vectorized_patch_indices_col, + shifted_probes, + overlap, + amplitudes_device, + positions_px, + positions_px_initial, + positions_step_size, + max_position_update_distance, + max_position_total_distance, + ) ) measurement_error += batch_error @@ -1206,10 +1206,12 @@ def reconstruct( butterworth_order=butterworth_order, object_positivity=object_positivity, shrinkage_rad=shrinkage_rad, - object_mask=self._object_fov_mask_inverse - if fix_potential_baseline - and self._object_fov_mask_inverse.sum() > 0 - else None, + object_mask=( + self._object_fov_mask_inverse + if fix_potential_baseline + and self._object_fov_mask_inverse.sum() > 0 + else None + ), tv_denoise=tv_denoise and tv_denoise_weights is not None, tv_denoise_weights=tv_denoise_weights, tv_denoise_inner_iter=tv_denoise_inner_iter, @@ -1236,10 +1238,12 @@ def reconstruct( butterworth_order=butterworth_order, object_positivity=object_positivity, shrinkage_rad=shrinkage_rad, - 
object_mask=self._object_fov_mask_inverse - if fix_potential_baseline - and self._object_fov_mask_inverse.sum() > 0 - else None, + object_mask=( + self._object_fov_mask_inverse + if fix_potential_baseline + and self._object_fov_mask_inverse.sum() > 0 + else None + ), tv_denoise=tv_denoise and tv_denoise_weights is not None, tv_denoise_weights=tv_denoise_weights, tv_denoise_inner_iter=tv_denoise_inner_iter, diff --git a/py4DSTEM/process/phase/ptychographic_visualizations.py b/py4DSTEM/process/phase/ptychographic_visualizations.py index 58dd224cf..dd1732ae0 100644 --- a/py4DSTEM/process/phase/ptychographic_visualizations.py +++ b/py4DSTEM/process/phase/ptychographic_visualizations.py @@ -384,9 +384,11 @@ def _visualize_all_iterations( grid = ImageGrid( fig, spec[0], - nrows_ncols=(1, iterations_grid[1]) - if (plot_probe or plot_fourier_probe) - else iterations_grid, + nrows_ncols=( + (1, iterations_grid[1]) + if (plot_probe or plot_fourier_probe) + else iterations_grid + ), axes_pad=(0.75, 0.5) if cbar else 0.5, cbar_mode="each" if cbar else None, cbar_pad="2.5%" if cbar else None, diff --git a/py4DSTEM/process/phase/singleslice_ptychography.py b/py4DSTEM/process/phase/singleslice_ptychography.py index 71fe65cd7..1e4e75340 100644 --- a/py4DSTEM/process/phase/singleslice_ptychography.py +++ b/py4DSTEM/process/phase/singleslice_ptychography.py @@ -916,9 +916,12 @@ def reconstruct( tv_denoise_inner_iter=tv_denoise_inner_iter, object_positivity=object_positivity, shrinkage_rad=shrinkage_rad, - object_mask=self._object_fov_mask_inverse - if fix_potential_baseline and self._object_fov_mask_inverse.sum() > 0 - else None, + object_mask=( + self._object_fov_mask_inverse + if fix_potential_baseline + and self._object_fov_mask_inverse.sum() > 0 + else None + ), pure_phase_object=pure_phase_object and self._object_type == "complex", ) diff --git a/py4DSTEM/process/phase/utils.py b/py4DSTEM/process/phase/utils.py index c5cad6832..6f2c57fad 100644 --- 
a/py4DSTEM/process/phase/utils.py +++ b/py4DSTEM/process/phase/utils.py @@ -203,9 +203,7 @@ def evaluate_gaussian_envelope( self, alpha: Union[float, np.ndarray] ) -> Union[float, np.ndarray]: xp = self._xp - return xp.exp( - -0.5 * self._gaussian_spread**2 * alpha**2 / self._wavelength**2 - ) + return xp.exp(-0.5 * self._gaussian_spread**2 * alpha**2 / self._wavelength**2) def evaluate_spatial_envelope( self, alpha: Union[float, np.ndarray], phi: Union[float, np.ndarray] diff --git a/py4DSTEM/process/polar/polar_analysis.py b/py4DSTEM/process/polar/polar_analysis.py index 7df00a235..c62d1258d 100644 --- a/py4DSTEM/process/polar/polar_analysis.py +++ b/py4DSTEM/process/polar/polar_analysis.py @@ -584,8 +584,12 @@ def plot_pdf( def calculate_FEM_local( self, - figsize=(8, 6), + use_median=False, + plot_normalized_variance=True, + figsize=(8, 4), + return_values=False, returnfig=False, + progress_bar=True, ): """ Calculate fluctuation electron microscopy (FEM) statistics, including radial mean, @@ -596,18 +600,293 @@ def calculate_FEM_local( -------- self: PolarDatacube Polar datacube used for measuring FEM properties. + use_median: Bool + Use median instead of mean for statistics. 
Returns -------- - radial_avg: np.array - Average radial intensity - radial_var: np.array - Variance in the radial dimension + local_radial_mean: np.array + Average radial intensity of each probe position + local_radial_var: np.array + Variance in the radial dimension of each probe position + """ + # init radial data arrays + self.local_radial_mean = np.zeros( + ( + self._datacube.shape[0], + self._datacube.shape[1], + self.polar_shape[1], + ) + ) + self.local_radial_var = np.zeros( + ( + self._datacube.shape[0], + self._datacube.shape[1], + self.polar_shape[1], + ) + ) + + # Compute the radial mean and standard deviation for each probe position + for rx, ry in tqdmnd( + self._datacube.shape[0], + self._datacube.shape[1], + desc="Radial statistics", + unit=" probe positions", + disable=not progress_bar, + ): + im = self.data[rx, ry] + + if use_median: + im_mean = np.ma.median(im, axis=0) + im_var = np.ma.median((im - im_mean) ** 2, axis=0) + else: + im_mean = np.ma.mean(im, axis=0) + im_var = np.ma.mean((im - im_mean) ** 2, axis=0) + + self.local_radial_mean[rx, ry] = im_mean + self.local_radial_var[rx, ry] = im_var + + if plot_normalized_variance: + fig, ax = plt.subplots(figsize=figsize) + + sig = self.local_radial_var / self.local_radial_mean**2 + if use_median: + sig_plot = np.median(sig, axis=(0, 1)) + else: + sig_plot = np.mean(sig, axis=(0, 1)) + + ax.plot( + self.qq, + sig_plot, + ) + ax.set_xlabel( + "Scattering Vector (" + self.calibration.get_Q_pixel_units() + ")" + ) + ax.set_ylabel("Normalized Variance") + ax.set_xlim((self.qq[0], self.qq[-1])) + + if return_values: + if returnfig: + return self.local_radial_mean, self.local_radial_var, fig, ax + else: + return self.local_radial_mean, self.local_radial_var + else: + if returnfig: + return fig, ax + + +def calculate_annular_symmetry( + self, + max_symmetry=12, + mask_realspace=None, + plot_result=False, + figsize=(8, 4), + return_symmetry_map=False, + progress_bar=True, +): """ + This function 
calculates radial symmetry of diffraction patterns, typically applied + to amorphous scattering, but it can also be used for crystalline Bragg diffraction. - pass + Parameters + -------- + self: PolarDatacube + Polar transformed datacube + max_symmetry: int + Symmetry orders will be computed from 1 to max_symmetry for n-fold symmetry orders. + mask_realspace: np.array + Boolean mask, symmetries will only be computed at probe positions where mask is True. + plot_result: bool + Plot the resulting array + figsize: (float, float) + Size of the plot. + return_symmetry_map: bool + Set to true to return the symmetry array. + progress_bar: bool + Show progress bar during calculation. + + Returns + -------- + annular_symmetry: np.array + Array with annular symmetry magnitudes, with shape [max_symmetry, num_radial_bins] + + """ + + # Initialize outputs + self.annular_symmetry_max = max_symmetry + self.annular_symmetry = np.zeros( + ( + self.data_raw.shape[0], + self.data_raw.shape[1], + max_symmetry, + self.polar_shape[1], + ) + ) + + # Loop over all probe positions + for rx, ry in tqdmnd( + self._datacube.shape[0], + self._datacube.shape[1], + desc="Annular symmetry", + unit=" probe positions", + disable=not progress_bar, + ): + # Get polar transformed image + im = self.transform( + self.data_raw.data[rx, ry], + ) + polar_im = np.ma.getdata(im) + polar_mask = np.ma.getmask(im) + polar_im[polar_mask] = 0 + polar_mask = np.logical_not(polar_mask) + + # Calculate normalized correlation of polar image along angular direction (axis = 0) + polar_corr = np.real( + np.fft.ifft( + np.abs( + np.fft.fft( + polar_im, + axis=0, + ) + ) + ** 2, + axis=0, + ), + ) + polar_corr_norm = ( + np.sum( + polar_im, + axis=0, + ) + ** 2 + ) + sub = polar_corr_norm > 0 + polar_corr[:, sub] /= polar_corr_norm[ + sub + ] # gets rid of divide by 0 (False near center) + polar_corr[:, sub] -= 1 + + # Calculate normalized correlation of polar mask along angular direction (axis = 0) + mask_corr = np.real( 
+ np.fft.ifft( + np.abs( + np.fft.fft( + polar_mask.astype("float"), + axis=0, + ) + ) + ** 2, + axis=0, + ), + ) + mask_corr_norm = ( + np.sum( + polar_mask.astype("float"), + axis=0, + ) + ** 2 + ) + sub = mask_corr_norm > 0 + mask_corr[:, sub] /= mask_corr_norm[ + sub + ] # gets rid of divide by 0 (False near center) + mask_corr[:, sub] -= 1 + + # Normalize polar correlation by mask correlation (beam stop removal) + sub = np.abs(mask_corr) > 0 + polar_corr[sub] -= mask_corr[sub] + + # Measure symmetry + self.annular_symmetry[rx, ry, :, :] = np.abs(np.fft.fft(polar_corr, axis=0))[ + 1 : max_symmetry + 1 + ] + + if plot_result: + fig, ax = plt.subplots(figsize=figsize) + ax.imshow( + np.mean(self.annular_symmetry, axis=(0, 1)), + aspect="auto", + extent=[ + self.qq[0], + self.qq[-1], + max_symmetry, + 0, + ], + ) + ax.set_yticks( + np.arange(max_symmetry) + 0.5, + range(1, max_symmetry + 1), + ) + ax.set_xlabel("Scattering angle (1/Å)") + ax.set_ylabel("Symmetry Order") + + if return_symmetry_map: + return self.annular_symmetry + + +def plot_annular_symmetry( + self, + symmetry_orders=None, + plot_std=False, + normalize_by_mean=False, + cmap="turbo", + vmin=0.01, + vmax=0.99, + figsize=(8, 4), +): + """ + Plot the symmetry orders + """ + + if symmetry_orders is None: + symmetry_orders = np.arange(1, self.annular_symmetry_max + 1) + else: + symmetry_orders = np.array(symmetry_orders) + + # plotting image + if plot_std: + im_plot = np.std( + self.annular_symmetry, + axis=(0, 1), + )[symmetry_orders - 1, :] + else: + im_plot = np.mean( + self.annular_symmetry, + axis=(0, 1), + )[symmetry_orders - 1, :] + if normalize_by_mean: + im_plot /= self.radial_mean[None, :] + + # plotting range + int_vals = np.sort(im_plot.ravel()) + ind0 = np.clip(np.round(im_plot.size * vmin).astype("int"), 0, im_plot.size - 1) + ind1 = np.clip(np.round(im_plot.size * vmax).astype("int"), 0, im_plot.size - 1) + vmin = int_vals[ind0] + vmax = int_vals[ind1] + + # plot image + fig, ax = 
plt.subplots(figsize=figsize) + ax.imshow( + im_plot, + aspect="auto", + extent=[ + self.qq[0], + self.qq[-1], + np.max(symmetry_orders), + np.min(symmetry_orders) - 1, + ], + cmap=cmap, + vmin=vmin, + vmax=vmax, + ) + ax.set_yticks( + symmetry_orders - 0.5, + symmetry_orders, + ) + ax.set_xlabel("Scattering angle (1/A)") + ax.set_ylabel("Symmetry Order") def scattering_model(k2, *coefs): diff --git a/py4DSTEM/process/polar/polar_datacube.py b/py4DSTEM/process/polar/polar_datacube.py index 4c06d4c09..0ba284ada 100644 --- a/py4DSTEM/process/polar/polar_datacube.py +++ b/py4DSTEM/process/polar/polar_datacube.py @@ -4,7 +4,6 @@ class PolarDatacube: - """ An interface to a 4D-STEM datacube under polar-elliptical transformation. """ @@ -97,8 +96,10 @@ def __init__( calculate_radial_statistics, calculate_pair_dist_function, calculate_FEM_local, + calculate_annular_symmetry, plot_radial_mean, plot_radial_var_norm, + plot_annular_symmetry, plot_background_fits, plot_sf_estimate, plot_reduced_pdf, diff --git a/py4DSTEM/process/polar/polar_fits.py b/py4DSTEM/process/polar/polar_fits.py index 3e39c5584..18f059b61 100644 --- a/py4DSTEM/process/polar/polar_fits.py +++ b/py4DSTEM/process/polar/polar_fits.py @@ -3,15 +3,18 @@ # from scipy.optimize import leastsq from scipy.optimize import curve_fit +from emdfile import tqdmnd def fit_amorphous_ring( - im, + im=None, + datacube=None, center=None, radial_range=None, coefs=None, mask_dp=None, show_fit_mask=False, + fit_all_images=False, maxfev=None, verbose=False, plot_result=True, @@ -19,6 +22,7 @@ def fit_amorphous_ring( plot_int_scale=(-3, 3), figsize=(8, 8), return_all_coefs=True, + progress_bar=None, ): """ Fit an amorphous halo with a two-sided Gaussian model, plus a background @@ -28,6 +32,8 @@ def fit_amorphous_ring( -------- im: np.array 2D image array to perform fitting on + datacube: py4DSTEM.DataCube + datacube to perform the fitting on center: np.array (x,y) center coordinates for fitting mask. 
If not specified by the user, we will assume the center coordinate is (im.shape-1)/2. @@ -40,6 +46,8 @@ Dark field mask for fitting, in addition to the radial range specified above. show_fit_mask: bool Set to true to preview the fitting mask and initial guess for the ellipse params + fit_all_images: bool + Fit the elliptic parameters to all images maxfev: int Max number of fitting evaluations for curve_fit. verbose: bool @@ -63,6 +71,12 @@ 11 parameter elliptic fit coefficients """ + # If passing in a DataCube, use mean diffraction pattern for initial guess + if im is None: + im = datacube.get_dp_mean() + if progress_bar is None: + progress_bar = True + # Default values if center is None: center = np.array(((im.shape[0] - 1) / 2, (im.shape[1] - 1) / 2)) @@ -193,7 +207,44 @@ )[0] coefs[4] = np.mod(coefs[4], 2 * np.pi) coefs[5:8] *= int_mean - # bounds=bounds + + # Perform the fit on each individual diffraction pattern + if fit_all_images: + coefs_all = np.zeros((datacube.shape[0], datacube.shape[1], coefs.size)) + + for rx, ry in tqdmnd( + datacube.shape[0], + datacube.shape[1], + desc="Radial statistics", + unit=" probe positions", + disable=not progress_bar, + ): + vals = datacube.data[rx, ry][mask] + int_mean = np.mean(vals) + + if maxfev is None: + coefs_single = curve_fit( + amorphous_model, + basis, + vals / int_mean, + p0=coefs, + xtol=1e-8, + bounds=(lb, ub), + )[0] + else: + coefs_single = curve_fit( + amorphous_model, + basis, + vals / int_mean, + p0=coefs, + xtol=1e-8, + bounds=(lb, ub), + maxfev=maxfev, + )[0] + coefs_single[4] = np.mod(coefs_single[4], 2 * np.pi) + coefs_single[5:8] *= int_mean + + coefs_all[rx, ry] = coefs_single if verbose: print("x0 = " + str(np.round(coefs[0], 3)) + " px") @@ -214,9 +265,15 @@ # Return fit parameters if return_all_coefs: - return coefs + if fit_all_images: + return coefs_all + else: + return coefs else: - return coefs[:5]
+ if fit_all_images: + return coefs_all[:, :, :5] + else: + return coefs[:5] def plot_amorphous_ring( diff --git a/py4DSTEM/process/wholepatternfit/wp_models.py b/py4DSTEM/process/wholepatternfit/wp_models.py index c0907a11e..e437916eb 100644 --- a/py4DSTEM/process/wholepatternfit/wp_models.py +++ b/py4DSTEM/process/wholepatternfit/wp_models.py @@ -289,12 +289,16 @@ def __init__( "radius": Parameter(radius), "sigma": Parameter(sigma), "intensity": Parameter(intensity), - "x center": WPF.coordinate_model.params["x center"] - if global_center - else Parameter(x0), - "y center": WPF.coordinate_model.params["y center"] - if global_center - else Parameter(y0), + "x center": ( + WPF.coordinate_model.params["x center"] + if global_center + else Parameter(x0) + ), + "y center": ( + WPF.coordinate_model.params["y center"] + if global_center + else Parameter(y0) + ), } super().__init__(name, params, model_type=WPFModelType.AMORPHOUS) diff --git a/py4DSTEM/visualize/vis_grid.py b/py4DSTEM/visualize/vis_grid.py index 9be754689..cb2581e01 100644 --- a/py4DSTEM/visualize/vis_grid.py +++ b/py4DSTEM/visualize/vis_grid.py @@ -281,7 +281,11 @@ def show_image_grid( ) else: _, _ = show( - ar, figax=(fig, ax), returnfig=True, title=print_title, **kwargs + ar, + figax=(fig, ax), + returnfig=True, + title=print_title, + **kwargs, ) except IndexError: ax.axis("off") diff --git a/py4DSTEM/visualize/vis_special.py b/py4DSTEM/visualize/vis_special.py index b647b8871..331cc8cf3 100644 --- a/py4DSTEM/visualize/vis_special.py +++ b/py4DSTEM/visualize/vis_special.py @@ -503,20 +503,34 @@ def show_origin_meas(data): show_image_grid(get_ar=lambda i: [qx, qy][i], H=1, W=2, cmap="RdBu") -def show_origin_fit(data): +def show_origin_fit( + data, + plot_range=None, + axsize=(3, 3), +): """ Show the measured, fit, and residuals of the origin positions. 
- Args: - data (DataCube or Calibration or (3,2)-tuple of arrays - ((qx0_meas,qy0_meas),(qx0_fit,qy0_fit),(qx0_residuals,qy0_residuals)) + Parameters + ---------- + data: (DataCube or Calibration or (3,2)-tuple of arrays + ((qx0_meas,qy0_meas),(qx0_fit,qy0_fit),(qx0_residuals,qy0_residuals)) + plot_range: (tuple, list, or np.array) + Plotting range in units of pixels + axsize: (tuple) + Size of each plot axis + + Returns + ------- + + """ from py4DSTEM.data import Calibration from py4DSTEM.datacube import DataCube if isinstance(data, tuple): assert len(data) == 3 - qx0_meas, qy_meas = data[0] + qx0_meas, qy0_meas = data[0] qx0_fit, qy0_fit = data[1] qx0_residuals, qy0_residuals = data[2] elif isinstance(data, DataCube): @@ -530,18 +544,39 @@ def show_origin_fit(data): else: raise Exception("data must be of type Datacube or Calibration or tuple") + # Centered intensity plots + qx0_meas_plot = qx0_meas - np.median(qx0_meas) + qy0_meas_plot = qy0_meas - np.median(qy0_meas) + qx0_fit_plot = qx0_fit - np.median(qx0_fit) + qy0_fit_plot = qy0_fit - np.median(qy0_fit) + + # Determine plotting range + if plot_range is None: + plot_range = np.array((-1.0, 1.0)) * np.max( + (np.abs(qx0_fit_plot), np.abs(qy0_fit_plot)) + ) + else: + plot_range = np.array(plot_range) + if plot_range.ndim == 0: + plot_range = np.array((-1.0, 1.0)) * plot_range + + # plotting show_image_grid( get_ar=lambda i: [ - qx0_meas, - qx0_fit, + qx0_meas_plot, + qx0_fit_plot, qx0_residuals, - qy0_meas, - qy0_fit, + qy0_meas_plot, + qy0_fit_plot, qy0_residuals, ][i], H=2, W=3, cmap="RdBu", + intensity_range="absolute", + vmin=plot_range[0], + vmax=plot_range[1], + axsize=axsize, )