From b9193294310856d53da7b6eeb2399c937a136150 Mon Sep 17 00:00:00 2001 From: alex-rakowski Date: Tue, 7 Nov 2023 23:31:56 -0800 Subject: [PATCH 1/7] pep 8 - Comparisons to singletons complience --- py4DSTEM/braggvectors/braggvector_methods.py | 8 ++--- py4DSTEM/braggvectors/diskdetection.py | 8 ++--- py4DSTEM/braggvectors/diskdetection_aiml.py | 2 +- .../braggvectors/diskdetection_aiml_cuda.py | 4 +-- py4DSTEM/braggvectors/diskdetection_cuda.py | 2 +- .../diskdetection_parallel_new.py | 4 +-- py4DSTEM/braggvectors/threshold.py | 4 +-- py4DSTEM/datacube/virtualdiffraction.py | 2 +- py4DSTEM/datacube/virtualimage.py | 12 +++---- py4DSTEM/io/filereaders/read_K2.py | 14 ++++----- py4DSTEM/preprocess/electroncount.py | 2 +- py4DSTEM/preprocess/radialbkgrd.py | 10 +++--- py4DSTEM/preprocess/utils.py | 2 +- py4DSTEM/process/calibration/ellipse.py | 2 +- py4DSTEM/process/calibration/origin.py | 4 +-- py4DSTEM/process/calibration/qpixelsize.py | 2 +- .../braggvectorclassification.py | 4 +-- .../process/classification/featurization.py | 31 +++++++++---------- py4DSTEM/process/diffraction/crystal.py | 4 +-- py4DSTEM/process/diffraction/crystal_ACOM.py | 12 +++---- py4DSTEM/process/diffraction/crystal_viz.py | 2 +- py4DSTEM/process/fit/fit.py | 4 +-- py4DSTEM/process/polar/polar_peaks.py | 2 +- py4DSTEM/process/strain/latticevectors.py | 4 +-- py4DSTEM/process/strain/strain.py | 10 +++--- py4DSTEM/utils/configuration_checker.py | 4 +-- py4DSTEM/visualize/show.py | 4 +-- 27 files changed, 81 insertions(+), 82 deletions(-) diff --git a/py4DSTEM/braggvectors/braggvector_methods.py b/py4DSTEM/braggvectors/braggvector_methods.py index 845ebabf6..70a36dec1 100644 --- a/py4DSTEM/braggvectors/braggvector_methods.py +++ b/py4DSTEM/braggvectors/braggvector_methods.py @@ -99,12 +99,12 @@ def histogram( # then scale by the sampling factor else: # get pixel calibration - if self.calstate["pixel"] == True: + if self.calstate["pixel"] is True: qpix = self.calibration.get_Q_pixel_size() qx /= qpix qy /= qpix # origin calibration - if self.calstate["center"] == True: + if self.calstate["center"] is True: origin = self.calibration.get_origin_mean() qx += origin[0] qy += origin[1] @@ -153,12 +153,12 @@ def histogram( ).reshape(Q_Nx, Q_Ny) # determine the resampled grid center and pixel size - if mode == "cal" and self.calstate["center"] == True: + if mode == "cal" and self.calstate["center"] is True: x0 = sampling * origin[0] y0 = sampling * origin[1] else: x0, y0 = 0, 0 - if mode == "cal" and self.calstate["pixel"] == True: + if mode == "cal" and self.calstate["pixel"] is True: pixelsize = qpix / sampling else: pixelsize = 1 / sampling diff --git a/py4DSTEM/braggvectors/diskdetection.py b/py4DSTEM/braggvectors/diskdetection.py index e23b10a15..e097d92d5 100644 --- a/py4DSTEM/braggvectors/diskdetection.py +++ b/py4DSTEM/braggvectors/diskdetection.py @@ -231,10 +231,10 @@ def find_Bragg_disks( mode = "dc_ml" elif mode == "datacube": - if distributed is None and CUDA == False: + if distributed is None and CUDA is False: mode = "dc_CPU" - elif distributed is None and CUDA == True: - if CUDA_batched == False: + elif distributed is None and CUDA is True: + if CUDA_batched is False: mode = "dc_GPU" else: mode = "dc_GPU_batched" @@ -271,7 +271,7 @@ def find_Bragg_disks( kws["data_file"] = data_file kws["cluster_path"] = cluster_path # ML arguments - if ML == True: + if ML is True: kws["CUDA"] = CUDA kws["model_path"] = ml_model_path kws["num_attempts"] = ml_num_attempts diff --git a/py4DSTEM/braggvectors/diskdetection_aiml.py 
b/py4DSTEM/braggvectors/diskdetection_aiml.py index 67df18074..abcb70291 100644 --- a/py4DSTEM/braggvectors/diskdetection_aiml.py +++ b/py4DSTEM/braggvectors/diskdetection_aiml.py @@ -528,7 +528,7 @@ def find_Bragg_disks_aiml_serial( ) ) - if global_threshold == True: + if global_threshold is True: from py4DSTEM.braggvectors import universal_threshold peaks = universal_threshold( diff --git a/py4DSTEM/braggvectors/diskdetection_aiml_cuda.py b/py4DSTEM/braggvectors/diskdetection_aiml_cuda.py index c5f89b9fd..64a43156a 100644 --- a/py4DSTEM/braggvectors/diskdetection_aiml_cuda.py +++ b/py4DSTEM/braggvectors/diskdetection_aiml_cuda.py @@ -233,7 +233,7 @@ def find_Bragg_disks_aiml_CUDA( datacube.R_N, int(t2 / 3600), int(t2 / 60), int(t2 % 60) ) ) - if global_threshold == True: + if global_threshold is True: from py4DSTEM.braggvectors import universal_threshold peaks = universal_threshold( @@ -496,7 +496,7 @@ def get_maxima_2D_cp( if minSpacing > 0: deletemask = np.zeros(len(maxima), dtype=bool) for i in range(len(maxima)): - if deletemask[i] == False: + if deletemask[i] is False: tooClose = ( (maxima["x"] - maxima["x"][i]) ** 2 + (maxima["y"] - maxima["y"][i]) ** 2 diff --git a/py4DSTEM/braggvectors/diskdetection_cuda.py b/py4DSTEM/braggvectors/diskdetection_cuda.py index 4bbb7f488..670361b3b 100644 --- a/py4DSTEM/braggvectors/diskdetection_cuda.py +++ b/py4DSTEM/braggvectors/diskdetection_cuda.py @@ -482,7 +482,7 @@ def get_maxima_2D( if minSpacing > 0: deletemask = np.zeros(len(maxima), dtype=bool) for i in range(len(maxima)): - if deletemask[i] == False: + if deletemask[i] is False: tooClose = ( (maxima["x"] - maxima["x"][i]) ** 2 + (maxima["y"] - maxima["y"][i]) ** 2 diff --git a/py4DSTEM/braggvectors/diskdetection_parallel_new.py b/py4DSTEM/braggvectors/diskdetection_parallel_new.py index c15e41732..cde2c3650 100644 --- a/py4DSTEM/braggvectors/diskdetection_parallel_new.py +++ b/py4DSTEM/braggvectors/diskdetection_parallel_new.py @@ -261,9 +261,9 @@ def beta_parallel_disk_detection( if close_dask_client: dask_client.close() return peaks - elif close_dask_client == False and return_dask_client == True: + elif close_dask_client is False and return_dask_client is True: return peaks, dask_client - elif close_dask_client and return_dask_client == False: + elif close_dask_client and return_dask_client is False: return peaks else: print( diff --git a/py4DSTEM/braggvectors/threshold.py b/py4DSTEM/braggvectors/threshold.py index c13b0a665..c6c1c4afc 100644 --- a/py4DSTEM/braggvectors/threshold.py +++ b/py4DSTEM/braggvectors/threshold.py @@ -52,7 +52,7 @@ def threshold_Braggpeaks( r2 = minPeakSpacing**2 deletemask = np.zeros(pointlist.length, dtype=bool) for i in range(pointlist.length): - if deletemask[i] == False: + if deletemask[i] is False: tooClose = ( (pointlist.data["qx"] - pointlist.data["qx"][i]) ** 2 + (pointlist.data["qy"] - pointlist.data["qy"][i]) ** 2 @@ -160,7 +160,7 @@ def universal_threshold( r2 = minPeakSpacing**2 deletemask = np.zeros(pointlist.length, dtype=bool) for i in range(pointlist.length): - if deletemask[i] == False: + if deletemask[i] is False: tooClose = ( (pointlist.data["qx"] - pointlist.data["qx"][i]) ** 2 + (pointlist.data["qy"] - pointlist.data["qy"][i]) ** 2 diff --git a/py4DSTEM/datacube/virtualdiffraction.py b/py4DSTEM/datacube/virtualdiffraction.py index 65665728d..23b151d58 100644 --- a/py4DSTEM/datacube/virtualdiffraction.py +++ b/py4DSTEM/datacube/virtualdiffraction.py @@ -114,7 +114,7 @@ def get_virtual_diffraction( # Calculate # ...with no center shifting 
- if shift_center == False: + if shift_center is False: # ...for the whole pattern if mask is None: if method == "mean": diff --git a/py4DSTEM/datacube/virtualimage.py b/py4DSTEM/datacube/virtualimage.py index 50a297914..87aeae8b1 100644 --- a/py4DSTEM/datacube/virtualimage.py +++ b/py4DSTEM/datacube/virtualimage.py @@ -197,13 +197,13 @@ def get_virtual_image( # Get mask mask = self.make_detector(self.Qshape, mode, g) # if return_mask is True, skip computation - if return_mask == True and shift_center == False: + if return_mask is True and shift_center is False: return mask # Calculate virtual image # no center shifting - if shift_center == False: + if shift_center is False: # single CPU if not dask: # allocate space @@ -220,7 +220,7 @@ def get_virtual_image( virtual_image[rx, ry] = np.sum(self.data[rx, ry] * mask) # dask - if dask == True: + if dask is True: # set up a generalized universal function for dask distribution def _apply_mask_dask(self, mask): virtual_image = np.sum( @@ -444,7 +444,7 @@ def position_detector( # shift center if shift_center is None: shift_center = False - elif shift_center == True: + elif shift_center is True: assert isinstance( data, tuple ), "If shift_center is set to True, `data` should be a 2-tuple (rx,ry). \ @@ -552,7 +552,7 @@ def get_calibrated_detector_geometry( # Convert units into detector pixels # Shift center - if centered == True: + if centered is True: if mode == "point": g = (g[0] + x0_mean, g[1] + y0_mean) if mode in ("circle", "circular", "annulus", "annular"): @@ -561,7 +561,7 @@ def get_calibrated_detector_geometry( g = (g[0] + x0_mean, g[1] + x0_mean, g[2] + y0_mean, g[3] + y0_mean) # Scale by the detector pixel size - if calibrated == True: + if calibrated is True: if mode == "point": g = (g[0] / unit_conversion, g[1] / unit_conversion) if mode in ("circle", "circular"): diff --git a/py4DSTEM/io/filereaders/read_K2.py b/py4DSTEM/io/filereaders/read_K2.py index 61405a437..e0a5dae1f 100644 --- a/py4DSTEM/io/filereaders/read_K2.py +++ b/py4DSTEM/io/filereaders/read_K2.py @@ -336,9 +336,9 @@ def _find_offsets(self): for i in range(8): sync = False frame = 0 - while sync == False: + while sync is False: sync = self._bin_files[i][frame]["block"] == block_id - if sync == False: + if sync is False: frame += 1 self._shutter_offsets[i] += frame print("Offsets are currently ", self._shutter_offsets) @@ -358,7 +358,7 @@ def _find_offsets(self): sync = False next_frame = stripe[j]["frame"] - if sync == False: + if sync is False: # the first frame is incomplete, so we need to seek the next one print( f"First frame ({first_frame}) incomplete, seeking frame {next_frame}..." @@ -366,12 +366,12 @@ def _find_offsets(self): for i in range(8): sync = False frame = 0 - while sync == False: + while sync is False: sync = ( self._bin_files[i][self._shutter_offsets[i] + frame]["frame"] == next_frame ) - if sync == False: + if sync is False: frame += 1 self._shutter_offsets[i] += frame print("Offsets are now ", self._shutter_offsets) @@ -387,7 +387,7 @@ def _find_offsets(self): ] if np.any(stripe[:]["frame"] != first_frame): sync = False - if sync == True: + if sync is True: print("New frame is complete!") else: print("Next frame also incomplete!!!! 
Data may be corrupt?") @@ -397,7 +397,7 @@ def _find_offsets(self): for i in range(8): shutter = False frame = 0 - while shutter == False: + while shutter is False: offset = self._shutter_offsets[i] + (frame * 32) stripe = self._bin_files[i][offset : offset + 32] shutter = stripe[0]["shutter"] diff --git a/py4DSTEM/preprocess/electroncount.py b/py4DSTEM/preprocess/electroncount.py index 7a498a061..973ad3ebc 100644 --- a/py4DSTEM/preprocess/electroncount.py +++ b/py4DSTEM/preprocess/electroncount.py @@ -402,7 +402,7 @@ def counted_pointlistarray_to_datacube(counted_pointlistarray, shape, subpixel=F (4D array of bools): a 4D array of bools, with true indicating an electron strike. """ assert len(shape) == 4 - assert subpixel == False, "subpixel mode not presently supported." + assert subpixel is False, "subpixel mode not presently supported." R_Nx, R_Ny, Q_Nx, Q_Ny = shape counted_datacube = np.zeros((R_Nx, R_Nx, Q_Nx, Q_Ny), dtype=bool) diff --git a/py4DSTEM/preprocess/radialbkgrd.py b/py4DSTEM/preprocess/radialbkgrd.py index e0d402fe3..da225ed74 100644 --- a/py4DSTEM/preprocess/radialbkgrd.py +++ b/py4DSTEM/preprocess/radialbkgrd.py @@ -80,7 +80,7 @@ def get_1D_polar_background( # Crop polar data to maximum distance which contains information from original image if (polarData.mask.sum(axis=(0)) == polarData.shape[0]).any(): ii = polarData.data.shape[1] - 1 - while polarData.mask[:, ii].all() == True: + while polarData.mask[:, ii].all() is True: ii = ii - 1 maximalDistance = ii polarData = polarData[:, 0:maximalDistance] @@ -105,16 +105,16 @@ def get_1D_polar_background( background1D = np.maximum(background1D, min_background_value) - if smoothing == True: - if smoothing_log == True: + if smoothing is True: + if smoothing_log is True: background1D = np.log(background1D) background1D = savgol_filter( background1D, smoothingWindowSize, smoothingPolyOrder ) - if smoothing_log == True: + if smoothing_log is True: background1D = np.exp(background1D) - if return_polararr == True: + if return_polararr is True: return (background1D, r_bins, polarData) else: return (background1D, r_bins) diff --git a/py4DSTEM/preprocess/utils.py b/py4DSTEM/preprocess/utils.py index 752e2f81c..829f66608 100644 --- a/py4DSTEM/preprocess/utils.py +++ b/py4DSTEM/preprocess/utils.py @@ -293,7 +293,7 @@ def filter_2D_maxima( if minSpacing > 0: deletemask = np.zeros(len(maxima), dtype=bool) for i in range(len(maxima)): - if deletemask[i] == False: + if deletemask[i] is False: tooClose = ( (maxima["x"] - maxima["x"][i]) ** 2 + (maxima["y"] - maxima["y"][i]) ** 2 diff --git a/py4DSTEM/process/calibration/ellipse.py b/py4DSTEM/process/calibration/ellipse.py index 8835aa95b..e6a216cf1 100644 --- a/py4DSTEM/process/calibration/ellipse.py +++ b/py4DSTEM/process/calibration/ellipse.py @@ -63,7 +63,7 @@ def fit_ellipse_1D(ar, center=None, fitradii=None, mask=None): rr = np.sqrt((xx - x0) ** 2 + (yy - y0) ** 2) _mask = (rr > ri) * (rr <= ro) if mask is not None: - _mask *= mask == False + _mask *= mask is False xs, ys = np.nonzero(_mask) vals = ar[_mask] diff --git a/py4DSTEM/process/calibration/origin.py b/py4DSTEM/process/calibration/origin.py index 78a90fbef..ef0f35a96 100644 --- a/py4DSTEM/process/calibration/origin.py +++ b/py4DSTEM/process/calibration/origin.py @@ -154,7 +154,7 @@ def fit_origin( robust=robust, robust_steps=robust_steps, robust_thresh=robust_thresh, - data_mask=mask == True, + data_mask=mask is True, ) popt_y, pcov_y, qy0_fit, _ = fit_2D( f, @@ -162,7 +162,7 @@ def fit_origin( robust=robust, 
robust_steps=robust_steps, robust_thresh=robust_thresh, - data_mask=mask == True, + data_mask=mask is True, ) # Compute residuals diff --git a/py4DSTEM/process/calibration/qpixelsize.py b/py4DSTEM/process/calibration/qpixelsize.py index 2abefd54c..0510fad06 100644 --- a/py4DSTEM/process/calibration/qpixelsize.py +++ b/py4DSTEM/process/calibration/qpixelsize.py @@ -60,6 +60,6 @@ def get_dq_from_indexed_peaks(qs, hkl, a): # Get pixel size dq = 1 / (c * a) qs_fit = d_inv[mask] / a - hkl_fit = [hkl[i] for i in range(len(hkl)) if mask[i] == True] + hkl_fit = [hkl[i] for i in range(len(hkl)) if mask[i] is True] return dq, qs_fit, hkl_fit diff --git a/py4DSTEM/process/classification/braggvectorclassification.py b/py4DSTEM/process/classification/braggvectorclassification.py index d5c2ac0fc..4956f7630 100644 --- a/py4DSTEM/process/classification/braggvectorclassification.py +++ b/py4DSTEM/process/classification/braggvectorclassification.py @@ -594,8 +594,8 @@ def merge_iterative(self, threshBPs=0.1, threshScanPosition=0.1): W_merge = W_merge[:, 1:] H_merge = H_merge[1:, :] - W_ = np.hstack((W_[:, merged == False], W_merge)) - H_ = np.vstack((H_[merged == False, :], H_merge)) + W_ = np.hstack((W_[:, merged is False], W_merge)) + H_ = np.vstack((H_[merged is False, :], H_merge)) Nc_ = W_.shape[1] if len(merge_candidates) == 0: diff --git a/py4DSTEM/process/classification/featurization.py b/py4DSTEM/process/classification/featurization.py index 38b4e1412..3fb70a943 100644 --- a/py4DSTEM/process/classification/featurization.py +++ b/py4DSTEM/process/classification/featurization.py @@ -181,8 +181,7 @@ def from_braggvectors( np.rint( (pointlist.data["qy"] / q_pixel_size) + Q_Ny / 2 ).astype(int), - ] - == False + ] is False ), True, False, @@ -330,7 +329,7 @@ def MinMaxScaler(self, return_scaled=True): """ mms = MinMaxScaler() self.features = mms.fit_transform(self.features) - if return_scaled == True: + if return_scaled is True: return self.features else: return @@ -345,7 +344,7 @@ def RobustScaler(self, return_scaled=True): """ rs = RobustScaler() self.features = rs.fit_transform(self.features) - if return_scaled == True: + if return_scaled is True: return self.features else: return @@ -358,7 +357,7 @@ def shift_positive(self, return_scaled=True): return_scaled (bool): returns the scaled array """ self.features += np.abs(self.features.min()) - if return_scaled == True: + if return_scaled is True: return self.features else: return @@ -372,7 +371,7 @@ def PCA(self, components, return_results=False): """ pca = PCA(n_components=components) self.pca = pca.fit_transform(self.features) - if return_results == True: + if return_results is True: return self.pca return @@ -385,7 +384,7 @@ def ICA(self, components, return_results=True): """ ica = FastICA(n_components=components) self.ica = ica.fit_transform(self.features) - if return_results == True: + if return_results is True: return self.ica return @@ -434,7 +433,7 @@ def NMF( random_seed=random_seed, save_all_models=save_all_models, ) - if return_results == True: + if return_results is True: return self.W return @@ -455,7 +454,7 @@ def GMM(self, cv, components, num_models, random_seed=None, return_results=False num_models=num_models, random_seed=random_seed, ) - if return_results == True: + if return_results is True: return self.gmm return @@ -707,7 +706,7 @@ def spatial_separation(self, size, threshold=0, method=None, clean=True): ) if len(separated_temp) > 0: - if clean == True: + if clean is True: data_ndarray = np.dstack(separated_temp) data_hard = ( 
data_ndarray.max(axis=2, keepdims=1) == data_ndarray @@ -879,7 +878,7 @@ def _nmf_single( rng = np.random.RandomState(seed=42) else: seed = random_seed - if save_all_models == True: + if save_all_models is True: W = [] # Big loop through all models @@ -936,7 +935,7 @@ def _nmf_single( if n_comps <= 2: break - if save_all_models == True: + if save_all_models is True: W.append(nmf_temp) elif (recon_error / counter) < err: @@ -963,7 +962,7 @@ def _gmm_single(x, cv, components, num_models, random_seed=None, return_all=True gmm_labels OR best_gmm_labels: Label list for all models or labels for best model gmm_proba OR best_gmm_proba: Probability list of class belonging or probability for best model """ - if return_all == True: + if return_all is True: gmm_list = [] gmm_labels = [] gmm_proba = [] @@ -986,18 +985,18 @@ def _gmm_single(x, cv, components, num_models, random_seed=None, return_all=True labels = gmm.fit_predict(x) bic_temp = gmm.bic(x) - if return_all == True: + if return_all is True: gmm_list.append(gmm) gmm_labels.append(labels) gmm_proba.append(gmm.predict_proba(x)) - elif return_all == False: + elif return_all is False: if bic_temp < lowest_bic: lowest_bic = bic_temp best_gmm = gmm best_gmm_labels = labels best_gmm_proba = gmm.predict_proba(x) - if return_all == True: + if return_all is True: return gmm_list, gmm_labels, gmm_proba return best_gmm, best_gmm_labels, best_gmm_proba diff --git a/py4DSTEM/process/diffraction/crystal.py b/py4DSTEM/process/diffraction/crystal.py index b508d589e..fb2911992 100644 --- a/py4DSTEM/process/diffraction/crystal.py +++ b/py4DSTEM/process/diffraction/crystal.py @@ -868,12 +868,12 @@ def generate_ring_pattern( ) intensity_unique = np.bincount(inv, weights=intensity) - if plot_rings == True: + if plot_rings is True: from py4DSTEM.process.diffraction.crystal_viz import plot_ring_pattern plot_ring_pattern(radii_unique, intensity_unique, **plot_params) - if return_calc == True: + if return_calc is True: return radii_unique, intensity_unique # Vector conversions and other utilities for Crystal classes diff --git a/py4DSTEM/process/diffraction/crystal_ACOM.py b/py4DSTEM/process/diffraction/crystal_ACOM.py index 49be73b99..94d7a98cb 100644 --- a/py4DSTEM/process/diffraction/crystal_ACOM.py +++ b/py4DSTEM/process/diffraction/crystal_ACOM.py @@ -798,12 +798,12 @@ def match_orientations( ) # check cal state - if bragg_peaks_array.calstate["ellipse"] == False: + if bragg_peaks_array.calstate["ellipse"] is False: ellipse = False warn("Warning: bragg peaks not elliptically calibrated") else: ellipse = True - if bragg_peaks_array.calstate["rotate"] == False: + if bragg_peaks_array.calstate["rotate"] is False: rotate = False warn("bragg peaks not rotationally calibrated") else: @@ -1840,7 +1840,7 @@ def cluster_grains( xr = np.clip(x + np.arange(-1, 2, dtype="int"), 0, sig.shape[0] - 1) yr = np.clip(y + np.arange(-1, 2, dtype="int"), 0, sig.shape[1] - 1) inds_cand = inds_all[xr[:, None], yr[None], :].ravel() - inds_cand = np.delete(inds_cand, mark.ravel()[inds_cand] == False) + inds_cand = np.delete(inds_cand, mark.ravel()[inds_cand] is False) if inds_cand.size == 0: grow = False @@ -1893,7 +1893,7 @@ def cluster_grains( inds_grain = np.append(inds_grain, inds_cand[keep]) inds_cand = np.unique( - np.delete(inds_new, mark.ravel()[inds_new] == False) + np.delete(inds_new, mark.ravel()[inds_new] is False) ) if inds_cand.size == 0: @@ -2083,12 +2083,12 @@ def calculate_strain( radius_max_2 = corr_kernel_size**2 # check cal state - if 
bragg_peaks_array.calstate["ellipse"] == False: + if bragg_peaks_array.calstate["ellipse"] is False: ellipse = False warn("bragg peaks not elliptically calibrated") else: ellipse = True - if bragg_peaks_array.calstate["rotate"] == False: + if bragg_peaks_array.calstate["rotate"] is False: rotate = False warn("bragg peaks not rotationally calibrated") else: diff --git a/py4DSTEM/process/diffraction/crystal_viz.py b/py4DSTEM/process/diffraction/crystal_viz.py index 9f9336155..cf8c5adad 100644 --- a/py4DSTEM/process/diffraction/crystal_viz.py +++ b/py4DSTEM/process/diffraction/crystal_viz.py @@ -2120,7 +2120,7 @@ def plot_ring_pattern( ax = ax_parent[0] for a1 in range(radii.shape[0]): - if intensity_constant == True: + if intensity_constant is True: ax.plot( radii[a1] * np.sin(theta), radii[a1] * np.cos(theta), diff --git a/py4DSTEM/process/fit/fit.py b/py4DSTEM/process/fit/fit.py index 349d88530..df90feaa8 100644 --- a/py4DSTEM/process/fit/fit.py +++ b/py4DSTEM/process/fit/fit.py @@ -86,7 +86,7 @@ def fit_2D( xy = np.vstack((rx_1D, ry_1D)) # if robust fitting is turned off, set number of robust iterations to 0 - if robust == False: + if robust is False: robust_steps = 0 # least squares fitting @@ -107,7 +107,7 @@ def fit_2D( fit_mean_square_error > np.mean(fit_mean_square_error) * robust_thresh**2 ) - mask[_mask] == False + mask[_mask] is False # perform fitting popt, pcov = curve_fit( diff --git a/py4DSTEM/process/polar/polar_peaks.py b/py4DSTEM/process/polar/polar_peaks.py index be9ae989e..b923ba45f 100644 --- a/py4DSTEM/process/polar/polar_peaks.py +++ b/py4DSTEM/process/polar/polar_peaks.py @@ -167,7 +167,7 @@ def find_peaks_single_pattern( if remove_masked_peaks: peaks = np.delete( peaks, - mask_bool[peaks[:, 0], peaks[:, 1]] == False, + mask_bool[peaks[:, 0], peaks[:, 1]] is False, axis=0, ) diff --git a/py4DSTEM/process/strain/latticevectors.py b/py4DSTEM/process/strain/latticevectors.py index ba9bb4fcf..b29d8f69e 100644 --- a/py4DSTEM/process/strain/latticevectors.py +++ b/py4DSTEM/process/strain/latticevectors.py @@ -182,7 +182,7 @@ def fit_lattice_vectors(braggpeaks, x0=0, y0=0, minNumPeaks=5): # Remove unindexed peaks if "index_mask" in braggpeaks.dtype.names: - deletemask = braggpeaks.data["index_mask"] == False + deletemask = braggpeaks.data["index_mask"] is False braggpeaks.remove(deletemask) # Check to ensure enough peaks are present @@ -461,7 +461,7 @@ def get_rotated_strain_map(unrotated_strain_map, xaxis_x, xaxis_y, flip_theta): + 2 * cost * sint * unrotated_strain_map.get_slice("e_xy").data + cost2 * unrotated_strain_map.get_slice("e_yy").data ) - if flip_theta == True: + if flip_theta is True: rotated_strain_map.data[3, :, :] = -unrotated_strain_map.get_slice("theta").data else: rotated_strain_map.data[3, :, :] = unrotated_strain_map.get_slice("theta").data diff --git a/py4DSTEM/process/strain/strain.py b/py4DSTEM/process/strain/strain.py index ab8a46a9a..2175ba11d 100644 --- a/py4DSTEM/process/strain/strain.py +++ b/py4DSTEM/process/strain/strain.py @@ -84,7 +84,7 @@ def __init__(self, braggvectors: BraggVectors, name: Optional[str] = "strainmap" # re-calibration are issued self.calstate = self.braggvectors.calstate assert self.calstate["center"], "braggvectors must be centered" - if self.calstate["rotate"] == False: + if self.calstate["rotate"] is False: warnings.warn( ("Real to reciprocal space rotation not calibrated"), UserWarning, @@ -791,17 +791,17 @@ def show_strain( # Get images e_xx = np.ma.array( - self.get_slice("exx").data, mask=self.get_slice("mask").data 
== False + self.get_slice("exx").data, mask=self.get_slice("mask").data is False ) e_yy = np.ma.array( - self.get_slice("eyy").data, mask=self.get_slice("mask").data == False + self.get_slice("eyy").data, mask=self.get_slice("mask").data is False ) e_xy = np.ma.array( - self.get_slice("exy").data, mask=self.get_slice("mask").data == False + self.get_slice("exy").data, mask=self.get_slice("mask").data is False ) theta = np.ma.array( self.get_slice("theta").data, - mask=self.get_slice("mask").data == False, + mask=self.get_slice("mask").data is False, ) ## Plot diff --git a/py4DSTEM/utils/configuration_checker.py b/py4DSTEM/utils/configuration_checker.py index 26b0b89d5..283b1d26a 100644 --- a/py4DSTEM/utils/configuration_checker.py +++ b/py4DSTEM/utils/configuration_checker.py @@ -190,7 +190,7 @@ def get_module_states(state_dict: dict) -> dict: # check that all the depencies could be imported i.e. state == True # and set the state of the module to that - module_states[key] = all(temp_lst) == True + module_states[key] = all(temp_lst) is True return module_states @@ -338,7 +338,7 @@ def check_module_functionality(state_dict: dict) -> None: # check that all the depencies could be imported i.e. state == True # and set the state of the module to that - module_states[key] = all(temp_lst) == True + module_states[key] = all(temp_lst) is True # Print out the state of all the modules in colour code for key, val in module_states.items(): diff --git a/py4DSTEM/visualize/show.py b/py4DSTEM/visualize/show.py index 00309ec36..4cf31bb5a 100644 --- a/py4DSTEM/visualize/show.py +++ b/py4DSTEM/visualize/show.py @@ -312,7 +312,7 @@ def show( if returnfig==False (default), the figure is plotted and nothing is returned. if returnfig==True, return the figure and the axis. """ - if scalebar == True: + if scalebar is True: scalebar = {} # Alias dep @@ -415,7 +415,7 @@ def show( if ( hasattr(ar, "calibration") and (ar.calibration is not None) - and (scalebar != False) + and (scalebar is not False) ): cal = ar.calibration er = ".calibration attribute must be a Calibration instance" From 29e506e9b07620cee37adb03046809e7d6e3e63e Mon Sep 17 00:00:00 2001 From: alex-rakowski Date: Wed, 8 Nov 2023 00:13:15 -0800 Subject: [PATCH 2/7] == None to is None --- py4DSTEM/braggvectors/diskdetection_parallel_new.py | 6 +++--- py4DSTEM/process/classification/featurization.py | 10 +++++----- py4DSTEM/process/diffraction/crystal_phase.py | 6 +++--- py4DSTEM/visualize/show.py | 8 ++++---- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/py4DSTEM/braggvectors/diskdetection_parallel_new.py b/py4DSTEM/braggvectors/diskdetection_parallel_new.py index cde2c3650..ddfc93049 100644 --- a/py4DSTEM/braggvectors/diskdetection_parallel_new.py +++ b/py4DSTEM/braggvectors/diskdetection_parallel_new.py @@ -137,8 +137,8 @@ def beta_parallel_disk_detection( # ... dask stuff. # TODO add assert statements and other checks. 
Think about reordering opperations - if dask_client == None: - if dask_client_params != None: + if dask_client is None: + if dask_client_params is not None: dask.config.set( { "distributed.worker.memory.spill": False, @@ -201,7 +201,7 @@ def beta_parallel_disk_detection( dask_data = da.from_array( dataset.data, chunks=(1, 1, dataset.Q_Nx, dataset.Q_Ny) ) - elif dataset.stack_pointer != None: + elif dataset.stack_pointer is not None: dask_data = da.from_array( dataset.stack_pointer, chunks=(1, 1, dataset.Q_Nx, dataset.Q_Ny) ) diff --git a/py4DSTEM/process/classification/featurization.py b/py4DSTEM/process/classification/featurization.py index 3fb70a943..54bc6184b 100644 --- a/py4DSTEM/process/classification/featurization.py +++ b/py4DSTEM/process/classification/featurization.py @@ -679,7 +679,7 @@ def spatial_separation(self, size, threshold=0, method=None, clean=True): ) else: large_labelled_image = labelled_image - elif method == None: + elif method is None: labelled_image = label(image) if np.sum(labelled_image) > size: large_labelled_image = remove_small_objects( @@ -874,7 +874,7 @@ def _nmf_single( """ # Prepare error, random seed err = np.inf - if random_seed == None: + if random_seed is None: rng = np.random.RandomState(seed=42) else: seed = random_seed @@ -883,7 +883,7 @@ def _nmf_single( # Big loop through all models for i in range(num_models): - if random_seed == None: + if random_seed is None: seed = rng.randint(5000) n_comps = max_components recon_error, counter = 0, 0 @@ -968,12 +968,12 @@ def _gmm_single(x, cv, components, num_models, random_seed=None, return_all=True gmm_proba = [] lowest_bic = np.infty bic_temp = 0 - if random_seed == None: + if random_seed is None: rng = np.random.RandomState(seed=42) else: seed = random_seed for n in range(num_models): - if random_seed == None: + if random_seed is None: seed = rng.randint(5000) for j in range(len(components)): for cv_type in cv: diff --git a/py4DSTEM/process/diffraction/crystal_phase.py b/py4DSTEM/process/diffraction/crystal_phase.py index 84824fe63..bac1cf8c7 100644 --- a/py4DSTEM/process/diffraction/crystal_phase.py +++ b/py4DSTEM/process/diffraction/crystal_phase.py @@ -52,7 +52,7 @@ def plot_all_phase_maps(self, map_scale_values=None, index=0): map_scale_values (float): Value to scale correlations by """ phase_maps = [] - if map_scale_values == None: + if map_scale_values is None: map_scale_values = [1] * len(self.orientation_maps) corr_sum = np.sum( [ @@ -75,7 +75,7 @@ def plot_phase_map(self, index=0, cmap=None): for p in range(len(self.orientation_maps)) ] - if cmap == None: + if cmap is None: cm = plt.get_cmap("rainbow") cmap = [ cm(1.0 * i / len(self.orientation_maps)) @@ -276,7 +276,7 @@ def quantify_phase_pointlist( if len(pointlist["qx"]) > 0: if mask_peaks is not None: for i in range(len(mask_peaks)): - if mask_peaks[i] == None: + if mask_peaks[i] is None: continue inds_mask = np.where( pointlist_peak_intensity_matches[:, mask_peaks[i]] != 0 diff --git a/py4DSTEM/visualize/show.py b/py4DSTEM/visualize/show.py index 4cf31bb5a..fcc05777e 100644 --- a/py4DSTEM/visualize/show.py +++ b/py4DSTEM/visualize/show.py @@ -493,12 +493,12 @@ def show( if np.all(np.isnan(_ar)): _ar[:, :] = 0 if intensity_range == "absolute": - if vmin != None: + if vmin is not None: if vmin > 0.0: vmin = np.log(vmin) else: vmin = np.min(_ar[_mask]) - if vmax != None: + if vmax is not None: vmax = np.log(vmax) elif scaling == "power": if power_offset is False: @@ -514,9 +514,9 @@ def show( _ar = np.power(ar.copy(), power) _mask = 
np.ones_like(_ar.data, dtype=bool) if intensity_range == "absolute": - if vmin != None: + if vmin is not None: vmin = np.power(vmin, power) - if vmax != None: + if vmax is not None: vmax = np.power(vmax, power) else: raise Exception From bbdb1ef13125f4e39539c4e229308e0cc0ad88b6 Mon Sep 17 00:00:00 2001 From: alex-rakowski Date: Wed, 8 Nov 2023 00:15:25 -0800 Subject: [PATCH 3/7] black --- py4DSTEM/process/classification/featurization.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/py4DSTEM/process/classification/featurization.py b/py4DSTEM/process/classification/featurization.py index 54bc6184b..b462ea1eb 100644 --- a/py4DSTEM/process/classification/featurization.py +++ b/py4DSTEM/process/classification/featurization.py @@ -181,7 +181,8 @@ def from_braggvectors( np.rint( (pointlist.data["qy"] / q_pixel_size) + Q_Ny / 2 ).astype(int), - ] is False + ] + is False ), True, False, From dea0b9881f17ef058af127605ed738bad4e7a0c3 Mon Sep 17 00:00:00 2001 From: alex-rakowski Date: Wed, 8 Nov 2023 00:33:05 -0800 Subject: [PATCH 4/7] removing unused f-strings --- py4DSTEM/braggvectors/braggvectors.py | 2 +- py4DSTEM/braggvectors/diskdetection.py | 6 +++--- py4DSTEM/data/calibration.py | 4 ++-- py4DSTEM/utils/configuration_checker.py | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/py4DSTEM/braggvectors/braggvectors.py b/py4DSTEM/braggvectors/braggvectors.py index daaf9816e..e81eeb62f 100644 --- a/py4DSTEM/braggvectors/braggvectors.py +++ b/py4DSTEM/braggvectors/braggvectors.py @@ -385,7 +385,7 @@ def __getitem__(self, pos): def __repr__(self): space = " " * len(self.__class__.__name__) + " " string = f"{self.__class__.__name__}( " - string += f"Retrieves raw bragg vectors. Get vectors for scan position x,y with [x,y]. )" + string += "Retrieves raw bragg vectors. Get vectors for scan position x,y with [x,y]. 
)" return string diff --git a/py4DSTEM/braggvectors/diskdetection.py b/py4DSTEM/braggvectors/diskdetection.py index e097d92d5..99818b75e 100644 --- a/py4DSTEM/braggvectors/diskdetection.py +++ b/py4DSTEM/braggvectors/diskdetection.py @@ -759,7 +759,7 @@ def _parse_distributed(distributed): data_file = distributed["data_file"] if not isinstance(data_file, str): - er = f"Expected string for distributed key 'data_file', " + er = "Expected string for distributed key 'data_file', " er += f"received {type(data_file)}" raise TypeError(er) if len(data_file.strip()) == 0: @@ -773,7 +773,7 @@ def _parse_distributed(distributed): cluster_path = distributed["cluster_path"] if not isinstance(cluster_path, str): - er = f"distributed key 'cluster_path' must be of type str, " + er = "distributed key 'cluster_path' must be of type str, " er += f"received {type(cluster_path)}" raise TypeError(er) @@ -784,7 +784,7 @@ def _parse_distributed(distributed): er = f"distributed key 'cluster_path' does not exist: {cluster_path}" raise FileNotFoundError(er) elif not os.path.isdir(cluster_path): - er = f"distributed key 'cluster_path' is not a directory: " + er = "distributed key 'cluster_path' is not a directory: " er += f"{cluster_path}" raise NotADirectoryError(er) else: diff --git a/py4DSTEM/data/calibration.py b/py4DSTEM/data/calibration.py index ffdbfa410..408f977cc 100644 --- a/py4DSTEM/data/calibration.py +++ b/py4DSTEM/data/calibration.py @@ -234,7 +234,7 @@ def attach(self, data): """ from py4DSTEM.data import Data - assert isinstance(data, Data), f"data must be a Data instance" + assert isinstance(data, Data), "data must be a Data instance" self.root.attach(data) # Register for auto-calibration @@ -316,7 +316,7 @@ def set_Q_pixel_units(self, x): "pixels", "A^-1", "mrad", - ), f"Q pixel units must be 'A^-1', 'mrad' or 'pixels'." + ), "Q pixel units must be 'A^-1', 'mrad' or 'pixels'." 
self._params["Q_pixel_units"] = x def get_Q_pixel_units(self): diff --git a/py4DSTEM/utils/configuration_checker.py b/py4DSTEM/utils/configuration_checker.py index 283b1d26a..904dceb29 100644 --- a/py4DSTEM/utils/configuration_checker.py +++ b/py4DSTEM/utils/configuration_checker.py @@ -375,12 +375,12 @@ def check_cupy_gpu(gratuitously_verbose: bool, **kwargs): # check that CUDA is detected correctly cuda_availability = cp.cuda.is_available() if cuda_availability: - s = f" CUDA is Available " + s = " CUDA is Available " s = create_success(s) s = f"{s: <80}" print(s) else: - s = f" CUDA is Unavailable " + s = " CUDA is Unavailable " s = create_failure(s) s = f"{s: <80}" print(s) From 58fb60aa5f7cfe2443f9bca20bc58c211151d857 Mon Sep 17 00:00:00 2001 From: alex-rakowski Date: Wed, 8 Nov 2023 09:02:05 -0800 Subject: [PATCH 5/7] changing is Bool to == Bool --- py4DSTEM/process/diffraction/crystal_ACOM.py | 4 ++-- py4DSTEM/process/polar/polar_peaks.py | 2 +- py4DSTEM/process/strain/latticevectors.py | 2 +- py4DSTEM/process/strain/strain.py | 8 ++++---- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/py4DSTEM/process/diffraction/crystal_ACOM.py b/py4DSTEM/process/diffraction/crystal_ACOM.py index 94d7a98cb..f84bffe4e 100644 --- a/py4DSTEM/process/diffraction/crystal_ACOM.py +++ b/py4DSTEM/process/diffraction/crystal_ACOM.py @@ -1840,7 +1840,7 @@ def cluster_grains( xr = np.clip(x + np.arange(-1, 2, dtype="int"), 0, sig.shape[0] - 1) yr = np.clip(y + np.arange(-1, 2, dtype="int"), 0, sig.shape[1] - 1) inds_cand = inds_all[xr[:, None], yr[None], :].ravel() - inds_cand = np.delete(inds_cand, mark.ravel()[inds_cand] is False) + inds_cand = np.delete(inds_cand, mark.ravel()[inds_cand] == False) # noqa: E712,E501 if inds_cand.size == 0: grow = False @@ -1893,7 +1893,7 @@ def cluster_grains( inds_grain = np.append(inds_grain, inds_cand[keep]) inds_cand = np.unique( - np.delete(inds_new, mark.ravel()[inds_new] is False) + np.delete(inds_new, mark.ravel()[inds_new] == False) #noqa: E712 ) if inds_cand.size == 0: diff --git a/py4DSTEM/process/polar/polar_peaks.py b/py4DSTEM/process/polar/polar_peaks.py index b923ba45f..9650361d7 100644 --- a/py4DSTEM/process/polar/polar_peaks.py +++ b/py4DSTEM/process/polar/polar_peaks.py @@ -167,7 +167,7 @@ def find_peaks_single_pattern( if remove_masked_peaks: peaks = np.delete( peaks, - mask_bool[peaks[:, 0], peaks[:, 1]] is False, + mask_bool[peaks[:, 0], peaks[:, 1]] == False, # noqa: E712 axis=0, ) diff --git a/py4DSTEM/process/strain/latticevectors.py b/py4DSTEM/process/strain/latticevectors.py index b29d8f69e..68a3ddd45 100644 --- a/py4DSTEM/process/strain/latticevectors.py +++ b/py4DSTEM/process/strain/latticevectors.py @@ -182,7 +182,7 @@ def fit_lattice_vectors(braggpeaks, x0=0, y0=0, minNumPeaks=5): # Remove unindexed peaks if "index_mask" in braggpeaks.dtype.names: - deletemask = braggpeaks.data["index_mask"] is False + deletemask = braggpeaks.data["index_mask"] == False # noqa:E712 braggpeaks.remove(deletemask) # Check to ensure enough peaks are present diff --git a/py4DSTEM/process/strain/strain.py b/py4DSTEM/process/strain/strain.py index 2175ba11d..91d6a8ecc 100644 --- a/py4DSTEM/process/strain/strain.py +++ b/py4DSTEM/process/strain/strain.py @@ -791,17 +791,17 @@ def show_strain( # Get images e_xx = np.ma.array( - self.get_slice("exx").data, mask=self.get_slice("mask").data is False + self.get_slice("exx").data, mask=self.get_slice("mask").data == False # noqa: E712,E501 ) e_yy = np.ma.array( - self.get_slice("eyy").data, 
mask=self.get_slice("mask").data is False + self.get_slice("eyy").data, mask=self.get_slice("mask").data == False # noqa: E712,E501 ) e_xy = np.ma.array( - self.get_slice("exy").data, mask=self.get_slice("mask").data is False + self.get_slice("exy").data, mask=self.get_slice("mask").data == False # noqa: E712,E501 ) theta = np.ma.array( self.get_slice("theta").data, - mask=self.get_slice("mask").data is False, + mask=self.get_slice("mask").data == False, # noqa: E712 ) ## Plot From 8720de8a243a50d36cfa7c3d03114aab91c84786 Mon Sep 17 00:00:00 2001 From: alex-rakowski Date: Wed, 8 Nov 2023 09:12:08 -0800 Subject: [PATCH 6/7] black --- py4DSTEM/process/diffraction/crystal_ACOM.py | 6 ++++-- py4DSTEM/process/polar/polar_peaks.py | 2 +- py4DSTEM/process/strain/latticevectors.py | 2 +- py4DSTEM/process/strain/strain.py | 11 +++++++---- 4 files changed, 13 insertions(+), 8 deletions(-) diff --git a/py4DSTEM/process/diffraction/crystal_ACOM.py b/py4DSTEM/process/diffraction/crystal_ACOM.py index f84bffe4e..09ba51ffc 100644 --- a/py4DSTEM/process/diffraction/crystal_ACOM.py +++ b/py4DSTEM/process/diffraction/crystal_ACOM.py @@ -1840,7 +1840,9 @@ def cluster_grains( xr = np.clip(x + np.arange(-1, 2, dtype="int"), 0, sig.shape[0] - 1) yr = np.clip(y + np.arange(-1, 2, dtype="int"), 0, sig.shape[1] - 1) inds_cand = inds_all[xr[:, None], yr[None], :].ravel() - inds_cand = np.delete(inds_cand, mark.ravel()[inds_cand] == False) # noqa: E712,E501 + inds_cand = np.delete( + inds_cand, mark.ravel()[inds_cand] == False # noqa: E712 + ) if inds_cand.size == 0: grow = False @@ -1893,7 +1895,7 @@ def cluster_grains( inds_grain = np.append(inds_grain, inds_cand[keep]) inds_cand = np.unique( - np.delete(inds_new, mark.ravel()[inds_new] == False) #noqa: E712 + np.delete(inds_new, mark.ravel()[inds_new] == False) # noqa: E712 ) if inds_cand.size == 0: diff --git a/py4DSTEM/process/polar/polar_peaks.py b/py4DSTEM/process/polar/polar_peaks.py index 9650361d7..4064fccaf 100644 --- a/py4DSTEM/process/polar/polar_peaks.py +++ b/py4DSTEM/process/polar/polar_peaks.py @@ -167,7 +167,7 @@ def find_peaks_single_pattern( if remove_masked_peaks: peaks = np.delete( peaks, - mask_bool[peaks[:, 0], peaks[:, 1]] == False, # noqa: E712 + mask_bool[peaks[:, 0], peaks[:, 1]] == False, # noqa: E712 axis=0, ) diff --git a/py4DSTEM/process/strain/latticevectors.py b/py4DSTEM/process/strain/latticevectors.py index 68a3ddd45..dcff91709 100644 --- a/py4DSTEM/process/strain/latticevectors.py +++ b/py4DSTEM/process/strain/latticevectors.py @@ -182,7 +182,7 @@ def fit_lattice_vectors(braggpeaks, x0=0, y0=0, minNumPeaks=5): # Remove unindexed peaks if "index_mask" in braggpeaks.dtype.names: - deletemask = braggpeaks.data["index_mask"] == False # noqa:E712 + deletemask = braggpeaks.data["index_mask"] == False # noqa:E712 braggpeaks.remove(deletemask) # Check to ensure enough peaks are present diff --git a/py4DSTEM/process/strain/strain.py b/py4DSTEM/process/strain/strain.py index 91d6a8ecc..099ecdefd 100644 --- a/py4DSTEM/process/strain/strain.py +++ b/py4DSTEM/process/strain/strain.py @@ -791,17 +791,20 @@ def show_strain( # Get images e_xx = np.ma.array( - self.get_slice("exx").data, mask=self.get_slice("mask").data == False # noqa: E712,E501 + self.get_slice("exx").data, + mask=self.get_slice("mask").data == False, # noqa: E712,E501 ) e_yy = np.ma.array( - self.get_slice("eyy").data, mask=self.get_slice("mask").data == False # noqa: E712,E501 + self.get_slice("eyy").data, + mask=self.get_slice("mask").data == False, # noqa: E712,E501 ) e_xy 
= np.ma.array( - self.get_slice("exy").data, mask=self.get_slice("mask").data == False # noqa: E712,E501 + self.get_slice("exy").data, + mask=self.get_slice("mask").data == False, # noqa: E712,E501 ) theta = np.ma.array( self.get_slice("theta").data, - mask=self.get_slice("mask").data == False, # noqa: E712 + mask=self.get_slice("mask").data == False, # noqa: E712 ) ## Plot From d1a7eb8ba2a8bda6e128107fdabde936c9976c54 Mon Sep 17 00:00:00 2001 From: alex-rakowski Date: Mon, 13 Nov 2023 10:26:12 -0800 Subject: [PATCH 7/7] changing to assignment --- py4DSTEM/process/fit/fit.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/py4DSTEM/process/fit/fit.py b/py4DSTEM/process/fit/fit.py index df90feaa8..9973ff79f 100644 --- a/py4DSTEM/process/fit/fit.py +++ b/py4DSTEM/process/fit/fit.py @@ -107,7 +107,7 @@ def fit_2D( fit_mean_square_error > np.mean(fit_mean_square_error) * robust_thresh**2 ) - mask[_mask] is False + mask[_mask] = False # perform fitting popt, pcov = curve_fit(
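A note on why patches 5/7–7/7 walk back part of 1/7: the `is True`/`is False` rewrite suggested by the E712 check referenced in the `# noqa` comments above is only safe when the compared object is an actual Python bool. Several of the rewritten comparisons operate on NumPy arrays, where `== False` is an element-wise operation that produces a boolean mask, while `arr is False` tests object identity and always evaluates to the scalar `False` — which then behaves as a plain scalar (no mask, or index 0) instead of a mask. That is why those sites are restored to `== False` with `# noqa: E712`, and why `fit.py`'s `mask[_mask] is False`, which merely evaluated and discarded an expression, becomes the assignment `mask[_mask] = False` in the final patch. A minimal, self-contained sketch of the difference (not part of the patch; the variable values here are illustrative only):

    import numpy as np

    mask = np.array([True, False, True, False])

    # Element-wise comparison: a boolean array, usable as a mask or index --
    # the behaviour the original `== False` code relies on.
    print(mask == False)                  # noqa: E712 -> [False  True False  True]
    print(np.flatnonzero(mask == False))  # noqa: E712 -> [1 3]

    # Identity test against the singleton: an ndarray is never the object
    # `False`, so this always evaluates to the plain scalar False.
    print(mask is False)                  # -> False

    # Lint-clean equivalent where a later cleanup is wanted: boolean negation.
    print(~mask)                          # -> [False  True False  True]

    # The fit.py fix: `mask[_mask] is False` evaluated an expression and threw
    # it away; assignment actually clears the selected entries in place.
    _mask = np.array([True, False, False, False])
    mask[_mask] = False
    print(mask)                           # -> [False False  True False]

Where readability allows, `~mask` (or `np.logical_not(mask)`) expresses the same intent as `mask == False` without needing the `noqa` suppression.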