PEP 8 - Comparisons to singletons compliance
alex-rakowski committed Nov 8, 2023
1 parent 7d1fb84 commit b919329
Showing 27 changed files with 81 additions and 82 deletions.
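
For reference, the PEP 8 rule the commit title refers to: comparisons to singletons such as None should be written with is / is not rather than ==, and truth values should normally not be compared to True or False at all (pycodestyle/flake8 flag these patterns as E711 and E712). A minimal illustrative sketch of the style, not code from this repository:

config = None
enabled = True

# Singleton comparison: use identity, not equality
if config is None:  # recommended
    config = {}
# if config == None:  # discouraged (E711)

# Truth values: rely on truthiness rather than comparing to True/False
if enabled:  # recommended
    print("enabled")
# if enabled == True:  # discouraged (E712)
# if enabled is True:  # accepted by E712, but PEP 8 describes it as "worse"
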
8 changes: 4 additions & 4 deletions py4DSTEM/braggvectors/braggvector_methods.py
@@ -99,12 +99,12 @@ def histogram(
# then scale by the sampling factor
else:
# get pixel calibration
if self.calstate["pixel"] == True:
if self.calstate["pixel"] is True:
qpix = self.calibration.get_Q_pixel_size()
qx /= qpix
qy /= qpix
# origin calibration
if self.calstate["center"] == True:
if self.calstate["center"] is True:
origin = self.calibration.get_origin_mean()
qx += origin[0]
qy += origin[1]
@@ -153,12 +153,12 @@ def histogram(
).reshape(Q_Nx, Q_Ny)

# determine the resampled grid center and pixel size
if mode == "cal" and self.calstate["center"] == True:
if mode == "cal" and self.calstate["center"] is True:
x0 = sampling * origin[0]
y0 = sampling * origin[1]
else:
x0, y0 = 0, 0
if mode == "cal" and self.calstate["pixel"] == True:
if mode == "cal" and self.calstate["pixel"] is True:
pixelsize = qpix / sampling
else:
pixelsize = 1 / sampling
8 changes: 4 additions & 4 deletions py4DSTEM/braggvectors/diskdetection.py
@@ -231,10 +231,10 @@ def find_Bragg_disks(
mode = "dc_ml"

elif mode == "datacube":
if distributed is None and CUDA == False:
if distributed is None and CUDA is False:
mode = "dc_CPU"
elif distributed is None and CUDA == True:
if CUDA_batched == False:
elif distributed is None and CUDA is True:
if CUDA_batched is False:
mode = "dc_GPU"
else:
mode = "dc_GPU_batched"
@@ -271,7 +271,7 @@ def find_Bragg_disks(
kws["data_file"] = data_file
kws["cluster_path"] = cluster_path
# ML arguments
if ML == True:
if ML is True:
kws["CUDA"] = CUDA
kws["model_path"] = ml_model_path
kws["num_attempts"] = ml_num_attempts
2 changes: 1 addition & 1 deletion py4DSTEM/braggvectors/diskdetection_aiml.py
@@ -528,7 +528,7 @@ def find_Bragg_disks_aiml_serial(
)
)

if global_threshold == True:
if global_threshold is True:
from py4DSTEM.braggvectors import universal_threshold

peaks = universal_threshold(
4 changes: 2 additions & 2 deletions py4DSTEM/braggvectors/diskdetection_aiml_cuda.py
@@ -233,7 +233,7 @@ def find_Bragg_disks_aiml_CUDA(
datacube.R_N, int(t2 / 3600), int(t2 / 60), int(t2 % 60)
)
)
if global_threshold == True:
if global_threshold is True:
from py4DSTEM.braggvectors import universal_threshold

peaks = universal_threshold(
@@ -496,7 +496,7 @@ def get_maxima_2D_cp(
if minSpacing > 0:
deletemask = np.zeros(len(maxima), dtype=bool)
for i in range(len(maxima)):
if deletemask[i] == False:
if deletemask[i] is False:
tooClose = (
(maxima["x"] - maxima["x"][i]) ** 2
+ (maxima["y"] - maxima["y"][i]) ** 2
2 changes: 1 addition & 1 deletion py4DSTEM/braggvectors/diskdetection_cuda.py
@@ -482,7 +482,7 @@ def get_maxima_2D(
if minSpacing > 0:
deletemask = np.zeros(len(maxima), dtype=bool)
for i in range(len(maxima)):
if deletemask[i] == False:
if deletemask[i] is False:
tooClose = (
(maxima["x"] - maxima["x"][i]) ** 2
+ (maxima["y"] - maxima["y"][i]) ** 2
4 changes: 2 additions & 2 deletions py4DSTEM/braggvectors/diskdetection_parallel_new.py
@@ -261,9 +261,9 @@ def beta_parallel_disk_detection(
if close_dask_client:
dask_client.close()
return peaks
elif close_dask_client == False and return_dask_client == True:
elif close_dask_client is False and return_dask_client is True:
return peaks, dask_client
elif close_dask_client and return_dask_client == False:
elif close_dask_client and return_dask_client is False:
return peaks
else:
print(
4 changes: 2 additions & 2 deletions py4DSTEM/braggvectors/threshold.py
@@ -52,7 +52,7 @@ def threshold_Braggpeaks(
r2 = minPeakSpacing**2
deletemask = np.zeros(pointlist.length, dtype=bool)
for i in range(pointlist.length):
if deletemask[i] == False:
if deletemask[i] is False:
tooClose = (
(pointlist.data["qx"] - pointlist.data["qx"][i]) ** 2
+ (pointlist.data["qy"] - pointlist.data["qy"][i]) ** 2
@@ -160,7 +160,7 @@ def universal_threshold(
r2 = minPeakSpacing**2
deletemask = np.zeros(pointlist.length, dtype=bool)
for i in range(pointlist.length):
if deletemask[i] == False:
if deletemask[i] is False:
tooClose = (
(pointlist.data["qx"] - pointlist.data["qx"][i]) ** 2
+ (pointlist.data["qy"] - pointlist.data["qy"][i]) ** 2
2 changes: 1 addition & 1 deletion py4DSTEM/datacube/virtualdiffraction.py
@@ -114,7 +114,7 @@ def get_virtual_diffraction(
# Calculate

# ...with no center shifting
if shift_center == False:
if shift_center is False:
# ...for the whole pattern
if mask is None:
if method == "mean":
12 changes: 6 additions & 6 deletions py4DSTEM/datacube/virtualimage.py
@@ -197,13 +197,13 @@ def get_virtual_image(
# Get mask
mask = self.make_detector(self.Qshape, mode, g)
# if return_mask is True, skip computation
if return_mask == True and shift_center == False:
if return_mask is True and shift_center is False:
return mask

# Calculate virtual image

# no center shifting
if shift_center == False:
if shift_center is False:
# single CPU
if not dask:
# allocate space
@@ -220,7 +220,7 @@ def get_virtual_image(
virtual_image[rx, ry] = np.sum(self.data[rx, ry] * mask)

# dask
if dask == True:
if dask is True:
# set up a generalized universal function for dask distribution
def _apply_mask_dask(self, mask):
virtual_image = np.sum(
@@ -444,7 +444,7 @@ def position_detector(
# shift center
if shift_center is None:
shift_center = False
elif shift_center == True:
elif shift_center is True:
assert isinstance(
data, tuple
), "If shift_center is set to True, `data` should be a 2-tuple (rx,ry). \
@@ -552,7 +552,7 @@ def get_calibrated_detector_geometry(
# Convert units into detector pixels

# Shift center
if centered == True:
if centered is True:
if mode == "point":
g = (g[0] + x0_mean, g[1] + y0_mean)
if mode in ("circle", "circular", "annulus", "annular"):
@@ -561,7 +561,7 @@ def get_calibrated_detector_geometry(
g = (g[0] + x0_mean, g[1] + x0_mean, g[2] + y0_mean, g[3] + y0_mean)

# Scale by the detector pixel size
if calibrated == True:
if calibrated is True:
if mode == "point":
g = (g[0] / unit_conversion, g[1] / unit_conversion)
if mode in ("circle", "circular"):
14 changes: 7 additions & 7 deletions py4DSTEM/io/filereaders/read_K2.py
@@ -336,9 +336,9 @@ def _find_offsets(self):
for i in range(8):
sync = False
frame = 0
while sync == False:
while sync is False:
sync = self._bin_files[i][frame]["block"] == block_id
if sync == False:
if sync is False:
frame += 1
self._shutter_offsets[i] += frame
print("Offsets are currently ", self._shutter_offsets)
@@ -358,20 +358,20 @@ def _find_offsets(self):
sync = False
next_frame = stripe[j]["frame"]

if sync == False:
if sync is False:
# the first frame is incomplete, so we need to seek the next one
print(
f"First frame ({first_frame}) incomplete, seeking frame {next_frame}..."
)
for i in range(8):
sync = False
frame = 0
while sync == False:
while sync is False:
sync = (
self._bin_files[i][self._shutter_offsets[i] + frame]["frame"]
== next_frame
)
if sync == False:
if sync is False:
frame += 1
self._shutter_offsets[i] += frame
print("Offsets are now ", self._shutter_offsets)
@@ -387,7 +387,7 @@ def _find_offsets(self):
]
if np.any(stripe[:]["frame"] != first_frame):
sync = False
if sync == True:
if sync is True:
print("New frame is complete!")
else:
print("Next frame also incomplete!!!! Data may be corrupt?")
@@ -397,7 +397,7 @@ def _find_offsets(self):
for i in range(8):
shutter = False
frame = 0
while shutter == False:
while shutter is False:
offset = self._shutter_offsets[i] + (frame * 32)
stripe = self._bin_files[i][offset : offset + 32]
shutter = stripe[0]["shutter"]
2 changes: 1 addition & 1 deletion py4DSTEM/preprocess/electroncount.py
@@ -402,7 +402,7 @@ def counted_pointlistarray_to_datacube(counted_pointlistarray, shape, subpixel=F
(4D array of bools): a 4D array of bools, with true indicating an electron strike.
"""
assert len(shape) == 4
assert subpixel == False, "subpixel mode not presently supported."
assert subpixel is False, "subpixel mode not presently supported."
R_Nx, R_Ny, Q_Nx, Q_Ny = shape
counted_datacube = np.zeros((R_Nx, R_Nx, Q_Nx, Q_Ny), dtype=bool)

10 changes: 5 additions & 5 deletions py4DSTEM/preprocess/radialbkgrd.py
@@ -80,7 +80,7 @@ def get_1D_polar_background(
# Crop polar data to maximum distance which contains information from original image
if (polarData.mask.sum(axis=(0)) == polarData.shape[0]).any():
ii = polarData.data.shape[1] - 1
while polarData.mask[:, ii].all() == True:
while polarData.mask[:, ii].all() is True:
ii = ii - 1
maximalDistance = ii
polarData = polarData[:, 0:maximalDistance]
@@ -105,16 +105,16 @@

background1D = np.maximum(background1D, min_background_value)

if smoothing == True:
if smoothing_log == True:
if smoothing is True:
if smoothing_log is True:
background1D = np.log(background1D)

background1D = savgol_filter(
background1D, smoothingWindowSize, smoothingPolyOrder
)
if smoothing_log == True:
if smoothing_log is True:
background1D = np.exp(background1D)
if return_polararr == True:
if return_polararr is True:
return (background1D, r_bins, polarData)
else:
return (background1D, r_bins)
2 changes: 1 addition & 1 deletion py4DSTEM/preprocess/utils.py
@@ -293,7 +293,7 @@ def filter_2D_maxima(
if minSpacing > 0:
deletemask = np.zeros(len(maxima), dtype=bool)
for i in range(len(maxima)):
if deletemask[i] == False:
if deletemask[i] is False:
tooClose = (
(maxima["x"] - maxima["x"][i]) ** 2
+ (maxima["y"] - maxima["y"][i]) ** 2
2 changes: 1 addition & 1 deletion py4DSTEM/process/calibration/ellipse.py
@@ -63,7 +63,7 @@ def fit_ellipse_1D(ar, center=None, fitradii=None, mask=None):
rr = np.sqrt((xx - x0) ** 2 + (yy - y0) ** 2)
_mask = (rr > ri) * (rr <= ro)
if mask is not None:
_mask *= mask == False
_mask *= mask is False
xs, ys = np.nonzero(_mask)
vals = ar[_mask]

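An aside on the array-valued masks touched in hunks like the one above: for plain Python booleans the == and is spellings behave the same, but on NumPy arrays == is an elementwise comparison while is tests object identity against the built-in singleton, so the two are not interchangeable. A minimal sketch of the difference, illustrative only and not repository code:

import numpy as np

mask = np.array([True, False, True])

print(mask == False)  # elementwise comparison -> array([False,  True, False])
print(~mask)          # idiomatic elementwise negation -> same result
print(mask is False)  # identity test against the Python singleton -> False

# Individual elements are np.bool_ objects, not the built-in bool singletons:
print(mask[1] == False)  # True
print(mask[1] is False)  # False: np.bool_(False) is a distinct object
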
4 changes: 2 additions & 2 deletions py4DSTEM/process/calibration/origin.py
@@ -154,15 +154,15 @@ def fit_origin(
robust=robust,
robust_steps=robust_steps,
robust_thresh=robust_thresh,
data_mask=mask == True,
data_mask=mask is True,
)
popt_y, pcov_y, qy0_fit, _ = fit_2D(
f,
qy0_meas,
robust=robust,
robust_steps=robust_steps,
robust_thresh=robust_thresh,
data_mask=mask == True,
data_mask=mask is True,
)

# Compute residuals
2 changes: 1 addition & 1 deletion py4DSTEM/process/calibration/qpixelsize.py
@@ -60,6 +60,6 @@ def get_dq_from_indexed_peaks(qs, hkl, a):
# Get pixel size
dq = 1 / (c * a)
qs_fit = d_inv[mask] / a
hkl_fit = [hkl[i] for i in range(len(hkl)) if mask[i] == True]
hkl_fit = [hkl[i] for i in range(len(hkl)) if mask[i] is True]

return dq, qs_fit, hkl_fit
4 changes: 2 additions & 2 deletions py4DSTEM/process/classification/braggvectorclassification.py
@@ -594,8 +594,8 @@ def merge_iterative(self, threshBPs=0.1, threshScanPosition=0.1):
W_merge = W_merge[:, 1:]
H_merge = H_merge[1:, :]

W_ = np.hstack((W_[:, merged == False], W_merge))
H_ = np.vstack((H_[merged == False, :], H_merge))
W_ = np.hstack((W_[:, merged is False], W_merge))
H_ = np.vstack((H_[merged is False, :], H_merge))
Nc_ = W_.shape[1]

if len(merge_candidates) == 0: