
Commit

Merge pull request #558 from alex-rakowski/singletons
'Cause PEP-8 never goes out of style
bsavitzky authored Nov 13, 2023
2 parents 15ff583 + d1a7eb8 commit cf6a6a4
Showing 30 changed files with 109 additions and 104 deletions.
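Note: the diff below systematically replaces equality comparisons against singletons (`== True`, `== False`, `== None`, `!= None`) with identity checks (`is True`, `is False`, `is None`, `is not None`), and drops `f` prefixes from strings that contain no placeholders. A minimal standalone sketch of the pattern follows; the variable names are hypothetical, not taken from py4DSTEM. For `None` the identity test is the idiomatic form, while for booleans PEP 8 actually prefers a plain truthiness test over either `== True` or `is True`.

    # Hypothetical example illustrating the style rules applied in this commit.
    value = None
    if value is None:            # identity is the idiomatic test against None
        value = 0

    flag = True
    if flag:                     # PEP 8 prefers plain truthiness to `== True` or `is True`
        value += 1

    message = "no placeholders"  # plain string; an f-prefix here would be redundant
    print(value, message)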
8 changes: 4 additions & 4 deletions py4DSTEM/braggvectors/braggvector_methods.py
@@ -99,12 +99,12 @@ def histogram(
# then scale by the sampling factor
else:
# get pixel calibration
if self.calstate["pixel"] == True:
if self.calstate["pixel"] is True:
qpix = self.calibration.get_Q_pixel_size()
qx /= qpix
qy /= qpix
# origin calibration
if self.calstate["center"] == True:
if self.calstate["center"] is True:
origin = self.calibration.get_origin_mean()
qx += origin[0]
qy += origin[1]
@@ -153,12 +153,12 @@ def histogram(
).reshape(Q_Nx, Q_Ny)

# determine the resampled grid center and pixel size
if mode == "cal" and self.calstate["center"] == True:
if mode == "cal" and self.calstate["center"] is True:
x0 = sampling * origin[0]
y0 = sampling * origin[1]
else:
x0, y0 = 0, 0
if mode == "cal" and self.calstate["pixel"] == True:
if mode == "cal" and self.calstate["pixel"] is True:
pixelsize = qpix / sampling
else:
pixelsize = 1 / sampling
2 changes: 1 addition & 1 deletion py4DSTEM/braggvectors/braggvectors.py
@@ -385,7 +385,7 @@ def __getitem__(self, pos):
def __repr__(self):
space = " " * len(self.__class__.__name__) + " "
string = f"{self.__class__.__name__}( "
string += f"Retrieves raw bragg vectors. Get vectors for scan position x,y with [x,y]. )"
string += "Retrieves raw bragg vectors. Get vectors for scan position x,y with [x,y]. )"
return string


14 changes: 7 additions & 7 deletions py4DSTEM/braggvectors/diskdetection.py
@@ -231,10 +231,10 @@ def find_Bragg_disks(
mode = "dc_ml"

elif mode == "datacube":
if distributed is None and CUDA == False:
if distributed is None and CUDA is False:
mode = "dc_CPU"
elif distributed is None and CUDA == True:
if CUDA_batched == False:
elif distributed is None and CUDA is True:
if CUDA_batched is False:
mode = "dc_GPU"
else:
mode = "dc_GPU_batched"
@@ -271,7 +271,7 @@ def find_Bragg_disks(
kws["data_file"] = data_file
kws["cluster_path"] = cluster_path
# ML arguments
if ML == True:
if ML is True:
kws["CUDA"] = CUDA
kws["model_path"] = ml_model_path
kws["num_attempts"] = ml_num_attempts
@@ -759,7 +759,7 @@ def _parse_distributed(distributed):
data_file = distributed["data_file"]

if not isinstance(data_file, str):
er = f"Expected string for distributed key 'data_file', "
er = "Expected string for distributed key 'data_file', "
er += f"received {type(data_file)}"
raise TypeError(er)
if len(data_file.strip()) == 0:
@@ -773,7 +773,7 @@ def _parse_distributed(distributed):
cluster_path = distributed["cluster_path"]

if not isinstance(cluster_path, str):
er = f"distributed key 'cluster_path' must be of type str, "
er = "distributed key 'cluster_path' must be of type str, "
er += f"received {type(cluster_path)}"
raise TypeError(er)

@@ -784,7 +784,7 @@ def _parse_distributed(distributed):
er = f"distributed key 'cluster_path' does not exist: {cluster_path}"
raise FileNotFoundError(er)
elif not os.path.isdir(cluster_path):
er = f"distributed key 'cluster_path' is not a directory: "
er = "distributed key 'cluster_path' is not a directory: "
er += f"{cluster_path}"
raise NotADirectoryError(er)
else:
2 changes: 1 addition & 1 deletion py4DSTEM/braggvectors/diskdetection_aiml.py
@@ -528,7 +528,7 @@ def find_Bragg_disks_aiml_serial(
)
)

if global_threshold == True:
if global_threshold is True:
from py4DSTEM.braggvectors import universal_threshold

peaks = universal_threshold(
4 changes: 2 additions & 2 deletions py4DSTEM/braggvectors/diskdetection_aiml_cuda.py
@@ -233,7 +233,7 @@ def find_Bragg_disks_aiml_CUDA(
datacube.R_N, int(t2 / 3600), int(t2 / 60), int(t2 % 60)
)
)
if global_threshold == True:
if global_threshold is True:
from py4DSTEM.braggvectors import universal_threshold

peaks = universal_threshold(
@@ -496,7 +496,7 @@ def get_maxima_2D_cp(
if minSpacing > 0:
deletemask = np.zeros(len(maxima), dtype=bool)
for i in range(len(maxima)):
if deletemask[i] == False:
if deletemask[i] is False:
tooClose = (
(maxima["x"] - maxima["x"][i]) ** 2
+ (maxima["y"] - maxima["y"][i]) ** 2
2 changes: 1 addition & 1 deletion py4DSTEM/braggvectors/diskdetection_cuda.py
@@ -482,7 +482,7 @@ def get_maxima_2D(
if minSpacing > 0:
deletemask = np.zeros(len(maxima), dtype=bool)
for i in range(len(maxima)):
if deletemask[i] == False:
if deletemask[i] is False:
tooClose = (
(maxima["x"] - maxima["x"][i]) ** 2
+ (maxima["y"] - maxima["y"][i]) ** 2
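Note: in the two hunks above, `deletemask` is allocated with `np.zeros(len(maxima), dtype=bool)`, so indexing it yields a `numpy.bool_` rather than the built-in `False` singleton, and `deletemask[i] is False` always evaluates to false. A plain truthiness test sidesteps the issue. A standalone sketch, not py4DSTEM code:

    import numpy as np

    deletemask = np.zeros(3, dtype=bool)
    print(deletemask[0] == False)   # True:  value comparison
    print(deletemask[0] is False)   # False: numpy.bool_ is not the built-in False object
    if not deletemask[0]:           # truthiness works for bool and numpy.bool_ alike
        print("element is falsy")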
10 changes: 5 additions & 5 deletions py4DSTEM/braggvectors/diskdetection_parallel_new.py
@@ -137,8 +137,8 @@ def beta_parallel_disk_detection(
# ... dask stuff.
# TODO add assert statements and other checks. Think about reordering opperations

if dask_client == None:
if dask_client_params != None:
if dask_client is None:
if dask_client_params is not None:
dask.config.set(
{
"distributed.worker.memory.spill": False,
@@ -201,7 +201,7 @@ def beta_parallel_disk_detection(
dask_data = da.from_array(
dataset.data, chunks=(1, 1, dataset.Q_Nx, dataset.Q_Ny)
)
elif dataset.stack_pointer != None:
elif dataset.stack_pointer is not None:
dask_data = da.from_array(
dataset.stack_pointer, chunks=(1, 1, dataset.Q_Nx, dataset.Q_Ny)
)
@@ -261,9 +261,9 @@ def beta_parallel_disk_detection(
if close_dask_client:
dask_client.close()
return peaks
elif close_dask_client == False and return_dask_client == True:
elif close_dask_client is False and return_dask_client is True:
return peaks, dask_client
elif close_dask_client and return_dask_client == False:
elif close_dask_client and return_dask_client is False:
return peaks
else:
print(
4 changes: 2 additions & 2 deletions py4DSTEM/braggvectors/threshold.py
@@ -52,7 +52,7 @@ def threshold_Braggpeaks(
r2 = minPeakSpacing**2
deletemask = np.zeros(pointlist.length, dtype=bool)
for i in range(pointlist.length):
if deletemask[i] == False:
if deletemask[i] is False:
tooClose = (
(pointlist.data["qx"] - pointlist.data["qx"][i]) ** 2
+ (pointlist.data["qy"] - pointlist.data["qy"][i]) ** 2
@@ -160,7 +160,7 @@ def universal_threshold(
r2 = minPeakSpacing**2
deletemask = np.zeros(pointlist.length, dtype=bool)
for i in range(pointlist.length):
if deletemask[i] == False:
if deletemask[i] is False:
tooClose = (
(pointlist.data["qx"] - pointlist.data["qx"][i]) ** 2
+ (pointlist.data["qy"] - pointlist.data["qy"][i]) ** 2
4 changes: 2 additions & 2 deletions py4DSTEM/data/calibration.py
@@ -234,7 +234,7 @@ def attach(self, data):
"""
from py4DSTEM.data import Data

assert isinstance(data, Data), f"data must be a Data instance"
assert isinstance(data, Data), "data must be a Data instance"
self.root.attach(data)

# Register for auto-calibration
@@ -316,7 +316,7 @@ def set_Q_pixel_units(self, x):
"pixels",
"A^-1",
"mrad",
), f"Q pixel units must be 'A^-1', 'mrad' or 'pixels'."
), "Q pixel units must be 'A^-1', 'mrad' or 'pixels'."
self._params["Q_pixel_units"] = x

def get_Q_pixel_units(self):
2 changes: 1 addition & 1 deletion py4DSTEM/datacube/virtualdiffraction.py
@@ -114,7 +114,7 @@ def get_virtual_diffraction(
# Calculate

# ...with no center shifting
if shift_center == False:
if shift_center is False:
# ...for the whole pattern
if mask is None:
if method == "mean":
12 changes: 6 additions & 6 deletions py4DSTEM/datacube/virtualimage.py
@@ -197,13 +197,13 @@ def get_virtual_image(
# Get mask
mask = self.make_detector(self.Qshape, mode, g)
# if return_mask is True, skip computation
if return_mask == True and shift_center == False:
if return_mask is True and shift_center is False:
return mask

# Calculate virtual image

# no center shifting
if shift_center == False:
if shift_center is False:
# single CPU
if not dask:
# allocate space
@@ -220,7 +220,7 @@ def get_virtual_image(
virtual_image[rx, ry] = np.sum(self.data[rx, ry] * mask)

# dask
if dask == True:
if dask is True:
# set up a generalized universal function for dask distribution
def _apply_mask_dask(self, mask):
virtual_image = np.sum(
@@ -444,7 +444,7 @@ def position_detector(
# shift center
if shift_center is None:
shift_center = False
elif shift_center == True:
elif shift_center is True:
assert isinstance(
data, tuple
), "If shift_center is set to True, `data` should be a 2-tuple (rx,ry). \
@@ -552,7 +552,7 @@ def get_calibrated_detector_geometry(
# Convert units into detector pixels

# Shift center
if centered == True:
if centered is True:
if mode == "point":
g = (g[0] + x0_mean, g[1] + y0_mean)
if mode in ("circle", "circular", "annulus", "annular"):
@@ -561,7 +561,7 @@ def get_calibrated_detector_geometry(
g = (g[0] + x0_mean, g[1] + x0_mean, g[2] + y0_mean, g[3] + y0_mean)

# Scale by the detector pixel size
if calibrated == True:
if calibrated is True:
if mode == "point":
g = (g[0] / unit_conversion, g[1] / unit_conversion)
if mode in ("circle", "circular"):
14 changes: 7 additions & 7 deletions py4DSTEM/io/filereaders/read_K2.py
@@ -336,9 +336,9 @@ def _find_offsets(self):
for i in range(8):
sync = False
frame = 0
while sync == False:
while sync is False:
sync = self._bin_files[i][frame]["block"] == block_id
if sync == False:
if sync is False:
frame += 1
self._shutter_offsets[i] += frame
print("Offsets are currently ", self._shutter_offsets)
@@ -358,20 +358,20 @@ def _find_offsets(self):
sync = False
next_frame = stripe[j]["frame"]

if sync == False:
if sync is False:
# the first frame is incomplete, so we need to seek the next one
print(
f"First frame ({first_frame}) incomplete, seeking frame {next_frame}..."
)
for i in range(8):
sync = False
frame = 0
while sync == False:
while sync is False:
sync = (
self._bin_files[i][self._shutter_offsets[i] + frame]["frame"]
== next_frame
)
if sync == False:
if sync is False:
frame += 1
self._shutter_offsets[i] += frame
print("Offsets are now ", self._shutter_offsets)
@@ -387,7 +387,7 @@ def _find_offsets(self):
]
if np.any(stripe[:]["frame"] != first_frame):
sync = False
if sync == True:
if sync is True:
print("New frame is complete!")
else:
print("Next frame also incomplete!!!! Data may be corrupt?")
@@ -397,7 +397,7 @@ def _find_offsets(self):
for i in range(8):
shutter = False
frame = 0
while shutter == False:
while shutter is False:
offset = self._shutter_offsets[i] + (frame * 32)
stripe = self._bin_files[i][offset : offset + 32]
shutter = stripe[0]["shutter"]
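Note: in `_find_offsets` the `sync` and `shutter` flags are reassigned from comparisons or fields of what appear to be NumPy record arrays, which yield `numpy.bool_` values rather than the built-in singletons, so `sync is False` and `shutter is False` may not behave like the original `== False` tests. If that reading of the data layout is right, a truthiness condition is the safer loop form. A standalone sketch with a made-up record array:

    import numpy as np

    # Hypothetical stand-in for one of the K2 .bin record arrays.
    records = np.array([(0,), (0,), (7,)], dtype=[("block", "<u4")])
    target_block = 7

    frame = 0
    sync = False
    while not sync:                                      # robust for bool and numpy.bool_
        sync = records[frame]["block"] == target_block   # numpy.bool_, not a built-in bool
        if not sync:
            frame += 1
    print(frame)  # 2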
2 changes: 1 addition & 1 deletion py4DSTEM/preprocess/electroncount.py
@@ -403,7 +403,7 @@ def counted_pointlistarray_to_datacube(counted_pointlistarray, shape, subpixel=F
(4D array of bools): a 4D array of bools, with true indicating an electron strike.
"""
assert len(shape) == 4
assert subpixel == False, "subpixel mode not presently supported."
assert subpixel is False, "subpixel mode not presently supported."
R_Nx, R_Ny, Q_Nx, Q_Ny = shape
counted_datacube = np.zeros((R_Nx, R_Nx, Q_Nx, Q_Ny), dtype=bool)

10 changes: 5 additions & 5 deletions py4DSTEM/preprocess/radialbkgrd.py
@@ -80,7 +80,7 @@ def get_1D_polar_background(
# Crop polar data to maximum distance which contains information from original image
if (polarData.mask.sum(axis=(0)) == polarData.shape[0]).any():
ii = polarData.data.shape[1] - 1
while polarData.mask[:, ii].all() == True:
while polarData.mask[:, ii].all() is True:
ii = ii - 1
maximalDistance = ii
polarData = polarData[:, 0:maximalDistance]
@@ -105,16 +105,16 @@

background1D = np.maximum(background1D, min_background_value)

if smoothing == True:
if smoothing_log == True:
if smoothing is True:
if smoothing_log is True:
background1D = np.log(background1D)

background1D = savgol_filter(
background1D, smoothingWindowSize, smoothingPolyOrder
)
if smoothing_log == True:
if smoothing_log is True:
background1D = np.exp(background1D)
if return_polararr == True:
if return_polararr is True:
return (background1D, r_bins, polarData)
else:
return (background1D, r_bins)
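Note: `polarData.mask[:, ii].all()` returns a `numpy.bool_`, so the rewritten condition `... .all() is True` can never be true and the cropping loop would not advance; `while polarData.mask[:, ii].all():` would preserve the original behaviour. A standalone sketch:

    import numpy as np

    column_fully_masked = np.array([True, True, True])
    print(column_fully_masked.all() == True)   # True
    print(column_fully_masked.all() is True)   # False: ndarray.all() returns numpy.bool_
    print(bool(column_fully_masked.all()))     # True: truthiness gives the intended answer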
2 changes: 1 addition & 1 deletion py4DSTEM/preprocess/utils.py
@@ -293,7 +293,7 @@ def filter_2D_maxima(
if minSpacing > 0:
deletemask = np.zeros(len(maxima), dtype=bool)
for i in range(len(maxima)):
if deletemask[i] == False:
if deletemask[i] is False:
tooClose = (
(maxima["x"] - maxima["x"][i]) ** 2
+ (maxima["y"] - maxima["y"][i]) ** 2
2 changes: 1 addition & 1 deletion py4DSTEM/process/calibration/ellipse.py
@@ -63,7 +63,7 @@ def fit_ellipse_1D(ar, center=None, fitradii=None, mask=None):
rr = np.sqrt((xx - x0) ** 2 + (yy - y0) ** 2)
_mask = (rr > ri) * (rr <= ro)
if mask is not None:
_mask *= mask == False
_mask *= mask is False
xs, ys = np.nonzero(_mask)
vals = ar[_mask]

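Note: in `fit_ellipse_1D` the `mask` argument, when given, is a boolean array, so the original `_mask *= mask == False` was an elementwise negation; `mask is False` instead evaluates to a single Python `False` and zeroes `_mask` entirely. The elementwise intent is usually written `~mask` or `np.logical_not(mask)`. The same consideration applies to the `data_mask=mask is True` change in origin.py and the `mask[i] is True` filter in qpixelsize.py below. A standalone sketch:

    import numpy as np

    mask = np.array([[True, False], [False, True]])
    print(mask == False)   # elementwise negation, what the original `== False` computed
    print(mask is False)   # a single False: identity collapses the whole array test
    print(~mask)           # idiomatic elementwise form, same result as `== False`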
4 changes: 2 additions & 2 deletions py4DSTEM/process/calibration/origin.py
@@ -154,15 +154,15 @@ def fit_origin(
robust=robust,
robust_steps=robust_steps,
robust_thresh=robust_thresh,
data_mask=mask == True,
data_mask=mask is True,
)
popt_y, pcov_y, qy0_fit, _ = fit_2D(
f,
qy0_meas,
robust=robust,
robust_steps=robust_steps,
robust_thresh=robust_thresh,
data_mask=mask == True,
data_mask=mask is True,
)

# Compute residuals
2 changes: 1 addition & 1 deletion py4DSTEM/process/calibration/qpixelsize.py
@@ -60,6 +60,6 @@ def get_dq_from_indexed_peaks(qs, hkl, a):
# Get pixel size
dq = 1 / (c * a)
qs_fit = d_inv[mask] / a
hkl_fit = [hkl[i] for i in range(len(hkl)) if mask[i] == True]
hkl_fit = [hkl[i] for i in range(len(hkl)) if mask[i] is True]

return dq, qs_fit, hkl_fit
[Diffs for the remaining changed files are not shown.]
