Merge pull request #24 from BrainLesion/default_from_original
Updated default parameters from original repo
neuronflow authored Dec 15, 2023
2 parents bd33d48 + cba027d commit f1b354d
Showing 3 changed files with 103 additions and 41 deletions.
80 changes: 80 additions & 0 deletions data/default_rigid.yaml
@@ -0,0 +1,80 @@
# Whether to use bias correction before registration (helpful to generate more accurate transforms at the cost of time). Default: false
bias: false

# The registration metric. Options: ["mattes_mutual_information", "ants_neighborhood_correlation", "correlation", "demons", "joint_histogram_mutual_information", "mean_squares"]
# this can be a list as well, which will then become a multi-metric registration using composite transforms [https://simpleitk.org/doxygen/latest/html/classitk_1_1simple_1_1CompositeTransform.html]
metric: mean_squares

## metric-specific parameters
metric_parameters: {
histogram_bins: 50, # mattes_mutual_information, joint_histogram_mutual_information
radius: 5, # ants_neighborhood_correlation
intensityDifferenceThreshold: 0.001, # demons
varianceForJointPDFSmoothing: 1.5, # joint_histogram_mutual_information
}

# Optimizer. Options: ["gradient_descent", "regular_step_gradient_descent", "amoeba", "conjugate", "exhaustive", "gradient_descent_line_search", "lbfgsb", "lbfgsb2", "one_plus_one_evolutionary", "powell"]
optimizer: regular_step_gradient_descent

## optimizer-specific parameters
optimizer_parameters: {
min_step: 1e-4, # regular_step_gradient_descent
max_step: 1.0, # gradient_descent, regular_step_gradient_descent
iterations: 200, # regular_step_gradient_descent, gradient_descent_line_search, gradient_descent, conjugate, lbfgsb, lbfgsb2
relaxation: 0.1, # regular_step_gradient_descent
learningrate: 1.0, # gradient_descent, gradient_descent_line_search
tolerance: 1e-6, # gradient_descent, gradient_descent_line_search
convergence_minimum: 1e-6, # gradient_descent, gradient_descent_line_search
convergence_window_size: 10, # gradient_descent, gradient_descent_line_search
line_search_lower_limit: 0.0, # gradient_descent_line_search
line_search_upper_limit: 5.0, # gradient_descent_line_search
line_search_epsilon: 0.01, # gradient_descent_line_search
step_length: 0.1, # conjugate, exhaustive, powell
simplex_delta: 0.1, # amoeba
maximum_number_of_corrections: 5, # lbfgsb, lbfgsb2
maximum_number_of_function_evaluations: 2000, # lbfgsb, lbfgsb2
solution_accuracy: 1e-5, # lbfgsb2
hessian_approximate_accuracy: 1e-5, # lbfgsb2
delta_convergence_distance: 1e-5, # lbfgsb2
delta_convergence_tolerance: 1e-5, # lbfgsb2
line_search_maximum_evaluations: 50, # lbfgsb2
line_search_minimum_step: 1e-20, # lbfgsb2
line_search_accuracy: 1e-4, # lbfgsb2
epsilon: 1e-8, # one_plus_one_evolutionary
initial_radius: 1.0, # one_plus_one_evolutionary
growth_factor: -1.0, # one_plus_one_evolutionary
shrink_factor: -1.0, # one_plus_one_evolutionary
maximum_line_iterations: 100, # powell
step_tolerance: 1e-6, # powell
value_tolerance: 1e-6, # powell
}

# The registration transform. Options: ["translation", "versor", "versor_rigid", "euler", "similarity", "scale", "scale_versor", "scale_skew_versor", "affine", "bspline", "displacement"]
transform: versorrigid

# Composite transform
composite_transform: false

# Previous transforms saved to disk: only used if composite_transform is true
previous_transforms: []

# Transform initialization. Options: ["moments", "geometry", "selfmoments", "selfgeometry"]
initialization: geometry

# Interpolator. Options: ["linear", "bspline", "nearestneighbor", "gaussian", "labelgaussian"]
interpolator: linear

# Sampling strategy. Options: ["regular", "random", "none"]
sampling_strategy: "random"

# Sampling percentage. Can be a list of percentages with the same length as the number of levels.
sampling_percentage: 0.01

# Shrink factor at each level for pyramid registration.
shrink_factors: [4, 2, 1]

# Smoothing sigma at each level for pyramid registration.
smoothing_sigmas: [2, 1, 0]

# The number of attempts to try to find a good registration (useful when using random sampling)
attempts: 5
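
For orientation, here is a minimal sketch of how these defaults map onto a plain SimpleITK ImageRegistrationMethod (mean-squares metric, regular-step gradient descent, a geometry-initialized rigid transform, random sampling, and a 3-level pyramid). This is illustrative SimpleITK usage, not the exact wiring inside ereg/registration.py, and the image paths are placeholders.

import SimpleITK as sitk

fixed = sitk.ReadImage("fixed.nii.gz", sitk.sitkFloat32)    # placeholder path
moving = sitk.ReadImage("moving.nii.gz", sitk.sitkFloat32)  # placeholder path

R = sitk.ImageRegistrationMethod()

# metric: mean_squares
R.SetMetricAsMeanSquares()

# optimizer: regular_step_gradient_descent with the defaults above
R.SetOptimizerAsRegularStepGradientDescent(
    learningRate=1.0,        # max_step / learningrate
    minStep=1e-4,            # min_step
    numberOfIterations=200,  # iterations
    relaxationFactor=0.1,    # relaxation
)
R.SetOptimizerScalesFromPhysicalShift()

# initialization: geometry, with a versor-based rigid transform
initial = sitk.CenteredTransformInitializer(
    fixed,
    moving,
    sitk.VersorRigid3DTransform(),
    sitk.CenteredTransformInitializerFilter.GEOMETRY,
)
R.SetInitialTransform(initial, inPlace=False)

# sampling_strategy: random, sampling_percentage: 0.01
R.SetMetricSamplingStrategy(R.RANDOM)
R.SetMetricSamplingPercentage(0.01)

# pyramid levels: shrink_factors / smoothing_sigmas
R.SetShrinkFactorsPerLevel(shrinkFactors=[4, 2, 1])
R.SetSmoothingSigmasPerLevel(smoothingSigmas=[2, 1, 0])
R.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()

# interpolator: linear
R.SetInterpolator(sitk.sitkLinear)

transform = R.Execute(fixed, moving)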
18 changes: 2 additions & 16 deletions data/sample_config.yaml
@@ -20,9 +20,10 @@ optimizer: regular_step_gradient_descent
optimizer_parameters: {
min_step: 1e-6, # regular_step_gradient_descent
max_step: 1.0, # gradient_descent, regular_step_gradient_descent
maximumStepSizeInPhysicalUnits: 1.0, # regular_step_gradient_descent, gradient_descent_line_search, gradient_descent,
iterations: 1000, # regular_step_gradient_descent, gradient_descent_line_search, gradient_descent, conjugate, lbfgsb, lbfgsb2
relaxation: 0.5, # regular_step_gradient_descent
learningrate: 1.0, # gradient_descent, gradient_descent_line_search
tolerance: 1e-4, # gradient_descent, gradient_descent_line_search
convergence_minimum: 1e-6, # gradient_descent, gradient_descent_line_search
convergence_window_size: 10, # gradient_descent, gradient_descent_line_search
line_search_lower_limit: 0.0, # gradient_descent_line_search
@@ -69,18 +70,6 @@ sampling_strategy: none
# Sampling percentage. Can be a list of percentages with the same length as the number of levels.
sampling_percentage: 0.5

# Registration relaxation factor.
relaxation: 0.5

# Registration gradient tolerance
tolerance: 1e-4

# Maximum number of iterations at each level.
max_step: 5.0

# Minimum step size at each level.
min_step: 1e-5

# Shrink factor at each level for pyramid registration.
shrink_factors: [2, 1]

@@ -89,6 +78,3 @@ smoothing_sigmas: [1, 0]

# The number of attempts to try to find a good registration (useful when using random sampling)
attempts: 5

# Total number of iterations.
iterations: 1000
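
The net effect of this change is that optimizer-related values (min_step, max_step, iterations, relaxation, tolerance) now live only under optimizer_parameters instead of being duplicated at the top level. A minimal sketch of reading them after the change, assuming PyYAML, with fallback defaults chosen purely for illustration:

import yaml

# Load the sample config; optimizer-specific values now live only under
# optimizer_parameters (the duplicated top-level keys were removed in this diff).
with open("data/sample_config.yaml") as f:
    config = yaml.safe_load(f)

opt = config.get("optimizer_parameters", {})
iterations = opt.get("iterations", 1000)  # previously also a top-level "iterations"
relaxation = opt.get("relaxation", 0.5)   # previously also a top-level "relaxation"
print(config["optimizer"], iterations, relaxation)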
46 changes: 21 additions & 25 deletions ereg/registration.py
@@ -123,11 +123,6 @@ def update_parameters(self, config_file: Union[str, dict], **kwargs):
self.parameters["initialization"] = self.parameters.get(
"initialization", "geometry"
).lower()
self.parameters["max_step"] = self.parameters.get("max_step", 5.0)
self.parameters["min_step"] = self.parameters.get("min_step", 0.01)
self.parameters["iterations"] = self.parameters.get("iterations", 200)
self.parameters["relaxation"] = self.parameters.get("relaxation", 0.5)
self.parameters["tolerance"] = self.parameters.get("tolerance", 1e-4)
self.parameters["bias_correct"] = self.parameters.get(
"bias_correct", self.parameters.get("bias", False)
)
@@ -738,14 +733,15 @@ def _register_image_and_get_transform(
)

# R.SetOptimizerScalesFromJacobian()
# R.SetOptimizerScalesFromPhysicalShift()
R.SetOptimizerScalesFromPhysicalShift()

R.SetShrinkFactorsPerLevel(self.parameters["shrink_factors"])
R.SetSmoothingSigmasPerLevel(self.parameters["smoothing_sigmas"])
R.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
transform_function = self._get_transform_wrapper(
self.parameters["transform"], dimension
)
## todo: evaluate the viability of having default options for "rigid", "affine", and "deformable" registrations
# rigid_registration = False
# # euler transforms need special processing
# if isinstance(transform_function, sitk.Euler3DTransform) or isinstance(
@@ -792,6 +788,7 @@ def _register_image_and_get_transform(
)
R.SetInitialTransform(final_transform, inPlace=False)
## set the interpolator - all options: https://simpleitk.org/doxygen/latest/html/namespaceitk_1_1simple.html#a7cb1ef8bd02c669c02ea2f9f5aa374e5
# this should be linear to optimize results and computational efficiency
R.SetInterpolator(sitk.sitkLinear)

# R.AddCommand(sitk.sitkIterationEvent, lambda: R)
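
The commented-out AddCommand line above is a hook for per-iteration logging. A possible way to flesh it out, assuming the surrounding R (the sitk.ImageRegistrationMethod) and the existing SimpleITK import from registration.py; this is a debugging sketch, not part of the commit:

# Illustrative only: print optimizer progress at every iteration event.
def _log_iteration(method):
    print(
        f"iter {method.GetOptimizerIteration():3d}  "
        f"metric {method.GetMetricValue():.6f}",
        flush=True,
    )

R.AddCommand(sitk.sitkIterationEvent, lambda: _log_iteration(R))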
@@ -811,25 +808,24 @@ def _register_image_and_get_transform(
raise RuntimeError("Registration failed.")

registration_transform_sitk = output_transform
if "rigid_registration" in self.parameters:
if self.parameters["rigid_registration"]:
try:
# Euler Transform used:
registration_transform_sitk = eval(
"sitk.Euler%dDTransform(registration_transform_sitk)"
% (dimension)
)
except:
# VersorRigid used: Transform from VersorRigid to Euler
registration_transform_sitk = eval(
"sitk.VersorRigid%dDTransform(registration_transform_sitk)"
% (dimension)
)
tmp = eval("sitk.Euler%dDTransform()" % (dimension))
tmp.SetMatrix(registration_transform_sitk.GetMatrix())
tmp.SetTranslation(registration_transform_sitk.GetTranslation())
tmp.SetCenter(registration_transform_sitk.GetCenter())
registration_transform_sitk = tmp
# if user is requesting a rigid registration, convert the transform to a rigid transform
if self.parameters["transform"] in ["euler", "versorrigid"]:
try:
# Euler Transform used:
registration_transform_sitk = eval(
"sitk.Euler%dDTransform(registration_transform_sitk)" % (dimension)
)
except:
# VersorRigid used: Transform from VersorRigid to Euler
registration_transform_sitk = eval(
"sitk.VersorRigid%dDTransform(registration_transform_sitk)"
% (dimension)
)
tmp = eval("sitk.Euler%dDTransform()" % (dimension))
tmp.SetMatrix(registration_transform_sitk.GetMatrix())
tmp.SetTranslation(registration_transform_sitk.GetTranslation())
tmp.SetCenter(registration_transform_sitk.GetCenter())
registration_transform_sitk = tmp
## additional information
# print("Metric: ", R.MetricEvaluate(target_image, moving_image), flush=True)
# print(
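
For readers of the new block above: the try/except downcasts the optimizer's output to an Euler transform, either directly or by copying the matrix, translation, and center from a VersorRigid transform. An equivalent sketch without string-based eval, illustrative only and not what this commit ships:

import SimpleITK as sitk

def _as_euler(transform: sitk.Transform, dimension: int) -> sitk.Transform:
    # Resolve the concrete classes by name instead of eval'ing strings.
    euler_cls = getattr(sitk, f"Euler{dimension}DTransform")
    try:
        # Case 1: the registration already produced an Euler transform.
        return euler_cls(transform)
    except Exception:
        # Case 2: a VersorRigid transform was used; copy its rigid parameters
        # (rotation matrix, translation, center) into a fresh Euler transform.
        versor_cls = getattr(sitk, f"VersorRigid{dimension}DTransform")
        versor = versor_cls(transform)
        euler = euler_cls()
        euler.SetMatrix(versor.GetMatrix())
        euler.SetTranslation(versor.GetTranslation())
        euler.SetCenter(versor.GetCenter())
        return euler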
