diff --git a/rosplane_tuning/src/autotune/__pycache__/optimizer.cpython-312.pyc b/rosplane_tuning/src/autotune/__pycache__/optimizer.cpython-312.pyc
new file mode 100644
index 0000000..599f59e
Binary files /dev/null and b/rosplane_tuning/src/autotune/__pycache__/optimizer.cpython-312.pyc differ
diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py
index 687ad5e..5f67fc1 100644
--- a/rosplane_tuning/src/autotune/optimizer.py
+++ b/rosplane_tuning/src/autotune/optimizer.py
@@ -40,6 +40,7 @@ def __init__(self, initial_gains, optimization_params):
         self.init_alpha = optimization_params['alpha']
         self.tau = optimization_params['tau']
         self.state = OptimizerState.FINDING_GRADIENT  # Find the initial gradient from the starting gains
+        self.initial_gains = initial_gains
 
         # Line Search Variables
         self.k = 0
@@ -71,7 +72,10 @@ def get_optimiztion_status(self):
             str: The status of the optimization algorithm.
         """
         # Return with the final values, the best value found
-        return 'TODO: Status not implemented.'
+        if self.state == OptimizerState.TERMINATED:
+            return 'Optimization Terminated'
+        else:
+            return 'Optimization in Progress'
 
     def get_next_parameter_set(self, error):
         """
@@ -103,9 +107,9 @@ def get_next_parameter_set(self, error):
 
         elif self.state == OptimizerState.SELECT_DIRECTION:
             if self.k == 0:
-                new_gains = self.line_search(self.initial_gains, error, self.save_gains, self.new_phis)
+                new_gains = self.line_search(self.initial_gains, error, self.save_gains, self.save_phis)
             else:
-                new_gains = self.line_search()
+                new_gains = self.line_search(self.current_gains, error, self.save_gains, self.save_phis)
 
 
         return new_gains
@@ -187,9 +191,9 @@ def line_search(self, gains, phi, gainsh, phih):
             self.p = -phi_prime/np.linalg.norm(phi_prime) + bk*prior_p
 
         # Prepare for bracketing
-        self.OptimizerState = OptimizerState.BRACKETING
+        self.state = OptimizerState.BRACKETING
         # Request phi2 and phi2+h
-        gains2 = gains + self.a_init*p
+        gains2 = gains + self.init_alpha*self.p
         gains2h = [gain + 0.01 for gain in gains2]
 
         new_gains = np.array([gains2, gains2h])
@@ -230,13 +234,13 @@ def bracketing(self, gains, gainsh, phi1, phi1_prime, gains2, gains2h, phi2, phi
             # Request new point
             alphap = self.interpolate(alpha1, alpha2)
             gainsp = self.init_gains + alphap*self.p
-            new_gains = [self.save_gains[4], self.save_gains[5]]
+            new_gains = np.array([self.save_gains[4], self.save_gains[5]])
             return new_gains
 
         # Optimized
         if abs(phi2_prime) <= -self.u2*phi1_prime:
             self.state == OptimizerState.SELECT_DIRECTION
-            new_gains = self.init_gains + alpha2*self.p
+            new_gains = np.array([self.init_gains + alpha2*self.p])
             self.current_gains = new_gains
             return new_gains
 
@@ -249,7 +253,7 @@ def bracketing(self, gains, gainsh, phi1, phi1_prime, gains2, gains2h, phi2, phi
             # Request new point
             alphap = self.interpolate(alpha1, alpha2)
             gainsp = self.init_gains + alphap*self.p
-            new_gains = [self.save_gains[4], self.save_gains[5]]
+            new_gains = np.array([self.save_gains[4], self.save_gains[5]])
             return new_gains
 
         # Needs more Bracketing
@@ -263,7 +267,7 @@ def bracketing(self, gains, gainsh, phi1, phi1_prime, gains2, gains2h, phi2, phi
 
         gains1h = [gain + 0.01 for gain in gains1]
         gains2h = [gain + 0.01 for gain in gains2]
-        new_gains = [gains1, gains1h, gains2, gains2h]
+        new_gains = np.array([gains1, gains1h, gains2, gains2h])
         return new_gains
 
     def interpolate(self, alpha1, alpha2):
@@ -301,14 +305,14 @@ def pinpointing(self, gains1, phi1, phi1_prime, gains2, gainsp, phip, phip_prime
             phi2_prime = phip_prime
             self.save_gains = np.array([gains1, None, gains2, None, gainsp, [gain + 0.01 for gain in gainsp]])
             self.save_phis = np.array([phi1, phi1_prime, phi2, phi2_prime])
-            new_gains = [self.save_gains[4], self.save_gains[5]]
+            new_gains = np.array([self.save_gains[4], self.save_gains[5]])
             return new_gains
         else:
             # Optimized
             if abs(phip_prime) <= -self.u2*phi1_prime:
                 self.state == OptimizerState.SELECT_DIRECTION
                 alphastar = alphap
-                new_gains = self.init_gains + alphastar*self.p
+                new_gains = np.array([self.init_gains + alphastar*self.p])
                 return new_gains
             # More parameterization needed
             elif phip_prime*(alpha2 - alpha1) >= 0:
@@ -321,7 +325,7 @@ def pinpointing(self, gains1, phi1, phi1_prime, gains2, gainsp, phip, phip_prime
 
                 self.save_gains = np.array([gains1, None, gains2, None, gainsp, [gain + 0.01 for gain in gainsp]])
                 self.save_phis = np.array([phi1, phi1_prime, phi2, phi2_prime])
-                new_gains = [self.save_gains[4], self.save_gains[5]]
+                new_gains = np.array([self.save_gains[4], self.save_gains[5]])
                 return new_gains
 
         # Check for failure criteria - the nuclear option
diff --git a/rosplane_tuning/src/autotune/optimizer_tester.py b/rosplane_tuning/src/autotune/optimizer_tester.py
index 65ce30d..c386f70 100644
--- a/rosplane_tuning/src/autotune/optimizer_tester.py
+++ b/rosplane_tuning/src/autotune/optimizer_tester.py
@@ -7,8 +7,12 @@
 # Function to test the optimizer with
 def function(x):
     # Matyas function
+    print("f", x)
     return 0.26 * (x[0] ** 2 + x[1] ** 2) - 0.48 * x[0] * x[1]
 
+def gradient(x):
+    # Gradient of Matyas function
+    return np.array([0.52 * x[0] - 0.48 * x[1], 0.52 * x[1] - 0.48 * x[0]])
 
 # Initialize optimizer
 curr_points = np.array([[0, 5]])  # Initial point
@@ -22,22 +26,32 @@ def function(x):
 
 # Run optimization
 all_points = []
+k = 0
 while not optimizer.optimization_terminated():
+    print("Iteration ", k)
     # Print status
     print(optimizer.get_optimiztion_status())
+    print(optimizer.state)
 
     # Calculate error for current points
    error = []
+    # print(curr_points) # Testing
     for point in curr_points:
         error.append(function(point))
     error = np.array(error)
 
-    # Pass points to optimizer
+    print("CP", curr_points) # Testing
+    # print("G", gradient(curr_points[0])) # Testing
     curr_points = optimizer.get_next_parameter_set(error)
+    print("CP", curr_points) # Testing
 
     # Store points
     for point in curr_points:
         all_points.append(point)
+
+    # End iteration step
+    k += 1
+    print()
 
 all_points = np.array(all_points)
 print('Optimization terminated with status: {}'.format(optimizer.get_optimiztion_status()))
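The recurring change in optimizer.py wraps each returned gain set in np.array(...) so that get_next_parameter_set always hands back a NumPy array rather than a plain Python list. A minimal standalone sketch of the difference this makes for a caller that treats each row as one candidate gain set (the variable names below are illustrative and not taken from the repository):

import numpy as np

gains1 = np.array([0.5, 1.0])
gains1h = gains1 + 0.01

as_list = [gains1, gains1h]              # what the old code returned
as_array = np.array([gains1, gains1h])   # what the patched code returns

# Both can be iterated row by row, as optimizer_tester.py does, but only
# the ndarray has a shape and supports vectorized arithmetic on the set.
print(as_array.shape)    # (2, 2): one row per candidate gain set
print(as_array + 0.01)   # elementwise offset; the list would need a loop
for point in as_array:
    print(point)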
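The gradient helper added to optimizer_tester.py is the analytic gradient of the Matyas function defined just above it. As a quick sanity check (this snippet is not part of the patch; it only reuses the two formulas from the diff), it can be compared against a central finite difference at the tester's initial point [0, 5]:

import numpy as np

def function(x):
    # Matyas function, copied from optimizer_tester.py
    return 0.26 * (x[0] ** 2 + x[1] ** 2) - 0.48 * x[0] * x[1]

def gradient(x):
    # Analytic gradient, as added in the patch
    return np.array([0.52 * x[0] - 0.48 * x[1], 0.52 * x[1] - 0.48 * x[0]])

x = np.array([0.0, 5.0])   # the tester's initial point
h = 1e-6
fd = np.array([
    (function(x + np.array([h, 0.0])) - function(x - np.array([h, 0.0]))) / (2 * h),
    (function(x + np.array([0.0, h])) - function(x - np.array([0.0, h]))) / (2 * h),
])
print(gradient(x))   # [-2.4  2.6]
print(fd)            # agrees with the analytic gradient to ~1e-6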