From ada5021fac28b3b41f4e8ae8576e471ef1f9cad8 Mon Sep 17 00:00:00 2001 From: Brandon Sutherland Date: Tue, 12 Mar 2024 17:13:06 -0600 Subject: [PATCH 01/52] began working on autotune node --- rosplane_tuning/CMakeLists.txt | 6 + rosplane_tuning/launch/autotune.launch.py | 46 +++++ .../launch/roll_autotune.launch.py | 19 +++ rosplane_tuning/src/autotune.py | 160 ++++++++++++++++++ 4 files changed, 231 insertions(+) create mode 100644 rosplane_tuning/launch/autotune.launch.py create mode 100644 rosplane_tuning/launch/roll_autotune.launch.py create mode 100644 rosplane_tuning/src/autotune.py diff --git a/rosplane_tuning/CMakeLists.txt b/rosplane_tuning/CMakeLists.txt index 631836a..7388d5d 100644 --- a/rosplane_tuning/CMakeLists.txt +++ b/rosplane_tuning/CMakeLists.txt @@ -63,6 +63,12 @@ install(PROGRAMS DESTINATION lib/${PROJECT_NAME} ) +# Autotune +install(PROGRAMS + src/autotune.py + DESTINATION lib/${PROJECT_NAME} +) + #### END OF EXECUTABLES ### ament_package() diff --git a/rosplane_tuning/launch/autotune.launch.py b/rosplane_tuning/launch/autotune.launch.py new file mode 100644 index 0000000..7fe84e5 --- /dev/null +++ b/rosplane_tuning/launch/autotune.launch.py @@ -0,0 +1,46 @@ +import os + +from ament_index_python import get_package_share_directory +from launch import LaunchDescription +from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription +from launch.launch_description_sources import PythonLaunchDescriptionSource +from launch.substitutions import LaunchConfiguration +from launch_ros.actions import Node + +def generate_launch_description(): + + # Launch arguments + error_collection_period = LaunchConfiguration('error_collection_period') + error_collection_period_launch_arg = DeclareLaunchArgument( + 'error_collection_period', + description='Amount of time to collect data for calculating error' + ) + current_tuning_autopilot = LaunchConfiguration('current_tuning_autopilot') + current_tuning_autopilot_launch_arg = DeclareLaunchArgument( + 'current_tuning_autopilot', + description='Autopilot to tune' + ) + + # Launch nodes + autotune_node = Node( + package='rosplane_tuning', + executable='autotune.py', + output='screen', + parameters=[ + {'error_collection_period': error_collection_period}, + {'current_tuning_autopilot': current_tuning_autopilot} + ] + ) + rosplane_tuning_launch_include = IncludeLaunchDescription( + PythonLaunchDescriptionSource( + os.path.join(get_package_share_directory('rosplane_tuning'), + 'launch/rosplane_tuning.launch.py') + ) + ) + + return LaunchDescription([ + error_collection_period_launch_arg, + current_tuning_autopilot_launch_arg, + autotune_node, + rosplane_tuning_launch_include + ]) diff --git a/rosplane_tuning/launch/roll_autotune.launch.py b/rosplane_tuning/launch/roll_autotune.launch.py new file mode 100644 index 0000000..25e1081 --- /dev/null +++ b/rosplane_tuning/launch/roll_autotune.launch.py @@ -0,0 +1,19 @@ +import os + +from ament_index_python import get_package_share_directory +from launch import LaunchDescription +from launch.actions import IncludeLaunchDescription +from launch.launch_description_sources import PythonLaunchDescriptionSource + +def generate_launch_description(): + return LaunchDescription([ + IncludeLaunchDescription( + PythonLaunchDescriptionSource( + os.path.join(get_package_share_directory('rosplane_tuning'), + 'launch/autotune.launch.py') + ), + launch_arguments={ + 'error_collection_period': '5.0', + 'current_tuning_autopilot': 'roll' + }.items() + )]) diff --git a/rosplane_tuning/src/autotune.py 
b/rosplane_tuning/src/autotune.py new file mode 100644 index 0000000..4b163ec --- /dev/null +++ b/rosplane_tuning/src/autotune.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python3 + +from rosplane_msgs.msg import ControllerCommands +from rosplane_msgs.msg import ControllerInternalsDebug +from rosplane_msgs.msg import State +from std_srvs.srv import Trigger + +import rclpy +from rclpy.node import Node + +import numpy as np + + +class Autotune(Node): + """ + This class is an auto-tuning node for the ROSplane autopilot. It calculates the error between + the state estimate of the system response and the commanded setpoint for an autopilot. A + gradient-based optimization is then run to find the optimal gains to minimize the error. + + Va is airspeed, phi is roll angle, chi is course angle, theta is pitch angle, and h is altitude. + """ + + def __init__(self): + super().__init__('autotune') + + # Class variables + self.collecting_data = False + + self.state = [] + self.commands = [] + self.internals_debug = [] + + # ROS parameters + # The amount of time to collect data for calculating the error + self.declare_parameter('error_collection_period', rclpy.Parameter.Type.DOUBLE) + # The autopilot that is currently being tuned + self.declare_parameter('current_tuning_autopilot', rclpy.Parameter.Type.STRING) + + # Subscriptions + self.state_subscription = self.create_subscription( + State, + 'estimated_state', + self.state_callback, + 10) + self.commands_subscription = self.create_subscription( + ControllerCommands, + 'controller_commands', + self.commands_callback, + 10) + self.internals_debug_subscription = self.create_subscription( + ControllerInternalsDebug, + 'tuning_debug', + self.internals_debug_callback, + 10) + + # Timers + self.error_collection_timer = self.create_timer( + self.get_parameter('error_collection_period').value, + self.error_collection_callback) + self.error_collection_timer.cancel() + + # Services + self.run_tuning_iteration_service = self.create_service( + Trigger, + 'run_tuning_iteration', + self.run_tuning_iteration_callback) + + + ## ROS Callbacks ## + def state_callback(self, msg): + """ + This function is called when a new state estimate is received. It stores the state estimate + if the node is currently collecting data. + """ + + if self.collecting_data: + time = msg.header.stamp.sec + msg.header.stamp.nanosec * 1e-9 + self.state.append([time, msg.va, msg.phi, msg.chi, msg.theta, -msg.position[2]]) # h = -msg.position[2] + + def commands_callback(self, msg): + """ + This function is called when new commands are received. It stores the commands if the node + is currently collecting data. + """ + + if self.collecting_data: + time = msg.header.stamp.sec + msg.header.stamp.nanosec * 1e-9 + self.commands.append([time, msg.va_c, msg.chi_c, msg.h_c]) + + def internals_debug_callback(self, msg): + """ + This function is called when new debug information is received. It stores the debug + information if the node is currently collecting data. + """ + + if self.collecting_data: + time = msg.header.stamp.sec + msg.header.stamp.nanosec * 1e-9 + self.internals_debug.append([time, msg.phi_c, msg.theta_c]) + + def error_collection_callback(self): + """ + This function is called when the error collection timer expires. It stops the error + collection process and begins the optimization process. + """ + + self.stop_error_collection() + + def run_tuning_iteration_callback(self, request, response): + """ + This function is called when the run_tuning_iteration service is called. 
It steps the autopilot + command with the signal_generator node and begins the error collection process. + """ + + self.get_logger().info('Starting tuning iteration...') + + self.start_error_collection() + + response.success = True + response.message = 'Tuning iteration started!' + + return response + + + ## Helper Functions ## + def start_error_collection(self): + """ + Start the error collection timer and begin storing state and command data for the time specified + by the error_collection_period parameter. + """ + + # Start data collection + self.collecting_data = True + + # Start the timer + self.error_collection_timer.timer_period_ns = \ + int(self.get_parameter('error_collection_period').value * 1e9) + self.error_collection_timer.reset() + + def stop_error_collection(self): + """ + Stop the error collection timer and stop storing state and command data. + """ + + self.collecting_data = False + self.error_collection_timer.cancel() + + +def main(args=None): + rclpy.init(args=args) + + autotune = Autotune() + rclpy.spin(autotune) + + autotune.destroy_node() + rclpy.shutdown() + + +if __name__ == '__main__': + main() + From 73c4d504e77d80edfb565f7a444577f1c38fcf49 Mon Sep 17 00:00:00 2001 From: Brandon Sutherland Date: Tue, 12 Mar 2024 17:17:04 -0600 Subject: [PATCH 02/52] added numpy to rosdep --- rosplane_tuning/package.xml | 1 + 1 file changed, 1 insertion(+) diff --git a/rosplane_tuning/package.xml b/rosplane_tuning/package.xml index 9323df2..1e51c63 100644 --- a/rosplane_tuning/package.xml +++ b/rosplane_tuning/package.xml @@ -17,6 +17,7 @@ sensor_msgs rosplane_msgs rosflight_msgs + python3-numpy ament_lint_auto ament_lint_common From f0e42e1c9c8318ba0dbb835aaf3d4b4b8b8cd5e3 Mon Sep 17 00:00:00 2001 From: Brandon Sutherland Date: Thu, 14 Mar 2024 17:25:33 -0600 Subject: [PATCH 03/52] got autotune calling signal_generator service --- rosplane_tuning/src/autotune.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/rosplane_tuning/src/autotune.py b/rosplane_tuning/src/autotune.py index 4b163ec..ad8ca27 100644 --- a/rosplane_tuning/src/autotune.py +++ b/rosplane_tuning/src/autotune.py @@ -8,8 +8,6 @@ import rclpy from rclpy.node import Node -import numpy as np - class Autotune(Node): """ @@ -65,6 +63,8 @@ def __init__(self): 'run_tuning_iteration', self.run_tuning_iteration_callback) + # Clients + self.toggle_step_signal_client = self.create_client(Trigger, 'toggle_step_signal') ## ROS Callbacks ## def state_callback(self, msg): @@ -136,6 +136,9 @@ def start_error_collection(self): int(self.get_parameter('error_collection_period').value * 1e9) self.error_collection_timer.reset() + # Step the command signal + self.call_toggle_step_signal() + def stop_error_collection(self): """ Stop the error collection timer and stop storing state and command data. @@ -143,6 +146,18 @@ def stop_error_collection(self): self.collecting_data = False self.error_collection_timer.cancel() + self.call_toggle_step_signal() + + def call_toggle_step_signal(self): + """ + Call the signal_generator's toggle step service to toggle the step input. 
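+        A note on the call pattern, as a sketch rather than a description of current behavior:
+        call_async returns an rclpy Future, so a done-callback could consume the Trigger
+        response if it were ever needed, e.g.
+
+            future = self.toggle_step_signal_client.call_async(Trigger.Request())
+            future.add_done_callback(
+                lambda fut: self.get_logger().info(fut.result().message))
+
+        As written below, the node fires the request and ignores the response.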
+ """ + while not self.toggle_step_signal_client.wait_for_service(timeout_sec=1.0): + self.get_logger().info(f'service {self.toggle_step_signal_client.srv_name} ' + + 'not available, waiting...') + + request = Trigger.Request() + self.toggle_step_signal_client.call_async(request) def main(args=None): From df2faa30112c6757b3a8b263e12ce7ea35d4da51 Mon Sep 17 00:00:00 2001 From: Brandon Sutherland Date: Thu, 14 Mar 2024 17:40:59 -0600 Subject: [PATCH 04/52] added optimizer class for autotune --- rosplane_tuning/CMakeLists.txt | 3 ++- .../src/{ => autotune}/autotune.py | 26 +++++++++++++------ rosplane_tuning/src/autotune/optimizer.py | 12 +++++++++ 3 files changed, 32 insertions(+), 9 deletions(-) rename rosplane_tuning/src/{ => autotune}/autotune.py (87%) create mode 100644 rosplane_tuning/src/autotune/optimizer.py diff --git a/rosplane_tuning/CMakeLists.txt b/rosplane_tuning/CMakeLists.txt index 7388d5d..4978102 100644 --- a/rosplane_tuning/CMakeLists.txt +++ b/rosplane_tuning/CMakeLists.txt @@ -65,7 +65,8 @@ install(PROGRAMS # Autotune install(PROGRAMS - src/autotune.py + src/autotune/autotune.py + src/autotune/optimizer.py DESTINATION lib/${PROJECT_NAME} ) diff --git a/rosplane_tuning/src/autotune.py b/rosplane_tuning/src/autotune/autotune.py similarity index 87% rename from rosplane_tuning/src/autotune.py rename to rosplane_tuning/src/autotune/autotune.py index ad8ca27..115ffc9 100644 --- a/rosplane_tuning/src/autotune.py +++ b/rosplane_tuning/src/autotune/autotune.py @@ -8,12 +8,17 @@ import rclpy from rclpy.node import Node +from optimizer import Optimizer + class Autotune(Node): """ - This class is an auto-tuning node for the ROSplane autopilot. It calculates the error between - the state estimate of the system response and the commanded setpoint for an autopilot. A - gradient-based optimization is then run to find the optimal gains to minimize the error. + This class is an auto-tuning node for the ROSplane autopilot. The node calculates the error + between the state estimate of the system and the commanded setpoint for a given autopilot. + A gradient-based optimization is then run to find the optimal gains to minimize the error. + + This class itself contians the ROS-specific code for the autotune node. The optimization + algorithms are contained in the Optimizer class. Va is airspeed, phi is roll angle, chi is course angle, theta is pitch angle, and h is altitude. """ @@ -28,6 +33,8 @@ def __init__(self): self.commands = [] self.internals_debug = [] + self.optimizer = Optimizer() + # ROS parameters # The amount of time to collect data for calculating the error self.declare_parameter('error_collection_period', rclpy.Parameter.Type.DOUBLE) @@ -66,6 +73,7 @@ def __init__(self): # Clients self.toggle_step_signal_client = self.create_client(Trigger, 'toggle_step_signal') + ## ROS Callbacks ## def state_callback(self, msg): """ @@ -75,7 +83,8 @@ def state_callback(self, msg): if self.collecting_data: time = msg.header.stamp.sec + msg.header.stamp.nanosec * 1e-9 - self.state.append([time, msg.va, msg.phi, msg.chi, msg.theta, -msg.position[2]]) # h = -msg.position[2] + self.state.append([time, msg.va, msg.phi, msg.chi, msg.theta, + -msg.position[2]]) # h = -msg.position[2] def commands_callback(self, msg): """ @@ -107,8 +116,8 @@ def error_collection_callback(self): def run_tuning_iteration_callback(self, request, response): """ - This function is called when the run_tuning_iteration service is called. 
It steps the autopilot - command with the signal_generator node and begins the error collection process. + This function is called when the run_tuning_iteration service is called. It steps the + autopilot command with the signal_generator node and begins the error collection process. """ self.get_logger().info('Starting tuning iteration...') @@ -124,8 +133,8 @@ def run_tuning_iteration_callback(self, request, response): ## Helper Functions ## def start_error_collection(self): """ - Start the error collection timer and begin storing state and command data for the time specified - by the error_collection_period parameter. + Start the error collection timer and begin storing state and command data for the time + specified by the error_collection_period parameter. """ # Start data collection @@ -152,6 +161,7 @@ def call_toggle_step_signal(self): """ Call the signal_generator's toggle step service to toggle the step input. """ + while not self.toggle_step_signal_client.wait_for_service(timeout_sec=1.0): self.get_logger().info(f'service {self.toggle_step_signal_client.srv_name} ' + 'not available, waiting...') diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py new file mode 100644 index 0000000..389085d --- /dev/null +++ b/rosplane_tuning/src/autotune/optimizer.py @@ -0,0 +1,12 @@ +#!/usr/bin/env python3 + +import numpy as np + + +class Optimizer: + """ + This class contains the optimization algorithms used by the autotune node. + """ + + def __init__(self): + pass From 594d2c7880ae6d8d75c5a28824a44c0adea70585 Mon Sep 17 00:00:00 2001 From: Brandon Sutherland Date: Fri, 15 Mar 2024 11:57:51 -0600 Subject: [PATCH 05/52] added optimization routine and stubbed out optimization class --- rosplane_tuning/src/autotune/autotune.py | 96 +++++++++++++---------- rosplane_tuning/src/autotune/optimizer.py | 43 +++++++++- 2 files changed, 92 insertions(+), 47 deletions(-) diff --git a/rosplane_tuning/src/autotune/autotune.py b/rosplane_tuning/src/autotune/autotune.py index 115ffc9..d82966e 100644 --- a/rosplane_tuning/src/autotune/autotune.py +++ b/rosplane_tuning/src/autotune/autotune.py @@ -28,16 +28,15 @@ def __init__(self): # Class variables self.collecting_data = False - self.state = [] self.commands = [] self.internals_debug = [] - - self.optimizer = Optimizer() + self.new_gains = [] # TODO: Get gains from ROS parameters + self.optimizer = Optimizer(self.new_gains) # ROS parameters # The amount of time to collect data for calculating the error - self.declare_parameter('error_collection_period', rclpy.Parameter.Type.DOUBLE) + self.declare_parameter('stabilize_period', rclpy.Parameter.Type.DOUBLE) # The autopilot that is currently being tuned self.declare_parameter('current_tuning_autopilot', rclpy.Parameter.Type.STRING) @@ -59,10 +58,10 @@ def __init__(self): 10) # Timers - self.error_collection_timer = self.create_timer( - self.get_parameter('error_collection_period').value, - self.error_collection_callback) - self.error_collection_timer.cancel() + self.stabilize_period_timer = self.create_timer( + self.get_parameter('stabilize_period').value, + self.stabilize_period_timer_callback) + self.stabilize_period_timer.cancel() # Services self.run_tuning_iteration_service = self.create_service( @@ -106,57 +105,53 @@ def internals_debug_callback(self, msg): time = msg.header.stamp.sec + msg.header.stamp.nanosec * 1e-9 self.internals_debug.append([time, msg.phi_c, msg.theta_c]) - def error_collection_callback(self): + def stabilize_period_timer_callback(self): """ 
- This function is called when the error collection timer expires. It stops the error - collection process and begins the optimization process. + This function is called when the stability timer callback occurs. It starts/stops data + collection and sets up ROSplane to perform a step manuever. """ - self.stop_error_collection() + if not self.collecting_data: + # Stabilization period is over, start collecting data + self.get_logger().info('Stepping command and collecting data for ' + + str(self.get_parameter('stabilize_period').value) + + ' seconds...') + self.collecting_data = True + self.call_toggle_step_signal() + else: + # Data collection is over, stop collecting data and calculate gains for next iteration + self.get_logger().info('Data collection complete.') + self.collecting_data = False + self.stabilize_period_timer.cancel() + self.call_toggle_step_signal() + self.new_gains = self.optimizer.get_next_parameter_set(self.calculate_error()) + def run_tuning_iteration_callback(self, request, response): """ - This function is called when the run_tuning_iteration service is called. It steps the - autopilot command with the signal_generator node and begins the error collection process. + This function is called when the run_tuning_iteration service is called. It starts the + next iteration of the optimization process. """ - self.get_logger().info('Starting tuning iteration...') + if not self.optimizer.optimization_terminated(): + self.get_logger().info('Setting gains: ' + str(self.new_gains)) + self.set_gains(self.new_gains) - self.start_error_collection() + self.stabilize_period_timer.timer_period_ns = \ + int(self.get_parameter('stabilize_period').value * 1e9) + self.stabilize_period_timer.reset() + + self.get_logger().info('Stabilizing autopilot for ' + + str(self.get_parameter('stabilize_period').value) + + ' seconds...') response.success = True - response.message = 'Tuning iteration started!' + response.message = self.optimizer.get_optimiztion_status() return response ## Helper Functions ## - def start_error_collection(self): - """ - Start the error collection timer and begin storing state and command data for the time - specified by the error_collection_period parameter. - """ - - # Start data collection - self.collecting_data = True - - # Start the timer - self.error_collection_timer.timer_period_ns = \ - int(self.get_parameter('error_collection_period').value * 1e9) - self.error_collection_timer.reset() - - # Step the command signal - self.call_toggle_step_signal() - - def stop_error_collection(self): - """ - Stop the error collection timer and stop storing state and command data. - """ - - self.collecting_data = False - self.error_collection_timer.cancel() - self.call_toggle_step_signal() - def call_toggle_step_signal(self): """ Call the signal_generator's toggle step service to toggle the step input. @@ -169,6 +164,21 @@ def call_toggle_step_signal(self): request = Trigger.Request() self.toggle_step_signal_client.call_async(request) + def set_gains(self, gains): + """ + Set the gains of the autopilot to the given values. + """ + # TODO: Implement this function + pass + + def calculate_error(self): + """ + Calculate the error between the state estimate and the commanded setpoint using the + collected data. 
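+
+        A minimal sketch of one possible metric, the integral of squared tracking error,
+        assuming the roll autopilot is being tuned (self.state rows are
+        [t, va, phi, chi, theta, h]; self.internals_debug rows are [t, phi_c, theta_c]):
+
+            import numpy as np
+            state = np.array(self.state)
+            debug = np.array(self.internals_debug)
+            phi_c = np.interp(state[:, 0], debug[:, 0], debug[:, 1])
+            error = float(np.trapz((state[:, 2] - phi_c) ** 2, x=state[:, 0]))
+
+        Other autopilots would compare the matching command channel instead.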
+ """ + # TODO: Implement this function + pass + def main(args=None): rclpy.init(args=args) diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py index 389085d..c9f2f0f 100644 --- a/rosplane_tuning/src/autotune/optimizer.py +++ b/rosplane_tuning/src/autotune/optimizer.py @@ -1,12 +1,47 @@ #!/usr/bin/env python3 -import numpy as np - class Optimizer: """ - This class contains the optimization algorithms used by the autotune node. + This class contains the optimization algorithms used by the autotune node. It is able to + optimize functions of any number of parameters. """ - def __init__(self): + def __init__(self, initial_gains): # TODO: Add any other optimization parameters needed (e.g. step size, termination criteria) + """ + Parameters: + initial_gains (list): The starting point for the gains to be optimized (x0). + """ + pass + + def optimization_terminated(self): + """ + This function checks if the optimization algorithm has terminated. + + Returns: + bool: True if optimization should terminate, False otherwise + """ + pass + + def get_optimiztion_status(self): + """ + This function returns the status of the optimization algorithm. + + Returns: + str: The status of the optimization algorithm + """ + pass + + def get_next_parameter_set(self, error): + """ + This function returns the next set of gains to be tested by the optimization algorithm. + + Parameters: + error (float): The error from the set of gains returned previously. If this is the first + iteration, the error of the initial gains will be given. + + Returns: + list: The next set of gains to be tested. Should be the same length as the initial gains. + """ pass + From f806a4b38e05743b89cccb408a5dc2b45803f3fe Mon Sep 17 00:00:00 2001 From: Joseph Date: Mon, 18 Mar 2024 22:34:52 -0600 Subject: [PATCH 06/52] Added first attempt at linesearch and optimization parameters --- rosplane_tuning/src/autotune/autotune.py | 30 +++++++- rosplane_tuning/src/autotune/optimizer.py | 91 ++++++++++++++++++++++- 2 files changed, 118 insertions(+), 3 deletions(-) diff --git a/rosplane_tuning/src/autotune/autotune.py b/rosplane_tuning/src/autotune/autotune.py index d82966e..f252b98 100644 --- a/rosplane_tuning/src/autotune/autotune.py +++ b/rosplane_tuning/src/autotune/autotune.py @@ -17,7 +17,7 @@ class Autotune(Node): between the state estimate of the system and the commanded setpoint for a given autopilot. A gradient-based optimization is then run to find the optimal gains to minimize the error. - This class itself contians the ROS-specific code for the autotune node. The optimization + This class itself contains the ROS-specific code for the autotune node. The optimization algorithms are contained in the Optimizer class. Va is airspeed, phi is roll angle, chi is course angle, theta is pitch angle, and h is altitude. @@ -32,7 +32,14 @@ def __init__(self): self.commands = [] self.internals_debug = [] self.new_gains = [] # TODO: Get gains from ROS parameters - self.optimizer = Optimizer(self.new_gains) + + u1 = 10**-4 # 1st Strong Wolfe Condition, must be between 0 and 1. + u2 = 0.5 # 2nd Strong Wolfe Condition, must be between u1 and 1. 
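+        # For reference, the strong Wolfe conditions these constants encode, where
+        # phi(alpha) = f(x + alpha * p) for a line search along direction p
+        # (standard definitions, not project-specific):
+        #   sufficient decrease: phi(alpha) <= phi(0) + u1 * alpha * phi'(0)
+        #   curvature:           |phi'(alpha)| <= u2 * |phi'(0)|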
+ sigma = 1.5 + alpha_init = 1 # Typically 1 + tau = 10**-3 # Convergence tolerance, typically 10^-3 + self.optimization_params = [u1,u2,sigma,alpha_init,tau] + self.optimizer = Optimizer(self.new_gains, self.optimization_params) # ROS parameters # The amount of time to collect data for calculating the error @@ -72,6 +79,12 @@ def __init__(self): # Clients self.toggle_step_signal_client = self.create_client(Trigger, 'toggle_step_signal') + # Optimization Setup + # TODO: Implement this function + # Run iteration at x0 -> phi0 + # Run iteration at x0+0.01 -> temp_phi + # Do a finite difference to get the gradient -> phi0_prime + ## ROS Callbacks ## def state_callback(self, msg): @@ -179,6 +192,19 @@ def calculate_error(self): # TODO: Implement this function pass + def get_gradient(self, fx, fxh): + """ + This function returns the gradient at the given point using forward finite difference. + + Parameters: + fx (float): The function evaluation at the point of the gradient. + fxh (float): The function evaluation at a point slightly offset from the point. + + Returns: + float: The gradient at the given point. + """ + return (fxh - fx) / 0.01 + def main(args=None): rclpy.init(args=args) diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py index c9f2f0f..43c6d55 100644 --- a/rosplane_tuning/src/autotune/optimizer.py +++ b/rosplane_tuning/src/autotune/optimizer.py @@ -7,11 +7,18 @@ class Optimizer: optimize functions of any number of parameters. """ - def __init__(self, initial_gains): # TODO: Add any other optimization parameters needed (e.g. step size, termination criteria) + def __init__(self, initial_gains, optimization_params): """ Parameters: initial_gains (list): The starting point for the gains to be optimized (x0). + optimization_params (list): Parameters needed for the optimization operation. + [u1, u2, sigma, alpha_init, tau] """ + self.u1 = optimization_params[0] + self.u2 = optimization_params[1] + self.sigma = optimization_params[2] + self.init_alpha = optimization_params[3] + self.tau = optimization_params[4] pass def optimization_terminated(self): @@ -45,3 +52,85 @@ def get_next_parameter_set(self, error): """ pass + def interpolate(self, alpha1, alpha2): + """ + This function interpolates between two points. + + Parameters: + alpha1 (float): The first distance. + alpha2 (float): The second distance. + + Returns: + float: The interpolated value or alphap. + """ + return (alpha1 + alpha2)/2 + + def bracketing(self, phi1, phi1_prime, phi2, phi2_prime): + """ + This function conducts the bracketing part of the optimization. + + Parameters: + phi1 (float): The value of the function at point 1. + phi1_prime (float): The gradient of the function at point 1. + phi2 (float): The value of the function at point 2. + phi2_prime (float): The gradient of the function at point 2. 
+ + Returns: + alphastar (float): The optimal step size + """ + + alpha2 = self.init_alpha + first = True + if(phi2 > phi1 + self.u1*self.init_alpha*phi1_prime) or (first == False and phi2 > phi1): + # alphastar = pinpointing() + alphastar = 1 # Testing + return alphastar + + if abs(phi2_prime) <= -self.u2*phi1_prime: + alphastar = alpha2 + return alphastar + + elif phi2_prime >= 0: + # alphastar = pinpointing() + alphastar = 1 # Testing + return alphastar + + else: + alpha1 = alpha2 + alpha2 = self.sigma*alpha2 + + first = False + + def pinpointing(self, alphalow, alphahigh, philow, philow_prime, phihigh, phihigh_prime): + """ + This function conducts the pinpointing part of the optimization. + + Parameters: + alpha1 (float): The first distance. + alpha2 (float): The second distance. + phi1 (float): The value of the function at point 1. + phi1_prime (float): The gradient of the function at point 1. + phi2 (float): The value of the function at point 2. + phi2_prime (float): The gradient of the function at point 2. + + Returns: + alphastar (float): The optimal step size + """ + alphap = self.interpolate(alphalow, alphahigh) + # Calculate phip + phip = 1 + # Calculate phip_prime + phip_prime = 1 + + if alphap > philow + self.u1*alphap*philow_prime or alphap > philow: + alphahigh = alphap + phihigh = phip + else: + if abs(phip_prime) <= -self.u2*philow_prime: + alphastar = alphap + return alphastar + + elif phip_prime*(alphahigh - alphalow) >= 0: + alphahigh = alphalow + + alphalow = alphap \ No newline at end of file From 5ef365fd4974c4cfb51f7c9b9048a59dd553a524 Mon Sep 17 00:00:00 2001 From: Joseph Date: Wed, 20 Mar 2024 13:55:48 -0600 Subject: [PATCH 07/52] moved functions over to optimizer --- rosplane_tuning/src/autotune/optimizer.py | 24 +++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py index 43c6d55..4b3c45e 100644 --- a/rosplane_tuning/src/autotune/optimizer.py +++ b/rosplane_tuning/src/autotune/optimizer.py @@ -11,8 +11,7 @@ def __init__(self, initial_gains, optimization_params): """ Parameters: initial_gains (list): The starting point for the gains to be optimized (x0). - optimization_params (list): Parameters needed for the optimization operation. - [u1, u2, sigma, alpha_init, tau] + optimization_params (list): Parameters needed for the optimization operation. """ self.u1 = optimization_params[0] self.u2 = optimization_params[1] @@ -52,6 +51,27 @@ def get_next_parameter_set(self, error): """ pass + def calculate_error(self): + """ + Calculate the error between the state estimate and the commanded setpoint using the + collected data. + """ + # TODO: Implement this function + pass + + def get_gradient(self, fx, fxh): + """ + This function returns the gradient at the given point using forward finite difference. + + Parameters: + fx (float): The function evaluation at the point of the gradient. + fxh (float): The function evaluation at a point slightly offset from the point. + + Returns: + float: The gradient at the given point. + """ + return (fxh - fx) / 0.01 + def interpolate(self, alpha1, alpha2): """ This function interpolates between two points. 
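The forward finite difference added above fixes the offset at h = 0.01. Extended to a full
gradient over several gains, the same idea looks roughly like the following standalone sketch
(function and variable names are illustrative, not part of the node):

    import numpy as np

    def forward_difference_gradient(f, x, h=0.01):
        """Approximate the gradient of f at x, one coordinate at a time."""
        x = np.asarray(x, dtype=float)
        fx = f(x)
        grad = np.empty_like(x)
        for i in range(x.size):
            stepped = x.copy()
            stepped[i] += h
            grad[i] = (f(stepped) - fx) / h
        return grad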
From 2c4fa52b98f1109c87b8ede7b18ab28232f6e8c0 Mon Sep 17 00:00:00 2001 From: Joseph Date: Thu, 21 Mar 2024 21:23:36 -0600 Subject: [PATCH 08/52] additional work on changing the optimizer for ROS --- rosplane_tuning/src/autotune/autotune.py | 22 +--- rosplane_tuning/src/autotune/optimizer.py | 153 +++++++++++++++------- 2 files changed, 107 insertions(+), 68 deletions(-) diff --git a/rosplane_tuning/src/autotune/autotune.py b/rosplane_tuning/src/autotune/autotune.py index f252b98..8f29794 100644 --- a/rosplane_tuning/src/autotune/autotune.py +++ b/rosplane_tuning/src/autotune/autotune.py @@ -84,6 +84,7 @@ def __init__(self): # Run iteration at x0 -> phi0 # Run iteration at x0+0.01 -> temp_phi # Do a finite difference to get the gradient -> phi0_prime + # Repeat for x0+alpha ## ROS Callbacks ## @@ -184,27 +185,6 @@ def set_gains(self, gains): # TODO: Implement this function pass - def calculate_error(self): - """ - Calculate the error between the state estimate and the commanded setpoint using the - collected data. - """ - # TODO: Implement this function - pass - - def get_gradient(self, fx, fxh): - """ - This function returns the gradient at the given point using forward finite difference. - - Parameters: - fx (float): The function evaluation at the point of the gradient. - fxh (float): The function evaluation at a point slightly offset from the point. - - Returns: - float: The gradient at the given point. - """ - return (fxh - fx) / 0.01 - def main(args=None): rclpy.init(args=args) diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py index 4b3c45e..7c4329b 100644 --- a/rosplane_tuning/src/autotune/optimizer.py +++ b/rosplane_tuning/src/autotune/optimizer.py @@ -13,13 +13,16 @@ def __init__(self, initial_gains, optimization_params): initial_gains (list): The starting point for the gains to be optimized (x0). optimization_params (list): Parameters needed for the optimization operation. """ + # Set initial values self.u1 = optimization_params[0] self.u2 = optimization_params[1] self.sigma = optimization_params[2] self.init_alpha = optimization_params[3] self.tau = optimization_params[4] - pass + self.flag = 0 + pass + def optimization_terminated(self): """ This function checks if the optimization algorithm has terminated. @@ -72,85 +75,141 @@ def get_gradient(self, fx, fxh): """ return (fxh - fx) / 0.01 - def interpolate(self, alpha1, alpha2): + def next_step(self, phi1, phi1_prime, phi2, phi2_prime): """ - This function interpolates between two points. - - Parameters: - alpha1 (float): The first distance. - alpha2 (float): The second distance. - - Returns: - float: The interpolated value or alphap. + This function chooses which function to run next based on the flag. """ - return (alpha1 + alpha2)/2 + if self.flag == 0: + self.initial_bracketing(phi1, phi1_prime, phi2, phi2_prime) + pass - def bracketing(self, phi1, phi1_prime, phi2, phi2_prime): + # Flag 0 - Initial Bracketing + def initial_bracketing(self, phi1, phi1_prime, phi2, phi2_prime): """ This function conducts the bracketing part of the optimization. + """ - Parameters: - phi1 (float): The value of the function at point 1. - phi1_prime (float): The gradient of the function at point 1. - phi2 (float): The value of the function at point 2. - phi2_prime (float): The gradient of the function at point 2. 
+ alpha1 = 0 + alpha2 = self.init_alpha + + # Needs Pinpointing + if(phi2 > phi1 + self.u1*self.init_alpha*phi1_prime): + flag = 2 + alphap = self.interpolate(alpha1, alpha2) + return alpha2 + + # Optimized + if abs(phi2_prime) <= -self.u2*phi1_prime: + flag = 4 + return alpha2 + + # Needs Pinpointing + elif phi2_prime >= 0: + flag = 2 + alphap = self.interpolate(alpha1, alpha2) + return alpha2 + + # Needs more Bracketing + else: + alpha1 = alpha2 + alpha2 = self.sigma*alpha2 + flag = 1 + return alpha2 - Returns: - alphastar (float): The optimal step size + + # Flag 1 - Bracketing + def bracketing(self, phi1, phi1_prime, phi2, phi2_prime, temp_alpha1, temp_alpha2): """ + This function conducts the bracketing part of the optimization. + """ + alpha1 = temp_alpha1 + alpha2 = temp_alpha2 - alpha2 = self.init_alpha - first = True - if(phi2 > phi1 + self.u1*self.init_alpha*phi1_prime) or (first == False and phi2 > phi1): - # alphastar = pinpointing() - alphastar = 1 # Testing - return alphastar + # Needs Pinpointing + if(phi2 > phi1 + self.u1*self.init_alpha*phi1_prime) or (phi2 > phi1): + flag = 2 + alphap = self.interpolate(alpha1, alpha2) + res = [alpha1, alphap, alpha2] + return res + # Optimized if abs(phi2_prime) <= -self.u2*phi1_prime: - alphastar = alpha2 - return alphastar + flag = 4 + res = [alpha2] + return res + # Needs Pinpointing elif phi2_prime >= 0: - # alphastar = pinpointing() - alphastar = 1 # Testing - return alphastar + flag = 2 + alphap = self.interpolate(alpha1, alpha2) + res = [alpha1, alphap, alpha2] + return res + # Needs more Bracketing else: alpha1 = alpha2 alpha2 = self.sigma*alpha2 + res = [alpha1, alpha2] + return res + + # Interpolating + def interpolate(self, alpha1, alpha2): + """ + This function interpolates between two points. + + Parameters: + alpha1 (float): The first distance. + alpha2 (float): The second distance. - first = False + Returns: + float: The interpolated value or alphap. + """ + return (alpha1 + alpha2)/2 - def pinpointing(self, alphalow, alphahigh, philow, philow_prime, phihigh, phihigh_prime): + # Flag 2 - Pinpointing + def pinpointing(self, alpha1, phi1, phi1_prime, alpha2, phi2, phi2_prime, phip, phip_prime): """ This function conducts the pinpointing part of the optimization. Parameters: alpha1 (float): The first distance. - alpha2 (float): The second distance. phi1 (float): The value of the function at point 1. phi1_prime (float): The gradient of the function at point 1. + alpha2 (float): The second distance. phi2 (float): The value of the function at point 2. phi2_prime (float): The gradient of the function at point 2. 
Returns: alphastar (float): The optimal step size """ - alphap = self.interpolate(alphalow, alphahigh) - # Calculate phip - phip = 1 - # Calculate phip_prime - phip_prime = 1 + alphap = self.interpolate(alpha1, alpha2) - if alphap > philow + self.u1*alphap*philow_prime or alphap > philow: - alphahigh = alphap - phihigh = phip + if alphap > phi1 + self.u1*alphap*phi1_prime or alphap > phi1: + alpha2 = alphap + phi2 = phip + res = [alpha1, alpha2] + return res else: - if abs(phip_prime) <= -self.u2*philow_prime: + # Optimized + if abs(phip_prime) <= -self.u2*phi1_prime: + flag = 4 alphastar = alphap - return alphastar - - elif phip_prime*(alphahigh - alphalow) >= 0: - alphahigh = alphalow + res = [alphastar] + return res + # More parameterization needed + elif phip_prime*(alpha2 - alpha1) >= 0: + alpha2 = alpha1 - alphalow = alphap \ No newline at end of file + alpha1 = alphap + res = [alpha1, alpha2] + return res + + # Check for failure criteria + + # Flag 3 - Failure Convergence + def failure(self): + pass + + # Flag 4 - Successful Convergence + def success(self): + pass From 6d299b686c43238da6fb270e53ebb14876c77e47 Mon Sep 17 00:00:00 2001 From: Joseph Date: Fri, 22 Mar 2024 13:46:36 -0600 Subject: [PATCH 09/52] fixed a small oversight --- rosplane_tuning/src/autotune/optimizer.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py index 7c4329b..f9dfe92 100644 --- a/rosplane_tuning/src/autotune/optimizer.py +++ b/rosplane_tuning/src/autotune/optimizer.py @@ -167,7 +167,7 @@ def interpolate(self, alpha1, alpha2): return (alpha1 + alpha2)/2 # Flag 2 - Pinpointing - def pinpointing(self, alpha1, phi1, phi1_prime, alpha2, phi2, phi2_prime, phip, phip_prime): + def pinpointing(self, alpha1, phi1, phi1_prime, alpha2, phi2, phi2_prime, alphap, phip, phip_prime): """ This function conducts the pinpointing part of the optimization. @@ -182,7 +182,6 @@ def pinpointing(self, alpha1, phi1, phi1_prime, alpha2, phi2, phi2_prime, phip, Returns: alphastar (float): The optimal step size """ - alphap = self.interpolate(alpha1, alpha2) if alphap > phi1 + self.u1*alphap*phi1_prime or alphap > phi1: alpha2 = alphap From 9b9135d52f625403a2d5e60d6d53a1d210fd3b88 Mon Sep 17 00:00:00 2001 From: Brandon Sutherland Date: Sat, 23 Mar 2024 08:17:06 -0600 Subject: [PATCH 10/52] got autotune settings params for autopilot --- rosplane_tuning/README.md | 8 +- rosplane_tuning/include/signal_generator.hpp | 4 +- rosplane_tuning/launch/autotune.launch.py | 16 +-- .../launch/roll_autotune.launch.py | 4 +- rosplane_tuning/src/autotune/autotune.py | 135 ++++++++++++++---- rosplane_tuning/src/autotune/optimizer.py | 40 ++++-- rosplane_tuning/src/signal_generator.cpp | 8 +- 7 files changed, 158 insertions(+), 57 deletions(-) diff --git a/rosplane_tuning/README.md b/rosplane_tuning/README.md index 0a4703e..e431215 100644 --- a/rosplane_tuning/README.md +++ b/rosplane_tuning/README.md @@ -6,11 +6,11 @@ We recommend using [PlotJuggler](https://github.com/facontidavide/PlotJuggler) t ## Signal Generator -Signal generator is a ROS2 node that will generate step inputs, square waves, sine waves, sawtooth waves, and triangle waves to be used as command input for ROSplane. It has support for roll, pitch, altitude, heading, and airspeed command input. +Signal generator is a ROS2 node that will generate step inputs, square waves, sine waves, sawtooth waves, and triangle waves to be used as command input for ROSplane. 
It has support for roll, pitch, altitude, course, and airspeed command input.

This is useful for tuning autopilots as we can give a clear and repeatable command to any portion of the autopilot and observe its response to that input. We can then tune gains, re-issue the same commands, and observe whether performance improved or worsened.

Signal generator works by publishing autopilot commands on the ROS network on the `controller_commands` and `tuning_debug` topics. Each control output (roll, pitch, altitude, course, airspeed) has a default value that is set by a ROS parameter. The signal generator will then add the generated signal to one of the control outputs with a set magnitude and frequency.

### Signal Types
- Step: This is a non-continuous signal, where the generated signal will jump between the default value and the default+magnitude every time the `toggle_step_signal` service is called.

*By Omegatron - Own work, CC BY-SA 3.0, https://commons.wikimedia.org/w/index.php?curid=343520*

### Parameters
- `controller_output`: Specifies what controller to apply the generated signal to. All other controllers will be constant at their default values. Valid values are `roll`, `pitch`, `altitude`, `course`, and `airspeed`.
- `signal_type`: Specifies what kind of signal to generate. Valid values are `step`, `square`, `sawtooth`, `triangle`, and `sine`.
- `publish_rate_hz`: Specifies the rate to publish control commands. Must be greater than 0.
- `signal_magnitude`: Specifies the magnitude of the signal to generate. The signal will only be added to the default value, rather than subtracted. For example, if the signal has a magnitude of 2 and a default value of 5, the generated signal will range from 5 to 7.
- `frequency_hz`: Specifies the frequency of the generated signal. Must be greater than 0, and does not apply to step signals. For step signals, manually toggle the signal up and down with the `toggle_step_signal` service.
- `default_va_c`: The default value for the commanded airspeed, in meters per second.
- `default_h_c`: The default value for the commanded altitude, in meters above takeoff altitude.
- `default_chi_c`: The default value for the commanded course, in radians clockwise from north.
- `default_theta_c`: The default value for the commanded pitch, in radians pitched up from horizontal.
- `default_phi_c`: The default value for the commanded roll, in radians 'rolled right' from horizontal.
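As a worked example: with `controller_output` set to `altitude`, `signal_type` set to `square`, `signal_magnitude` of 2, and `default_h_c` of 50, the commanded altitude alternates between 50 and 52 meters at `frequency_hz`. With `signal_type` set to `step` instead, the command holds at 50 until the `toggle_step_signal` service is called (e.g. with `ros2 service call /toggle_step_signal std_srvs/srv/Trigger`, assuming the service sits in the root namespace), then holds at 52 until the next call.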
diff --git a/rosplane_tuning/include/signal_generator.hpp b/rosplane_tuning/include/signal_generator.hpp index c97a06f..8319fdf 100644 --- a/rosplane_tuning/include/signal_generator.hpp +++ b/rosplane_tuning/include/signal_generator.hpp @@ -53,7 +53,7 @@ namespace rosplane /** * This class is used to generate various input signals to test and tune all the control layers * in ROSplane. It currently supports square, sawtooth, triangle, and sine signals, and supports - * outputting to the roll, pitch, altitude, heading, and airspeed controllers. + * outputting to the roll, pitch, altitude, course, and airspeed controllers. */ class TuningSignalGenerator : public rclcpp::Node { @@ -68,7 +68,7 @@ class TuningSignalGenerator : public rclcpp::Node ROLL, PITCH, ALTITUDE, - HEADING, + COURSE, AIRSPEED }; diff --git a/rosplane_tuning/launch/autotune.launch.py b/rosplane_tuning/launch/autotune.launch.py index 7fe84e5..a8f4c26 100644 --- a/rosplane_tuning/launch/autotune.launch.py +++ b/rosplane_tuning/launch/autotune.launch.py @@ -10,14 +10,14 @@ def generate_launch_description(): # Launch arguments - error_collection_period = LaunchConfiguration('error_collection_period') - error_collection_period_launch_arg = DeclareLaunchArgument( - 'error_collection_period', + stabilize_period = LaunchConfiguration('/autotune/stabilize_period') + stabilize_period_launch_arg = DeclareLaunchArgument( + '/autotune/stabilize_period', description='Amount of time to collect data for calculating error' ) - current_tuning_autopilot = LaunchConfiguration('current_tuning_autopilot') + current_tuning_autopilot = LaunchConfiguration('/autotune/current_tuning_autopilot') current_tuning_autopilot_launch_arg = DeclareLaunchArgument( - 'current_tuning_autopilot', + '/autotune/current_tuning_autopilot', description='Autopilot to tune' ) @@ -27,8 +27,8 @@ def generate_launch_description(): executable='autotune.py', output='screen', parameters=[ - {'error_collection_period': error_collection_period}, - {'current_tuning_autopilot': current_tuning_autopilot} + {'/autotune/stabilize_period': stabilize_period}, + {'/autotune/current_tuning_autopilot': current_tuning_autopilot} ] ) rosplane_tuning_launch_include = IncludeLaunchDescription( @@ -39,7 +39,7 @@ def generate_launch_description(): ) return LaunchDescription([ - error_collection_period_launch_arg, + stabilize_period_launch_arg, current_tuning_autopilot_launch_arg, autotune_node, rosplane_tuning_launch_include diff --git a/rosplane_tuning/launch/roll_autotune.launch.py b/rosplane_tuning/launch/roll_autotune.launch.py index 25e1081..3a6768c 100644 --- a/rosplane_tuning/launch/roll_autotune.launch.py +++ b/rosplane_tuning/launch/roll_autotune.launch.py @@ -13,7 +13,7 @@ def generate_launch_description(): 'launch/autotune.launch.py') ), launch_arguments={ - 'error_collection_period': '5.0', - 'current_tuning_autopilot': 'roll' + '/autotune/stabilize_period': '5.0', + '/autotune/current_tuning_autopilot': 'roll' }.items() )]) diff --git a/rosplane_tuning/src/autotune/autotune.py b/rosplane_tuning/src/autotune/autotune.py index 8f29794..043b362 100644 --- a/rosplane_tuning/src/autotune/autotune.py +++ b/rosplane_tuning/src/autotune/autotune.py @@ -3,12 +3,26 @@ from rosplane_msgs.msg import ControllerCommands from rosplane_msgs.msg import ControllerInternalsDebug from rosplane_msgs.msg import State -from std_srvs.srv import Trigger +from optimizer import Optimizer import rclpy from rclpy.node import Node +from rclpy.parameter import Parameter +from rcl_interfaces.srv import 
GetParameters, SetParameters +from std_srvs.srv import Trigger -from optimizer import Optimizer +from enum import Enum, auto + + +class CurrentAutopilot(Enum): + """ + This class defines which autopilots are available for tuning. + """ + ROLL = auto() + COURSE = auto() + PITCH = auto() + ALTITUDE = auto() + AIRSPEED = auto() class Autotune(Node): @@ -26,26 +40,36 @@ class Autotune(Node): def __init__(self): super().__init__('autotune') - # Class variables + # Class state variables self.collecting_data = False + + # Data storage self.state = [] self.commands = [] self.internals_debug = [] - self.new_gains = [] # TODO: Get gains from ROS parameters - - u1 = 10**-4 # 1st Strong Wolfe Condition, must be between 0 and 1. - u2 = 0.5 # 2nd Strong Wolfe Condition, must be between u1 and 1. - sigma = 1.5 - alpha_init = 1 # Typically 1 - tau = 10**-3 # Convergence tolerance, typically 10^-3 - self.optimization_params = [u1,u2,sigma,alpha_init,tau] - self.optimizer = Optimizer(self.new_gains, self.optimization_params) # ROS parameters # The amount of time to collect data for calculating the error - self.declare_parameter('stabilize_period', rclpy.Parameter.Type.DOUBLE) + self.declare_parameter('/autotune/stabilize_period', rclpy.Parameter.Type.DOUBLE) # The autopilot that is currently being tuned - self.declare_parameter('current_tuning_autopilot', rclpy.Parameter.Type.STRING) + self.declare_parameter('/autotune/current_tuning_autopilot', rclpy.Parameter.Type.STRING) + # Get the autopilot to tune + if self.get_parameter('/autotune/current_tuning_autopilot').value == 'roll': + self.current_autopilot = CurrentAutopilot.ROLL + elif self.get_parameter('/autotune/current_tuning_autopilot').value == 'course': + self.current_autopilot = CurrentAutopilot.COURSE + elif self.get_parameter('/autotune/current_tuning_autopilot').value == 'pitch': + self.current_autopilot = CurrentAutopilot.PITCH + elif self.get_parameter('/autotune/current_tuning_autopilot').value == 'altitude': + self.current_autopilot = CurrentAutopilot.ALTITUDE + elif self.get_parameter('/autotune/current_tuning_autopilot').value == 'airspeed': + self.current_autopilot = CurrentAutopilot.AIRSPEED + else: + self.get_logger().fatal(self.get_parameter('/autotune/current_tuning_autopilot').value + + ' is not a valid value for current_tuning_autopilot.' 
+ + ' Please select one of the' + + ' following: roll, course, pitch, altitude, airspeed.') + rclpy.shutdown() # Subscriptions self.state_subscription = self.create_subscription( @@ -66,25 +90,30 @@ def __init__(self): # Timers self.stabilize_period_timer = self.create_timer( - self.get_parameter('stabilize_period').value, + self.get_parameter('/autotune/stabilize_period').value, self.stabilize_period_timer_callback) self.stabilize_period_timer.cancel() # Services self.run_tuning_iteration_service = self.create_service( Trigger, - 'run_tuning_iteration', + '/autotune/run_tuning_iteration', self.run_tuning_iteration_callback) # Clients - self.toggle_step_signal_client = self.create_client(Trigger, 'toggle_step_signal') + self.toggle_step_signal_client = self.create_client(Trigger, '/autotune/toggle_step_signal') + self.get_parameter_client = self.create_client(GetParameters, '/autopilot/get_parameters') + self.set_parameter_client = self.create_client(SetParameters, '/autopilot/set_parameters') - # Optimization Setup - # TODO: Implement this function - # Run iteration at x0 -> phi0 - # Run iteration at x0+0.01 -> temp_phi - # Do a finite difference to get the gradient -> phi0_prime - # Repeat for x0+alpha + # Optimization + self.new_gains = self.get_gains() + u1 = 10**-4 # 1st Strong Wolfe Condition, must be between 0 and 1. + u2 = 0.5 # 2nd Strong Wolfe Condition, must be between u1 and 1. + sigma = 1.5 + alpha_init = 1 # Typically 1 + tau = 10**-3 # Convergence tolerance, typically 10^-3 + self.optimization_params = [u1,u2,sigma,alpha_init,tau] + self.optimizer = Optimizer(self.new_gain, self.optimization_params) ## ROS Callbacks ## @@ -172,16 +201,74 @@ def call_toggle_step_signal(self): """ while not self.toggle_step_signal_client.wait_for_service(timeout_sec=1.0): - self.get_logger().info(f'service {self.toggle_step_signal_client.srv_name} ' + + self.get_logger().info(f'Service {self.toggle_step_signal_client.srv_name} ' + 'not available, waiting...') request = Trigger.Request() self.toggle_step_signal_client.call_async(request) + def get_gains(self): + """ + Gets the current gains of the autopilot. + + Returns: + list of floats: The current gains of the autopilot that is being tuning. + """ + # TODO: Function not complete + gains = [] + request = GetParameters.Request() + + while not self.get_parameter_client.wait_for_service(timeout_sec=1.0): + self.get_logger().info('Service {self.get_parameter_client.srv_name}' + + ' not available, waiting...') + + if self.current_autopilot == CurrentAutopilot.ROLL: + request.names = ['r_kp', 'r_kd'] + elif self.current_autopilot == CurrentAutopilot.COURSE: + request.names = ['c_kp', 'c_ki'] + elif self.current_autopilot == CurrentAutopilot.PITCH: + request.names = ['p_kp', 'p_kd'] + elif self.current_autopilot == CurrentAutopilot.ALTITUDE: + request.names = ['a_kp', 'a_ki'] + else: # CurrentAutopilot.AIRSPEED + request.names = ['a_t_kp', 'a_t_ki'] + + self.get_parameter_client.call_async(request) + def set_gains(self, gains): """ Set the gains of the autopilot to the given values. 
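+
+        The rcl_interfaces SetParameters service applies the parameters in order and returns
+        one result per parameter. The body below only fires call_async; a sketch of how the
+        outcome could be checked (hypothetical follow-up, not implemented here):
+
+            future = self.set_parameter_client.call_async(request)
+            future.add_done_callback(
+                lambda fut: [self.get_logger().warn('set_gains: ' + r.reason)
+                             for r in fut.result().results if not r.successful])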
""" + request = SetParameters.Request() + + while not self.set_parameter_client.wait_for_service(timeout_sec=1.0): + self.get_logger().info('Service {self.set_parameter_client.srv_name}' + + ' not available, waiting...') + + if self.current_autopilot == CurrentAutopilot.ROLL: + request.parameters = [Parameter(name='r_kp', value=gains[0]).to_parameter_msg(), + Parameter(name='r_kd', value=gains[1]).to_parameter_msg()] + elif self.current_autopilot == CurrentAutopilot.COURSE: + request.parameters = [Parameter(name='c_kp', value=gains[0]).to_parameter_msg(), + Parameter(name='c_ki', value=gains[1]).to_parameter_msg()] + elif self.current_autopilot == CurrentAutopilot.PITCH: + request.parameters = [Parameter(name='p_kp', value=gains[0]).to_parameter_msg(), + Parameter(name='p_kd', value=gains[1]).to_parameter_msg()] + elif self.current_autopilot == CurrentAutopilot.ALTITUDE: + request.parameters = [Parameter(name='a_kp', value=gains[0]).to_parameter_msg(), + Parameter(name='a_ki', value=gains[1]).to_parameter_msg()] + else: # CurrentAutopilot.AIRSPEED + request.parameters = [Parameter(name='a_t_kp', value=gains[0]).to_parameter_msg(), + Parameter(name='a_t_ki', value=gains[1]).to_parameter_msg()] + + self.set_parameter_client.call_async(request) + + + def calculate_error(self): + """ + Calculate the error between the state estimate and the commanded setpoint using the + collected data. + """ # TODO: Implement this function pass diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py index f9dfe92..e6a0b12 100644 --- a/rosplane_tuning/src/autotune/optimizer.py +++ b/rosplane_tuning/src/autotune/optimizer.py @@ -1,5 +1,17 @@ #!/usr/bin/env python3 +from enum import Enum, auto + + +class OptimizerState(Enum): + """ + This class defines the various states that exist within the optimizer. + """ + FINDING_GRADIENT = auto() + BRACKETING = auto() + PINPOINTING = auto() + TERMINATED = auto() + class Optimizer: """ @@ -21,8 +33,8 @@ def __init__(self, initial_gains, optimization_params): self.tau = optimization_params[4] self.flag = 0 - pass - + self.state = OptimizerState.FINDING_GRADIENT + def optimization_terminated(self): """ This function checks if the optimization algorithm has terminated. @@ -39,28 +51,30 @@ def get_optimiztion_status(self): Returns: str: The status of the optimization algorithm """ - pass + return 'TODO' def get_next_parameter_set(self, error): """ This function returns the next set of gains to be tested by the optimization algorithm. Parameters: - error (float): The error from the set of gains returned previously. If this is the first - iteration, the error of the initial gains will be given. + error (list of floats): The error from the list of gains to test that was returned by this + function previously. If this is the first iteration, the error of the initial gains will be given. + [error1, error2, ...] Returns: list: The next set of gains to be tested. Should be the same length as the initial gains. """ - pass - def calculate_error(self): - """ - Calculate the error between the state estimate and the commanded setpoint using the - collected data. 
- """ - # TODO: Implement this function - pass + if self.state == OptimizerState.FINDING_GRADIENT: + pass + elif self.state == OptimizerState.BRACKETING: + pass + elif self.state == OptimizerState.PINPOINTING: + pass + else: # Terminated + pass + def get_gradient(self, fx, fxh): """ diff --git a/rosplane_tuning/src/signal_generator.cpp b/rosplane_tuning/src/signal_generator.cpp index 21d0b48..1f30770 100644 --- a/rosplane_tuning/src/signal_generator.cpp +++ b/rosplane_tuning/src/signal_generator.cpp @@ -137,7 +137,7 @@ void TuningSignalGenerator::publish_timer_callback() case ControllerOutput::ALTITUDE: center_value = default_h_c_; break; - case ControllerOutput::HEADING: + case ControllerOutput::COURSE: center_value = default_chi_c_; break; case ControllerOutput::AIRSPEED: @@ -186,7 +186,7 @@ void TuningSignalGenerator::publish_timer_callback() case ControllerOutput::ALTITUDE: command_message.h_c = signal_value; break; - case ControllerOutput::HEADING: + case ControllerOutput::COURSE: command_message.chi_c = signal_value; break; case ControllerOutput::AIRSPEED: @@ -336,8 +336,8 @@ void TuningSignalGenerator::update_params() controller_output_ = ControllerOutput::PITCH; } else if (controller_output_string == "altitude") { controller_output_ = ControllerOutput::ALTITUDE; - } else if (controller_output_string == "heading") { - controller_output_ = ControllerOutput::HEADING; + } else if (controller_output_string == "course") { + controller_output_ = ControllerOutput::COURSE; } else if (controller_output_string == "airspeed") { controller_output_ = ControllerOutput::AIRSPEED; } else { From 466e8bce58196f9df715da773d56e399ec8c47d8 Mon Sep 17 00:00:00 2001 From: Brandon Sutherland Date: Sat, 23 Mar 2024 11:00:59 -0600 Subject: [PATCH 11/52] modified interface functions between autotune and optimizer --- rosplane_tuning/src/autotune/autotune.py | 9 +-- rosplane_tuning/src/autotune/optimizer.py | 71 +++++++++++++---------- 2 files changed, 43 insertions(+), 37 deletions(-) diff --git a/rosplane_tuning/src/autotune/autotune.py b/rosplane_tuning/src/autotune/autotune.py index 043b362..3833bf8 100644 --- a/rosplane_tuning/src/autotune/autotune.py +++ b/rosplane_tuning/src/autotune/autotune.py @@ -107,13 +107,8 @@ def __init__(self): # Optimization self.new_gains = self.get_gains() - u1 = 10**-4 # 1st Strong Wolfe Condition, must be between 0 and 1. - u2 = 0.5 # 2nd Strong Wolfe Condition, must be between u1 and 1. - sigma = 1.5 - alpha_init = 1 # Typically 1 - tau = 10**-3 # Convergence tolerance, typically 10^-3 - self.optimization_params = [u1,u2,sigma,alpha_init,tau] - self.optimizer = Optimizer(self.new_gain, self.optimization_params) + self.optimizer = Optimizer(self.new_gains, {'u1': 10**-4, 'u2': 0.5, 'sigma': 1.5, + 'alpha': 1, 'tau': 10**-3}) ## ROS Callbacks ## diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py index e6a0b12..18cd490 100644 --- a/rosplane_tuning/src/autotune/optimizer.py +++ b/rosplane_tuning/src/autotune/optimizer.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 from enum import Enum, auto +import numpy as np class OptimizerState(Enum): @@ -22,17 +23,22 @@ class Optimizer: def __init__(self, initial_gains, optimization_params): """ Parameters: - initial_gains (list): The starting point for the gains to be optimized (x0). - optimization_params (list): Parameters needed for the optimization operation. + initial_gains (np.array, size num_gains, dtype float): The starting point for the gains to + be optimized (x0). [gain1, gain2, ...] 
+ optimization_params (dictionary): Parameters needed for the optimization operation. + - u1 (float): 1st Strong Wolfe Condition, must be between 0 and 1. + - u2 (float): 2nd Strong Wolfe Condition, must be between u1 and 1. + - sigma (float): The step size multiplier. + - alpha (float): The initial step size, usually 1. + - tau (float): Convergence tolerance, usually 1e-3. """ # Set initial values - self.u1 = optimization_params[0] - self.u2 = optimization_params[1] - self.sigma = optimization_params[2] - self.init_alpha = optimization_params[3] - self.tau = optimization_params[4] - self.flag = 0 - + self.u1 = optimization_params['u1'] + self.u2 = optimization_params['u2'] + self.sigma = optimization_params['sigma'] + self.init_alpha = optimization_params['alpha'] + self.tau = optimization_params['tau'] + self.flag = 0 self.state = OptimizerState.FINDING_GRADIENT def optimization_terminated(self): @@ -40,30 +46,35 @@ def optimization_terminated(self): This function checks if the optimization algorithm has terminated. Returns: - bool: True if optimization should terminate, False otherwise + bool: True if optimization is terminated, False otherwise. """ - pass + if self.state == OptimizerState.TERMINATED: + return True + else: + return False def get_optimiztion_status(self): """ This function returns the status of the optimization algorithm. Returns: - str: The status of the optimization algorithm + str: The status of the optimization algorithm. """ - return 'TODO' + return 'TODO: Status not implemented.' def get_next_parameter_set(self, error): """ This function returns the next set of gains to be tested by the optimization algorithm. Parameters: - error (list of floats): The error from the list of gains to test that was returned by this - function previously. If this is the first iteration, the error of the initial gains will be given. - [error1, error2, ...] + error (np.array, size num_gains, dtype float): The error from the list of gains to test that + was returned by this function previously. If this is the first iteration, the error of + the initial gains will be given. [error1, error2, ...] Returns: - list: The next set of gains to be tested. Should be the same length as the initial gains. + np.array, size num_gains*num_sets, dtype float: The next series of gains to be tested. Any + number of gain sets can be returned, but each set should correspond to the initial gains + given at initialization. [[gain1_1, gain2_1, ...], [gain1_2, gain2_2, ...], ...] """ if self.state == OptimizerState.FINDING_GRADIENT: @@ -79,7 +90,7 @@ def get_next_parameter_set(self, error): def get_gradient(self, fx, fxh): """ This function returns the gradient at the given point using forward finite difference. - + Parameters: fx (float): The function evaluation at the point of the gradient. fxh (float): The function evaluation at a point slightly offset from the point. 
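As an aside for readers following the line search: the bracketing code below tests the two strong Wolfe conditions that u1 and u2 parameterize. A minimal standalone sketch of those two checks (illustrative only, not part of the patch; phi is the objective along the search direction, phi0 and phi0_prime its value and slope at alpha = 0):

def sufficient_decrease(phi0, phi0_prime, phi_alpha, alpha, u1=1e-4):
    # 1st strong Wolfe condition: the objective must drop at least as fast
    # as a u1-scaled linear extrapolation of the initial slope.
    return phi_alpha <= phi0 + u1 * alpha * phi0_prime

def curvature_ok(phi0_prime, phi_alpha_prime, u2=0.5):
    # 2nd strong Wolfe condition: the slope magnitude must have shrunk.
    # Assumes a descent direction (phi0_prime < 0), so -u2 * phi0_prime > 0.
    return abs(phi_alpha_prime) <= -u2 * phi0_prime

# Worked check on phi(alpha) = (alpha - 1)**2, where phi0 = 1, phi0_prime = -2:
assert sufficient_decrease(1.0, -2.0, 0.5625, 0.25)  # alpha = 0.25 decreases enough
assert not curvature_ok(-2.0, -1.5)                  # but its slope is still steep
assert curvature_ok(-2.0, -0.5)                      # alpha = 0.75 has flattened enough

In the functions below, both conditions passing corresponds to flag 4 (optimized), a sufficient-decrease failure or a positive slope routes to pinpointing (flag 2), and the remaining case widens the bracket by sigma (flag 1).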
@@ -105,24 +116,24 @@ def initial_bracketing(self, phi1, phi1_prime, phi2, phi2_prime): alpha1 = 0 alpha2 = self.init_alpha - + # Needs Pinpointing if(phi2 > phi1 + self.u1*self.init_alpha*phi1_prime): flag = 2 alphap = self.interpolate(alpha1, alpha2) return alpha2 - + # Optimized if abs(phi2_prime) <= -self.u2*phi1_prime: flag = 4 return alpha2 - + # Needs Pinpointing elif phi2_prime >= 0: flag = 2 alphap = self.interpolate(alpha1, alpha2) return alpha2 - + # Needs more Bracketing else: alpha1 = alpha2 @@ -130,7 +141,7 @@ def initial_bracketing(self, phi1, phi1_prime, phi2, phi2_prime): flag = 1 return alpha2 - + # Flag 1 - Bracketing def bracketing(self, phi1, phi1_prime, phi2, phi2_prime, temp_alpha1, temp_alpha2): """ @@ -145,20 +156,20 @@ def bracketing(self, phi1, phi1_prime, phi2, phi2_prime, temp_alpha1, temp_alpha alphap = self.interpolate(alpha1, alpha2) res = [alpha1, alphap, alpha2] return res - + # Optimized if abs(phi2_prime) <= -self.u2*phi1_prime: flag = 4 res = [alpha2] return res - + # Needs Pinpointing elif phi2_prime >= 0: flag = 2 alphap = self.interpolate(alpha1, alpha2) res = [alpha1, alphap, alpha2] return res - + # Needs more Bracketing else: alpha1 = alpha2 @@ -192,7 +203,7 @@ def pinpointing(self, alpha1, phi1, phi1_prime, alpha2, phi2, phi2_prime, alphap alpha2 (float): The second distance. phi2 (float): The value of the function at point 2. phi2_prime (float): The gradient of the function at point 2. - + Returns: alphastar (float): The optimal step size """ @@ -212,13 +223,13 @@ def pinpointing(self, alpha1, phi1, phi1_prime, alpha2, phi2, phi2_prime, alphap # More parameterization needed elif phip_prime*(alpha2 - alpha1) >= 0: alpha2 = alpha1 - + alpha1 = alphap res = [alpha1, alpha2] return res - + # Check for failure criteria - + # Flag 3 - Failure Convergence def failure(self): pass From bf10b908e8c2eb46b4b814296d451cfa8c109817 Mon Sep 17 00:00:00 2001 From: Brandon Sutherland Date: Sat, 23 Mar 2024 13:01:26 -0600 Subject: [PATCH 12/52] got autotune receiving service call results during service --- rosplane_tuning/src/autotune/autotune.py | 76 ++++++++++++++++++------ 1 file changed, 58 insertions(+), 18 deletions(-) diff --git a/rosplane_tuning/src/autotune/autotune.py b/rosplane_tuning/src/autotune/autotune.py index 3833bf8..0c3ec55 100644 --- a/rosplane_tuning/src/autotune/autotune.py +++ b/rosplane_tuning/src/autotune/autotune.py @@ -6,12 +6,16 @@ from optimizer import Optimizer import rclpy +from rclpy.executors import MultiThreadedExecutor +from rclpy.callback_groups import MutuallyExclusiveCallbackGroup from rclpy.node import Node from rclpy.parameter import Parameter from rcl_interfaces.srv import GetParameters, SetParameters from std_srvs.srv import Trigger from enum import Enum, auto +import numpy as np +import time class CurrentAutopilot(Enum): @@ -40,6 +44,10 @@ class Autotune(Node): def __init__(self): super().__init__('autotune') + # Callback groups, used for allowing external services to run mid-internal callback + self.internal_callback_group = MutuallyExclusiveCallbackGroup() + self.external_callback_group = MutuallyExclusiveCallbackGroup() + # Class state variables self.collecting_data = False @@ -76,34 +84,48 @@ def __init__(self): State, 'estimated_state', self.state_callback, - 10) + 10, + callback_group=self.internal_callback_group) self.commands_subscription = self.create_subscription( ControllerCommands, 'controller_commands', self.commands_callback, - 10) + 10, + callback_group=self.internal_callback_group) 
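The reason for the two callback groups this patch threads through every subscription, timer, service, and client: with a single-threaded executor, a callback that blocks while waiting on one of the node's own service clients deadlocks, because the thread that must deliver the client's response is the one doing the waiting. Splitting internal callbacks and external clients into separate mutually exclusive groups and spinning with a MultiThreadedExecutor lets another thread service the reply. A self-contained sketch of the pattern (the node and service names here are hypothetical, not from the patch):

#!/usr/bin/env python3
import rclpy
from rclpy.node import Node
from rclpy.executors import MultiThreadedExecutor
from rclpy.callback_groups import MutuallyExclusiveCallbackGroup
from std_srvs.srv import Trigger

class Demo(Node):
    def __init__(self):
        super().__init__('demo')
        internal = MutuallyExclusiveCallbackGroup()
        external = MutuallyExclusiveCallbackGroup()
        # Blocking work runs in the internal group...
        self.srv = self.create_service(Trigger, 'kick', self.kick_callback,
                                       callback_group=internal)
        # ...while responses for this client arrive via the external group.
        self.client = self.create_client(Trigger, 'other_service',
                                         callback_group=external)

    def kick_callback(self, request, response):
        future = self.client.call_async(Trigger.Request())
        # Only safe because a second executor thread can complete the future;
        # under rclpy.spin(node) this loop would never terminate.
        while not future.done():
            pass
        response.success = True
        return response

def main():
    rclpy.init()
    executor = MultiThreadedExecutor()
    executor.add_node(Demo())
    executor.spin()

if __name__ == '__main__':
    main()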
self.internals_debug_subscription = self.create_subscription( ControllerInternalsDebug, 'tuning_debug', self.internals_debug_callback, - 10) + 10, + callback_group=self.internal_callback_group) # Timers self.stabilize_period_timer = self.create_timer( self.get_parameter('/autotune/stabilize_period').value, - self.stabilize_period_timer_callback) + self.stabilize_period_timer_callback, + callback_group=self.internal_callback_group) self.stabilize_period_timer.cancel() # Services self.run_tuning_iteration_service = self.create_service( Trigger, '/autotune/run_tuning_iteration', - self.run_tuning_iteration_callback) + self.run_tuning_iteration_callback, + callback_group=self.internal_callback_group) # Clients - self.toggle_step_signal_client = self.create_client(Trigger, '/autotune/toggle_step_signal') - self.get_parameter_client = self.create_client(GetParameters, '/autopilot/get_parameters') - self.set_parameter_client = self.create_client(SetParameters, '/autopilot/set_parameters') + self.toggle_step_signal_client = self.create_client( + Trigger, + '/autotune/toggle_step_signal', + callback_group=self.external_callback_group) + self.get_parameter_client = self.create_client( + GetParameters, + '/autopilot/get_parameters', + callback_group=self.external_callback_group) + self.set_parameter_client = self.create_client( + SetParameters, + '/autopilot/set_parameters', + callback_group=self.external_callback_group) # Optimization self.new_gains = self.get_gains() @@ -209,12 +231,10 @@ def get_gains(self): Returns: list of floats: The current gains of the autopilot that is being tuning. """ - # TODO: Function not complete - gains = [] request = GetParameters.Request() while not self.get_parameter_client.wait_for_service(timeout_sec=1.0): - self.get_logger().info('Service {self.get_parameter_client.srv_name}' + + self.get_logger().info(f'Service {self.get_parameter_client.srv_name}' + ' not available, waiting...') if self.current_autopilot == CurrentAutopilot.ROLL: @@ -230,16 +250,14 @@ def get_gains(self): self.get_parameter_client.call_async(request) + return np.array([1.0, 1.0]) # Placeholder + def set_gains(self, gains): """ Set the gains of the autopilot to the given values. 
""" - request = SetParameters.Request() - - while not self.set_parameter_client.wait_for_service(timeout_sec=1.0): - self.get_logger().info('Service {self.set_parameter_client.srv_name}' + - ' not available, waiting...') + request = SetParameters.Request() if self.current_autopilot == CurrentAutopilot.ROLL: request.parameters = [Parameter(name='r_kp', value=gains[0]).to_parameter_msg(), Parameter(name='r_kd', value=gains[1]).to_parameter_msg()] @@ -256,7 +274,27 @@ def set_gains(self, gains): request.parameters = [Parameter(name='a_t_kp', value=gains[0]).to_parameter_msg(), Parameter(name='a_t_ki', value=gains[1]).to_parameter_msg()] - self.set_parameter_client.call_async(request) + # Call the service + while not self.set_parameter_client.wait_for_service(timeout_sec=1.0): + self.get_logger().info('Service {self.set_parameter_client.srv_name}' + + ' not available, waiting...') + future = self.set_parameter_client.call_async(request) + + # Wait for the service to complete, exiting if it takes too long + # rclcpp has a function for this, but I couldn't seem to find one for rclpy + call_time = time.time() + callback_complete = False + while call_time + 5 > time.time(): + if future.done(): + callback_complete = True + break + if not callback_complete: + self.get_logger().error('Unable to set autopilot gains after 5 seconds.') + + # Print any errors that occurred + for response in future.result().results: + if not response.successful: + self.get_logger().error(f'Failed to set parameter: {response.reason}') def calculate_error(self): @@ -272,7 +310,9 @@ def main(args=None): rclpy.init(args=args) autotune = Autotune() - rclpy.spin(autotune) + executor = MultiThreadedExecutor() + executor.add_node(autotune) + executor.spin() autotune.destroy_node() rclpy.shutdown() From a254a4e67adc5da482d88e8e2968cfd126a0be55 Mon Sep 17 00:00:00 2001 From: Brandon Sutherland Date: Sat, 23 Mar 2024 14:11:11 -0600 Subject: [PATCH 13/52] got params setting and gettings --- rosplane_tuning/src/autotune/autotune.py | 70 +++++++++++++++++------- 1 file changed, 49 insertions(+), 21 deletions(-) diff --git a/rosplane_tuning/src/autotune/autotune.py b/rosplane_tuning/src/autotune/autotune.py index 0c3ec55..52af295 100644 --- a/rosplane_tuning/src/autotune/autotune.py +++ b/rosplane_tuning/src/autotune/autotune.py @@ -10,6 +10,7 @@ from rclpy.callback_groups import MutuallyExclusiveCallbackGroup from rclpy.node import Node from rclpy.parameter import Parameter +from rcl_interfaces.msg import ParameterType from rcl_interfaces.srv import GetParameters, SetParameters from std_srvs.srv import Trigger @@ -73,11 +74,10 @@ def __init__(self): elif self.get_parameter('/autotune/current_tuning_autopilot').value == 'airspeed': self.current_autopilot = CurrentAutopilot.AIRSPEED else: - self.get_logger().fatal(self.get_parameter('/autotune/current_tuning_autopilot').value + - ' is not a valid value for current_tuning_autopilot.' + - ' Please select one of the' + - ' following: roll, course, pitch, altitude, airspeed.') - rclpy.shutdown() + raise ValueError(self.get_parameter('/autotune/current_tuning_autopilot').value + + ' is not a valid value for current_tuning_autopilot.' 
+ + ' Please select one of the' + + ' following: roll, course, pitch, altitude, airspeed.') # Subscriptions self.state_subscription = self.create_subscription( @@ -128,9 +128,14 @@ def __init__(self): callback_group=self.external_callback_group) # Optimization - self.new_gains = self.get_gains() - self.optimizer = Optimizer(self.new_gains, {'u1': 10**-4, 'u2': 0.5, 'sigma': 1.5, - 'alpha': 1, 'tau': 10**-3}) + self.optimization_params = {'u1': 10**-4, + 'u2': 0.5, + 'sigma': 1.5, + 'alpha': 1, + 'tau': 10**-3} + self.new_gains = None # get_gains function cannot be called in __init__ since the node + # has not yet been passed to the executor + self.optimizer = None ## ROS Callbacks ## @@ -186,13 +191,16 @@ def stabilize_period_timer_callback(self): self.call_toggle_step_signal() self.new_gains = self.optimizer.get_next_parameter_set(self.calculate_error()) - def run_tuning_iteration_callback(self, request, response): """ This function is called when the run_tuning_iteration service is called. It starts the next iteration of the optimization process. """ + if self.optimizer is None: + self.new_gains = self.get_gains() + self.optimizer = Optimizer(self.new_gains, self.optimization_params) + if not self.optimizer.optimization_terminated(): self.get_logger().info('Setting gains: ' + str(self.new_gains)) self.set_gains(self.new_gains) @@ -231,12 +239,8 @@ def get_gains(self): Returns: list of floats: The current gains of the autopilot that is being tuning. """ - request = GetParameters.Request() - - while not self.get_parameter_client.wait_for_service(timeout_sec=1.0): - self.get_logger().info(f'Service {self.get_parameter_client.srv_name}' + - ' not available, waiting...') + request = GetParameters.Request() if self.current_autopilot == CurrentAutopilot.ROLL: request.names = ['r_kp', 'r_kd'] elif self.current_autopilot == CurrentAutopilot.COURSE: @@ -248,9 +252,35 @@ def get_gains(self): else: # CurrentAutopilot.AIRSPEED request.names = ['a_t_kp', 'a_t_ki'] - self.get_parameter_client.call_async(request) + # Call the service + while not self.get_parameter_client.wait_for_service(timeout_sec=1.0): + self.get_logger().info(f'Service {self.get_parameter_client.srv_name}' + + ' not available, waiting...') + future = self.get_parameter_client.call_async(request) - return np.array([1.0, 1.0]) # Placeholder + # Wait for the service to complete, exiting if it takes too long + call_time = time.time() + callback_complete = False + while call_time + 5 > time.time(): + if future.done(): + callback_complete = True + break + if not callback_complete or future.result() is None: + raise RuntimeError('Unable to get autopilot gains.') + + # Check that values were returned. No values are returned if the parameter does not exist. 
+ if len(future.result().values) == 0: + raise RuntimeError(f'Parameter values for {request.names} were not returned, ' + + 'have they been set?') + + # Put the gains into a numpy array + gains = [] + for value in future.result().values: + if value.type != ParameterType.PARAMETER_DOUBLE: + raise RuntimeError('Parameter type returned by get_parameters is not DOUBLE.') + gains.append(value.double_value) + + return np.array(gains) # Placeholder def set_gains(self, gains): """ @@ -281,21 +311,19 @@ def set_gains(self, gains): future = self.set_parameter_client.call_async(request) # Wait for the service to complete, exiting if it takes too long - # rclcpp has a function for this, but I couldn't seem to find one for rclpy call_time = time.time() callback_complete = False while call_time + 5 > time.time(): if future.done(): callback_complete = True break - if not callback_complete: - self.get_logger().error('Unable to set autopilot gains after 5 seconds.') + if not callback_complete or future.result() is None: + raise RuntimeError('Unable to set autopilot gains.') # Print any errors that occurred for response in future.result().results: if not response.successful: - self.get_logger().error(f'Failed to set parameter: {response.reason}') - + raise RuntimeError(f'Failed to set parameter: {response.reason}') def calculate_error(self): """ From 57fa4e51f9b61df08d8629f28b9686e4a80c05e4 Mon Sep 17 00:00:00 2001 From: Brandon Sutherland Date: Mon, 25 Mar 2024 13:29:30 -0600 Subject: [PATCH 14/52] added gain queue, for testing multiple gains easier --- rosplane_tuning/src/autotune/autotune.py | 53 ++++++++++++++++++------ 1 file changed, 40 insertions(+), 13 deletions(-) diff --git a/rosplane_tuning/src/autotune/autotune.py b/rosplane_tuning/src/autotune/autotune.py index 52af295..71df665 100644 --- a/rosplane_tuning/src/autotune/autotune.py +++ b/rosplane_tuning/src/autotune/autotune.py @@ -15,6 +15,7 @@ from std_srvs.srv import Trigger from enum import Enum, auto +from queue import Queue import numpy as np import time @@ -51,6 +52,8 @@ def __init__(self): # Class state variables self.collecting_data = False + self.gain_queue = Queue() + self.error_queue = Queue() # Data storage self.state = [] @@ -133,9 +136,8 @@ def __init__(self): 'sigma': 1.5, 'alpha': 1, 'tau': 10**-3} - self.new_gains = None # get_gains function cannot be called in __init__ since the node + self.optimizer = None # get_gains function cannot be called in __init__ since the node # has not yet been passed to the executor - self.optimizer = None ## ROS Callbacks ## @@ -189,7 +191,6 @@ def stabilize_period_timer_callback(self): self.collecting_data = False self.stabilize_period_timer.cancel() self.call_toggle_step_signal() - self.new_gains = self.optimizer.get_next_parameter_set(self.calculate_error()) def run_tuning_iteration_callback(self, request, response): """ @@ -198,12 +199,12 @@ def run_tuning_iteration_callback(self, request, response): """ if self.optimizer is None: - self.new_gains = self.get_gains() - self.optimizer = Optimizer(self.new_gains, self.optimization_params) + self.optimizer = Optimizer(self.get_current_gains(), self.optimization_params) if not self.optimizer.optimization_terminated(): - self.get_logger().info('Setting gains: ' + str(self.new_gains)) - self.set_gains(self.new_gains) + new_gains = self.get_next_gains() + self.get_logger().info('Setting gains: ' + str(new_gains)) + self.set_current_gains(new_gains) self.stabilize_period_timer.timer_period_ns = \ int(self.get_parameter('stabilize_period').value * 1e9) 
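The wait-for-the-future block that patches 12 and 13 add around each call_async is the same few lines every time; condensed into one helper it reads as below. This is a sketch for reference only (call_with_timeout is a hypothetical name, not something these patches define):

import time

def call_with_timeout(client, request, timeout=5.0):
    # Poll the future instead of spinning; the response itself is delivered
    # by whichever executor thread services the client's callback group.
    future = client.call_async(request)
    deadline = time.time() + timeout
    while time.time() < deadline:
        if future.done():
            return future.result()
    raise RuntimeError(f'{client.srv_name} did not respond within {timeout} seconds.')

A short time.sleep inside the loop would keep the busy-wait from saturating a core, and the polling only terminates because the client lives in a separate callback group under a MultiThreadedExecutor, as set up in patch 12.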
@@ -229,15 +230,14 @@ def call_toggle_step_signal(self): self.get_logger().info(f'Service {self.toggle_step_signal_client.srv_name} ' + 'not available, waiting...') - request = Trigger.Request() - self.toggle_step_signal_client.call_async(request) + self.toggle_step_signal_client.call_async(Trigger.Request()) - def get_gains(self): + def get_current_gains(self): """ Gets the current gains of the autopilot. Returns: - list of floats: The current gains of the autopilot that is being tuning. + np.array, size num_gains, dtype float: The gains of the autopilot. [gain1, gain2, ...] """ request = GetParameters.Request() @@ -280,11 +280,15 @@ def get_gains(self): raise RuntimeError('Parameter type returned by get_parameters is not DOUBLE.') gains.append(value.double_value) - return np.array(gains) # Placeholder + return np.array(gains) - def set_gains(self, gains): + def set_current_gains(self, gains): """ Set the gains of the autopilot to the given values. + + Parameters: + gains (np.array, size num_gains, dtype float): The gains to set for the autopilot. + [gain1, gain2, ...] """ request = SetParameters.Request() @@ -333,6 +337,29 @@ def calculate_error(self): # TODO: Implement this function pass + def get_next_gains(self): + """ + Gets the next set of gains to test from either the queue or the optimizer. + + Returns: + np.array, size num_gains, dtype float: The gains to test. [gain1, gain2, ...] + """ + + self.error_queue.put(self.calculate_error()) + self.state = [] + self.commands = [] + self.internals_debug = [] + + if self.gain_queue.empty(): + # Empty the error queue and pass to the optimizer + error_array = np.array([self.error_queue.get() while not self.error_queue.empty()]) + next_set = self.optimizer.get_next_parameter_set(error_array) + + # Store the next set of gains in the queue + self.gain_queue.put(next_set[i] for i in range(next_set.shape[0])) + + return self.gain_queue.get() + def main(args=None): rclpy.init(args=args) From fb0c1a5a687cf04e9438fb5587fab887e7361fff Mon Sep 17 00:00:00 2001 From: Brandon Sutherland Date: Mon, 25 Mar 2024 14:45:56 -0600 Subject: [PATCH 15/52] got autotune running with queue --- rosplane_tuning/src/autotune/autotune.py | 29 ++++++++++++++---------- 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/rosplane_tuning/src/autotune/autotune.py b/rosplane_tuning/src/autotune/autotune.py index 71df665..0d9c092 100644 --- a/rosplane_tuning/src/autotune/autotune.py +++ b/rosplane_tuning/src/autotune/autotune.py @@ -119,7 +119,7 @@ def __init__(self): # Clients self.toggle_step_signal_client = self.create_client( Trigger, - '/autotune/toggle_step_signal', + 'toggle_step_signal', callback_group=self.external_callback_group) self.get_parameter_client = self.create_client( GetParameters, @@ -181,7 +181,7 @@ def stabilize_period_timer_callback(self): if not self.collecting_data: # Stabilization period is over, start collecting data self.get_logger().info('Stepping command and collecting data for ' - + str(self.get_parameter('stabilize_period').value) + + str(self.get_parameter('/autotune/stabilize_period').value) + ' seconds...') self.collecting_data = True self.call_toggle_step_signal() @@ -199,23 +199,25 @@ def run_tuning_iteration_callback(self, request, response): """ if self.optimizer is None: - self.optimizer = Optimizer(self.get_current_gains(), self.optimization_params) + new_gains = self.get_current_gains() + self.optimizer = Optimizer(new_gains, self.optimization_params) + else: + new_gains = self.get_next_gains() if not 
self.optimizer.optimization_terminated(): - new_gains = self.get_next_gains() self.get_logger().info('Setting gains: ' + str(new_gains)) self.set_current_gains(new_gains) self.stabilize_period_timer.timer_period_ns = \ - int(self.get_parameter('stabilize_period').value * 1e9) + int(self.get_parameter('/autotune/stabilize_period').value * 1e9) self.stabilize_period_timer.reset() self.get_logger().info('Stabilizing autopilot for ' - + str(self.get_parameter('stabilize_period').value) + + str(self.get_parameter('/autotune/stabilize_period').value) + ' seconds...') response.success = True - response.message = self.optimizer.get_optimiztion_status() + response.message = self.optimizer.get_optimization_status() return response @@ -335,7 +337,7 @@ def calculate_error(self): collected data. """ # TODO: Implement this function - pass + return 0.0 def get_next_gains(self): """ @@ -351,12 +353,15 @@ def get_next_gains(self): self.internals_debug = [] if self.gain_queue.empty(): - # Empty the error queue and pass to the optimizer - error_array = np.array([self.error_queue.get() while not self.error_queue.empty()]) - next_set = self.optimizer.get_next_parameter_set(error_array) + # Empty the error queue and store it in a numpy array + error_array = [] + while not self.error_queue.empty(): + error_array.append(self.error_queue.get()) + error_array = np.array(error_array) # Store the next set of gains in the queue - self.gain_queue.put(next_set[i] for i in range(next_set.shape[0])) + for set in self.optimizer.get_next_parameter_set(error_array): + self.gain_queue.put(set) return self.gain_queue.get() From e534e7995890fc268ab4a40b39165ff473425851 Mon Sep 17 00:00:00 2001 From: Brandon Sutherland Date: Mon, 25 Mar 2024 18:47:06 -0600 Subject: [PATCH 16/52] implemented error calculation --- rosplane_tuning/src/autotune/autotune.py | 35 ++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/rosplane_tuning/src/autotune/autotune.py b/rosplane_tuning/src/autotune/autotune.py index 0d9c092..1fe4c3a 100644 --- a/rosplane_tuning/src/autotune/autotune.py +++ b/rosplane_tuning/src/autotune/autotune.py @@ -336,8 +336,39 @@ def calculate_error(self): Calculate the error between the state estimate and the commanded setpoint using the collected data. 
""" - # TODO: Implement this function - return 0.0 + + # Get the estimate and command data from the right autopilot + if self.current_autopilot == CurrentAutopilot.ROLL: + estimate = np.array(self.state)[:, [0, 2]] + command = np.array(self.internals_debug)[:, [0, 1]] + elif self.current_autopilot == CurrentAutopilot.COURSE: + estimate = np.array(self.state)[:, [0, 3]] + command = np.array(self.commands)[:, [0, 2]] + elif self.current_autopilot == CurrentAutopilot.PITCH: + estimate = np.array(self.state)[:, [0, 4]] + command = np.array(self.internals_debug)[:, [0, 2]] + elif self.current_autopilot == CurrentAutopilot.ALTITUDE: + estimate = np.array(self.state)[:, [0, 5]] + command = np.array(self.commands)[:, [0, 3]] + else: # CurrentAutopilot.AIRSPEED + estimate = np.array(self.state)[:, [0, 1]] + command = np.array(self.commands)[:, [0, 1]] + + # Trim any measurements that happened before the first command + estimate = estimate[np.where(estimate[:, 0] >= command[0, 0])[0], :] + + # Integrate across the square of the error + cumulative_error = 0.0 + for i in range(estimate.shape[0] - 1): + # Get command that was just previous + command_index = np.where(command[:, 0] <= estimate[i + 1, 0])[0][-1] + # Numerically integrate + cumulative_error += (estimate[i + 1, 1] - command[command_index, 1])**2 \ + * (estimate[i + 1, 0] - estimate[i, 0]) + + self.get_logger().info('Cumulative error: ' + str(cumulative_error)) + + return cumulative_error def get_next_gains(self): """ From 4c3a23baf90c72e71c90e69910900a75eae09f6e Mon Sep 17 00:00:00 2001 From: Brandon Sutherland Date: Tue, 26 Mar 2024 11:20:14 -0600 Subject: [PATCH 17/52] added return to orbit state --- rosplane_tuning/src/autotune/autotune.py | 54 ++++++++++++++++++------ 1 file changed, 42 insertions(+), 12 deletions(-) diff --git a/rosplane_tuning/src/autotune/autotune.py b/rosplane_tuning/src/autotune/autotune.py index 1fe4c3a..146eb2c 100644 --- a/rosplane_tuning/src/autotune/autotune.py +++ b/rosplane_tuning/src/autotune/autotune.py @@ -31,6 +31,16 @@ class CurrentAutopilot(Enum): AIRSPEED = auto() +class AutoTuneState(Enum): + """ + This class defines the possible states of the autotune node. + """ + ORBITING = auto() + STABILIZING = auto() + STEP_TESTING = auto() + RETURN_TESTING = auto() + + class Autotune(Node): """ This class is an auto-tuning node for the ROSplane autopilot. The node calculates the error @@ -46,6 +56,8 @@ class Autotune(Node): def __init__(self): super().__init__('autotune') + self.autotune_state = AutoTuneState.ORBITING + # Callback groups, used for allowing external services to run mid-internal callback self.internal_callback_group = MutuallyExclusiveCallbackGroup() self.external_callback_group = MutuallyExclusiveCallbackGroup() @@ -131,13 +143,14 @@ def __init__(self): callback_group=self.external_callback_group) # Optimization + self.initial_gains = None # get_gains function cannot be called in __init__ since the node + # has not yet been passed to the executor self.optimization_params = {'u1': 10**-4, 'u2': 0.5, 'sigma': 1.5, 'alpha': 1, 'tau': 10**-3} - self.optimizer = None # get_gains function cannot be called in __init__ since the node - # has not yet been passed to the executor + self.optimizer = None ## ROS Callbacks ## @@ -178,19 +191,36 @@ def stabilize_period_timer_callback(self): collection and sets up ROSplane to perform a step manuever. 
""" - if not self.collecting_data: + if self.autotune_state == AutoTuneState.STABILIZING: # Stabilization period is over, start collecting data - self.get_logger().info('Stepping command and collecting data for ' + self.get_logger().info('Setting gains: ' + str(self.new_gains)) + self.set_current_gains(self.new_gains) + self.get_logger().info('Stepping command for ' + str(self.get_parameter('/autotune/stabilize_period').value) + ' seconds...') self.collecting_data = True self.call_toggle_step_signal() + + self.autotune_state = AutoTuneState.STEP_TESTING + + elif self.autotune_state == AutoTuneState.STEP_TESTING: + # Step test is over, reverse step + self.get_logger().info('Reversing step command for ' + + str(self.get_parameter('/autotune/stabilize_period').value) + + ' seconds...') + self.call_toggle_step_signal() + + self.autotune_state = AutoTuneState.RETURN_TESTING + else: # Data collection is over, stop collecting data and calculate gains for next iteration - self.get_logger().info('Data collection complete.') + self.get_logger().info('Data collection complete, restoring original gains and ' + + 'returning to orbit.') self.collecting_data = False self.stabilize_period_timer.cancel() - self.call_toggle_step_signal() + self.set_current_gains(self.initial_gains) + + self.autotune_state = AutoTuneState.ORBITING def run_tuning_iteration_callback(self, request, response): """ @@ -199,15 +229,14 @@ def run_tuning_iteration_callback(self, request, response): """ if self.optimizer is None: - new_gains = self.get_current_gains() - self.optimizer = Optimizer(new_gains, self.optimization_params) + # Initialize optimizer + self.initial_gains = self.get_current_gains() + self.optimizer = Optimizer(self.initial_gains, self.optimization_params) + self.new_gains = self.initial_gains else: - new_gains = self.get_next_gains() + self.new_gains = self.get_next_gains() if not self.optimizer.optimization_terminated(): - self.get_logger().info('Setting gains: ' + str(new_gains)) - self.set_current_gains(new_gains) - self.stabilize_period_timer.timer_period_ns = \ int(self.get_parameter('/autotune/stabilize_period').value * 1e9) self.stabilize_period_timer.reset() @@ -215,6 +244,7 @@ def run_tuning_iteration_callback(self, request, response): self.get_logger().info('Stabilizing autopilot for ' + str(self.get_parameter('/autotune/stabilize_period').value) + ' seconds...') + self.autotune_state = AutoTuneState.STABILIZING response.success = True response.message = self.optimizer.get_optimization_status() From ab60c200102bc6d9a31c245321675ebcea3fb998 Mon Sep 17 00:00:00 2001 From: Brandon Sutherland Date: Tue, 26 Mar 2024 12:05:04 -0600 Subject: [PATCH 18/52] added option for continuous tuning --- rosplane_tuning/launch/autotune.launch.py | 19 ++++++--- .../launch/roll_autotune.launch.py | 41 ++++++++++++++----- rosplane_tuning/src/autotune/autotune.py | 35 ++++++++-------- 3 files changed, 62 insertions(+), 33 deletions(-) diff --git a/rosplane_tuning/launch/autotune.launch.py b/rosplane_tuning/launch/autotune.launch.py index a8f4c26..df16601 100644 --- a/rosplane_tuning/launch/autotune.launch.py +++ b/rosplane_tuning/launch/autotune.launch.py @@ -10,14 +10,19 @@ def generate_launch_description(): # Launch arguments - stabilize_period = LaunchConfiguration('/autotune/stabilize_period') + stabilize_period = LaunchConfiguration('stabilize_period') stabilize_period_launch_arg = DeclareLaunchArgument( - '/autotune/stabilize_period', + 'stabilize_period', description='Amount of time to collect data for calculating 
error'
     )
+    continuous_tuning = LaunchConfiguration('continuous_tuning')
+    continuous_tuning_launch_arg = DeclareLaunchArgument(
+        'continuous_tuning',
+        description='Whether to run the tuning sequence continuously or to wait for manual input'
+    )
+    current_tuning_autopilot = LaunchConfiguration('current_tuning_autopilot')
     current_tuning_autopilot_launch_arg = DeclareLaunchArgument(
-        '/autotune/current_tuning_autopilot',
+        'current_tuning_autopilot',
         description='Autopilot to tune'
     )
@@ -27,8 +32,9 @@ def generate_launch_description():
         executable='autotune.py',
         output='screen',
         parameters=[
-            {'/autotune/stabilize_period': stabilize_period},
-            {'/autotune/current_tuning_autopilot': current_tuning_autopilot}
+            {'stabilize_period': stabilize_period},
+            {'continuous_tuning': continuous_tuning},
+            {'current_tuning_autopilot': current_tuning_autopilot}
         ]
     )
     rosplane_tuning_launch_include = IncludeLaunchDescription(
@@ -40,6 +46,7 @@ def generate_launch_description():

     return LaunchDescription([
         stabilize_period_launch_arg,
+        continuous_tuning_launch_arg,
         current_tuning_autopilot_launch_arg,
         autotune_node,
         rosplane_tuning_launch_include
diff --git a/rosplane_tuning/launch/roll_autotune.launch.py b/rosplane_tuning/launch/roll_autotune.launch.py
index 3a6768c..9eb2c8d 100644
--- a/rosplane_tuning/launch/roll_autotune.launch.py
+++ b/rosplane_tuning/launch/roll_autotune.launch.py
@@ -2,18 +2,37 @@

 from ament_index_python import get_package_share_directory
 from launch import LaunchDescription
-from launch.actions import IncludeLaunchDescription
+from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
+from launch.substitutions import TextSubstitution, LaunchConfiguration
 from launch.launch_description_sources import PythonLaunchDescriptionSource

 def generate_launch_description():
+    stabilize_period = LaunchConfiguration('stabilize_period')
+    stabilize_period_launch_arg = DeclareLaunchArgument(
+        'stabilize_period',
+        default_value=TextSubstitution(text='5.0'),
+        description='Amount of time to collect data for calculating error'
+    )
+    continuous_tuning = LaunchConfiguration('continuous_tuning')
+    continuous_tuning_launch_arg = DeclareLaunchArgument(
+        'continuous_tuning',
+        default_value=TextSubstitution(text='False'),
+        description='Whether to run the tuning sequence continuously or to wait for manual input'
+    )
+    rosplane_tuning_launch_arg = IncludeLaunchDescription(
+        PythonLaunchDescriptionSource(
+            os.path.join(get_package_share_directory('rosplane_tuning'),
+                         'launch/autotune.launch.py')
+        ),
+        launch_arguments={
+            'stabilize_period': stabilize_period,
+            'continuous_tuning': continuous_tuning,
+            'current_tuning_autopilot': 'roll'
+        }.items()
+    )
+
     return LaunchDescription([
-        IncludeLaunchDescription(
-            PythonLaunchDescriptionSource(
-                os.path.join(get_package_share_directory('rosplane_tuning'),
-                             'launch/autotune.launch.py')
-            ),
-            launch_arguments={
-                '/autotune/stabilize_period': '5.0',
-                '/autotune/current_tuning_autopilot': 'roll'
-            }.items()
-        )])
+        stabilize_period_launch_arg,
+        continuous_tuning_launch_arg,
+        rosplane_tuning_launch_arg
+    ])
diff --git a/rosplane_tuning/src/autotune/autotune.py b/rosplane_tuning/src/autotune/autotune.py
index 146eb2c..d42aaf3 100644
--- a/rosplane_tuning/src/autotune/autotune.py
+++ b/rosplane_tuning/src/autotune/autotune.py
@@ -74,22 +74,24 @@ def __init__(self):

         # ROS parameters
         # The amount of time to collect data for
calculating the error - self.declare_parameter('/autotune/stabilize_period', rclpy.Parameter.Type.DOUBLE) + self.declare_parameter('stabilize_period', rclpy.Parameter.Type.DOUBLE) + # Whether to run the autotune continuously or not + self.declare_parameter('continuous_tuning', rclpy.Parameter.Type.BOOL) # The autopilot that is currently being tuned - self.declare_parameter('/autotune/current_tuning_autopilot', rclpy.Parameter.Type.STRING) + self.declare_parameter('current_tuning_autopilot', rclpy.Parameter.Type.STRING) # Get the autopilot to tune - if self.get_parameter('/autotune/current_tuning_autopilot').value == 'roll': + if self.get_parameter('current_tuning_autopilot').value == 'roll': self.current_autopilot = CurrentAutopilot.ROLL - elif self.get_parameter('/autotune/current_tuning_autopilot').value == 'course': + elif self.get_parameter('current_tuning_autopilot').value == 'course': self.current_autopilot = CurrentAutopilot.COURSE - elif self.get_parameter('/autotune/current_tuning_autopilot').value == 'pitch': + elif self.get_parameter('current_tuning_autopilot').value == 'pitch': self.current_autopilot = CurrentAutopilot.PITCH - elif self.get_parameter('/autotune/current_tuning_autopilot').value == 'altitude': + elif self.get_parameter('current_tuning_autopilot').value == 'altitude': self.current_autopilot = CurrentAutopilot.ALTITUDE - elif self.get_parameter('/autotune/current_tuning_autopilot').value == 'airspeed': + elif self.get_parameter('current_tuning_autopilot').value == 'airspeed': self.current_autopilot = CurrentAutopilot.AIRSPEED else: - raise ValueError(self.get_parameter('/autotune/current_tuning_autopilot').value + + raise ValueError(self.get_parameter('current_tuning_autopilot').value + ' is not a valid value for current_tuning_autopilot.' 
+ ' Please select one of the' + ' following: roll, course, pitch, altitude, airspeed.') @@ -116,7 +118,7 @@ def __init__(self): # Timers self.stabilize_period_timer = self.create_timer( - self.get_parameter('/autotune/stabilize_period').value, + self.get_parameter('stabilize_period').value, self.stabilize_period_timer_callback, callback_group=self.internal_callback_group) self.stabilize_period_timer.cancel() @@ -124,7 +126,7 @@ def __init__(self): # Services self.run_tuning_iteration_service = self.create_service( Trigger, - '/autotune/run_tuning_iteration', + 'run_tuning_iteration', self.run_tuning_iteration_callback, callback_group=self.internal_callback_group) @@ -196,7 +198,7 @@ def stabilize_period_timer_callback(self): self.get_logger().info('Setting gains: ' + str(self.new_gains)) self.set_current_gains(self.new_gains) self.get_logger().info('Stepping command for ' - + str(self.get_parameter('/autotune/stabilize_period').value) + + str(self.get_parameter('stabilize_period').value) + ' seconds...') self.collecting_data = True self.call_toggle_step_signal() @@ -206,7 +208,7 @@ def stabilize_period_timer_callback(self): elif self.autotune_state == AutoTuneState.STEP_TESTING: # Step test is over, reverse step self.get_logger().info('Reversing step command for ' - + str(self.get_parameter('/autotune/stabilize_period').value) + + str(self.get_parameter('stabilize_period').value) + ' seconds...') self.call_toggle_step_signal() @@ -222,6 +224,9 @@ def stabilize_period_timer_callback(self): self.autotune_state = AutoTuneState.ORBITING + if self.get_parameter('continuous_tuning').value: + self.run_tuning_iteration_callback(Trigger.Request(), Trigger.Response()) + def run_tuning_iteration_callback(self, request, response): """ This function is called when the run_tuning_iteration service is called. 
It starts the @@ -238,11 +243,11 @@ def run_tuning_iteration_callback(self, request, response): if not self.optimizer.optimization_terminated(): self.stabilize_period_timer.timer_period_ns = \ - int(self.get_parameter('/autotune/stabilize_period').value * 1e9) + int(self.get_parameter('stabilize_period').value * 1e9) self.stabilize_period_timer.reset() self.get_logger().info('Stabilizing autopilot for ' - + str(self.get_parameter('/autotune/stabilize_period').value) + + str(self.get_parameter('stabilize_period').value) + ' seconds...') self.autotune_state = AutoTuneState.STABILIZING @@ -396,8 +401,6 @@ def calculate_error(self): cumulative_error += (estimate[i + 1, 1] - command[command_index, 1])**2 \ * (estimate[i + 1, 0] - estimate[i, 0]) - self.get_logger().info('Cumulative error: ' + str(cumulative_error)) - return cumulative_error def get_next_gains(self): From b56def6f8b5bd69d303211de6a2e223d57dfeb82 Mon Sep 17 00:00:00 2001 From: Brandon Sutherland Date: Tue, 26 Mar 2024 12:48:16 -0600 Subject: [PATCH 19/52] fixed a couple bugs around optimization termination --- rosplane_tuning/src/autotune/autotune.py | 6 +++++- rosplane_tuning/src/autotune/optimizer.py | 1 + 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/rosplane_tuning/src/autotune/autotune.py b/rosplane_tuning/src/autotune/autotune.py index d42aaf3..b66bf11 100644 --- a/rosplane_tuning/src/autotune/autotune.py +++ b/rosplane_tuning/src/autotune/autotune.py @@ -238,7 +238,7 @@ def run_tuning_iteration_callback(self, request, response): self.initial_gains = self.get_current_gains() self.optimizer = Optimizer(self.initial_gains, self.optimization_params) self.new_gains = self.initial_gains - else: + elif not self.optimizer.optimization_terminated(): self.new_gains = self.get_next_gains() if not self.optimizer.optimization_terminated(): @@ -250,6 +250,10 @@ def run_tuning_iteration_callback(self, request, response): + str(self.get_parameter('stabilize_period').value) + ' seconds...') self.autotune_state = AutoTuneState.STABILIZING + else: + self.get_logger().info('Optimization terminated with: ' + + self.optimizer.get_optimization_status()) + self.stabilize_period_timer.cancel() response.success = True response.message = self.optimizer.get_optimization_status() diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py index 18cd490..cdc7479 100644 --- a/rosplane_tuning/src/autotune/optimizer.py +++ b/rosplane_tuning/src/autotune/optimizer.py @@ -65,6 +65,7 @@ def get_optimiztion_status(self): def get_next_parameter_set(self, error): """ This function returns the next set of gains to be tested by the optimization algorithm. + Needs to return valid array even if optimization is terminated. 
Parameters: error (np.array, size num_gains, dtype float): The error from the list of gains to test that From 92444fe8ed31da5cd6c42601788a5502759b685e Mon Sep 17 00:00:00 2001 From: Brandon Sutherland Date: Tue, 26 Mar 2024 13:21:11 -0600 Subject: [PATCH 20/52] wrote plot_util for visualizing function landscape --- rosplane_tuning/CMakeLists.txt | 1 + rosplane_tuning/src/autotune/plot_util.py | 58 +++++++++++++++++++++++ 2 files changed, 59 insertions(+) create mode 100644 rosplane_tuning/src/autotune/plot_util.py diff --git a/rosplane_tuning/CMakeLists.txt b/rosplane_tuning/CMakeLists.txt index 4978102..08a9c47 100644 --- a/rosplane_tuning/CMakeLists.txt +++ b/rosplane_tuning/CMakeLists.txt @@ -67,6 +67,7 @@ install(PROGRAMS install(PROGRAMS src/autotune/autotune.py src/autotune/optimizer.py + src/autotune/plot_util.py DESTINATION lib/${PROJECT_NAME} ) diff --git a/rosplane_tuning/src/autotune/plot_util.py b/rosplane_tuning/src/autotune/plot_util.py new file mode 100644 index 0000000..ca7b8e9 --- /dev/null +++ b/rosplane_tuning/src/autotune/plot_util.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 + +import numpy as np +import matplotlib.pyplot as plt + +class Optimizer: + """ + This class is a utility that can be used to plot the function landscape of the autopilot + being tuned. It acts as a dummy optimizer that instead of running an optimization, it collects + data based on a grid of points and then plots the function landscape. + + WARNING: Plotting is intended for simulation only, do not attempt to plot on real hardware + as the autopilot may be given non-functional gains. As such, no launch files are provided for + this utility. If you wish to use it, modify the source code of the autotune node to + include this class instead of the optimizer. + """ + + def __init__(self, *args): + # None of the optimization parameters are needed, so they are ignored. + + # Define the grid of points to test and plot + self.x_values = np.linspace(0.0, 1.0, 5) + self.y_values = np.linspace(0.0, 1.0, 5) + + self.points_given = False + self.plotting_complete = False + + def optimization_terminated(self): + return self.plotting_complete + + def get_optimization_status(self): + if self.plotting_complete: + return "Plotting complete." + else: + return "Collecting data for plotting." 
+ + def get_next_parameter_set(self, error): + if not self.points_given: + self.points_given = True + x, y = np.meshgrid(self.x_values, self.y_values) + return np.column_stack((x.ravel(), y.ravel())) + else: + self.plot(error) + self.plotting_complete = True + return np.zeros((1, 2)) + + def plot(self, error): + x, y = np.meshgrid(self.x_values, self.y_values) + z = error.reshape(x.shape) + + # Plot the fucntion landscape in a 3D plot + fig = plt.figure() + ax = fig.add_subplot(111, projection='3d') + ax.plot_surface(x, y, z, cmap='viridis', alpha=0.8) + ax.set_xlabel('Gain 1') + ax.set_ylabel('Gain 2') + ax.set_zlabel('Error') + plt.show() From a8b9398594da983b7e4b804e790e882da740adf2 Mon Sep 17 00:00:00 2001 From: Brandon Sutherland Date: Tue, 2 Apr 2024 13:15:57 -0600 Subject: [PATCH 21/52] misc tweaks to tuning package --- rosplane_tuning/launch/rosplane_tuning.launch.py | 2 +- rosplane_tuning/src/autotune/plot_util.py | 15 ++++++++++++++- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/rosplane_tuning/launch/rosplane_tuning.launch.py b/rosplane_tuning/launch/rosplane_tuning.launch.py index 8e9a1b8..189deee 100644 --- a/rosplane_tuning/launch/rosplane_tuning.launch.py +++ b/rosplane_tuning/launch/rosplane_tuning.launch.py @@ -23,7 +23,7 @@ def generate_launch_description(): autopilot_params = os.path.join( rosplane_dir, 'params', - aircraft + '_autopilot_commands.yaml' + aircraft + '_autopilot_params.yaml' ) diff --git a/rosplane_tuning/src/autotune/plot_util.py b/rosplane_tuning/src/autotune/plot_util.py index ca7b8e9..d571f46 100644 --- a/rosplane_tuning/src/autotune/plot_util.py +++ b/rosplane_tuning/src/autotune/plot_util.py @@ -48,7 +48,7 @@ def plot(self, error): x, y = np.meshgrid(self.x_values, self.y_values) z = error.reshape(x.shape) - # Plot the fucntion landscape in a 3D plot + # Plot the function landscape in a 3D plot fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.plot_surface(x, y, z, cmap='viridis', alpha=0.8) @@ -56,3 +56,16 @@ def plot(self, error): ax.set_ylabel('Gain 2') ax.set_zlabel('Error') plt.show() + + # Save to npy file, iterate the filename if it already exists + xyz = np.stack((x, y, z), axis=-1) + file_index = 0 + while True: + filename = f'plot_data_{file_index}.npy' + try: + with open(filename, 'xb') as f: # Use 'xb' mode to create the file exclusively + np.save(f, xyz) + break # Exit loop if file creation is successful + except FileExistsError: + file_index += 1 + From 964b8aa0ab771920ee0d0aa513cfe51634ec35c7 Mon Sep 17 00:00:00 2001 From: Brandon Sutherland Date: Tue, 2 Apr 2024 13:16:28 -0600 Subject: [PATCH 22/52] got autotune setting signal generator commands --- rosplane_tuning/src/autotune/autotune.py | 73 +++++++++++++++++++++++- 1 file changed, 70 insertions(+), 3 deletions(-) diff --git a/rosplane_tuning/src/autotune/autotune.py b/rosplane_tuning/src/autotune/autotune.py index b66bf11..8f39ec6 100644 --- a/rosplane_tuning/src/autotune/autotune.py +++ b/rosplane_tuning/src/autotune/autotune.py @@ -139,10 +139,14 @@ def __init__(self): GetParameters, '/autopilot/get_parameters', callback_group=self.external_callback_group) - self.set_parameter_client = self.create_client( + self.autopilot_set_param_client = self.create_client( SetParameters, '/autopilot/set_parameters', callback_group=self.external_callback_group) + self.signal_generator_set_param_client = self.create_client( + SetParameters, + '/signal_generator/set_parameters', + callback_group=self.external_callback_group) # Optimization self.initial_gains = 
None # get_gains function cannot be called in __init__ since the node
@@ -238,7 +242,7 @@ def run_tuning_iteration_callback(self, request, response):
             self.initial_gains = self.get_current_gains()
             self.optimizer = Optimizer(self.initial_gains, self.optimization_params)
             self.new_gains = self.initial_gains
+            self.set_signal_generator_params()
         elif not self.optimizer.optimization_terminated():
             self.new_gains = self.get_next_gains()

@@ -262,6 +267,68 @@ def run_tuning_iteration_callback(self, request, response):

     ## Helper Functions ##
+    def set_signal_generator_params(self):
+        """
+        Sets the signal generator's parameters to the initial values needed for the optimization.
+        """
+
+        request = SetParameters.Request()
+        if self.current_autopilot == CurrentAutopilot.ROLL:
+            request.parameters = [
+                Parameter(name='controller_output', value='roll').to_parameter_msg(),
+                Parameter(name='signal_type', value='step').to_parameter_msg(),
+                Parameter(name='signal_magnitude', value=np.deg2rad(30)).to_parameter_msg(),
+                Parameter(name='default_phi_c', value=0.0).to_parameter_msg()
+            ]
+        elif self.current_autopilot == CurrentAutopilot.COURSE:
+            request.parameters = [
+                Parameter(name='controller_output', value='course').to_parameter_msg(),
+                Parameter(name='signal_type', value='step').to_parameter_msg(),
+                Parameter(name='signal_magnitude', value=np.deg2rad(90)).to_parameter_msg(),
+            ]
+        elif self.current_autopilot == CurrentAutopilot.PITCH:
+            request.parameters = [
+                Parameter(name='controller_output', value='pitch').to_parameter_msg(),
+                Parameter(name='signal_type', value='step').to_parameter_msg(),
+                Parameter(name='signal_magnitude', value=np.deg2rad(20)).to_parameter_msg(),
+                Parameter(name='default_theta_c', value=0.0).to_parameter_msg()
+            ]
+        elif self.current_autopilot == CurrentAutopilot.ALTITUDE:
+            request.parameters = [
+                Parameter(name='controller_output', value='altitude').to_parameter_msg(),
+                Parameter(name='signal_type', value='step').to_parameter_msg(),
+                Parameter(name='signal_magnitude', value=10.0).to_parameter_msg(),
+            ]
+        else:  # CurrentAutopilot.AIRSPEED
+            request.parameters = [
+                Parameter(name='controller_output', value='airspeed').to_parameter_msg(),
+                Parameter(name='signal_type', value='step').to_parameter_msg(),
+                Parameter(name='signal_magnitude', value=5.0).to_parameter_msg(),
+            ]
+
+
+        # Call the service
+        while not self.signal_generator_set_param_client.wait_for_service(timeout_sec=1.0):
+            self.get_logger().info(f'Service {self.signal_generator_set_param_client.srv_name}' +
+                                   ' not available, waiting...')
+        future = self.signal_generator_set_param_client.call_async(request)
+
+        # Wait for the service to complete, exiting if it takes too long
+        call_time = time.time()
+        callback_complete = False
+        while call_time + 5 > time.time():
+            if future.done():
+                callback_complete = True
+                break
+        if not callback_complete or future.result() is None:
+            raise RuntimeError('Unable to set signal generator parameters.')
+
+        # Print any errors that occurred
+        for response in future.result().results:
+            if not response.successful:
+                raise RuntimeError(f'Failed to set parameter: {response.reason}')
+
     def call_toggle_step_signal(self):
         """
         Call the signal_generator's toggle step service to toggle the step input.
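The step test configured here is what patch 16's calculate_error ultimately scores: the squared difference between estimate and command, integrated with a rectangle rule over the sample spacing. A toy walk-through of that computation with made-up numbers (columns are [time, value]; none of these samples come from the patch):

import numpy as np

estimate = np.array([[0.0, 0.00],    # hypothetical roll estimates
                     [0.5, 0.20],
                     [1.0, 0.45],
                     [1.5, 0.52]])
command = np.array([[0.0, 0.50]])    # a single 0.5 rad step command at t = 0

# Drop estimates taken before the first command was issued
estimate = estimate[estimate[:, 0] >= command[0, 0]]

cumulative_error = 0.0
for i in range(estimate.shape[0] - 1):
    # Most recent command at or before this sample
    c = command[command[:, 0] <= estimate[i + 1, 0]][-1, 1]
    # Squared error times the sample interval
    cumulative_error += (estimate[i + 1, 1] - c) ** 2 * (estimate[i + 1, 0] - estimate[i, 0])

print(cumulative_error)  # (0.3**2 + 0.05**2 + 0.02**2) * 0.5 = 0.04645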
@@ -350,10 +417,10 @@ def set_current_gains(self, gains):
                                   Parameter(name='a_t_ki', value=gains[1]).to_parameter_msg()]

         # Call the service
-        while not self.set_parameter_client.wait_for_service(timeout_sec=1.0):
-            self.get_logger().info('Service {self.set_parameter_client.srv_name}' +
-                                   ' not available, waiting...')
-        future = self.set_parameter_client.call_async(request)
+        while not self.autopilot_set_param_client.wait_for_service(timeout_sec=1.0):
+            self.get_logger().info(f'Service {self.autopilot_set_param_client.srv_name}' +
+                                   ' not available, waiting...')
+        future = self.autopilot_set_param_client.call_async(request)

         # Wait for the service to complete, exiting if it takes too long
         call_time = time.time()
From 5b942d17819139a7d0084bf22c46564edc7a171f Mon Sep 17 00:00:00 2001
From: Brandon Sutherland
Date: Tue, 2 Apr 2024 14:22:20 -0600
Subject: [PATCH 23/52] added launch files for remaining autopilots

---
 .../launch/airspeed_autotune.launch.py        | 38 +++++++++++++++++++
 .../launch/altitude_autotune.launch.py        | 38 +++++++++++++++++++
 .../launch/course_autotune.launch.py          | 38 +++++++++++++++++++
 .../launch/pitch_autotune.launch.py           | 38 +++++++++++++++++++
 4 files changed, 152 insertions(+)
 create mode 100644 rosplane_tuning/launch/airspeed_autotune.launch.py
 create mode 100644 rosplane_tuning/launch/altitude_autotune.launch.py
 create mode 100644 rosplane_tuning/launch/course_autotune.launch.py
 create mode 100644 rosplane_tuning/launch/pitch_autotune.launch.py

diff --git a/rosplane_tuning/launch/airspeed_autotune.launch.py b/rosplane_tuning/launch/airspeed_autotune.launch.py
new file mode 100644
index 0000000..fb843d1
--- /dev/null
+++ b/rosplane_tuning/launch/airspeed_autotune.launch.py
@@ -0,0 +1,38 @@
+import os
+
+from ament_index_python import get_package_share_directory
+from launch import LaunchDescription
+from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
+from launch.substitutions import TextSubstitution, LaunchConfiguration
+from launch.launch_description_sources import PythonLaunchDescriptionSource
+
+def generate_launch_description():
+    stabilize_period = LaunchConfiguration('stabilize_period')
+    stabilize_period_launch_arg = DeclareLaunchArgument(
+        'stabilize_period',
+        default_value=TextSubstitution(text='5.0'),
+        description='Amount of time to collect data for calculating error'
+    )
+    continuous_tuning = LaunchConfiguration('continuous_tuning')
+    continuous_tuning_launch_arg = DeclareLaunchArgument(
+        'continuous_tuning',
+        default_value=TextSubstitution(text='False'),
+        description='Whether to run the tuning sequence continuously or to wait for manual input'
+    )
+    rosplane_tuning_launch_arg = IncludeLaunchDescription(
+        PythonLaunchDescriptionSource(
+            os.path.join(get_package_share_directory('rosplane_tuning'),
+                         'launch/autotune.launch.py')
+        ),
+        launch_arguments={
+            'stabilize_period': stabilize_period,
+            'continuous_tuning': continuous_tuning,
+            'current_tuning_autopilot': 'airspeed'
+        }.items()
+    )
+
+    return LaunchDescription([
+        stabilize_period_launch_arg,
+        continuous_tuning_launch_arg,
+        rosplane_tuning_launch_arg
+    ])
diff --git a/rosplane_tuning/launch/altitude_autotune.launch.py b/rosplane_tuning/launch/altitude_autotune.launch.py
new file mode 100644
index 0000000..64cf1fa
--- /dev/null
+++ b/rosplane_tuning/launch/altitude_autotune.launch.py
@@ -0,0 +1,38 @@
+import os
+
+from ament_index_python import get_package_share_directory
+from launch import LaunchDescription
+from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
+from launch.substitutions import TextSubstitution, LaunchConfiguration
+from launch.launch_description_sources import PythonLaunchDescriptionSource
+
+def generate_launch_description():
+    stabilize_period = LaunchConfiguration('stabilize_period')
+    stabilize_period_launch_arg = DeclareLaunchArgument(
+        'stabilize_period',
+        default_value=TextSubstitution(text='5.0'),
+        description='Amount of time to collect data for calculating error'
+    )
+    continuous_tuning = LaunchConfiguration('continuous_tuning')
+    continuous_tuning_launch_arg = DeclareLaunchArgument(
+        'continuous_tuning',
+        default_value=TextSubstitution(text='False'),
+        description='Whether to run the tuning sequence continuously or to wait for manual input'
+    )
+    rosplane_tuning_launch_arg = IncludeLaunchDescription(
+        PythonLaunchDescriptionSource(
+            os.path.join(get_package_share_directory('rosplane_tuning'),
+                         'launch/autotune.launch.py')
+        ),
+        launch_arguments={
+            'stabilize_period': stabilize_period,
+            'continuous_tuning': continuous_tuning,
+            'current_tuning_autopilot': 'altitude'
+        }.items()
+    )
+
+    return LaunchDescription([
+        stabilize_period_launch_arg,
+        continuous_tuning_launch_arg,
+        rosplane_tuning_launch_arg
+    ])
diff --git a/rosplane_tuning/launch/course_autotune.launch.py b/rosplane_tuning/launch/course_autotune.launch.py
new file mode 100644
index 0000000..e4adab7
--- /dev/null
+++ b/rosplane_tuning/launch/course_autotune.launch.py
@@ -0,0 +1,38 @@
+import os
+
+from ament_index_python import get_package_share_directory
+from launch import LaunchDescription
+from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
+from launch.substitutions import TextSubstitution, LaunchConfiguration
+from launch.launch_description_sources import PythonLaunchDescriptionSource
+
+def generate_launch_description():
+    stabilize_period = LaunchConfiguration('stabilize_period')
+    stabilize_period_launch_arg = DeclareLaunchArgument(
+        'stabilize_period',
+        default_value=TextSubstitution(text='5.0'),
+        description='Amount of time to collect data for calculating error'
+    )
+    continuous_tuning = LaunchConfiguration('continuous_tuning')
+    continuous_tuning_launch_arg = DeclareLaunchArgument(
+        'continuous_tuning',
+        default_value=TextSubstitution(text='False'),
+        description='Whether to run the tuning sequence continuously or to wait for manual input'
+    )
+    rosplane_tuning_launch_arg = IncludeLaunchDescription(
+        PythonLaunchDescriptionSource(
+            os.path.join(get_package_share_directory('rosplane_tuning'),
+                         'launch/autotune.launch.py')
+        ),
+        launch_arguments={
+            'stabilize_period': stabilize_period,
+            'continuous_tuning': continuous_tuning,
+            'current_tuning_autopilot': 'course'
+        }.items()
+    )
+
+    return LaunchDescription([
+        stabilize_period_launch_arg,
+        continuous_tuning_launch_arg,
+        rosplane_tuning_launch_arg
+    ])
diff --git a/rosplane_tuning/launch/pitch_autotune.launch.py b/rosplane_tuning/launch/pitch_autotune.launch.py
new file mode 100644
index 0000000..a221ea9
--- /dev/null
+++ b/rosplane_tuning/launch/pitch_autotune.launch.py
@@ -0,0 +1,38 @@
+import os
+
+from ament_index_python import get_package_share_directory
+from launch import LaunchDescription
+from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
+from launch.substitutions import TextSubstitution, LaunchConfiguration
+from launch.launch_description_sources import PythonLaunchDescriptionSource
+
+def generate_launch_description():
+    stabilize_period = LaunchConfiguration('stabilize_period')
+    stabilize_period_launch_arg = DeclareLaunchArgument(
'stabilize_period', + default_value=TextSubstitution(text='5.0'), + description='Whether to run the tuning sequence continuously or to wait for manual input' + ) + continuous_tuning = LaunchConfiguration('continuous_tuning') + continuous_tuning_launch_arg = DeclareLaunchArgument( + 'continuous_tuning', + default_value=TextSubstitution(text='False'), + description='Whether to run the tuning sequence continuously or to wait for manual input' + ) + rosplane_tuning_launch_arg = IncludeLaunchDescription( + PythonLaunchDescriptionSource( + os.path.join(get_package_share_directory('rosplane_tuning'), + 'launch/autotune.launch.py') + ), + launch_arguments={ + 'stabilize_period': stabilize_period, + 'continuous_tuning': continuous_tuning, + 'current_tuning_autopilot': 'pitch' + }.items() + ) + + return LaunchDescription([ + stabilize_period_launch_arg, + continuous_tuning_launch_arg, + rosplane_tuning_launch_arg + ]) From 3c10b21415b52ca1d35072f75d06e23db7ea4edc Mon Sep 17 00:00:00 2001 From: Brandon Sutherland Date: Tue, 2 Apr 2024 14:23:21 -0600 Subject: [PATCH 24/52] misc tweaks to plot_util.py --- rosplane_tuning/src/autotune/plot_util.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/rosplane_tuning/src/autotune/plot_util.py b/rosplane_tuning/src/autotune/plot_util.py index d571f46..f6122fb 100644 --- a/rosplane_tuning/src/autotune/plot_util.py +++ b/rosplane_tuning/src/autotune/plot_util.py @@ -19,8 +19,8 @@ def __init__(self, *args): # None of the optimization parameters are needed, so they are ignored. # Define the grid of points to test and plot - self.x_values = np.linspace(0.0, 1.0, 5) - self.y_values = np.linspace(0.0, 1.0, 5) + self.x_values = np.linspace(0.0, 1.0, 10) + self.y_values = np.linspace(0.0, 1.0, 10) self.points_given = False self.plotting_complete = False @@ -48,15 +48,6 @@ def plot(self, error): x, y = np.meshgrid(self.x_values, self.y_values) z = error.reshape(x.shape) - # Plot the function landscape in a 3D plot - fig = plt.figure() - ax = fig.add_subplot(111, projection='3d') - ax.plot_surface(x, y, z, cmap='viridis', alpha=0.8) - ax.set_xlabel('Gain 1') - ax.set_ylabel('Gain 2') - ax.set_zlabel('Error') - plt.show() - # Save to npy file, iterate the filename if it already exists xyz = np.stack((x, y, z), axis=-1) file_index = 0 @@ -69,3 +60,12 @@ def plot(self, error): except FileExistsError: file_index += 1 + # Plot the function landscape in a 3D plot + fig = plt.figure() + ax = fig.add_subplot(111, projection='3d') + ax.plot_surface(x, y, z, cmap='viridis', alpha=0.8) + ax.set_xlabel('Gain 1') + ax.set_ylabel('Gain 2') + ax.set_zlabel('Error') + plt.show() + From 800101fb1087238ce3f9927501fbf5a8b4eaaf7c Mon Sep 17 00:00:00 2001 From: Joseph Date: Tue, 2 Apr 2024 14:51:18 -0600 Subject: [PATCH 25/52] Removed several unneeded functions to consolidate them. Added new optimizerstate into functions. 
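The intent is for the node to drive the optimizer purely through
optimization_terminated() and get_next_parameter_set(), with the old
flag-based dispatch replaced by the OptimizerState enum. A rough sketch of
the intended driver loop (the quadratic objective below is only a
hypothetical stand-in for the flight-test error; the real node supplies
errors measured from step responses, and the parameter values here are
illustrative):

```python
import numpy as np
from optimizer import Optimizer

def objective(gains):
    # Hypothetical error surface with a minimum at gains = (0.5, 0.1).
    return (gains[0] - 0.5) ** 2 + (gains[1] - 0.1) ** 2

opt = Optimizer(np.array([0.2, 0.05]),
                {'u1': 1e-4, 'u2': 0.5, 'sigma': 1.5, 'alpha': 1.0, 'tau': 1e-3})
points = np.array([[0.2, 0.05]])
while not opt.optimization_terminated():
    # Evaluate the error for every gain set the optimizer requested, then
    # hand the errors back to get the next gain sets to test (a gradient
    # probe, bracketing step, or pinpointing step, depending on the state).
    errors = np.array([objective(p) for p in points])
    points = opt.get_next_parameter_set(errors)
```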
---
 rosplane_tuning/src/autotune/optimizer.py | 88 ++++++-----------------
 1 file changed, 22 insertions(+), 66 deletions(-)

diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py
index cdc7479..6d64e5a 100644
--- a/rosplane_tuning/src/autotune/optimizer.py
+++ b/rosplane_tuning/src/autotune/optimizer.py
@@ -38,7 +38,7 @@ def __init__(self, initial_gains, optimization_params):
         self.sigma = optimization_params['sigma']
         self.init_alpha = optimization_params['alpha']
         self.tau = optimization_params['tau']
-        self.flag = 0
+        self.k = 0
         self.state = OptimizerState.FINDING_GRADIENT

     def optimization_terminated(self):
@@ -60,6 +60,7 @@ def get_optimization_status(self):
         Returns:
             str: The status of the optimization algorithm.
         """
+        # Return with the final values, the best value found
         return 'TODO: Status not implemented.'

     def get_next_parameter_set(self, error):
@@ -79,16 +80,20 @@ def get_next_parameter_set(self, error):
         """

         if self.state == OptimizerState.FINDING_GRADIENT:
-            pass
+            self.get_gradient()
+
         elif self.state == OptimizerState.BRACKETING:
-            pass
+            # using error, conduct bracketing
+            # return the next set of gains to test
+            self.bracketing()
+
         elif self.state == OptimizerState.PINPOINTING:
-            pass
+            self.pinpointing()
+
         else:  # Terminated
             pass

-
-    def get_gradient(self, fx, fxh):
+    def get_gradient(self, fx, fxh, offset=0.01):
         """
         This function returns the gradient at the given point using forward finite difference.

@@ -99,74 +104,35 @@ def get_gradient(self, fx, fxh):
         Returns:
             float: The gradient at the given point.
         """
-        return (fxh - fx) / 0.01
-
-    def next_step(self, phi1, phi1_prime, phi2, phi2_prime):
-        """
-        This function chooses which function to run next based on the flag.
-        """
-        if self.flag == 0:
-            self.initial_bracketing(phi1, phi1_prime, phi2, phi2_prime)
-        pass
-
-    # Flag 0 - Initial Bracketing
-    def initial_bracketing(self, phi1, phi1_prime, phi2, phi2_prime):
-        """
-        This function conducts the bracketing part of the optimization.
-        """
-
-        alpha1 = 0
-        alpha2 = self.init_alpha
+        return (fxh - fx) / offset

-        # Needs Pinpointing
-        if(phi2 > phi1 + self.u1*self.init_alpha*phi1_prime):
-            flag = 2
-            alphap = self.interpolate(alpha1, alpha2)
-            return alpha2
-
-        # Optimized
-        if abs(phi2_prime) <= -self.u2*phi1_prime:
-            flag = 4
-            return alpha2
-
-        # Needs Pinpointing
-        elif phi2_prime >= 0:
-            flag = 2
-            alphap = self.interpolate(alpha1, alpha2)
-            return alpha2
-
-        # Needs more Bracketing
-        else:
-            alpha1 = alpha2
-            alpha2 = self.sigma*alpha2
-            flag = 1
-            return alpha2
-
-
-    # Flag 1 - Bracketing
     def bracketing(self, phi1, phi1_prime, phi2, phi2_prime, temp_alpha1, temp_alpha2):
         """
         This function conducts the bracketing part of the optimization.
         """
-        alpha1 = temp_alpha1
-        alpha2 = temp_alpha2
+        if self.k == 0:
+            alpha1 = 0
+            alpha2 = self.init_alpha
+        else:
+            alpha1 = temp_alpha1
+            alpha2 = temp_alpha2

         # Needs Pinpointing
         if(phi2 > phi1 + self.u1*self.init_alpha*phi1_prime) or (phi2 > phi1):
-            flag = 2
+            self.state = OptimizerState.PINPOINTING
             alphap = self.interpolate(alpha1, alpha2)
             res = [alpha1, alphap, alpha2]
             return res

         # Optimized
         if abs(phi2_prime) <= -self.u2*phi1_prime:
-            flag = 4
+            self.state = OptimizerState.TERMINATED
             res = [alpha2]
             return res

         # Needs Pinpointing
         elif phi2_prime >= 0:
-            flag = 2
+            self.state = OptimizerState.PINPOINTING
             alphap = self.interpolate(alpha1, alpha2)
             res = [alpha1, alphap, alpha2]
             return res
@@ -178,7 +144,6 @@ def bracketing(self, phi1, phi1_prime, phi2, phi2_prime, temp_alpha1, temp_alpha
         else:
             alpha1 = alpha2
             alpha2 = self.sigma*alpha2
             res = [alpha1, alpha2]
             return res

-    # Interpolating
     def interpolate(self, alpha1, alpha2):
         """
         This function interpolates between two points.
@@ -192,7 +157,6 @@ def interpolate(self, alpha1, alpha2):
         """
         return (alpha1 + alpha2)/2

-    # Flag 2 - Pinpointing
     def pinpointing(self, alpha1, phi1, phi1_prime, alpha2, phi2, phi2_prime, alphap, phip, phip_prime):
         """
         This function conducts the pinpointing part of the optimization.
@@ -217,7 +181,7 @@ def pinpointing(self, alpha1, phi1, phi1_prime, alpha2, phi2, phi2_prime, alphap
         else:
             # Optimized
             if abs(phip_prime) <= -self.u2*phi1_prime:
-                flag = 4
+                self.state = OptimizerState.TERMINATED
                 alphastar = alphap
                 res = [alphastar]
                 return res
@@ -230,11 +194,3 @@ def pinpointing(self, alpha1, phi1, phi1_prime, alpha2, phi2, phi2_prime, alphap
                 alpha1 = alphap
                 res = [alpha1, alpha2]
                 return res

         # Check for failure criteria
-
-    # Flag 3 - Failure Convergence
-    def failure(self):
-        pass
-
-    # Flag 4 - Successful Convergence
-    def success(self):
-        pass

From c439c21d9cc23e730a1aa96426944f0c456c5f32 Mon Sep 17 00:00:00 2001
From: Brandon Sutherland
Date: Tue, 2 Apr 2024 15:50:02 -0600
Subject: [PATCH 26/52] added autopilot override enable/disable

---
 rosplane_tuning/src/autotune/autotune.py | 53 +++++++++++++++++++++---
 1 file changed, 48 insertions(+), 5 deletions(-)

diff --git a/rosplane_tuning/src/autotune/autotune.py b/rosplane_tuning/src/autotune/autotune.py
index 8f39ec6..cd90554 100644
--- a/rosplane_tuning/src/autotune/autotune.py
+++ b/rosplane_tuning/src/autotune/autotune.py
@@ -206,7 +206,6 @@ def stabilize_period_timer_callback(self):
                                    + ' seconds...')
             self.collecting_data = True
             self.call_toggle_step_signal()
-
             self.autotune_state = AutoTuneState.STEP_TESTING

         elif self.autotune_state == AutoTuneState.STEP_TESTING:
@@ -215,7 +214,6 @@ def stabilize_period_timer_callback(self):
                                    + str(self.get_parameter('stabilize_period').value)
                                    + ' seconds...')
             self.call_toggle_step_signal()
-
             self.autotune_state = AutoTuneState.RETURN_TESTING

         else:
@@ -225,7 +223,7 @@ def stabilize_period_timer_callback(self):
             self.collecting_data = False
             self.stabilize_period_timer.cancel()
             self.set_current_gains(self.initial_gains)
-
+            self.disable_autopilot_override()
             self.autotune_state = AutoTuneState.ORBITING

             if self.get_parameter('continuous_tuning').value:
@@ -243,6 +241,7 @@ def run_tuning_iteration_callback(self, request, response):
             self.optimizer = Optimizer(self.initial_gains, self.optimization_params)
             self.new_gains = self.initial_gains
             self.set_signal_generator_params()
+
         elif not self.optimizer.optimization_terminated():
             self.new_gains = self.get_next_gains()

@@ -250,11 +249,12 @@ def run_tuning_iteration_callback(self, request, response):
             self.stabilize_period_timer.timer_period_ns = \
                int(self.get_parameter('stabilize_period').value * 1e9)
             self.stabilize_period_timer.reset()
-
+            self.enable_autopilot_override()
             self.get_logger().info('Stabilizing autopilot for '
                                    + str(self.get_parameter('stabilize_period').value)
                                    + ' seconds...')
             self.autotune_state = AutoTuneState.STABILIZING
+
         else:
             self.get_logger().info('Optimization terminated with: '
                                    + self.optimizer.get_optimization_status())
@@ -328,6 +328,38 @@ def set_signal_generator_params(self):
             if not response.successful:
                 raise RuntimeError(f'Failed to set parameter: {response.reason}')

+    def enable_autopilot_override(self):
+        """
+        Enable the autopilot override for the current autopilot, if an override is needed.
+        This enables direct control of that autopilot, rather than the outer loop controlling
+        the autopilot.
+        """
+        request = SetParameters.Request()
+        if self.current_autopilot == CurrentAutopilot.ROLL:
+            request.parameters = [
+                Parameter(name='roll_tuning_debug_override', value=True).to_parameter_msg()
+            ]
+        elif self.current_autopilot == CurrentAutopilot.PITCH:
+            request.parameters = [
+                Parameter(name='pitch_tuning_debug_override', value=True).to_parameter_msg()
+            ]
+        self.set_autopilot_params(request)
+
+    def disable_autopilot_override(self):
+        """
+        Disable the autopilot override for the current autopilot, if an override is needed.
+        This allows the outer loop to control the autopilot again.
+        """
+        request = SetParameters.Request()
+        if self.current_autopilot == CurrentAutopilot.ROLL:
+            request.parameters = [
+                Parameter(name='roll_tuning_debug_override', value=False).to_parameter_msg()
+            ]
+        elif self.current_autopilot == CurrentAutopilot.PITCH:
+            request.parameters = [
+                Parameter(name='pitch_tuning_debug_override', value=False).to_parameter_msg()
+            ]
+        self.set_autopilot_params(request)

     def call_toggle_step_signal(self):
         """
@@ -416,6 +448,17 @@ def set_current_gains(self, gains):
             request.parameters = [Parameter(name='a_t_kp', value=gains[0]).to_parameter_msg(),
                                   Parameter(name='a_t_ki', value=gains[1]).to_parameter_msg()]

+        self.set_autopilot_params(request)
+
+    def set_autopilot_params(self, request):
+        """
+        Set parameters for the autopilot. Moved to a separate function to avoid code repetition.
+
+        Parameters:
+        request (SetParameters.Request): A ROS2 object for setting parameters, already populated
+            with the desired parameters and values.
+        """
+
         # Call the service
         while not self.autopilot_set_param_client.wait_for_service(timeout_sec=1.0):
             self.get_logger().info(f'Service {self.autopilot_set_param_client.srv_name}' +
                                    ' not available, waiting...')
         future = self.autopilot_set_param_client.call_async(request)
@@ -430,7 +473,7 @@ def set_current_gains(self, gains):
                 callback_complete = True
                 break
         if not callback_complete or future.result() is None:
-            raise RuntimeError('Unable to set autopilot gains.')
+            raise RuntimeError('Unable to set autopilot params.')

         # Print any errors that occurred
         for response in future.result().results:

From 3da5299df5504624e40cb86db98b28ee8e530f5c Mon Sep 17 00:00:00 2001
From: Brandon Sutherland
Date: Tue, 2 Apr 2024 16:43:54 -0600
Subject: [PATCH 27/52] fixed error where autopilot override was ignored

---
 rosplane/src/controller_base.cpp                 | 6 ++----
 rosplane_tuning/launch/rosplane_tuning.launch.py | 4 +---
 2 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/rosplane/src/controller_base.cpp b/rosplane/src/controller_base.cpp
index f3bb364..e65e1ae 100644
--- a/rosplane/src/controller_base.cpp
+++ b/rosplane/src/controller_base.cpp
@@ -32,10 +32,8 @@ controller_base::controller_base()
   // Set the values for the parameters, from the param file or use the default value.
   set_parameters();

-  if (params_.roll_tuning_debug_override || params_.pitch_tuning_debug_override) {
-    tuning_debug_sub_ = this->create_subscription<rosplane_msgs::msg::ControllerInternalsDebug>(
-      "/tuning_debug", 10, std::bind(&controller_base::tuning_debug_callback, this, _1));
-  }
+  tuning_debug_sub_ = this->create_subscription<rosplane_msgs::msg::ControllerInternalsDebug>(
+    "/tuning_debug", 10, std::bind(&controller_base::tuning_debug_callback, this, _1));

   // Set the parameter callback, for when parameters are changed.
   parameter_callback_handle_ = this->add_on_set_parameters_callback(

diff --git a/rosplane_tuning/launch/rosplane_tuning.launch.py b/rosplane_tuning/launch/rosplane_tuning.launch.py
index 189deee..2291138 100644
--- a/rosplane_tuning/launch/rosplane_tuning.launch.py
+++ b/rosplane_tuning/launch/rosplane_tuning.launch.py
@@ -32,9 +32,7 @@ def generate_launch_description():
             package='rosplane',
             executable='rosplane_controller',
             name='autopilot',
-            parameters = [autopilot_params,
-                          {'pitch_tuning_debug_override': False},
-                          {'roll_tuning_debug_override': True}],
+            parameters = [autopilot_params],
             output = 'screen',
             arguments = [control_type]
         ),

From ebc534a7167ae3b3a55d12a37074b74599bcce3c Mon Sep 17 00:00:00 2001
From: Brandon Sutherland
Date: Tue, 2 Apr 2024 18:22:59 -0600
Subject: [PATCH 28/52] wrote some readme documentation for the autotuner

---
 rosplane_tuning/README.md                 | 39 +++++++++++++++++++++-
 rosplane_tuning/launch/autotune.launch.py |  9 +----
 rosplane_tuning/{ => media}/Waveforms.svg |  0
 rosplane_tuning/media/roll_response.png   | Bin 0 -> 11911 bytes
 rosplane_tuning/src/autotune/autotune.py  |  4 ++-
 5 files changed, 42 insertions(+), 10 deletions(-)
 rename rosplane_tuning/{ => media}/Waveforms.svg (100%)
 create mode 100644 rosplane_tuning/media/roll_response.png

diff --git a/rosplane_tuning/README.md b/rosplane_tuning/README.md
index e431215..e263768 100644
--- a/rosplane_tuning/README.md
+++ b/rosplane_tuning/README.md
@@ -4,6 +4,43 @@ This ROS2 package contains tools useful for tuning the autopilot performance of

 We recommend using [PlotJuggler](https://github.com/facontidavide/PlotJuggler) to visualize command input and response.

+## Autotuner
+
+To make using ROSplane easier, we have implemented an autotune node.
+This node performs a gradient-based optimization routine where the autotuner adjusts the autopilot gains in the direction that minimizes the error between the autopilot command and the estimated state.
+
+The optimization tests a series of gains using step commands, from which error is calculated, and the gains are gradually adjusted to minimize that error. Error is calculated by giving a step input to an autopilot, waiting a few seconds for the system to respond, stepping back, waiting a few more seconds, and then calculating the error. The autopilot then returns to its previous state. This results in a response like the following figure:
+
+![Autotune Response](media/roll_response.png)
+
+### Usage
+
+WARNING: Make sure to monitor the UAV during all portions of tuning, and be ready to take manual control in case the autopilot becomes unstable or does something terrible. We don't guarantee that the autotuner will not crash your plane.
+
+1. Launch the launch file for the portion of the autopilot you want to tune. This will launch the autotuner. Set the `continuous_tuning` parameter to `true` if you are confident in the autopilot's current ability to stay stable and in the air. Leave it as `false` if you'd rather manually begin each iteration.
+   ```
+   ros2 launch rosplane_tuning roll_autotune.launch.py continuous_tuning:=true
+   ```
+
+2. Launch ROSplane if you haven't already done so. The autotuner will start from whichever gains are currently set in ROSplane, so set those to whichever values you'd like to start the optimization from.
+   ```
+   ros2 launch rosplane_tuning rosplane_tuning.launch.py
+   ```
+
+3. Get ROSplane into the desired state, like an orbit high above the launch site. The autotuner will return to this state between every iteration.
+
+4. Call the `run_tuning_iteration` service to start tuning. If you set `continuous_tuning` to `false`, you will need to manually call the `run_tuning_iteration` service to start each iteration.
+
+5. Wait until the autotuner reports that it has finished or you are satisfied with the current response. The autotuner will update the current gains with the best response it has found so far.
+
+Repeat for each autopilot you wish to tune. We recommend tuning the autopilots in this order: roll, pitch, airspeed, altitude, course. The airspeed and altitude autopilots are closely coupled, so you may need to iterate between them to get the best performance.
+
+Make sure to set the new gains as the default values for ROSplane in the `rosplane/params` directory. Gains are reported by the autotuner in this order:
+- Roll: `r_kp`, `r_kd`
+- Pitch: `p_kp`, `p_kd`
+- Airspeed: `a_t_kp`, `a_t_ki`
+- Altitude: `a_kp`, `a_ki`
+- Course: `c_kp`, `c_ki`
+
 ## Signal Generator

 Signal generator is a ROS2 node that will generate step inputs, square waves, sine waves, sawtooth waves, and triangle waves to be used as command input for ROSplane. It has support for roll, pitch, altitude, course, and airspeed command input.
@@ -19,7 +56,7 @@ Signal generator works by publishing autopilot commands on the ROS network on th
 - Triangle: This is a continuous signal that ramps up and down between the minimum and maximum value of the signal, creating a triangle-like pattern.
 - Sawtooth: This is a continuous signal (sometimes called a ramp signal) that creates a constantly increasing signal that resets to its minimum value once the maximum value is reached.
-![Waveforms](Waveforms.svg)
+![Waveforms](media/Waveforms.svg)

 *By Omegatron - Own work, CC BY-SA 3.0, https://commons.wikimedia.org/w/index.php?curid=343520*

diff --git a/rosplane_tuning/launch/autotune.launch.py b/rosplane_tuning/launch/autotune.launch.py
index df16601..84587a3 100644
--- a/rosplane_tuning/launch/autotune.launch.py
+++ b/rosplane_tuning/launch/autotune.launch.py
@@ -37,17 +37,10 @@ def generate_launch_description():
             {'current_tuning_autopilot': current_tuning_autopilot}
         ]
     )
-    rosplane_tuning_launch_include = IncludeLaunchDescription(
-        PythonLaunchDescriptionSource(
-            os.path.join(get_package_share_directory('rosplane_tuning'),
-                         'launch/rosplane_tuning.launch.py')
-        )
-    )

     return LaunchDescription([
         stabilize_period_launch_arg,
         continuous_tuning_launch_arg,
         current_tuning_autopilot_launch_arg,
-        autotune_node,
-        rosplane_tuning_launch_include
+        autotune_node
     ])

diff --git a/rosplane_tuning/Waveforms.svg b/rosplane_tuning/media/Waveforms.svg
similarity index 100%
rename from rosplane_tuning/Waveforms.svg
rename to rosplane_tuning/media/Waveforms.svg

diff --git a/rosplane_tuning/media/roll_response.png b/rosplane_tuning/media/roll_response.png
new file mode 100644
index 0000000000000000000000000000000000000000..4f085ae1a971541a1c3400ce0e8f1b07ea7c45ac
GIT binary patch
literal 11911
[base64-encoded binary patch data for media/roll_response.png omitted]

Date: Tue, 2 Apr 2024 20:43:14 -0600
Subject: [PATCH 29/52] Added conjugate gradient algo, and converted line
 search to work with the autotuner

---
 rosplane_tuning/src/autotune/optimizer.py | 225 +++++++++++++++++-----
 1 file changed, 179 insertions(+), 46 deletions(-)

diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py
index 6d64e5a..3d00055 100644
--- a/rosplane_tuning/src/autotune/optimizer.py
+++ b/rosplane_tuning/src/autotune/optimizer.py
@@ -8,6 +8,7 @@ class OptimizerState(Enum):
     """
     This class defines the various states that exist within the optimizer.
     """
+    SELECT_DIRECTION = auto()
     FINDING_GRADIENT = auto()
     BRACKETING = auto()
     PINPOINTING = auto()
@@ -38,7 +39,17 @@ def __init__(self, initial_gains, optimization_params):
         self.sigma = optimization_params['sigma']
         self.init_alpha = optimization_params['alpha']
         self.tau = optimization_params['tau']
-        self.k = 0
-        self.state = OptimizerState.FINDING_GRADIENT
+        self.state = OptimizerState.FINDING_GRADIENT # Find the initial gradient from the starting gains
+
+        # Line Search Variables
+        self.k = 0
+        self.reset = False
+        self.p = np.array([])
+        self.line_search_vector = np.array([0.0, 0.0]) # [prior_p, prior_phi_prime]
+        self.save_gains = np.array([]) # np.array to save gains between autotune iterations
+        self.save_phis = np.array([]) # np.array to save phis (to calc gradients) between autotune iterations, correspond with above array
+        self.current_gains = initial_gains # Gains updated every successful/nuclear pinpointing
+

     def optimization_terminated(self):
@@ -66,7 +77,10 @@ def get_optimization_status(self):
             str: The status of the optimization algorithm.
         """
         # Return with the final values, the best value found
         return 'TODO: Status not implemented.'
@@ -80,69 +90,181 @@ def get_next_parameter_set(self, error):
         """

         if self.state == OptimizerState.FINDING_GRADIENT:
-            self.get_gradient()
+            # Store current gains and error, request new gain and error for gradient, then select direction
+            self.state = OptimizerState.SELECT_DIRECTION
+            self.save_gains = self.current_gains
+            self.save_phis = error
+            new_gains = [gain + 0.01 for gain in self.initial_gains]
+            # After some calculations, found that even if you add 0.01 to all dimensions and divide by 0.01 for the gradient
+            # you get roughly the same magnitude on the gradient.
+            # If we need more accuracy we can add a smarter value
+            # (decrease based on number of dimensions)
+            return new_gains
+
+        elif self.state == OptimizerState.SELECT_DIRECTION:
+
+            if self.k == 0:
+                new_gains = self.line_search(self.initial_gains, error, self.save_gains, self.new_phis)
+            else:
+                new_gains = self.line_search()
+
+            return new_gains

         elif self.state == OptimizerState.BRACKETING:
             # using error, conduct bracketing
             # return the next set of gains to test
-            self.bracketing()
+            gains = self.save_gains[0]
+            gainsh = self.save_gains[1]
+            gains2 = self.save_gains[2]
+            gains2h = self.save_gains[3]
+
+            new_gains = self.bracketing(gains, gainsh, self.save_phis[0], self.save_phis[1], gains2, gains2h, error[0], error[1])
+            return new_gains

         elif self.state == OptimizerState.PINPOINTING:
-            self.pinpointing()
+
+            gains1 = self.save_gains[0]
+            gains2 = self.save_gains[2]
+            gainsp = self.save_gains[4]
+
+            phip = error[0]
+            phiph = error[1]
+            phip_prime = self.get_gradient(phip, phiph)
+
+            phi1 = self.save_phis[0]
+            phi1_prime = self.save_phis[1]
+
+            new_gains = self.pinpointing(gains1, phi1, phi1_prime, gains2, gainsp, phip, phip_prime)
+            return new_gains

-        else:  # Terminated
-            pass
+        else:  # Process Terminated
+            return self.current_gains

-    def get_gradient(self, fx, fxh, offset=0.01):
+    def get_gradient(self, phi, phih):
         """
         This function returns the gradient at the given point using forward finite difference.

         Parameters:
-        fx (float): The function evaluation at the point of the gradient.
-        fxh (float): The function evaluation at a point slightly offset from the point.
+        phi (float): The function evaluation at the point of the gradient.
+        phih (float): The function evaluation at a point slightly offset from the point.

         Returns:
             float: The gradient at the given point.
         """
-        return (fxh - fx) / offset
+        return (phih - phi) / 0.01

+    def line_search(self, gains, phi, gainsh, phih):
+        """
+        This function conducts the initial direction selection part of the optimization,
+        using conjugate gradient descent.
+
+        Parameters:
+        gains (np.array, size num_gains, dtype float): The current gains. [gain1, gain2, ...]
+        phi (np.array, size num_gains, dtype float): The error from the list of gains. [error1, error2, ...]
+        phi_prime (): The gradient of the error from the list of gains.
+
+        Returns:
+        np.array, size num_gains*num_sets, dtype float: The next series of gains to be tested. Any
+        number of gain sets can be returned, depending on the number of gains that need to be tested for
+        the follow-on optimization. [[gain1_1, gain2_1, ...], [gain1_2, gain2_2, ...], ...]
""" - if self.k == 0: - alpha1 = 0 - alpha2 = self.init_alpha + + phi_prime = self.get_gradient(phi, phih) # Calculate gradient + + # Check for convergence + if np.linalg.norm(phi_prime,np.inf) < self.tau: + self.state = OptimizerState.TERMINATED + return self.current_gains + + # Select Direction + prior_p = self.line_search_vector[0] + prior_phi_prime = self.line_search_vector[1] + + if self.k == 0 or self.reset == True: + self.reset = False + self.p = -phi_prime/np.linalg.norm(phi_prime) else: - alpha1 = temp_alpha1 - alpha2 = temp_alpha2 + bk = np.dot(phi_prime,phi_prime)/np.dot(prior_phi_prime,prior_phi_prime) + self.p = -phi_prime/np.linalg.norm(phi_prime) + bk*prior_p + + # Prepare for bracketing + self.OptimizerState = OptimizerState.BRACKETING + # Request phi2 and phi2+h + gains2 = gains + self.a_init*p + gains2h = [gain + 0.01 for gain in gains2] + new_gains = np.array([gains2, gains2h]) + + # Save phi1 and phi1+h + self.save_gains = np.array([gains, gainsh, gains2, gains2h]) + self.save_phis = np.array([phi, phih]) + self.k += 1 + + return new_gains + + def distance(point1, point2): + """ + This function calculates the distance between two points. + + Parameters: + point1 (np.array, size num_gains, dtype float): The first point. [gain1, gain2, ...] + point2 (np.array, size num_gains, dtype float): The second point. [gain1, gain2, ...] + + Returns: + float: The distance between the two points. + """ + return np.linalg.norm(point1 - point2) + + def bracketing(self, gains, gainsh, phi1, phi1_prime, gains2, gains2h, phi2, phi2_prime): + """ + This function conducts the bracketing part of the optimization. + """ + + alpha1 = self.distance(self.init_gains, gains) # For the initial pass, gains should be initial_gains, so alpha1 = 0 + alpha2 = self.distance(self.init_gains, gains2) # Needs Pinpointing if(phi2 > phi1 + self.u1*self.init_alpha*phi1_prime) or (phi2 > phi1): self.state == OptimizerState.PINPOINTING + # Save old points + self.save_gains = np.array([gains, gainsh, gains2, gains2h, gainsp, [gain + 0.01 for gain in gainsp]]) + self.save_phis = np.array([phi1, phi1_prime, phi2, phi2_prime]) + # Request new point alphap = self.interpolate(alpha1, alpha2) - res = [alpha1, alphap, alpha2] - return res + gainsp = self.init_gains + alphap*self.p + new_gains = [self.save_gains[4], self.save_gains[5]] + return new_gains # Optimized if abs(phi2_prime) <= -self.u2*phi1_prime: - self.state == OptimizerState.TERMINATED - res = [alpha2] - return res + self.state == OptimizerState.SELECT_DIRECTION + new_gains = self.init_gains + alpha2*self.p + self.current_gains = new_gains + return new_gains # Needs Pinpointing elif phi2_prime >= 0: self.state == OptimizerState.PINPOINTING + # Save old points + self.save_gains = np.array([gains, gainsh, gains2, gains2h, gainsp, [gain + 0.01 for gain in gainsp]]) + self.save_phis = np.array([phi1, phi1_prime, phi2, phi2_prime]) + # Request new point alphap = self.interpolate(alpha1, alpha2) - res = [alpha1, alphap, alpha2] - return res + gainsp = self.init_gains + alphap*self.p + new_gains = [self.save_gains[4], self.save_gains[5]] + return new_gains # Needs more Bracketing else: alpha1 = alpha2 alpha2 = self.sigma*alpha2 - res = [alpha1, alpha2] - return res + + # Request new points + gains1 = self.init_gains + alpha1*self.p + gains2 = self.init_gains + alpha2*self.p + gains1h = [gain + 0.01 for gain in gains1] + gains2h = [gain + 0.01 for gain in gains2] + + new_gains = [gains1, gains1h, gains2, gains2h] + return new_gains def interpolate(self, alpha1, 
alpha2): """ @@ -157,40 +279,51 @@ def interpolate(self, alpha1, alpha2): """ return (alpha1 + alpha2)/2 - def pinpointing(self, alpha1, phi1, phi1_prime, alpha2, phi2, phi2_prime, alphap, phip, phip_prime): + def pinpointing(self, gains1, phi1, phi1_prime, gains2, gainsp, phip, phip_prime): """ This function conducts the pinpointing part of the optimization. Parameters: - alpha1 (float): The first distance. - phi1 (float): The value of the function at point 1. - phi1_prime (float): The gradient of the function at point 1. - alpha2 (float): The second distance. - phi2 (float): The value of the function at point 2. - phi2_prime (float): The gradient of the function at point 2. Returns: alphastar (float): The optimal step size """ + alpha1 = self.distance(self.init_gains, gains1) + alpha2 = self.distance(self.init_gains, gains2) + alphap = self.distance(self.init_gains, gainsp) + if alphap > phi1 + self.u1*alphap*phi1_prime or alphap > phi1: - alpha2 = alphap + # Continue pinpointing + gains2 = gainsp + gainsp = self.interpolate(gains1, gains2) phi2 = phip - res = [alpha1, alpha2] - return res + phi2_prime = phip_prime + self.save_gains = np.array([gains1, None, gains2, None, gainsp, [gain + 0.01 for gain in gainsp]]) + self.save_phis = np.array([phi1, phi1_prime, phi2, phi2_prime]) + new_gains = [self.save_gains[4], self.save_gains[5]] + return new_gains else: # Optimized if abs(phip_prime) <= -self.u2*phi1_prime: - self.state == OptimizerState.TERMINATED + self.state == OptimizerState.SELECT_DIRECTION alphastar = alphap - res = [alphastar] - return res + new_gains = self.init_gains + alphastar*self.p + return new_gains # More parameterization needed elif phip_prime*(alpha2 - alpha1) >= 0: - alpha2 = alpha1 + gains2 = gains1 + phi2 = phi1 + + gains1 = gainsp + phi1 = phip + gainsp = self.interpolate(gains1, gains2) - alpha1 = alphap - res = [alpha1, alpha2] - return res + self.save_gains = np.array([gains1, None, gains2, None, gainsp, [gain + 0.01 for gain in gainsp]]) + self.save_phis = np.array([phi1, phi1_prime, phi2, phi2_prime]) + new_gains = [self.save_gains[4], self.save_gains[5]] + return new_gains - # Check for failure criteria + # Check for failure criteria - the nuclear option + # self.state == OptimizerState.SELECT_DIRECTION + # self.current_gains = new_gains \ No newline at end of file From 85df3b0988b4e09a2adb7c2856c7c7fec4f123fc Mon Sep 17 00:00:00 2001 From: Brandon Sutherland Date: Mon, 8 Apr 2024 11:07:58 -0600 Subject: [PATCH 30/52] wrote standalone tester for optimizer --- .../src/autotune/optimizer_tester.py | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 rosplane_tuning/src/autotune/optimizer_tester.py diff --git a/rosplane_tuning/src/autotune/optimizer_tester.py b/rosplane_tuning/src/autotune/optimizer_tester.py new file mode 100644 index 0000000..1b7e409 --- /dev/null +++ b/rosplane_tuning/src/autotune/optimizer_tester.py @@ -0,0 +1,36 @@ +from optimizer import Optimizer +import numpy as np + + +# Function to test the optimizer with +def function(x): + # Matyas function + return 0.26 * (x[0] ** 2 + x[1] ** 2) - 0.48 * x[0] * x[1] + + +# Initialize optimizer +points = np.array([[1, 2]]) # Initial point +optimization_params = {'u1': 1e-4, + 'u2': 0.5, + 'sigma': 1.5, + 'alpha': 1.0, + 'tau': 1e-3} +optimizer = Optimizer(points[0], optimization_params) + + +while not optimizer.optimization_terminated(): + # Print status + print(optimizer.get_optimiztion_status()) + + # Calculate error for current points + error = [] + for point in points: + 
+        error.append(function(point))
+    error = np.array(error)
+
+    # Pass points to optimizer
+    points = optimizer.get_next_parameter_set(error)
+
+
+print('Optimization terminated with status: {}'.format(optimizer.get_optimization_status()))

From 6f6e81c42f58a0dd838f885d00d2992b68c47e4e Mon Sep 17 00:00:00 2001
From: Brandon Sutherland
Date: Mon, 8 Apr 2024 11:18:50 -0600
Subject: [PATCH 31/52] added plotter to optimization_tester

---
 rosplane_tuning/src/autotune/autotune.py  |  8 ++---
 rosplane_tuning/src/autotune/optimizer.py |  4 +--
 .../src/autotune/optimizer_tester.py      | 30 ++++++++++++++++---
 rosplane_tuning/src/autotune/plot_util.py |  4 +--
 4 files changed, 34 insertions(+), 12 deletions(-)

diff --git a/rosplane_tuning/src/autotune/autotune.py b/rosplane_tuning/src/autotune/autotune.py
index cd6562f..9d276aa 100644
--- a/rosplane_tuning/src/autotune/autotune.py
+++ b/rosplane_tuning/src/autotune/autotune.py
@@ -153,11 +153,11 @@ def __init__(self):
         # Optimization
         self.initial_gains = None  # get_gains function cannot be called in __init__ since the node
                                    # has not yet been passed to the executor
-        self.optimization_params = {'u1': 10**-4,
+        self.optimization_params = {'u1': 1e-3,
                                     'u2': 0.5,
-                                    'sigma': 1.5,
-                                    'alpha': 1,
-                                    'tau': 10**-3}
+                                    'sigma': 2.0,
+                                    'alpha': 0.05,
+                                    'tau': 1e-2}

         self.optimizer = None

diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py
index 3d00055..687ad5e 100644
--- a/rosplane_tuning/src/autotune/optimizer.py
+++ b/rosplane_tuning/src/autotune/optimizer.py
@@ -150,7 +150,7 @@ def get_gradient(self, phi, phih):
         Returns:
             float: The gradient at the given point.
         """
-        return (phih - phi) / 0.01
+        return (phih - phi) / 0.01  # Shouldn't be a hard coded value, replace with a variable

diff --git a/rosplane_tuning/src/autotune/optimizer_tester.py b/rosplane_tuning/src/autotune/optimizer_tester.py
index 1b7e409..65ce30d 100644
--- a/rosplane_tuning/src/autotune/optimizer_tester.py
+++ b/rosplane_tuning/src/autotune/optimizer_tester.py
@@ -1,5 +1,7 @@
 from optimizer import Optimizer
+
 import numpy as np
+import matplotlib.pyplot as plt


 # Function to test the optimizer with
@@ -9,28 +11,48 @@ def function(x):


 # Initialize optimizer
-points = np.array([[1, 2]])  # Initial point
+curr_points = np.array([[0, 5]])  # Initial point
 optimization_params = {'u1': 1e-4,
                        'u2': 0.5,
                        'sigma': 1.5,
                        'alpha': 1.0,
                        'tau': 1e-3}
-optimizer = Optimizer(points[0], optimization_params)
+optimizer = Optimizer(curr_points[0], optimization_params)


+# Run optimization
+all_points = []
 while not optimizer.optimization_terminated():
     # Print status
     print(optimizer.get_optimization_status())

     # Calculate error for current points
     error = []
-    for point in points:
+    for point in curr_points:
         error.append(function(point))
     error = np.array(error)

     # Pass points to optimizer
-    points = optimizer.get_next_parameter_set(error)
+    curr_points = optimizer.get_next_parameter_set(error)

+    # Store points
+    for point in curr_points:
+        all_points.append(point)

+all_points = np.array(all_points)
 print('Optimization terminated with status: {}'.format(optimizer.get_optimization_status()))
+
+# Plot the function with the optimization path
+x_min = np.min(all_points[:, 0]) - 1
+x_max = np.max(all_points[:, 0]) + 1
+y_min = np.min(all_points[:, 1]) - 1
+y_max = np.max(all_points[:, 1]) + 1
+x = np.linspace(x_min, x_max, 100)
+y = np.linspace(y_min, y_max, 100)
+X, Y = np.meshgrid(x, y)
+Z = function([X, Y])
+plt.contour(X, Y, Z, 50)
+plt.plot(all_points[:, 0], all_points[:, 1], marker='o', color='r')
+plt.show()
+
diff --git a/rosplane_tuning/src/autotune/plot_util.py b/rosplane_tuning/src/autotune/plot_util.py
index f6122fb..91d9f22 100644
--- a/rosplane_tuning/src/autotune/plot_util.py
+++ b/rosplane_tuning/src/autotune/plot_util.py
@@ -19,8 +19,8 @@ def __init__(self, *args):
         # None of the optimization parameters are needed, so they are ignored.

         # Define the grid of points to test and plot
-        self.x_values = np.linspace(0.0, 1.0, 10)
-        self.y_values = np.linspace(0.0, 1.0, 10)
+        self.x_values = np.linspace(0.1, 0.3, 10)
+        self.y_values = np.linspace(0.0, 0.1, 10)

         self.points_given = False
         self.plotting_complete = False

From e699b9fd9578afa1539ba80ed9a9051edfd4412b Mon Sep 17 00:00:00 2001
From: Joseph
Date: Mon, 8 Apr 2024 12:09:34 -0600
Subject: [PATCH 32/52] Print statement changes, currently looking at why
 there are issues with current point not being a numpy array.

---
 .../__pycache__/optimizer.cpython-312.pyc | Bin 0 -> 15093 bytes
 rosplane_tuning/src/autotune/optimizer.py |  28 ++++++++++-------
 .../src/autotune/optimizer_tester.py      |  16 +++++++++-
 3 files changed, 31 insertions(+), 13 deletions(-)
 create mode 100644 rosplane_tuning/src/autotune/__pycache__/optimizer.cpython-312.pyc

diff --git a/rosplane_tuning/src/autotune/__pycache__/optimizer.cpython-312.pyc b/rosplane_tuning/src/autotune/__pycache__/optimizer.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..599f59e176c8f1c7e97c357cf7cd4138bde41a58
GIT binary patch
literal 15093
[base64-encoded binary patch data for the compiled .pyc file omitted]

literal 0
HcmV?d00001

diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py
index 687ad5e..5f67fc1 100644
--- a/rosplane_tuning/src/autotune/optimizer.py
+++ b/rosplane_tuning/src/autotune/optimizer.py
@@ -40,6 +40,7 @@ def __init__(self, initial_gains, optimization_params):
         self.init_alpha = optimization_params['alpha']
         self.tau = optimization_params['tau']
         self.state = OptimizerState.FINDING_GRADIENT # Find the initial gradient from the starting gains
+        self.initial_gains = initial_gains

         # Line Search Variables
@@ -71,7 +72,10 @@ def get_optimization_status(self):
             str: The status of the optimization algorithm.
         """
         # Return with the final values, the best value found
-        return 'TODO: Status not implemented.'
+        if self.state == OptimizerState.TERMINATED:
+            return 'Optimization Terminated'
+        else:
+            return "Optimization in Progress"

@@ -103,9 +107,9 @@ def get_next_parameter_set(self, error):

             if self.k == 0:
-                new_gains = self.line_search(self.initial_gains, error, self.save_gains, self.new_phis)
+                new_gains = self.line_search(self.initial_gains, error, self.save_gains, self.save_phis)
             else:
-                new_gains = self.line_search()
+                new_gains = self.line_search(self.current_gains, error, self.save_gains, self.save_phis)

             return new_gains

@@ -187,9 +191,9 @@ def line_search(self, gains, phi, gainsh, phih):
             self.p = -phi_prime/np.linalg.norm(phi_prime) + bk*prior_p

         # Prepare for bracketing
-        self.OptimizerState = OptimizerState.BRACKETING
+        self.state = OptimizerState.BRACKETING
         # Request phi2 and phi2+h
-        gains2 = gains + self.a_init*p
+        gains2 = gains + self.init_alpha*self.p
         gains2h = [gain + 0.01 for gain in gains2]
         new_gains = np.array([gains2, gains2h])

@@ -230,13 +234,13 @@ def bracketing(self, gains, gainsh, phi1, phi1_prime, gains2, gains2h, phi2, phi
             # Request new point
             alphap = self.interpolate(alpha1, alpha2)
             gainsp = self.initial_gains + alphap*self.p
-            new_gains = [self.save_gains[4], self.save_gains[5]]
+            new_gains = np.array([self.save_gains[4], self.save_gains[5]])
             return new_gains

         # Optimized
         if abs(phi2_prime) <= -self.u2*phi1_prime:
             self.state = OptimizerState.SELECT_DIRECTION
-            new_gains = self.initial_gains + alpha2*self.p
+            new_gains = np.array([self.initial_gains + alpha2*self.p])
             self.current_gains = new_gains
             return new_gains

@@ -249,7 +253,7 @@ def bracketing(self, gains, gainsh, phi1, phi1_prime, gains2, gains2h, phi2, phi
             # Request new point
             alphap = self.interpolate(alpha1, alpha2)
             gainsp = self.initial_gains + alphap*self.p
-            new_gains = [self.save_gains[4], self.save_gains[5]]
+            new_gains = np.array([self.save_gains[4], self.save_gains[5]])
             return new_gains

         # Needs more Bracketing
@@ -263,7 +267,7 @@ def bracketing(self, gains, gainsh, phi1, phi1_prime, gains2, gains2h, phi2, phi
             gains1h = [gain + 0.01 for gain in gains1]
             gains2h = [gain + 0.01 for gain in gains2]

-            new_gains = [gains1, gains1h, gains2, gains2h]
+            new_gains = np.array([gains1, gains1h, gains2, gains2h])
             return new_gains

     def interpolate(self, alpha1, alpha2):
@@ -301,14 +305,14 @@ def pinpointing(self, gains1, phi1, phi1_prime, gains2, gainsp, phip, phip_prime
             phi2_prime = phip_prime
             self.save_gains = np.array([gains1, None, gains2, None, gainsp, [gain + 0.01 for gain in gainsp]])
             self.save_phis = np.array([phi1, phi1_prime, phi2, phi2_prime])
-            new_gains = [self.save_gains[4], self.save_gains[5]]
+            new_gains = np.array([self.save_gains[4], self.save_gains[5]])
             return new_gains

         else:
             # Optimized
             if abs(phip_prime) <= -self.u2*phi1_prime:
                 self.state = OptimizerState.SELECT_DIRECTION
                 alphastar = alphap
-                new_gains = self.initial_gains + alphastar*self.p
+                new_gains = np.array([self.initial_gains + alphastar*self.p])
                 return new_gains
             # More parameterization needed
             elif phip_prime*(alpha2 - alpha1) >= 0:
@@ -321,7 +325,7 @@ def pinpointing(self, gains1, phi1, phi1_prime, gains2, gainsp, phip, phip_prime
                 self.save_gains = np.array([gains1, None, gains2, None, gainsp, [gain + 0.01 for gain in gainsp]])
                 self.save_phis = np.array([phi1, phi1_prime, phi2, phi2_prime])
-                new_gains = [self.save_gains[4], self.save_gains[5]]
+                new_gains = np.array([self.save_gains[4], self.save_gains[5]])
                 return new_gains

         # Check for failure criteria - the nuclear option

diff --git a/rosplane_tuning/src/autotune/optimizer_tester.py b/rosplane_tuning/src/autotune/optimizer_tester.py
index 65ce30d..c386f70 100644
--- a/rosplane_tuning/src/autotune/optimizer_tester.py
+++ b/rosplane_tuning/src/autotune/optimizer_tester.py
@@ -7,8 +7,12 @@
 # Function to test the optimizer with
 def function(x):
     # Matyas function
+    print("f", x)
     return 0.26 * (x[0] ** 2 + x[1] ** 2) - 0.48 * x[0] * x[1]

+def gradient(x):
+    # Gradient of Matyas function
+    return np.array([0.52 * x[0] - 0.48 * x[1], 0.52 * x[1] - 0.48 * x[0]])

 # Initialize optimizer
 curr_points = np.array([[0, 5]])  # Initial point
@@ -22,22 +26,32 @@ def function(x):

 # Run optimization
 all_points = []
+k = 0
 while not optimizer.optimization_terminated():
+    print("Iteration ", k)
     # Print status
     print(optimizer.get_optimization_status())
+    print(optimizer.state)

     # Calculate error for current points
     error = []
+    # print(curr_points)  # Testing
     for point in curr_points:
         error.append(function(point))
     error = np.array(error)

+    print("CP", curr_points)  # Testing
+    # print("G", gradient(curr_points[0]))  # Testing
     curr_points = optimizer.get_next_parameter_set(error)
+    print("CP", curr_points)  # Testing

     # Store points
     for point in curr_points:
         all_points.append(point)
+
+    # End iteration step
+    k += 1
+    print()

 all_points = np.array(all_points)
 print('Optimization terminated with status: {}'.format(optimizer.get_optimization_status()))

From 1a02f1c2d6a82612abbc5fbf21ce464d9ca61829 Mon Sep 17 00:00:00 2001
From: Brandon Sutherland
Date: Mon, 8 Apr 2024 12:18:00 -0600
Subject: [PATCH 33/52] set pitch to zero when tuning airspeed

---
 .gitignore                               | 6 +++++-
 rosplane_tuning/src/autotune/autotune.py | 7 +++++--
 2 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/.gitignore b/.gitignore
index 301eb2e..8059276 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,4 +8,8 @@ log/

 # IDE files
 .vscode/
-.idea/
\ No newline at end of file
+.idea/
+
+# Python files
+__pycache__/
+venv/

diff --git a/rosplane_tuning/src/autotune/autotune.py b/rosplane_tuning/src/autotune/autotune.py
index 9d276aa..765d3a2 100644
--- a/rosplane_tuning/src/autotune/autotune.py
+++ b/rosplane_tuning/src/autotune/autotune.py
@@ -306,6 +306,7 @@ def set_signal_generator_params(self):
                 Parameter(name='controller_output', value='airspeed').to_parameter_msg(),
                 Parameter(name='signal_type', value='step').to_parameter_msg(),
                 Parameter(name='signal_magnitude', value=5.0).to_parameter_msg(),
+                Parameter(name='default_theta_c', value=0.0).to_parameter_msg()
             ]

@@ -341,7 +342,8 @@ def enable_autopilot_override(self):
             request.parameters = [
                 Parameter(name='roll_tuning_debug_override', value=True).to_parameter_msg()
             ]
-        elif self.current_autopilot == CurrentAutopilot.PITCH:
+        elif self.current_autopilot == CurrentAutopilot.PITCH \
+                or self.current_autopilot == CurrentAutopilot.AIRSPEED:
             request.parameters = [
                 Parameter(name='pitch_tuning_debug_override', value=True).to_parameter_msg()
             ]
@@ -357,7 +359,8 @@ def disable_autopilot_override(self):
             request.parameters = [
                 Parameter(name='roll_tuning_debug_override', value=False).to_parameter_msg()
             ]
-        elif self.current_autopilot == CurrentAutopilot.PITCH:
+        elif self.current_autopilot == CurrentAutopilot.PITCH \
+                or self.current_autopilot == CurrentAutopilot.AIRSPEED:
             request.parameters = [
                 Parameter(name='pitch_tuning_debug_override', value=False).to_parameter_msg()
             ]

From 16a45195f9a540c00f7f33b69e414d1342fd009d Mon Sep 17 00:00:00 2001
From: Brandon Sutherland
Date: Mon, 8 Apr 2024 12:34:53 -0600
Subject: [PATCH 34/52] wrapped lines at 100, replaced hard-coded value with
 parameter

---
 rosplane_tuning/src/autotune/optimizer.py | 86 ++++++++++++++---------
 1 file changed, 53 insertions(+), 33 deletions(-)

diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py
index 5f67fc1..ecfcc9d 100644
--- a/rosplane_tuning/src/autotune/optimizer.py
+++ b/rosplane_tuning/src/autotune/optimizer.py
@@ -39,7 +39,8 @@ def __init__(self, initial_gains, optimization_params):
         self.sigma = optimization_params['sigma']
         self.init_alpha = optimization_params['alpha']
         self.tau = optimization_params['tau']
-        self.state = OptimizerState.FINDING_GRADIENT # Find the initial gradient from the starting gains
+        self.state = OptimizerState.FINDING_GRADIENT    # Find the initial gradient from the
+                                                        # starting gains
         self.initial_gains = initial_gains

         # Line Search Variables
@@ -47,9 +48,14 @@ def __init__(self, initial_gains, optimization_params):
         self.reset = False
         self.p = np.array([])
         self.line_search_vector = np.array([0.0, 0.0])  # [prior_p, prior_phi_prime]
-        self.save_gains = np.array([]) # np.array to save gains between autotune iterations
-        self.save_phis = np.array([]) # np.array to save phis (to calc gradients) between autotune iterations, correspond with above array
-        self.current_gains = initial_gains # Gains updated every successful/nuclear pinpointing
+        self.save_gains = np.array([])      # np.array to save gains between autotune
+                                            # iterations
+        self.save_phis = np.array([])       # np.array to save phis (to calc gradients)
+                                            # between autotune iterations, correspond
+                                            # with above array
+        self.current_gains = initial_gains  # Gains updated every successful/nuclear
+                                            # pinpointing
+        self.finite_difference_step = 0.01


     def optimization_terminated(self):
@@ -94,25 +100,28 @@ def get_next_parameter_set(self, error):
get_next_parameter_set(self, error): """ if self.state == OptimizerState.FINDING_GRADIENT: - # Store current gains and error, request new gain and error for gradient, then select direction + # Store current gains and error, request new gain and error for gradient, then select + # direction self.state = OptimizerState.SELECT_DIRECTION self.save_gains = self.current_gains self.save_phis = error - new_gains = [gain + 0.01 for gain in self.initial_gains] - # After some calculations, found that even if you add 0.01 to all dimensions and divide by 0.01 for the gradient - # you get roughly the same magnitude on the gradient. If we need more accuracy we can add a smarter value - # (decrease based on number of dimensions) + new_gains = [gain + self.finite_difference_step for gain in self.initial_gains] + # After some calculations, found that even if you add 0.01 to all dimensions and divide + # by 0.01 for the gradient you get roughly the same magnitude on the gradient. If we + # need more accuracy we can add a smarter value (decrease based on number of dimensions) return new_gains - + elif self.state == OptimizerState.SELECT_DIRECTION: - + if self.k == 0: - new_gains = self.line_search(self.initial_gains, error, self.save_gains, self.save_phis) + new_gains = self.line_search(self.initial_gains, error, + self.save_gains, self.save_phis) else: - new_gains = self.line_search(self.current_gains, error, self.save_gains, self.save_phis) - + new_gains = self.line_search(self.current_gains, error, + self.save_gains, self.save_phis) + return new_gains - + elif self.state == OptimizerState.BRACKETING: # using error, conduct bracketing # return the next set of gains to test @@ -121,9 +130,12 @@ def get_next_parameter_set(self, error): gains2 = self.save_gains[2] gains2h = self.save_gains[3] - new_gains = self.bracketing(gains, gainsh, self.save_phis[0], self.save_phis[1], gains2, gains2h, error[0], error[1]) + new_gains = self.bracketing(gains, gainsh, + self.save_phis[0], self.save_phis[1], + gains2, gains2h, + error[0], error[1]) return new_gains - + elif self.state == OptimizerState.PINPOINTING: gains1 = self.save_gains[0] @@ -154,7 +166,7 @@ def get_gradient(self, phi, phih): Returns: float: The gradient at the given point. """ - return (phih - phi) / 0.01 # Shouldn't be a hard coded value, replace with a variable + return (phih - phi) / self.finite_difference_step def line_search(self, gains, phi, gainsh, phih): """ @@ -163,15 +175,17 @@ def line_search(self, gains, phi, gainsh, phih): Parameters: gains (np.array, size num_gains, dtype float): The current gains. [gain1, gain2, ...] - phi (np.array, size num_gains, dtype float): The error from the list of gains. [error1, error2, ...] + phi (np.array, size num_gains, dtype float): The error from the list of gains. + [error1, error2, ...] phi_prime (): The gradient of the error from the list of gains. Returns: np.array, size num_gains*num_sets, dtype float: The next series of gains to be tested. Any - number of gain sets can be returned, depending on the number of gains that need to be tested for - the follow on optimization. [[gain1_1, gain2_1, ...], [gain1_2, gain2_2, ...], ...] + number of gain sets can be returned, depending on the number of gains that need to be + tested for the follow on optimization. + [[gain1_1, gain2_1, ...], [gain1_2, gain2_2, ...], ...] 
""" - + phi_prime = self.get_gradient(phi, phih) # Calculate gradient # Check for convergence @@ -189,14 +203,14 @@ def line_search(self, gains, phi, gainsh, phih): else: bk = np.dot(phi_prime,phi_prime)/np.dot(prior_phi_prime,prior_phi_prime) self.p = -phi_prime/np.linalg.norm(phi_prime) + bk*prior_p - + # Prepare for bracketing self.state = OptimizerState.BRACKETING # Request phi2 and phi2+h gains2 = gains + self.init_alpha*self.p - gains2h = [gain + 0.01 for gain in gains2] + gains2h = [gain + self.finite_difference_step for gain in gains2] new_gains = np.array([gains2, gains2h]) - + # Save phi1 and phi1+h self.save_gains = np.array([gains, gainsh, gains2, gains2h]) self.save_phis = np.array([phi, phih]) @@ -222,14 +236,16 @@ def bracketing(self, gains, gainsh, phi1, phi1_prime, gains2, gains2h, phi2, phi This function conducts the bracketing part of the optimization. """ - alpha1 = self.distance(self.init_gains, gains) # For the initial pass, gains should be initial_gains, so alpha1 = 0 + alpha1 = self.distance(self.init_gains, gains) # For the initial pass, gains should be + # initial_gains, so alpha1 = 0 alpha2 = self.distance(self.init_gains, gains2) # Needs Pinpointing if(phi2 > phi1 + self.u1*self.init_alpha*phi1_prime) or (phi2 > phi1): self.state == OptimizerState.PINPOINTING # Save old points - self.save_gains = np.array([gains, gainsh, gains2, gains2h, gainsp, [gain + 0.01 for gain in gainsp]]) + self.save_gains = np.array([gains, gainsh, gains2, gains2h, gainsp, + [gain + self.finite_difference_step for gain in gainsp]]) self.save_phis = np.array([phi1, phi1_prime, phi2, phi2_prime]) # Request new point alphap = self.interpolate(alpha1, alpha2) @@ -248,7 +264,8 @@ def bracketing(self, gains, gainsh, phi1, phi1_prime, gains2, gains2h, phi2, phi elif phi2_prime >= 0: self.state == OptimizerState.PINPOINTING # Save old points - self.save_gains = np.array([gains, gainsh, gains2, gains2h, gainsp, [gain + 0.01 for gain in gainsp]]) + self.save_gains = np.array([gains, gainsh, gains2, gains2h, gainsp, + [gain + self.finite_difference_step for gain in gainsp]]) self.save_phis = np.array([phi1, phi1_prime, phi2, phi2_prime]) # Request new point alphap = self.interpolate(alpha1, alpha2) @@ -264,8 +281,8 @@ def bracketing(self, gains, gainsh, phi1, phi1_prime, gains2, gains2h, phi2, phi # Request new points gains1 = self.init_gains + alpha1*self.p gains2 = self.init_gains + alpha2*self.p - gains1h = [gain + 0.01 for gain in gains1] - gains2h = [gain + 0.01 for gain in gains2] + gains1h = [gain + self.finite_difference_step for gain in gains1] + gains2h = [gain + self.finite_difference_step for gain in gains2] new_gains = np.array([gains1, gains1h, gains2, gains2h]) return new_gains @@ -303,7 +320,8 @@ def pinpointing(self, gains1, phi1, phi1_prime, gains2, gainsp, phip, phip_prime gainsp = self.interpolate(gains1, gains2) phi2 = phip phi2_prime = phip_prime - self.save_gains = np.array([gains1, None, gains2, None, gainsp, [gain + 0.01 for gain in gainsp]]) + self.save_gains = np.array([gains1, None, gains2, None, gainsp, + [gain + self.finite_difference_step for gain in gainsp]]) self.save_phis = np.array([phi1, phi1_prime, phi2, phi2_prime]) new_gains = np.array([self.save_gains[4], self.save_gains[5]]) return new_gains @@ -318,12 +336,13 @@ def pinpointing(self, gains1, phi1, phi1_prime, gains2, gainsp, phip, phip_prime elif phip_prime*(alpha2 - alpha1) >= 0: gains2 = gains1 phi2 = phi1 - + gains1 = gainsp phi1 = phip gainsp = self.interpolate(gains1, gains2) - self.save_gains = 
np.array([gains1, None, gains2, None, gainsp, [gain + 0.01 for gain in gainsp]]) + self.save_gains = np.array([gains1, None, gains2, None, gainsp, + [gain + self.finite_difference_step for gain in gainsp]]) self.save_phis = np.array([phi1, phi1_prime, phi2, phi2_prime]) new_gains = np.array([self.save_gains[4], self.save_gains[5]]) return new_gains @@ -331,3 +350,4 @@ def pinpointing(self, gains1, phi1, phi1_prime, gains2, gainsp, phip, phip_prime # Check for failure criteria - the nuclear option # self.state == OptimizerState.SELECT_DIRECTION # self.current_gains = new_gains + From cb00e76259c6bba020ee279c3de79eff5c63d64d Mon Sep 17 00:00:00 2001 From: Brandon Sutherland Date: Mon, 8 Apr 2024 15:04:35 -0600 Subject: [PATCH 35/52] began debugging optimizer --- rosplane_tuning/src/autotune/optimizer.py | 93 +++++++++---------- .../src/autotune/optimizer_tester.py | 2 +- 2 files changed, 43 insertions(+), 52 deletions(-) diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py index ecfcc9d..6067c6c 100644 --- a/rosplane_tuning/src/autotune/optimizer.py +++ b/rosplane_tuning/src/autotune/optimizer.py @@ -8,10 +8,11 @@ class OptimizerState(Enum): """ This class defines the various states that exist within the optimizer. """ - SELECT_DIRECTION = auto() FINDING_GRADIENT = auto() + SELECT_DIRECTION = auto() BRACKETING = auto() PINPOINTING = auto() + TERMINATED = auto() @@ -21,7 +22,7 @@ class Optimizer: optimize functions of any number of parameters. """ - def __init__(self, initial_gains, optimization_params): + def __init__(self, initial_gains, optimization_params): """ Parameters: initial_gains (np.array, size num_gains, dtype float): The starting point for the gains to @@ -41,11 +42,10 @@ def __init__(self, initial_gains, optimization_params): self.tau = optimization_params['tau'] self.state = OptimizerState.FINDING_GRADIENT # Find the initial gradient from the # starting gains - self.initial_gains = initial_gains + self.current_gains = initial_gains # Line Search Variables - self.k = 0 - self.reset = False + self.reset = True self.p = np.array([]) self.line_search_vector = np.array([0.0, 0.0]) # [prior_p, prior_phi_prime] self.save_gains = np.array([]) # np.array to save gains between autotune @@ -53,9 +53,7 @@ def __init__(self, initial_gains, optimization_params): self.save_phis = np.array([]) # np.array to save phis (to calc gradients) # between autotune iterations, correspond # with above array - self.current_gains = initial_gains # Gains updated every succesful/nuclear - # pinpointing - self.finite_difference_step = 0.01 + self.h = 0.01 # The finite difference step size, h def optimization_terminated(self): @@ -103,23 +101,15 @@ def get_next_parameter_set(self, error): # Store current gains and error, request new gain and error for gradient, then select # direction self.state = OptimizerState.SELECT_DIRECTION - self.save_gains = self.current_gains self.save_phis = error - new_gains = [gain + self.finite_difference_step for gain in self.initial_gains] - # After some calculations, found that even if you add 0.01 to all dimensions and divide - # by 0.01 for the gradient you get roughly the same magnitude on the gradient. 
If we - # need more accuracy we can add a smarter value (decrease based on number of dimensions) + new_gains = np.tile(self.current_gains, (self.current_gains.shape[0], 1)) \ + + np.identity(self.current_gains.shape[0]) * self.h + self.save_gains = new_gains return new_gains elif self.state == OptimizerState.SELECT_DIRECTION: - - if self.k == 0: - new_gains = self.line_search(self.initial_gains, error, - self.save_gains, self.save_phis) - else: - new_gains = self.line_search(self.current_gains, error, - self.save_gains, self.save_phis) - + new_gains = self.line_search(self.current_gains, self.save_phis, + self.save_gains, error) return new_gains elif self.state == OptimizerState.BRACKETING: @@ -160,13 +150,15 @@ def get_gradient(self, phi, phih): This function returns the gradient at the given point using forward finite difference. Parameters: - phi (float): The function evaluation at the point of the gradient. - phih (float): The function evaluation at a point slightly offset from the point. + phi (np.array, size num_gains, dtype float): The function evaluation at the point of the + gradient. [error_1, error_2, ... ] + phih (np.array, size num_gains, dtype float): The function evaluation at a point slightly + offset from the point. [error_h_1, error_h_2, ... ] Returns: - float: The gradient at the given point. + np.array, size num_gains, dtype float: The gradient at the given point. """ - return (phih - phi) / self.finite_difference_step + return (phih - phi) / self.h def line_search(self, gains, phi, gainsh, phih): """ @@ -177,7 +169,7 @@ def line_search(self, gains, phi, gainsh, phih): gains (np.array, size num_gains, dtype float): The current gains. [gain1, gain2, ...] phi (np.array, size num_gains, dtype float): The error from the list of gains. [error1, error2, ...] - phi_prime (): The gradient of the error from the list of gains. + phi_prime (): The gradient of the error from the list of gains. Returns: np.array, size num_gains*num_sets, dtype float: The next series of gains to be tested. Any @@ -186,10 +178,10 @@ def line_search(self, gains, phi, gainsh, phih): [[gain1_1, gain2_1, ...], [gain1_2, gain2_2, ...], ...] """ - phi_prime = self.get_gradient(phi, phih) # Calculate gradient + phi_prime = self.get_gradient(phi, phih) - # Check for convergence - if np.linalg.norm(phi_prime,np.inf) < self.tau: + # Check for convergence using infinity norm + if np.linalg.norm(phi_prime, np.inf) < self.tau: self.state = OptimizerState.TERMINATED return self.current_gains @@ -197,7 +189,7 @@ def line_search(self, gains, phi, gainsh, phih): prior_p = self.line_search_vector[0] prior_phi_prime = self.line_search_vector[1] - if self.k == 0 or self.reset == True: + if self.reset: self.reset = False self.p = -phi_prime/np.linalg.norm(phi_prime) else: @@ -208,17 +200,16 @@ def line_search(self, gains, phi, gainsh, phih): self.state = OptimizerState.BRACKETING # Request phi2 and phi2+h gains2 = gains + self.init_alpha*self.p - gains2h = [gain + self.finite_difference_step for gain in gains2] + gains2h = gains2 + self.h*self.p new_gains = np.array([gains2, gains2h]) # Save phi1 and phi1+h self.save_gains = np.array([gains, gainsh, gains2, gains2h]) self.save_phis = np.array([phi, phih]) - self.k += 1 return new_gains - def distance(point1, point2): + def distance(self, point1, point2): """ This function calculates the distance between two points. 
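Note on the algorithm in this hunk: the direction update in line_search above is the Fletcher-Reeves form of nonlinear conjugate gradient, and the bracketing logic below tests the two Strong Wolfe conditions. A minimal standalone sketch of both, using the u1/u2 names from this stage of the series (helper names are illustrative, not code from the patch):

    import numpy as np

    def conjugate_direction(phi_prime, prior_phi_prime=None, prior_p=None):
        # First pass or reset: steepest descent along the normalized gradient.
        if prior_phi_prime is None:
            return -phi_prime / np.linalg.norm(phi_prime)
        # The Fletcher-Reeves factor bk blends in the previous direction.
        bk = np.dot(phi_prime, phi_prime) / np.dot(prior_phi_prime, prior_phi_prime)
        return -phi_prime / np.linalg.norm(phi_prime) + bk * prior_p

    def sufficient_decrease(phi1, phi1_prime, phi2, alpha, u1=1e-3):
        # 1st Strong Wolfe condition; bracketing treats a failure of this
        # test as the signal to start pinpointing.
        return phi2 <= phi1 + u1 * alpha * phi1_prime

    def flat_enough(phi1_prime, phi2_prime, u2=0.5):
        # 2nd Strong Wolfe condition; the step size is accepted as optimized
        # when the slope at the new point is sufficiently flat.
        return abs(phi2_prime) <= -u2 * phi1_prime
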
@@ -236,27 +227,27 @@ def bracketing(self, gains, gainsh, phi1, phi1_prime, gains2, gains2h, phi2, phi This function conducts the bracketing part of the optimization. """ - alpha1 = self.distance(self.init_gains, gains) # For the initial pass, gains should be - # initial_gains, so alpha1 = 0 - alpha2 = self.distance(self.init_gains, gains2) + alpha1 = self.distance(self.current_gains, gains) # For the initial pass, gains should be + # current_gains, so alpha1 = 0 + alpha2 = self.distance(self.current_gains, gains2) # Needs Pinpointing if(phi2 > phi1 + self.u1*self.init_alpha*phi1_prime) or (phi2 > phi1): self.state == OptimizerState.PINPOINTING # Save old points self.save_gains = np.array([gains, gainsh, gains2, gains2h, gainsp, - [gain + self.finite_difference_step for gain in gainsp]]) + [gain + self.h for gain in gainsp]]) self.save_phis = np.array([phi1, phi1_prime, phi2, phi2_prime]) # Request new point alphap = self.interpolate(alpha1, alpha2) - gainsp = self.init_gains + alphap*self.p + gainsp = self.current_gains + alphap*self.p new_gains = np.array([self.save_gains[4], self.save_gains[5]]) return new_gains # Optimized if abs(phi2_prime) <= -self.u2*phi1_prime: self.state == OptimizerState.SELECT_DIRECTION - new_gains = np.array([self.init_gains + alpha2*self.p]) + new_gains = np.array([self.current_gains + alpha2*self.p]) self.current_gains = new_gains return new_gains @@ -265,11 +256,11 @@ def bracketing(self, gains, gainsh, phi1, phi1_prime, gains2, gains2h, phi2, phi self.state == OptimizerState.PINPOINTING # Save old points self.save_gains = np.array([gains, gainsh, gains2, gains2h, gainsp, - [gain + self.finite_difference_step for gain in gainsp]]) + [gain + self.h for gain in gainsp]]) self.save_phis = np.array([phi1, phi1_prime, phi2, phi2_prime]) # Request new point alphap = self.interpolate(alpha1, alpha2) - gainsp = self.init_gains + alphap*self.p + gainsp = self.current_gains + alphap*self.p new_gains = np.array([self.save_gains[4], self.save_gains[5]]) return new_gains @@ -279,10 +270,10 @@ def bracketing(self, gains, gainsh, phi1, phi1_prime, gains2, gains2h, phi2, phi alpha2 = self.sigma*alpha2 # Request new points - gains1 = self.init_gains + alpha1*self.p - gains2 = self.init_gains + alpha2*self.p - gains1h = [gain + self.finite_difference_step for gain in gains1] - gains2h = [gain + self.finite_difference_step for gain in gains2] + gains1 = self.current_gains + alpha1*self.p + gains2 = self.current_gains + alpha2*self.p + gains1h = [gain + self.h for gain in gains1] + gains2h = [gain + self.h for gain in gains2] new_gains = np.array([gains1, gains1h, gains2, gains2h]) return new_gains @@ -310,9 +301,9 @@ def pinpointing(self, gains1, phi1, phi1_prime, gains2, gainsp, phip, phip_prime alphastar (float): The optimal step size """ - alpha1 = self.distance(self.init_gains, gains1) - alpha2 = self.distance(self.init_gains, gains2) - alphap = self.distance(self.init_gains, gainsp) + alpha1 = self.distance(self.current_gains, gains1) + alpha2 = self.distance(self.current_gains, gains2) + alphap = self.distance(self.current_gains, gainsp) if alphap > phi1 + self.u1*alphap*phi1_prime or alphap > phi1: # Continue pinpointing @@ -321,7 +312,7 @@ def pinpointing(self, gains1, phi1, phi1_prime, gains2, gainsp, phip, phip_prime phi2 = phip phi2_prime = phip_prime self.save_gains = np.array([gains1, None, gains2, None, gainsp, - [gain + self.finite_difference_step for gain in gainsp]]) + [gain + self.h for gain in gainsp]]) self.save_phis = np.array([phi1, phi1_prime, phi2, 
phi2_prime]) new_gains = np.array([self.save_gains[4], self.save_gains[5]]) return new_gains @@ -330,7 +321,7 @@ def pinpointing(self, gains1, phi1, phi1_prime, gains2, gainsp, phip, phip_prime if abs(phip_prime) <= -self.u2*phi1_prime: self.state == OptimizerState.SELECT_DIRECTION alphastar = alphap - new_gains = np.array([self.init_gains + alphastar*self.p]) + new_gains = np.array([self.current_gains + alphastar*self.p]) return new_gains # More parameterization needed elif phip_prime*(alpha2 - alpha1) >= 0: @@ -342,7 +333,7 @@ def pinpointing(self, gains1, phi1, phi1_prime, gains2, gainsp, phip, phip_prime gainsp = self.interpolate(gains1, gains2) self.save_gains = np.array([gains1, None, gains2, None, gainsp, - [gain + self.finite_difference_step for gain in gainsp]]) + [gain + self.h for gain in gainsp]]) self.save_phis = np.array([phi1, phi1_prime, phi2, phi2_prime]) new_gains = np.array([self.save_gains[4], self.save_gains[5]]) return new_gains diff --git a/rosplane_tuning/src/autotune/optimizer_tester.py b/rosplane_tuning/src/autotune/optimizer_tester.py index c386f70..4454607 100644 --- a/rosplane_tuning/src/autotune/optimizer_tester.py +++ b/rosplane_tuning/src/autotune/optimizer_tester.py @@ -15,7 +15,7 @@ def gradient(x): return np.array([0.52 * x[0] - 0.48 * x[1], 0.52 * x[1] - 0.48 * x[0]]) # Initialize optimizer -curr_points = np.array([[0, 5]]) # Initial point +curr_points = np.array([[0.0, 5.0]]) # Initial point optimization_params = {'u1': 1e-4, 'u2': 0.5, 'sigma': 1.5, From f95727c91614483acf41ed6dc3d5a88f4c7ecec7 Mon Sep 17 00:00:00 2001 From: Joseph Date: Mon, 8 Apr 2024 17:38:30 -0600 Subject: [PATCH 36/52] Changed gradient function to work with the jacobian, added extra gains to account for working with the jacobian; now at bracketing and bug fixing.
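A sketch of the forward-difference scheme this commit works toward (helper names are illustrative, not code from the patch): the optimizer hands back one perturbed point per gain, evaluates the error at each, and forms the jacobian from the differences. Using the Matyas test function from optimizer_tester.py:

    import numpy as np

    def jacobian_points(x, h=0.01):
        # Row i is x with the finite-difference step h added to gain i only.
        return np.tile(x, (x.shape[0], 1)) + np.identity(x.shape[0]) * h

    def forward_difference_jacobian(phi_0, phi_steps, h=0.01):
        # phi_0 is the error at x; phi_steps holds the error at each perturbed point.
        return (phi_steps - phi_0) / h

    matyas = lambda x: 0.26 * (x[0] ** 2 + x[1] ** 2) - 0.48 * x[0] * x[1]
    x = np.array([0.0, 5.0])
    phi_steps = np.array([matyas(p) for p in jacobian_points(x)])
    J = forward_difference_jacobian(matyas(x), phi_steps)  # approximately [-2.4, 2.6]
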
--- .../__pycache__/optimizer.cpython-312.pyc | Bin 15093 -> 15873 bytes rosplane_tuning/src/autotune/optimizer.py | 50 ++++++++++-------- .../src/autotune/optimizer_tester.py | 6 +-- 3 files changed, 31 insertions(+), 25 deletions(-) diff --git a/rosplane_tuning/src/autotune/__pycache__/optimizer.cpython-312.pyc b/rosplane_tuning/src/autotune/__pycache__/optimizer.cpython-312.pyc index 599f59e176c8f1c7e97c357cf7cd4138bde41a58..658496156fb8fc42c79c5e9fd3d7d4bc294d25bf 100644 GIT binary patch [two base85-encoded binary deltas (delta 5096, delta 4535) for the accidentally committed __pycache__ bytecode omitted] diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py [index line and opening hunk header garbled in extraction; the readable diff resumes inside bracketing()] if(phi2 > phi1 + self.u1*self.init_alpha*phi1_prime) or (phi2 > phi1): self.state == OptimizerState.PINPOINTING # Save old points - self.save_gains = np.array([gains, gainsh, gains2, gains2h, gainsp, + self.save_gains = np.array([gains, gains2, gains2h, gainsp, [gain + self.h for gain in gainsp]]) self.save_phis = np.array([phi1, phi1_prime, phi2, phi2_prime]) # Request new point alphap = self.interpolate(alpha1, alpha2) gainsp = self.current_gains + alphap*self.p - new_gains = np.array([self.save_gains[4], self.save_gains[5]]) + new_gains = np.array([self.save_gains[3], self.save_gains[4]]) return new_gains # Optimized @@ -255,13 +261,13 @@ def bracketing(self, gains, gainsh, phi1, phi1_prime, gains2, gains2h, phi2, phi elif phi2_prime >= 0: self.state == OptimizerState.PINPOINTING # Save old points - self.save_gains = np.array([gains, gainsh, gains2, gains2h, gainsp, + self.save_gains = np.array([gains, gains2, gains2h, gainsp, [gain + self.h for gain in gainsp]]) self.save_phis = np.array([phi1, phi1_prime, phi2, phi2_prime]) # Request new point alphap = self.interpolate(alpha1, alpha2) gainsp = self.current_gains + alphap*self.p - new_gains = np.array([self.save_gains[4], self.save_gains[5]]) + new_gains = np.array([self.save_gains[3], self.save_gains[4]]) return new_gains # Needs more Bracketing diff --git a/rosplane_tuning/src/autotune/optimizer_tester.py b/rosplane_tuning/src/autotune/optimizer_tester.py index 4454607..a72380c 100644 --- a/rosplane_tuning/src/autotune/optimizer_tester.py +++ b/rosplane_tuning/src/autotune/optimizer_tester.py @@ -35,15 +35,15 @@ def gradient(x): # Calculate error for current points error = [] - # print(curr_points) # Testing + print("CP", curr_points) # Testing for
point in curr_points: error.append(function(point)) error = np.array(error) # Pass points to optimizer - print("CP", curr_points) # Testing + # print("CP", curr_points) # Testing # print("G", gradient(curr_points[0])) # Testing curr_points = optimizer.get_next_parameter_set(error) - print("CP", curr_points) # Testing + # print("CP", curr_points) # Testing # Store points for point in curr_points: From ea4e4f4aaa0c0738c4d068a593a3a0acc155eb33 Mon Sep 17 00:00:00 2001 From: Joseph Date: Tue, 9 Apr 2024 17:20:59 -0600 Subject: [PATCH 37/52] Added different parameter options for different gains. If there are other options we want to change based on the gains, we can add them there as well. --- rosplane_tuning/src/autotune/autotune.py | 44 +++++++++++++++++++++--- 1 file changed, 39 insertions(+), 5 deletions(-) diff --git a/rosplane_tuning/src/autotune/autotune.py b/rosplane_tuning/src/autotune/autotune.py index 765d3a2..43dea1f 100644 --- a/rosplane_tuning/src/autotune/autotune.py +++ b/rosplane_tuning/src/autotune/autotune.py @@ -153,11 +153,45 @@ def __init__(self): # Optimization self.initial_gains = None # get_gains function cannot be called in __init__ since the node # has not yet been passed to the executor - self.optimization_params = {'u1': 1e-3, - 'u2': 0.5, - 'sigma': 2.0, - 'alpha': 0.05, - 'tau': 1e-2} + + # As we conduct optimizations, these parameters will be changed to be more + # efficient to optimize the specific gains, based on the design spaces. + if self.get_parameter('current_tuning_autopilot').value == 'roll': + self.optimization_params = {'u1': 1e-3, + 'u2': 0.5, + 'sigma': 2.0, + 'alpha': 0.05, + 'tau': 1e-2} + elif self.get_parameter('current_tuning_autopilot').value == 'course': + self.optimization_params = {'u1': 1e-3, + 'u2': 0.5, + 'sigma': 2.0, + 'alpha': 0.05, + 'tau': 1e-2} + elif self.get_parameter('current_tuning_autopilot').value == 'pitch': + self.optimization_params = {'u1': 1e-3, + 'u2': 0.5, + 'sigma': 2.0, + 'alpha': 0.05, + 'tau': 1e-2} + elif self.get_parameter('current_tuning_autopilot').value == 'altitude': + self.optimization_params = {'u1': 1e-3, + 'u2': 0.5, + 'sigma': 2.0, + 'alpha': 0.05, + 'tau': 1e-2} + elif self.get_parameter('current_tuning_autopilot').value == 'airspeed': + self.optimization_params = {'u1': 1e-3, + 'u2': 0.5, + 'sigma': 2.0, + 'alpha': 0.05, + 'tau': 1e-2} + else: + raise ValueError(self.get_parameter('current_tuning_autopilot').value + + ' is not a valid value for current_tuning_autopilot.' + + ' Please select one of the' + + ' following: roll, course, pitch, altitude, airspeed.') + self.optimizer = None From 41b820a53531101a29aa304e84d6bee8dafdfc35 Mon Sep 17 00:00:00 2001 From: Brandon Sutherland Date: Tue, 9 Apr 2024 16:06:04 -0600 Subject: [PATCH 38/52] got bracketing working --- rosplane_tuning/src/autotune/optimizer.py | 346 +++++++----------- .../src/autotune/optimizer_tester.py | 4 +- 2 files changed, 141 insertions(+), 209 deletions(-) diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py index 6a4fb66..3837f55 100644 --- a/rosplane_tuning/src/autotune/optimizer.py +++ b/rosplane_tuning/src/autotune/optimizer.py @@ -8,8 +8,8 @@ class OptimizerState(Enum): """ This class defines the various states that exist within the optimizer. """ + UNINITIALIZED = auto() FINDING_GRADIENT = auto() - SELECT_DIRECTION = auto() BRACKETING = auto() PINPOINTING = auto() @@ -22,38 +22,66 @@ class Optimizer: optimize functions of any number of parameters. 
""" - def __init__(self, initial_gains, optimization_params): + def __init__(self, initial_point, optimization_params): """ Parameters: - initial_gains (np.array, size num_gains, dtype float): The starting point for the gains to - be optimized (x0). [gain1, gain2, ...] + initial_point (np.array, size num_gains, dtype float): The starting point for the gains to + be optimized (x_0). [gain1, gain2, ...] optimization_params (dictionary): Parameters needed for the optimization operation. - - u1 (float): 1st Strong Wolfe Condition, must be between 0 and 1. - - u2 (float): 2nd Strong Wolfe Condition, must be between u1 and 1. - - sigma (float): The step size multiplier. - - alpha (float): The initial step size, usually 1. - - tau (float): Convergence tolerance, usually 1e-3. + - mu_1 (float): 1st Strong Wolfe Condition, specifies the minimum decrease that will + be accepted. Must be between 0 and 1, but is usually a very small value like 1e-4. + - mu_2 (float): 2nd Strong Wolfe Condition, specifies the sufficient decrease in + curvature that will be accepted. Must be between mu_1 and 1, typically between 0.1 + and 0.9. + - sigma (float): The step size multiplier, must be greater than 1. + - alpha (float): The initial step size. The ideal value for this is very + application-specific. + - tau (float): The convergence tolerance. The optimization is considered converged + when the inifity norm is less than tau. """ - # Set initial values - self.u1 = optimization_params['u1'] - self.u2 = optimization_params['u2'] - self.sigma = optimization_params['sigma'] - self.init_alpha = optimization_params['alpha'] - self.tau = optimization_params['tau'] - self.state = OptimizerState.FINDING_GRADIENT # Find the initial gradient from the - # starting gains - self.current_gains = initial_gains - - # Line Search Variables - self.reset = True - self.p = np.array([]) - self.line_search_vector = np.array([0.0, 0.0]) # [prior_p, prior_phi_prime] - self.save_gains = np.array([]) # np.array to save gains between autotune - # iterations - self.save_phis = np.array([]) # np.array to save phis (to calc gradients) - # between autotune iterations, correspond - # with above array - self.h = 0.01 # The finite difference step size, h + + # Save passed optimization parameters. See above for parameters details. + self.mu_1 = optimization_params['mu_1'] + self.mu_2 = optimization_params['mu_2'] + self.sigma = optimization_params['sigma'] + self.alpha = optimization_params['alpha'] + self.tau = optimization_params['tau'] + + # The current state of the optimization process. + self.state = OptimizerState.UNINITIALIZED + + # The jacobian of the function at x_0. + self.J = None + + # The current search direction from x_0. + self.p = None + + # The step size for calculating gradients with finite-difference. + self.h = 0.01 + + # The current best point in the optimization, where we are stepping away from. + self.x_0 = initial_point + # The function value (error) at x_0. + self.phi_0 = None + # The gradient at x_0 along the search direction p. + self.phi_0_prime = None + + # The function value at the best point on the line search (x_1). + self.phi_1 = None + # The gradient at x_1 along the search direction p. + self.phi_1_prime = None + # The step size from x_0 to x_1 along the search direction p. + self.alpha_1 = None + + # The function value at the best point on the line search (x_2). + self.phi_2 = None + # The gradient at x_2 along the search direction p. 
+ self.phi_2_prime = None + # The step size from x_0 to x_2 along the search direction p. + self.alpha_2 = None + + # Bool to specify if this is the first bracketing step, used by the bracketing algorithm. + self.first_bracket = None def optimization_terminated(self): @@ -75,11 +103,16 @@ def get_optimiztion_status(self): Returns: str: The status of the optimization algorithm. """ - # Return with the final values, the best value found - if self.state == OptimizerState.TERMINATED: - return 'Optimization Terminated' - else: - return "Optimization in Progress" + if self.state == OptimizerState.UNINITIALIZED: + return "Optimizer awaiting first iteration." + elif self.state == OptimizerState.FINDING_GRADIENT: + return "Finding gradient." + elif self.state == OptimizerState.BRACKETING: + return "Bracketing step size." + elif self.state == OptimizerState.PINPOINTING: + return "Pinpointing step size." + elif self.state == OptimizerState.TERMINATED: + return "Optimizer terminated with final gains at: " + str(self.x_0) def get_next_parameter_set(self, error): """ @@ -97,192 +130,97 @@ def get_next_parameter_set(self, error): given at initialization. [[gain1_1, gain2_1, ...], [gain1_2, gain2_2, ...], ...] """ - if self.state == OptimizerState.FINDING_GRADIENT: - # Store current gains and error, request new gain and error for gradient, then select - # direction - self.state = OptimizerState.SELECT_DIRECTION - self.save_phis = error - new_gains = np.tile(self.current_gains, (self.current_gains.shape[0], 1)) \ - + np.identity(self.current_gains.shape[0]) * self.h - self.save_gains = new_gains - return new_gains + if self.state == OptimizerState.UNINITIALIZED: + # Find the gradient at the new point + self.state = OptimizerState.FINDING_GRADIENT + self.phi_0 = error[0] - elif self.state == OptimizerState.SELECT_DIRECTION: - new_gains = self.line_search(self.current_gains, - self.save_phis, error) - return new_gains + # Find the values for finite difference, where each dimmension is individually + # stepped by h + return np.tile(self.x_0, (self.x_0.shape[0], 1)) \ + + np.identity(self.x_0.shape[0]) * self.h + + if self.state == OptimizerState.FINDING_GRADIENT: + # Calculate the jacobian at the current point + J_prior = self.J + self.J = (error - self.phi_0) / self.h + + # Check for convergence using infinity norm + if np.linalg.norm(self.J, np.inf) < self.tau: + self.state = OptimizerState.TERMINATED + return self.x_0.reshape(1, 2) + + # Find the search direction + if J_prior is None: + self.p = -self.J / np.linalg.norm(self.J) + else: + beta = np.dot(self.J, self.J) / np.dot(J_prior, J_prior) + self.p = -self.J / np.linalg.norm(self.J) + beta*self.p + + # Begin bracketing + self.phi_0_prime = np.dot(self.p, self.J) + self.alpha_1 = 0 + self.alpha_2 = self.alpha + self.phi_1 = self.phi_0 + self.phi_1_prime = self.phi_0_prime + self.first_bracket = True + self.state = OptimizerState.BRACKETING + + # Find phi_2 and phi_2_prime + x_2 = (self.x_0 + self.alpha_2*self.p) + return np.array([x_2, (x_2 + self.h*self.p)]) elif self.state == OptimizerState.BRACKETING: - # using error, conduct bracketing - # return the next set of gains to test - gains = self.save_gains[0] - gains2 = self.save_gains[1] - gains2h = self.save_gains[2] - - new_gains = self.bracketing(gains, - self.save_phis[0], self.save_phis[1], - gains2, gains2h, - error[0], error[1]) - return new_gains + self.phi_2 = error[0] + self.phi_2_prime = (error[1] - error[0]) / self.h + new_points = self.bracketing() + self.first_bracket = False + return 
new_points elif self.state == OptimizerState.PINPOINTING: - - gains1 = self.save_gains[0] - gains2 = self.save_gains[1] - gainsp = self.save_gains[3] - - phip = error[0] - phiph = error[1] - phip_prime = self.get_gradient(phip, phiph) - - phi1 = self.save_phis[0] - phi1_prime = self.save_phis[1] - - new_gains = self.pinpointing(gains1, phi1, phi1_prime, gains2, gainsp, phip, phip_prime) - return new_gains + self.phi_2 = error[0] + self.phi_2_prime = (error[1] - error[0]) / self.h + return self.pinpointing() else: # Process Terminated - return self.current_gains + return self.x_0.reshape(1, 2) - def get_gradient(self, phi, phih): - """ - This function returns the gradient at the given point using forward finite difference. - Parameters: - phi (np.array, size num_gains, dtype float): The function evaluation at the point of the - gradient. [error_1, error_2, ... ] - phih (np.array, size num_gains, dtype float): The function evaluation at a point slightly - offset from the point. [[error_h_1, error_h_2], ... ] - ex. if initial gain is [0,5], phih would be the error from [[0.01, 5],[0,5.01]] - - Returns: - np.array, size num_gains, dtype float: The gradient at the given point. - """ - xvalue = (phih[0] - phi) / self.h - yvalue = (phih[1] - phi) / self.h - - gradient = np.array([xvalue, yvalue]) - return gradient - - def line_search(self, gains, phi, error): - """ - This function conducts the initial direction selection part of the optimization, - using conjugate gradient descent. - - Parameters: - gains (np.array, size num_gains, dtype float): The current gains. [gain1, gain2, ...] - phi (np.array, size num_gains, dtype float): The error from the list of gains. - [error1, error2, ...] - phi_prime (): The gradient of the error from the list of gains. - - Returns: - np.array, size num_gains*num_sets, dtype float: The next series of gains to be tested. Any - number of gain sets can be returned, depending on the number of gains that need to be - tested for the follow on optimization. - [[gain1_1, gain2_1, ...], [gain1_2, gain2_2, ...], ...] - """ - phih = error - phi_prime = self.get_gradient(phi, phih) - - # Check for convergence using infinity norm - if np.linalg.norm(phi_prime, np.inf) < self.tau: - self.state = OptimizerState.TERMINATED - return self.current_gains - - # Select Direction - prior_p = self.line_search_vector[0] - prior_phi_prime = self.line_search_vector[1] - - if self.reset: - self.reset = False - self.p = -phi_prime/np.linalg.norm(phi_prime) - else: - bk = np.dot(phi_prime,phi_prime)/np.dot(prior_phi_prime,prior_phi_prime) - self.p = -phi_prime/np.linalg.norm(phi_prime) + bk*prior_p - - # Prepare for bracketing - self.state = OptimizerState.BRACKETING - # Request phi2 and phi2+h - gains2 = np.squeeze(gains + self.init_alpha*self.p.T) - gains2hx = np.array([gains2[0] + self.h, gains2[1]]) - gains2hy = np.array([gains2[0], gains2[1] + self.h]) - new_gains = np.vstack([gains2, gains2hx, gains2hy]) - print("NG", new_gains) - - # Save phi1 and phi1+h - self.save_gains = np.vstack([gains, gains2, gains2hx, gains2hy]) - self.save_phis = np.vstack([phi, [phih[0]], [phih[1]]]) - - return new_gains - - def distance(self, point1, point2): - """ - This function calculates the distance between two points. - - Parameters: - point1 (np.array, size num_gains, dtype float): The first point. [gain1, gain2, ...] - point2 (np.array, size num_gains, dtype float): The second point. [gain1, gain2, ...] - - Returns: - float: The distance between the two points. 
- """ - return np.linalg.norm(point1 - point2) - - def bracketing(self, gains, phi1, phi1_prime, gains2, gains2h, phi2, phi2_prime): + def bracketing(self): """ This function conducts the bracketing part of the optimization. """ - alpha1 = self.distance(self.current_gains, gains) # For the initial pass, gains should be - # current_gains, so alpha1 = 0 - alpha2 = self.distance(self.current_gains, gains2) - # Needs Pinpointing - if(phi2 > phi1 + self.u1*self.init_alpha*phi1_prime) or (phi2 > phi1): - self.state == OptimizerState.PINPOINTING - # Save old points - self.save_gains = np.array([gains, gains2, gains2h, gainsp, - [gain + self.h for gain in gainsp]]) - self.save_phis = np.array([phi1, phi1_prime, phi2, phi2_prime]) - # Request new point - alphap = self.interpolate(alpha1, alpha2) - gainsp = self.current_gains + alphap*self.p - new_gains = np.array([self.save_gains[3], self.save_gains[4]]) - return new_gains + if(self.phi_2 > self.phi_0 + self.mu_1*self.alpha_2*self.phi_0_prime) \ + or (not self.first_bracket and self.phi_2 > self.phi_1): + self.state = OptimizerState.PINPOINTING + return self.pinpointing() # Optimized - if abs(phi2_prime) <= -self.u2*phi1_prime: - self.state == OptimizerState.SELECT_DIRECTION - new_gains = np.array([self.current_gains + alpha2*self.p]) - self.current_gains = new_gains - return new_gains + if abs(self.phi_2_prime) <= -self.mu_2*self.phi_0_prime: + self.x_0 = self.x_0 + self.alpha_2*self.p + self.phi_0 = self.phi_2 + self.state = OptimizerState.FINDING_GRADIENT + return np.tile(self.x_0, (self.x_0.shape[0], 1)) \ + + np.identity(self.x_0.shape[0]) * self.h # Needs Pinpointing - elif phi2_prime >= 0: - self.state == OptimizerState.PINPOINTING - # Save old points - self.save_gains = np.array([gains, gains2, gains2h, gainsp, - [gain + self.h for gain in gainsp]]) - self.save_phis = np.array([phi1, phi1_prime, phi2, phi2_prime]) - # Request new point - alphap = self.interpolate(alpha1, alpha2) - gainsp = self.current_gains + alphap*self.p - new_gains = np.array([self.save_gains[3], self.save_gains[4]]) - return new_gains + elif self.phi_2_prime >= 0: + self.state = OptimizerState.PINPOINTING + return self.pinpointing() # Needs more Bracketing else: - alpha1 = alpha2 - alpha2 = self.sigma*alpha2 - - # Request new points - gains1 = self.current_gains + alpha1*self.p - gains2 = self.current_gains + alpha2*self.p - gains1h = [gain + self.h for gain in gains1] - gains2h = [gain + self.h for gain in gains2] + # Set x_2 to x_1 and increase alpha_2 + self.alpha_1 = self.alpha_2 + self.alpha_2 = self.sigma*self.alpha_2 + self.phi_1 = self.phi_2 + self.phi_1_prime = self.phi_2_prime - new_gains = np.array([gains1, gains1h, gains2, gains2h]) - return new_gains + # Find new x_2 + x_2 = self.x_0 + self.alpha_2*self.p + return np.array([x_2, (x_2 + self.h*self.p)]) def interpolate(self, alpha1, alpha2): """ @@ -297,12 +235,10 @@ def interpolate(self, alpha1, alpha2): """ return (alpha1 + alpha2)/2 - def pinpointing(self, gains1, phi1, phi1_prime, gains2, gainsp, phip, phip_prime): + def pinpointing(self): """ This function conducts the pinpointing part of the optimization. 
- Parameters: - Returns: alphastar (float): The optimal step size """ @@ -311,7 +247,7 @@ def pinpointing(self, gains1, phi1, phi1_prime, gains2, gainsp, phip, phip_prime alpha2 = self.distance(self.current_gains, gains2) alphap = self.distance(self.current_gains, gainsp) - if alphap > phi1 + self.u1*alphap*phi1_prime or alphap > phi1: + if alphap > phi1 + self.mu_1*alphap*phi1_prime or alphap > phi1: # Continue pinpointing gains2 = gainsp gainsp = self.interpolate(gains1, gains2) @@ -323,7 +259,7 @@ def pinpointing(self, gains1, phi1, phi1_prime, gains2, gainsp, phip, phip_prime new_gains = np.array([self.save_gains[4], self.save_gains[5]]) return new_gains else: - # Optimized + # Optimizedmu_2 if abs(phip_prime) <= -self.u2*phi1_prime: self.state == OptimizerState.SELECT_DIRECTION alphastar = alphap @@ -344,7 +280,3 @@ def pinpointing(self, gains1, phi1, phi1_prime, gains2, gainsp, phip, phip_prime new_gains = np.array([self.save_gains[4], self.save_gains[5]]) return new_gains - # Check for failure criteria - the nuclear option - # self.state == OptimizerState.SELECT_DIRECTION - # self.current_gains = new_gains - diff --git a/rosplane_tuning/src/autotune/optimizer_tester.py b/rosplane_tuning/src/autotune/optimizer_tester.py index a72380c..941b9b3 100644 --- a/rosplane_tuning/src/autotune/optimizer_tester.py +++ b/rosplane_tuning/src/autotune/optimizer_tester.py @@ -16,8 +16,8 @@ def gradient(x): # Initialize optimizer curr_points = np.array([[0.0, 5.0]]) # Initial point -optimization_params = {'u1': 1e-4, - 'u2': 0.5, +optimization_params = {'mu_1': 1e-4, + 'mu_2': 0.5, 'sigma': 1.5, 'alpha': 1.0, 'tau': 1e-3} From 9a0167bd4d27db9737789f78e86cb6980c59b891 Mon Sep 17 00:00:00 2001 From: Brandon Sutherland Date: Tue, 9 Apr 2024 18:18:22 -0600 Subject: [PATCH 39/52] got pinpoint working, completing inital optimizer --- rosplane_tuning/src/autotune/optimizer.py | 171 +++++++++++------- .../src/autotune/optimizer_tester.py | 36 ++-- 2 files changed, 120 insertions(+), 87 deletions(-) diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py index 3837f55..4b803d9 100644 --- a/rosplane_tuning/src/autotune/optimizer.py +++ b/rosplane_tuning/src/autotune/optimizer.py @@ -9,10 +9,9 @@ class OptimizerState(Enum): This class defines the various states that exist within the optimizer. """ UNINITIALIZED = auto() - FINDING_GRADIENT = auto() + FINDING_JACOBEAN = auto() BRACKETING = auto() PINPOINTING = auto() - TERMINATED = auto() @@ -37,7 +36,11 @@ def __init__(self, initial_point, optimization_params): - alpha (float): The initial step size. The ideal value for this is very application-specific. - tau (float): The convergence tolerance. The optimization is considered converged - when the inifity norm is less than tau. + when the infinity norm is less than tau. + - h (float): Finite difference step size. This is used to calculate the gradient + by stepping slightly away from a point and then calculating the slope of the + line between the two points. Ideally this value will be small, but it is sensitive + to noise and can't get too small. """ # Save passed optimization parameters. See above for parameters details. @@ -46,19 +49,17 @@ def __init__(self, initial_point, optimization_params): self.sigma = optimization_params['sigma'] self.alpha = optimization_params['alpha'] self.tau = optimization_params['tau'] + self.h = optimization_params['h'] # The current state of the optimization process. 
self.state = OptimizerState.UNINITIALIZED - # The jacobian of the function at x_0. + # The jacobean of the function at x_0. self.J = None # The current search direction from x_0. self.p = None - # The step size for calculating gradients with finite-difference. - self.h = 0.01 - # The current best point in the optimization, where we are stepping away from. self.x_0 = initial_point # The function value (error) at x_0. @@ -83,6 +84,16 @@ def __init__(self, initial_point, optimization_params): # Bool to specify if this is the first bracketing step, used by the bracketing algorithm. self.first_bracket = None + # TODO: describe these parameters + self.alpha_p = None + self.phi_p = None + self.phi_p_prime = None + self.alpha_low = None + self.alpha_high = None + self.phi_low = None + self.phi_low_prime = None + self.phi_high = None + self.phi_high_prime = None def optimization_terminated(self): """ @@ -96,7 +107,7 @@ def optimization_terminated(self): else: return False - def get_optimiztion_status(self): + def get_optimization_status(self): """ This function returns the status of the optimization algorithm. @@ -105,7 +116,7 @@ def get_optimiztion_status(self): """ if self.state == OptimizerState.UNINITIALIZED: return "Optimizer awaiting first iteration." - elif self.state == OptimizerState.FINDING_GRADIENT: + elif self.state == OptimizerState.FINDING_JACOBEAN: return "Finding gradient." elif self.state == OptimizerState.BRACKETING: return "Bracketing step size." @@ -132,16 +143,16 @@ def get_next_parameter_set(self, error): if self.state == OptimizerState.UNINITIALIZED: # Find the gradient at the new point - self.state = OptimizerState.FINDING_GRADIENT + self.state = OptimizerState.FINDING_JACOBEAN self.phi_0 = error[0] - # Find the values for finite difference, where each dimmension is individually + # Find the values for finite difference, where each dimension is individually # stepped by h return np.tile(self.x_0, (self.x_0.shape[0], 1)) \ + np.identity(self.x_0.shape[0]) * self.h - if self.state == OptimizerState.FINDING_GRADIENT: - # Calculate the jacobian at the current point + if self.state == OptimizerState.FINDING_JACOBEAN: + # Calculate the jacobean at the current point J_prior = self.J self.J = (error - self.phi_0) / self.h @@ -178,8 +189,8 @@ def get_next_parameter_set(self, error): return new_points elif self.state == OptimizerState.PINPOINTING: - self.phi_2 = error[0] - self.phi_2_prime = (error[1] - error[0]) / self.h + self.phi_p = error[0] + self.phi_p_prime = (error[1] - error[0]) / self.h return self.pinpointing() else: # Process Terminated @@ -194,21 +205,41 @@ def bracketing(self): # Needs Pinpointing if(self.phi_2 > self.phi_0 + self.mu_1*self.alpha_2*self.phi_0_prime) \ or (not self.first_bracket and self.phi_2 > self.phi_1): + self.alpha_low = self.alpha_1 + self.alpha_high = self.alpha_2 + self.phi_low = self.phi_1 + self.phi_low_prime = self.phi_1_prime + self.phi_high = self.phi_2 + self.phi_high_prime = self.phi_2_prime self.state = OptimizerState.PINPOINTING - return self.pinpointing() + + self.alpha_p = interpolate(self.alpha_1, self.alpha_2, self.phi_1, self.phi_2, + self.phi_1_prime, self.phi_2_prime) + x_p = self.x_0 + self.alpha_p*self.p + return np.array([x_p, (x_p + self.h*self.p)]) # Optimized if abs(self.phi_2_prime) <= -self.mu_2*self.phi_0_prime: self.x_0 = self.x_0 + self.alpha_2*self.p self.phi_0 = self.phi_2 - self.state = OptimizerState.FINDING_GRADIENT + self.state = OptimizerState.FINDING_JACOBEAN return np.tile(self.x_0, (self.x_0.shape[0], 1)) \ + 
np.identity(self.x_0.shape[0]) * self.h # Needs Pinpointing elif self.phi_2_prime >= 0: + self.alpha_low = self.alpha_2 + self.alpha_high = self.alpha_1 + self.phi_low = self.phi_2 + self.phi_low_prime = self.phi_2_prime + self.phi_high = self.phi_1 + self.phi_high_prime = self.phi_1_prime self.state = OptimizerState.PINPOINTING - return self.pinpointing() + + self.alpha_p = interpolate(self.alpha_1, self.alpha_2, self.phi_1, self.phi_2, + self.phi_1_prime, self.phi_2_prime) + x_p = self.x_0 + self.alpha_p*self.p + return np.array([x_p, (x_p + self.h*self.p)]) # Needs more Bracketing else: @@ -222,61 +253,63 @@ def bracketing(self): x_2 = self.x_0 + self.alpha_2*self.p return np.array([x_2, (x_2 + self.h*self.p)]) - def interpolate(self, alpha1, alpha2): - """ - This function interpolates between two points. - - Parameters: - alpha1 (float): The first distance. - alpha2 (float): The second distance. - - Returns: - float: The interpolated value or alphap. - """ - return (alpha1 + alpha2)/2 - def pinpointing(self): """ This function conducts the pinpointing part of the optimization. - - Returns: - alphastar (float): The optimal step size """ - alpha1 = self.distance(self.current_gains, gains1) - alpha2 = self.distance(self.current_gains, gains2) - alphap = self.distance(self.current_gains, gainsp) - - if alphap > phi1 + self.mu_1*alphap*phi1_prime or alphap > phi1: - # Continue pinpointing - gains2 = gainsp - gainsp = self.interpolate(gains1, gains2) - phi2 = phip - phi2_prime = phip_prime - self.save_gains = np.array([gains1, None, gains2, None, gainsp, - [gain + self.h for gain in gainsp]]) - self.save_phis = np.array([phi1, phi1_prime, phi2, phi2_prime]) - new_gains = np.array([self.save_gains[4], self.save_gains[5]]) - return new_gains + if (self.phi_p > self.phi_0 + self.mu_1*self.alpha_p*self.phi_0_prime) \ + or (self.phi_p > self.phi_low): + # Set interpolated point to high, continue + self.alpha_high = self.alpha_p + self.phi_high = self.phi_p + self.phi_high_prime = self.phi_p_prime else: - # Optimizedmu_2 - if abs(phip_prime) <= -self.u2*phi1_prime: - self.state == OptimizerState.SELECT_DIRECTION - alphastar = alphap - new_gains = np.array([self.current_gains + alphastar*self.p]) - return new_gains - # More parameterization needed - elif phip_prime*(alpha2 - alpha1) >= 0: - gains2 = gains1 - phi2 = phi1 - - gains1 = gainsp - phi1 = phip - gainsp = self.interpolate(gains1, gains2) - - self.save_gains = np.array([gains1, None, gains2, None, gainsp, - [gain + self.h for gain in gainsp]]) - self.save_phis = np.array([phi1, phi1_prime, phi2, phi2_prime]) - new_gains = np.array([self.save_gains[4], self.save_gains[5]]) - return new_gains + if abs(self.phi_p_prime) <= -self.mu_2*self.phi_0_prime: + # Pinpointing complete, set interpolated point to x_0 and find new jacobian + self.x_0 = self.x_0 + self.alpha_p*self.p + self.phi_0 = self.phi_p + self.state = OptimizerState.FINDING_JACOBEAN + return np.tile(self.x_0, (self.x_0.shape[0], 1)) \ + + np.identity(self.x_0.shape[0]) * self.h + + elif self.phi_p_prime*(self.alpha_high - self.alpha_low) >= 0: + # Set high to low (and low to interpolated) + self.alpha_high = self.alpha_low + self.phi_high = self.phi_low + self.phi_high_prime = self.phi_low_prime + + # Set low to interpolated + self.alpha_low = self.alpha_p + self.phi_low = self.phi_p + self.phi_low_prime = self.phi_p_prime + + # Find value and gradient at interpolated point + self.alpha_p = interpolate(self.alpha_low, self.alpha_high, self.phi_low, self.phi_high, + 
self.phi_low_prime, self.phi_high_prime) + x_p = self.x_0 + self.alpha_p*self.p + return np.array([x_p, (x_p + self.h*self.p)]) + + +def interpolate(alpha_1, alpha_2, phi_1, phi_2, phi_1_prime, phi_2_prime): + """ + This function performs quadratic interpolation between two points to find the minimum, + given their function values and derivatives. + + Parameters: + alpha_1 (float): Function input 1. + alpha_2 (float): Function input 2. + phi_1 (float): Function value at alpha_1. + phi_2 (float): Function value at alpha_2. + phi_1_prime (float): Function derivative at alpha_1. + phi_2_prime (float): Function derivative at alpha_2. + + Returns: + float: The function input (alpha_p) at the interpolated minimum. + """ + + beta_1 = phi_1_prime + phi_2_prime - 3*(phi_1 - phi_2) / (alpha_1 - alpha_2) + beta_2 = np.sign(alpha_2 - alpha_1) * np.sqrt(beta_1**2 - phi_1_prime*phi_2_prime) + return alpha_2 - (alpha_2 - alpha_1) \ + * (phi_2_prime + beta_2 - beta_1) / (phi_2_prime - phi_1_prime + 2*beta_2) diff --git a/rosplane_tuning/src/autotune/optimizer_tester.py b/rosplane_tuning/src/autotune/optimizer_tester.py index 941b9b3..26d84ec 100644 --- a/rosplane_tuning/src/autotune/optimizer_tester.py +++ b/rosplane_tuning/src/autotune/optimizer_tester.py @@ -5,56 +5,53 @@ # Function to test the optimizer with -def function(x): +def Matyas(x): # Matyas function - print("f", x) return 0.26 * (x[0] ** 2 + x[1] ** 2) - 0.48 * x[0] * x[1] -def gradient(x): - # Gradient of Matyas function - return np.array([0.52 * x[0] - 0.48 * x[1], 0.52 * x[1] - 0.48 * x[0]]) +def Circle(x): + # Circle function + return x[0] ** 2 + x[1] ** 2 + +function = Circle +function = Matyas # Initialize optimizer -curr_points = np.array([[0.0, 5.0]]) # Initial point +curr_points = np.array([[0.0, 2.0]]) # Initial point optimization_params = {'mu_1': 1e-4, 'mu_2': 0.5, 'sigma': 1.5, 'alpha': 1.0, - 'tau': 1e-3} + 'tau': 1e-2, + 'h': 1e-2} optimizer = Optimizer(curr_points[0], optimization_params) # Run optimization all_points = [] -k = 0 +x_0_points = [] while not optimizer.optimization_terminated(): - print("Iteration ", k) # Print status - print(optimizer.get_optimiztion_status()) - print(optimizer.state) + print(optimizer.get_optimization_status()) # Calculate error for current points error = [] - print("CP", curr_points) # Testing for point in curr_points: error.append(function(point)) error = np.array(error) # Pass points to optimizer - # print("CP", curr_points) # Testing - # print("G", gradient(curr_points[0])) # Testing curr_points = optimizer.get_next_parameter_set(error) - # print("CP", curr_points) # Testing # Store points for point in curr_points: all_points.append(point) + x_0_points.append(optimizer.x_0) # End interation step - k += 1 - print() all_points = np.array(all_points) +x_0_points = np.array(x_0_points) -print('Optimization terminated with status: {}'.format(optimizer.get_optimiztion_status())) +print('Optimization terminated with status: {}'.format(optimizer.get_optimization_status())) # Plot the function with the optimization path @@ -67,6 +64,9 @@ def gradient(x): X, Y = np.meshgrid(x, y) Z = function([X, Y]) plt.contour(X, Y, Z, 50) +plt.scatter(x_0_points[:, 0], x_0_points[:, 1], color='g', marker='x', s=200) plt.plot(all_points[:, 0], all_points[:, 1], marker='o', color='r') +# Lock the x and y axis to be equal +plt.axis('equal') plt.show() From 4d689f34b983e1a55d87dee07dcf654c1180a4b6 Mon Sep 17 00:00:00 2001 From: Eero Date: Wed, 10 Apr 2024 16:19:23 -0600 Subject: [PATCH 40/52] Correct optimization 
From 4d689f34b983e1a55d87dee07dcf654c1180a4b6 Mon Sep 17 00:00:00 2001
From: Eero
Date: Wed, 10 Apr 2024 16:19:23 -0600
Subject: [PATCH 40/52] Correct optimization param names to line up with optimizer

---
 rosplane_tuning/src/autotune/autotune.py  | 38 +++++++++++++----------
 rosplane_tuning/src/autotune/optimizer.py |  1 -
 2 files changed, 21 insertions(+), 18 deletions(-)

diff --git a/rosplane_tuning/src/autotune/autotune.py b/rosplane_tuning/src/autotune/autotune.py
index 43dea1f..815d544 100644
--- a/rosplane_tuning/src/autotune/autotune.py
+++ b/rosplane_tuning/src/autotune/autotune.py
@@ -157,35 +157,40 @@ def __init__(self):
         # As we conduct optimizations, these parameters will be changed to be more
         # efficient to optimize the specific gains, based on the design spaces.
         if self.get_parameter('current_tuning_autopilot').value == 'roll':
-            self.optimization_params = {'u1': 1e-3,
-                                        'u2': 0.5,
+            self.optimization_params = {'mu_1': 1e-3,
+                                        'mu_2': 0.5,
                                         'sigma': 2.0,
                                         'alpha': 0.05,
-                                        'tau': 1e-2}
+                                        'tau': 1e-2,
+                                        'h': 0.01}
         elif self.get_parameter('current_tuning_autopilot').value == 'course':
-            self.optimization_params = {'u1': 1e-3,
-                                        'u2': 0.5,
+            self.optimization_params = {'mu_1': 1e-3,
+                                        'mu_2': 0.5,
                                         'sigma': 2.0,
                                         'alpha': 0.05,
-                                        'tau': 1e-2}
+                                        'tau': 1e-2,
+                                        'h': 0.01}
         elif self.get_parameter('current_tuning_autopilot').value == 'pitch':
-            self.optimization_params = {'u1': 1e-3,
-                                        'u2': 0.5,
+            self.optimization_params = {'mu_1': 1e-3,
+                                        'mu_2': 0.5,
                                         'sigma': 2.0,
                                         'alpha': 0.05,
-                                        'tau': 1e-2}
+                                        'tau': 1e-2,
+                                        'h': 0.01}
         elif self.get_parameter('current_tuning_autopilot').value == 'altitude':
-            self.optimization_params = {'u1': 1e-3,
-                                        'u2': 0.5,
+            self.optimization_params = {'mu_1': 1e-3,
+                                        'mu_2': 0.5,
                                         'sigma': 2.0,
                                         'alpha': 0.05,
-                                        'tau': 1e-2}
+                                        'tau': 1e-2,
+                                        'h': 0.01}
         elif self.get_parameter('current_tuning_autopilot').value == 'airspeed':
-            self.optimization_params = {'u1': 1e-3,
-                                        'u2': 0.5,
+            self.optimization_params = {'mu_1': 1e-3,
+                                        'mu_2': 0.5,
                                         'sigma': 2.0,
                                         'alpha': 0.05,
-                                        'tau': 1e-2}
+                                        'tau': 1e-2,
+                                        'h': 0.01}
         else:
             raise ValueError(self.get_parameter('current_tuning_autopilot').value +
                              ' is not a valid value for current_tuning_autopilot.' +
@@ -204,7 +209,7 @@ def state_callback(self, msg):

         if self.collecting_data:
             time = msg.header.stamp.sec + msg.header.stamp.nanosec * 1e-9
-            self.state.append([time, msg.va, msg.phi, msg.chi, msg.theta,
+            self.state.append([time, msg.va, msg.phi, msg.chi, msg.theta,
                                -msg.position[2]])  # h = -msg.position[2]

     def commands_callback(self, msg):
@@ -597,4 +602,3 @@ def main(args=None):

 if __name__ == '__main__':
     main()
-

diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py
index 4b803d9..d04f6a0 100644
--- a/rosplane_tuning/src/autotune/optimizer.py
+++ b/rosplane_tuning/src/autotune/optimizer.py
@@ -312,4 +312,3 @@ def interpolate(alpha_1, alpha_2, phi_1, phi_2, phi_1_prime, phi_2_prime):
     beta_2 = np.sign(alpha_2 - alpha_1) * np.sqrt(beta_1**2 - phi_1_prime*phi_2_prime)
     return alpha_2 - (alpha_2 - alpha_1) \
         * (phi_2_prime + beta_2 - beta_1) / (phi_2_prime - phi_1_prime + 2*beta_2)
-
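
For readers, the mu_1/mu_2 constants renamed in this patch are the two strong Wolfe conditions the line search tests against (as described in the optimizer docstring). A minimal editorial sketch of the two tests, using the same phi notation as optimizer.py (not repository code):

    def sufficient_decrease(phi_0, phi_0_prime, phi_alpha, alpha, mu_1=1e-3):
        # 1st strong Wolfe condition: the error must drop enough for step alpha.
        return phi_alpha <= phi_0 + mu_1 * alpha * phi_0_prime

    def curvature_flat_enough(phi_alpha_prime, phi_0_prime, mu_2=0.5):
        # 2nd strong Wolfe condition: the slope magnitude must shrink enough.
        return abs(phi_alpha_prime) <= -mu_2 * phi_0_prime

    print(sufficient_decrease(1.0, -2.0, 0.9, 0.05))   # True
    print(curvature_flat_enough(0.4, -2.0))            # True
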
From d4b53ca2b63d9eda72a6646b84b078b621cf926e Mon Sep 17 00:00:00 2001
From: Eero
Date: Wed, 10 Apr 2024 17:07:46 -0600
Subject: [PATCH 41/52] Added additional state comment for during flight with minor adjustment to init.

---
 rosplane_tuning/src/autotune/autotune.py | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/rosplane_tuning/src/autotune/autotune.py b/rosplane_tuning/src/autotune/autotune.py
index 815d544..9aa7fdd 100644
--- a/rosplane_tuning/src/autotune/autotune.py
+++ b/rosplane_tuning/src/autotune/autotune.py
@@ -156,41 +156,41 @@ def __init__(self):

         # As we conduct optimizations, these parameters will be changed to be more
         # efficient to optimize the specific gains, based on the design spaces.
-        if self.get_parameter('current_tuning_autopilot').value == 'roll':
+        if self.current_autopilot == CurrentAutopilot.ROLL:
             self.optimization_params = {'mu_1': 1e-3,
                                         'mu_2': 0.5,
                                         'sigma': 2.0,
                                         'alpha': 0.05,
                                         'tau': 1e-2,
-                                        'h': 0.01}
-        elif self.get_parameter('current_tuning_autopilot').value == 'course':
+                                        'h': 1e-2}
+        elif self.current_autopilot == CurrentAutopilot.PITCH:
             self.optimization_params = {'mu_1': 1e-3,
                                         'mu_2': 0.5,
                                         'sigma': 2.0,
                                         'alpha': 0.05,
                                         'tau': 1e-2,
-                                        'h': 0.01}
-        elif self.get_parameter('current_tuning_autopilot').value == 'pitch':
+                                        'h': 1e-2}
+        elif self.current_autopilot == CurrentAutopilot.COURSE:
             self.optimization_params = {'mu_1': 1e-3,
                                         'mu_2': 0.5,
                                         'sigma': 2.0,
                                         'alpha': 0.05,
                                         'tau': 1e-2,
-                                        'h': 0.01}
-        elif self.get_parameter('current_tuning_autopilot').value == 'altitude':
+                                        'h': 1e-2}
+        elif self.current_autopilot == CurrentAutopilot.ALTITUDE:
             self.optimization_params = {'mu_1': 1e-3,
                                         'mu_2': 0.5,
                                         'sigma': 2.0,
                                         'alpha': 0.05,
                                         'tau': 1e-2,
-                                        'h': 0.01}
-        elif self.get_parameter('current_tuning_autopilot').value == 'airspeed':
+                                        'h': 1e-2}
+        elif self.current_autopilot == CurrentAutopilot.AIRSPEED:
             self.optimization_params = {'mu_1': 1e-3,
                                         'mu_2': 0.5,
                                         'sigma': 2.0,
                                         'alpha': 0.05,
                                         'tau': 1e-2,
-                                        'h': 0.01}
+                                        'h': 1e-2}
         else:
             raise ValueError(self.get_parameter('current_tuning_autopilot').value +
                              ' is not a valid value for current_tuning_autopilot.' +
@@ -295,6 +295,7 @@ def run_tuning_iteration_callback(self, request, response):
                                    + str(self.get_parameter('stabilize_period').value)
                                    + ' seconds...')
             self.autotune_state = AutoTuneState.STABILIZING
+            self.get_logger().info(f"State: {self.optimizer.get_optimization_status()}")
         else:
             self.get_logger().info('Optimization terminated with: ' +
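
All five autopilot branches in the hunk above assign identical parameters at this point; a table-driven alternative would keep one source of truth until the per-autopilot design spaces actually diverge. An editorial sketch only (the CurrentAutopilot enum shape is assumed from its use above):

    from enum import Enum, auto

    class CurrentAutopilot(Enum):
        # Assumed shape of the enum referenced in autotune.py.
        ROLL = auto()
        COURSE = auto()
        PITCH = auto()
        ALTITUDE = auto()
        AIRSPEED = auto()

    BASE_PARAMS = {'mu_1': 1e-3, 'mu_2': 0.5, 'sigma': 2.0,
                   'alpha': 0.05, 'tau': 1e-2, 'h': 1e-2}
    OVERRIDES = {autopilot: {} for autopilot in CurrentAutopilot}

    def params_for(autopilot):
        # Identical today; fill OVERRIDES in as the design spaces diverge.
        return {**BASE_PARAMS, **OVERRIDES[autopilot]}

    print(params_for(CurrentAutopilot.ROLL))
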
From d01c283333df32004ff28fa26772c076b83c1911 Mon Sep 17 00:00:00 2001
From: Brandon Sutherland
Date: Thu, 11 Apr 2024 13:20:30 -0600
Subject: [PATCH 42/52] added simulated roll function

---
 .../src/autotune/optimizer_tester.py | 29 ++++++++++++++-----
 1 file changed, 21 insertions(+), 8 deletions(-)

diff --git a/rosplane_tuning/src/autotune/optimizer_tester.py b/rosplane_tuning/src/autotune/optimizer_tester.py
index 26d84ec..729747b 100644
--- a/rosplane_tuning/src/autotune/optimizer_tester.py
+++ b/rosplane_tuning/src/autotune/optimizer_tester.py
@@ -4,7 +4,6 @@
 import matplotlib.pyplot as plt

-# Function to test the optimizer with
 def Matyas(x):
     # Matyas function
     return 0.26 * (x[0] ** 2 + x[1] ** 2) - 0.48 * x[0] * x[1]
@@ -13,11 +12,18 @@ def Circle(x):
     # Circle function
     return x[0] ** 2 + x[1] ** 2

-function = Circle
-function = Matyas
+def FakeRollController(x):
+    # Function that roughly mimics the roll controller landscape
+    std = 0.1
+    rotation = np.deg2rad(45)
+    x_rot = x[0] * np.cos(rotation) - x[1] * np.sin(rotation)
+    y_rot = x[0] * np.sin(rotation) + x[1] * np.cos(rotation)
+    return 15*(x_rot - 0.2)**2 + 3*(y_rot - 0.03)**2 + np.random.normal(0, std)
+
+function = FakeRollController

 # Initialize optimizer
-curr_points = np.array([[0.0, 2.0]])  # Initial point
+curr_points = np.array([[0.06, 0.04]])  # Initial point
 optimization_params = {'mu_1': 1e-4,
                        'mu_2': 0.5,
                        'sigma': 1.5,
@@ -26,6 +32,9 @@ def Circle(x):
                        'h': 1e-2}
 optimizer = Optimizer(curr_points[0], optimization_params)

+# Print initial point and value
+print('Initial point: {}'.format(curr_points[0]))
+print('Initial value: {}'.format(function(curr_points[0])))

 # Run optimization
 all_points = []
@@ -52,13 +61,17 @@ def Circle(x):
 x_0_points = np.array(x_0_points)

 print('Optimization terminated with status: {}'.format(optimizer.get_optimization_status()))
+print('Total number of points tested: {}'.format(len(all_points)))
+print('Final point: {}'.format(curr_points[0]))
+print('Final value: {}'.format(function(curr_points[0])))


 # Plot the function with the optimization path
-x_min = np.min(all_points[:, 0]) - 1
-x_max = np.max(all_points[:, 0]) + 1
-y_min = np.min(all_points[:, 1]) - 1
-y_max = np.max(all_points[:, 1]) + 1
+range = 0.5
+x_min = np.min(all_points[:, 0]) - range/2
+x_max = np.max(all_points[:, 0]) + range/2
+y_min = np.min(all_points[:, 1]) - range/2
+y_max = np.max(all_points[:, 1]) + range/2
 x = np.linspace(x_min, x_max, 100)
 y = np.linspace(y_min, y_max, 100)
 X, Y = np.meshgrid(x, y)

From 0cb8ccc0b3f501726ed873d7a25c164f49884512 Mon Sep 17 00:00:00 2001
From: Brandon Sutherland
Date: Thu, 11 Apr 2024 15:01:47 -0600
Subject: [PATCH 43/52] added additional end condition and alpha updating

---
 rosplane_tuning/src/autotune/optimizer.py | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py
index d04f6a0..f3a26e1 100644
--- a/rosplane_tuning/src/autotune/optimizer.py
+++ b/rosplane_tuning/src/autotune/optimizer.py
@@ -95,6 +95,13 @@ def __init__(self, initial_point, optimization_params):
         self.phi_high = None
         self.phi_high_prime = None

+        # Number of iterations since improvement
+        self.k = 0
+        self.max_k = 10
+
+        # Reason for termination
+        self.reason = ""
+
     def optimization_terminated(self):
         """
         This function checks if the optimization algorithm has terminated.
@@ -123,7 +130,7 @@ def get_optimization_status(self):
         elif self.state == OptimizerState.PINPOINTING:
             return "Pinpointing step size."
         elif self.state == OptimizerState.TERMINATED:
-            return "Optimizer terminated with final gains at: " + str(self.x_0)
+            return str(self.reason) + " Final gains: " + str(self.x_0)

     def get_next_parameter_set(self, error):
         """
@@ -141,6 +148,11 @@ def get_next_parameter_set(self, error):
             given at initialization.
             [[gain1_1, gain2_1, ...], [gain1_2, gain2_2, ...], ...]
         """
+        if self.k >= self.max_k:
+            self.reason = f"{self.k} iterations without improvement."
+            self.state = OptimizerState.TERMINATED
+            return self.x_0.reshape(1, 2)
+
         if self.state == OptimizerState.UNINITIALIZED:
             # Find the gradient at the new point
             self.state = OptimizerState.FINDING_JACOBEAN
@@ -152,12 +164,15 @@ def get_next_parameter_set(self, error):
                 + np.identity(self.x_0.shape[0]) * self.h

         if self.state == OptimizerState.FINDING_JACOBEAN:
+            self.k = 0
+
             # Calculate the jacobean at the current point
             J_prior = self.J
             self.J = (error - self.phi_0) / self.h

             # Check for convergence using infinity norm
             if np.linalg.norm(self.J, np.inf) < self.tau:
+                self.reason = "Converged."
                 self.state = OptimizerState.TERMINATED
                 return self.x_0.reshape(1, 2)
@@ -182,6 +197,7 @@ def get_next_parameter_set(self, error):
             return np.array([x_2, (x_2 + self.h*self.p)])

         elif self.state == OptimizerState.BRACKETING:
+            self.k += 1
             self.phi_2 = error[0]
             self.phi_2_prime = (error[1] - error[0]) / self.h
             new_points = self.bracketing()
@@ -189,6 +205,7 @@ def get_next_parameter_set(self, error):
             return new_points

         elif self.state == OptimizerState.PINPOINTING:
+            self.k += 1
             self.phi_p = error[0]
             self.phi_p_prime = (error[1] - error[0]) / self.h
             return self.pinpointing()
@@ -223,6 +240,7 @@ def bracketing(self):
             self.x_0 = self.x_0 + self.alpha_2*self.p
             self.phi_0 = self.phi_2
             self.state = OptimizerState.FINDING_JACOBEAN
+            self.alpha = self.alpha_2
             return np.tile(self.x_0, (self.x_0.shape[0], 1)) \
                 + np.identity(self.x_0.shape[0]) * self.h
@@ -270,6 +288,7 @@ def pinpointing(self):
                 self.x_0 = self.x_0 + self.alpha_p*self.p
                 self.phi_0 = self.phi_p
                 self.state = OptimizerState.FINDING_JACOBEAN
+                self.alpha = self.alpha_p
                 return np.tile(self.x_0, (self.x_0.shape[0], 1)) \
                     + np.identity(self.x_0.shape[0]) * self.h
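
Easy to miss in the patch above: k counts line-search evaluations since the last accepted point, and a completed bracketing or pinpointing step writes the accepted step size back to self.alpha so the next line search warm-starts from it. A toy editorial sketch of just the stall guard (hypothetical names, logic from the hunks above):

    class StallGuard:
        def __init__(self, max_k=10):
            self.k = 0            # line-search evaluations since last accepted point
            self.max_k = max_k

        def step(self):
            # Called once per bracketing/pinpointing evaluation.
            self.k += 1
            return self.k < self.max_k   # False -> terminate the optimization

        def point_accepted(self):
            self.k = 0            # reset when a new best point is accepted

    guard = StallGuard()
    assert all(guard.step() for _ in range(9)) and not guard.step()
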
From 0775822a33fc569d3c9e9ddd6e582fd3f5791849 Mon Sep 17 00:00:00 2001
From: Brandon Sutherland
Date: Thu, 11 Apr 2024 15:02:08 -0600
Subject: [PATCH 44/52] fixed error in FakeRollController

---
 .../src/autotune/optimizer_tester.py | 52 +++++++------------
 1 file changed, 20 insertions(+), 32 deletions(-)

diff --git a/rosplane_tuning/src/autotune/optimizer_tester.py b/rosplane_tuning/src/autotune/optimizer_tester.py
index 729747b..26cfcfc 100644
--- a/rosplane_tuning/src/autotune/optimizer_tester.py
+++ b/rosplane_tuning/src/autotune/optimizer_tester.py
@@ -4,37 +4,30 @@
 import matplotlib.pyplot as plt

-def Matyas(x):
-    # Matyas function
-    return 0.26 * (x[0] ** 2 + x[1] ** 2) - 0.48 * x[0] * x[1]
-
-def Circle(x):
-    # Circle function
-    return x[0] ** 2 + x[1] ** 2
-
-def FakeRollController(x):
+def FakeRollController(x, include_noise):
     # Function that roughly mimics the roll controller landscape
-    std = 0.1
-    rotation = np.deg2rad(45)
-    x_rot = x[0] * np.cos(rotation) - x[1] * np.sin(rotation)
-    y_rot = x[0] * np.sin(rotation) + x[1] * np.cos(rotation)
-    return 15*(x_rot - 0.2)**2 + 3*(y_rot - 0.03)**2 + np.random.normal(0, std)
-
-function = FakeRollController
+    std = 0.1 if include_noise else 0.0
+    rotation = np.deg2rad(-40)
+    x_trans = x[0] - 0.2
+    y_trans = x[1] - 0.03
+    x_rot = x_trans * np.cos(rotation) + y_trans * np.sin(rotation)
+    y_rot = x_trans * -np.sin(rotation) + y_trans * np.cos(rotation)
+    return 15*x_rot**2 + 3*y_rot**2 + np.random.normal(0, std)

 # Initialize optimizer
 curr_points = np.array([[0.06, 0.04]])  # Initial point
 optimization_params = {'mu_1': 1e-4,
                        'mu_2': 0.5,
                        'sigma': 1.5,
-                       'alpha': 1.0,
-                       'tau': 1e-2,
-                       'h': 1e-2}
+                       'alpha': 0.05,
+                       'tau': 0.1,
+                       'h': 0.05}
 optimizer = Optimizer(curr_points[0], optimization_params)
+num_repeats = 5

 # Print initial point and value
 print('Initial point: {}'.format(curr_points[0]))
-print('Initial value: {}'.format(function(curr_points[0])))
+print('Initial value: {}'.format(FakeRollController(curr_points[0], False)))

 # Run optimization
 all_points = []
@@ -46,7 +39,7 @@ def FakeRollController(x):
     # Calculate error for current points
     error = []
     for point in curr_points:
-        error.append(function(point))
+        error.append(np.average([FakeRollController(point, True) for _ in range(num_repeats)]))
     error = np.array(error)
     # Pass points to optimizer
     curr_points = optimizer.get_next_parameter_set(error)
@@ -61,24 +54,19 @@ def FakeRollController(x):
 x_0_points = np.array(x_0_points)

 print('Optimization terminated with status: {}'.format(optimizer.get_optimization_status()))
-print('Total number of points tested: {}'.format(len(all_points)))
+print('Total number of points tested: {}'.format(len(all_points) * num_repeats))
 print('Final point: {}'.format(curr_points[0]))
-print('Final value: {}'.format(function(curr_points[0])))
+print('Final value: {}'.format(FakeRollController(curr_points[0], False)))


 # Plot the function with the optimization path
 range = 0.5
-x_min = np.min(all_points[:, 0]) - range/2
-x_max = np.max(all_points[:, 0]) + range/2
-y_min = np.min(all_points[:, 1]) - range/2
-y_max = np.max(all_points[:, 1]) + range/2
-x = np.linspace(x_min, x_max, 100)
-y = np.linspace(y_min, y_max, 100)
+x = np.linspace(-0.1, 0.5, 100)
+y = np.linspace(-0.3, 0.3, 100)
 X, Y = np.meshgrid(x, y)
-Z = function([X, Y])
+Z = FakeRollController([X, Y], False)
 plt.contour(X, Y, Z, 50)
-plt.scatter(x_0_points[:, 0], x_0_points[:, 1], color='g', marker='x', s=200)
-plt.plot(all_points[:, 0], all_points[:, 1], marker='o', color='r')
+plt.plot(x_0_points[:, 0], x_0_points[:, 1], color='b', marker='o')
 # Lock the x and y axis to be equal
 plt.axis('equal')
 plt.show()

From 85d3407bc682337205538a08e97f26a48b250141 Mon Sep 17 00:00:00 2001
From: Brandon Sutherland
Date: Thu, 11 Apr 2024 15:39:57 -0600
Subject: [PATCH 45/52] fixed it for real this time

---
 .../src/autotune/optimizer_tester.py | 25 ++++++++++++-------
 1 file changed, 16 insertions(+), 9 deletions(-)

diff --git a/rosplane_tuning/src/autotune/optimizer_tester.py b/rosplane_tuning/src/autotune/optimizer_tester.py
index 26cfcfc..d358125 100644
--- a/rosplane_tuning/src/autotune/optimizer_tester.py
+++ b/rosplane_tuning/src/autotune/optimizer_tester.py
@@ -7,12 +7,20 @@ def FakeRollController(x, include_noise):
     # Function that roughly mimics the roll controller landscape
     std = 0.1 if include_noise else 0.0
-    rotation = np.deg2rad(-40)
-    x_trans = x[0] - 0.2
-    y_trans = x[1] - 0.03
-    x_rot = x_trans * np.cos(rotation) + y_trans * np.sin(rotation)
-    y_rot = x_trans * -np.sin(rotation) + y_trans * np.cos(rotation)
-    return 15*x_rot**2 + 3*y_rot**2 + np.random.normal(0, std)
+
+    rotation = np.deg2rad(35)
+    x_offset = 0.20
+    y_offset = 0.03
+    x_scale = 15
+    y_scale = 3
+
+    x_rot = x[0] * np.cos(rotation) - x[1] * np.sin(rotation)
+    y_rot = x[0] * np.sin(rotation) + x[1] * np.cos(rotation)
+    x_trans = x_rot - (x_offset*np.cos(rotation) - y_offset*np.sin(rotation))
+    y_trans = y_rot - (x_offset*np.sin(rotation) + y_offset*np.cos(rotation))
+    x_scaled = x_trans**2 * x_scale
+    y_scaled = y_trans**2 * y_scale
+    return x_scaled + y_scaled + np.random.normal(0, std)

 # Initialize optimizer
 curr_points = np.array([[0.06, 0.04]])  # Initial point
@@ -60,9 +68,8 @@ def FakeRollController(x, include_noise):

 # Plot the function with the optimization path
-range = 0.5
-x = np.linspace(-0.1, 0.5, 100)
-y = np.linspace(-0.3, 0.3, 100)
+x = np.linspace(0., 0.4, 100)
+y = np.linspace(-0.2, 0.3, 100)
 X, Y = np.meshgrid(x, y)
 Z = FakeRollController([X, Y], False)
 plt.contour(X, Y, Z, 50)

From c77946fe3086fef67b4d9af24b8226dad5ac76a5 Mon Sep 17 00:00:00 2001
From: Brandon Sutherland
Date: Thu, 11 Apr 2024 15:43:57 -0600
Subject: [PATCH 46/52] misc value tweaks

---
 rosplane_tuning/src/autotune/optimizer.py        | 2 +-
 rosplane_tuning/src/autotune/optimizer_tester.py | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py
index f3a26e1..d280b92 100644
--- a/rosplane_tuning/src/autotune/optimizer.py
+++ b/rosplane_tuning/src/autotune/optimizer.py
@@ -97,7 +97,7 @@ def __init__(self, initial_point, optimization_params):

         # Number of iterations since improvement
         self.k = 0
-        self.max_k = 10
+        self.max_k = 5

         # Reason for termination
         self.reason = ""

diff --git a/rosplane_tuning/src/autotune/optimizer_tester.py b/rosplane_tuning/src/autotune/optimizer_tester.py
index d358125..072baff 100644
--- a/rosplane_tuning/src/autotune/optimizer_tester.py
+++ b/rosplane_tuning/src/autotune/optimizer_tester.py
@@ -29,9 +29,9 @@ def FakeRollController(x, include_noise):
                        'sigma': 1.5,
                        'alpha': 0.05,
                        'tau': 0.1,
-                       'h': 0.05}
+                       'h': 0.01}
 optimizer = Optimizer(curr_points[0], optimization_params)
-num_repeats = 5
+num_repeats = 3

 # Print initial point and value
 print('Initial point: {}'.format(curr_points[0]))
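
Both FakeRollController fixes above are about transform order: rotate first, then subtract the rotated offset, so the surface's minimum stays pinned at (0.20, 0.03). An equivalent editorial formulation with an explicit rotation matrix (a sketch, not repository code):

    import numpy as np

    def fake_roll_controller(x, include_noise=False):
        # Same surface as FakeRollController above: rotate the offset-centered
        # coordinates, then scale the two principal axes.
        theta = np.deg2rad(35)
        R = np.array([[np.cos(theta), -np.sin(theta)],
                      [np.sin(theta),  np.cos(theta)]])
        centered = R @ (np.asarray(x, dtype=float) - np.array([0.20, 0.03]))
        noise = np.random.normal(0, 0.1) if include_noise else 0.0
        return 15*centered[0]**2 + 3*centered[1]**2 + noise

    print(fake_roll_controller([0.20, 0.03]))  # -> 0.0 at the intended minimum
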
From 49acb6ba202359b4d798e2f2aeeda3fd20f97c98 Mon Sep 17 00:00:00 2001
From: Brandon Sutherland
Date: Thu, 11 Apr 2024 17:18:15 -0600
Subject: [PATCH 47/52] replaced optimizer with momentum & damping method

---
 rosplane_tuning/src/autotune/optimizer.py | 269 +++---------------
 .../src/autotune/optimizer_tester.py      |  24 +-
 2 files changed, 50 insertions(+), 243 deletions(-)

diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py
index d280b92..3daa692 100644
--- a/rosplane_tuning/src/autotune/optimizer.py
+++ b/rosplane_tuning/src/autotune/optimizer.py
@@ -9,9 +9,8 @@ class OptimizerState(Enum):
     This class defines the various states that exist within the optimizer.
     """
     UNINITIALIZED = auto()
-    FINDING_JACOBEAN = auto()
-    BRACKETING = auto()
-    PINPOINTING = auto()
+    FIRST_ITERATION = auto()
+    OPTIMIZING = auto()
     TERMINATED = auto()
@@ -27,12 +26,6 @@ def __init__(self, initial_point, optimization_params):
         initial_point (np.array, size num_gains, dtype float): The starting point for the
             gains to be optimized (x_0). [gain1, gain2, ...]
         optimization_params (dictionary): Parameters needed for the optimization operation.
-            - mu_1 (float): 1st Strong Wolfe Condition, specifies the minimum decrease that will
-              be accepted. Must be between 0 and 1, but is usually a very small value like 1e-4.
-            - mu_2 (float): 2nd Strong Wolfe Condition, specifies the sufficient decrease in
-              curvature that will be accepted. Must be between mu_1 and 1, typically between 0.1
-              and 0.9.
-            - sigma (float): The step size multiplier, must be greater than 1.
             - alpha (float): The initial step size. The ideal value for this is very
              application-specific.
             - tau (float): The convergence tolerance. The optimization is considered converged
@@ -44,9 +37,6 @@ def __init__(self, initial_point, optimization_params):
         """

         # Save passed optimization parameters. See above for parameters details.
-        self.mu_1 = optimization_params['mu_1']
-        self.mu_2 = optimization_params['mu_2']
-        self.sigma = optimization_params['sigma']
         self.alpha = optimization_params['alpha']
         self.tau = optimization_params['tau']
         self.h = optimization_params['h']
@@ -54,53 +44,22 @@ def __init__(self, initial_point, optimization_params):

         # The current state of the optimization process.
         self.state = OptimizerState.UNINITIALIZED

-        # The jacobean of the function at x_0.
-        self.J = None
-
-        # The current search direction from x_0.
-        self.p = None
-
-        # The current best point in the optimization, where we are stepping away from.
-        self.x_0 = initial_point
-        # The function value (error) at x_0.
-        self.phi_0 = None
-        # The gradient at x_0 along the search direction p.
-        self.phi_0_prime = None
-
-        # The function value at the best point on the line search (x_1).
-        self.phi_1 = None
-        # The gradient at x_1 along the search direction p.
-        self.phi_1_prime = None
-        # The step size from x_0 to x_1 along the search direction p.
-        self.alpha_1 = None
-
-        # The function value at the best point on the line search (x_2).
-        self.phi_2 = None
-        # The gradient at x_2 along the search direction p.
-        self.phi_2_prime = None
-        # The step size from x_0 to x_2 along the search direction p.
-        self.alpha_2 = None
-
-        # Bool to specify if this is the first bracketing step, used by the bracketing algorithm.
-        self.first_bracket = None
+        # Reason for termination
+        self.reason = ""

-        # TODO: describe these parameters
-        self.alpha_p = None
-        self.phi_p = None
-        self.phi_p_prime = None
-        self.alpha_low = None
-        self.alpha_high = None
-        self.phi_low = None
-        self.phi_low_prime = None
-        self.phi_high = None
-        self.phi_high_prime = None
+        # Initialize the gains to the initial point
+        self.x = initial_point
+        self.f_initial = None

-        # Number of iterations since improvement
+        # Keep track of the number of iterations
         self.k = 0
-        self.max_k = 5
+        self.max_k = 20

-        # Reason for termination
-        self.reason = ""
+        # Initialize the velocity
+        self.v = np.zeros([self.x.shape[0]]).T
+
+        self.inertia = 0.8
+        self.drag = 0.9

     def optimization_terminated(self):
         """
         This function checks if the optimization algorithm has terminated.
@@ -123,14 +82,10 @@ def get_optimization_status(self):
         """
         if self.state == OptimizerState.UNINITIALIZED:
             return "Optimizer awaiting first iteration."
-        elif self.state == OptimizerState.FINDING_JACOBEAN:
-            return "Finding gradient."
-        elif self.state == OptimizerState.BRACKETING:
-            return "Bracketing step size."
-        elif self.state == OptimizerState.PINPOINTING:
-            return "Pinpointing step size."
+        elif self.state == OptimizerState.OPTIMIZING:
+            return "Optimizing."
         elif self.state == OptimizerState.TERMINATED:
-            return str(self.reason) + " Final gains: " + str(self.x_0)
+            return str(self.reason) + " Final gains: " + str(self.x)

     def get_next_parameter_set(self, error):
         """
@@ -149,185 +104,45 @@ def get_next_parameter_set(self, error):
             given at initialization.
             [[gain1_1, gain2_1, ...], [gain1_2, gain2_2, ...], ...]
         """
         if self.k >= self.max_k:
-            self.reason = f"{self.k} iterations without improvement."
+            self.reason = "Interations complete."
             self.state = OptimizerState.TERMINATED
-            return self.x_0.reshape(1, 2)
+            return self.x.reshape(1, 2)

         if self.state == OptimizerState.UNINITIALIZED:
             # Find the gradient at the new point
-            self.state = OptimizerState.FINDING_JACOBEAN
-            self.phi_0 = error[0]
+            self.state = OptimizerState.FIRST_ITERATION
+            self.f_initial = error.item(0)

             # Find the values for finite difference, where each dimension is individually
             # stepped by h
-            return np.tile(self.x_0, (self.x_0.shape[0], 1)) \
-                + np.identity(self.x_0.shape[0]) * self.h
-
-        if self.state == OptimizerState.FINDING_JACOBEAN:
-            self.k = 0
+            return np.tile(self.x, (self.x.shape[0], 1)) \
+                + np.identity(self.x.shape[0]) * self.h

+        if self.state == OptimizerState.FIRST_ITERATION or self.state == OptimizerState.OPTIMIZING:
+            self.k += 1
             # Calculate the jacobean at the current point
-            J_prior = self.J
-            self.J = (error - self.phi_0) / self.h
-
-            # Check for convergence using infinity norm
-            if np.linalg.norm(self.J, np.inf) < self.tau:
-                self.reason = "Converged."
-                self.state = OptimizerState.TERMINATED
-                return self.x_0.reshape(1, 2)
-
-            # Find the search direction
-            if J_prior is None:
-                self.p = -self.J / np.linalg.norm(self.J)
+            if self.state == OptimizerState.FIRST_ITERATION:
+                J = (error - self.f_initial) / self.h
+                self.state = OptimizerState.OPTIMIZING
             else:
-                beta = np.dot(self.J, self.J) / np.dot(J_prior, J_prior)
-                self.p = -self.J / np.linalg.norm(self.J) + beta*self.p
+                J = (error[1:] - error[0]) / self.h
+            J_norm = J / np.linalg.norm(J)

-            # Begin bracketing
-            self.phi_0_prime = np.dot(self.p, self.J)
-            self.alpha_1 = 0
-            self.alpha_2 = self.alpha
-            self.phi_1 = self.phi_0
-            self.phi_1_prime = self.phi_0_prime
-            self.first_bracket = True
-            self.state = OptimizerState.BRACKETING
+            # Update the velocity
+            self.v = self.inertia * self.v + (1 - self.inertia) * -J_norm

-            # Find phi_2 and phi_2_prime
-            x_2 = (self.x_0 + self.alpha_2*self.p)
-            return np.array([x_2, (x_2 + self.h*self.p)])
+            # Slow the velocity down with drag
+            self.v = self.v * self.drag

-        elif self.state == OptimizerState.BRACKETING:
-            self.k += 1
-            self.phi_2 = error[0]
-            self.phi_2_prime = (error[1] - error[0]) / self.h
-            new_points = self.bracketing()
-            self.first_bracket = False
-            return new_points
+            # Take a step in the search direction
+            self.x = self.x + self.v * self.alpha

-        elif self.state == OptimizerState.PINPOINTING:
-            self.k += 1
-            self.phi_p = error[0]
-            self.phi_p_prime = (error[1] - error[0]) / self.h
-            return self.pinpointing()
+            # Find the gradient at the new point
+            points = np.tile(self.x, (self.x.shape[0], 1)) \
+                + np.identity(self.x.shape[0]) * self.h
+            points = np.vstack((self.x, points))
+            return points

         else:  # Process Terminated
-            return self.x_0.reshape(1, 2)
-
-
-    def bracketing(self):
-        """
-        This function conducts the bracketing part of the optimization.
- """ - - # Needs Pinpointing - if(self.phi_2 > self.phi_0 + self.mu_1*self.alpha_2*self.phi_0_prime) \ - or (not self.first_bracket and self.phi_2 > self.phi_1): - self.alpha_low = self.alpha_1 - self.alpha_high = self.alpha_2 - self.phi_low = self.phi_1 - self.phi_low_prime = self.phi_1_prime - self.phi_high = self.phi_2 - self.phi_high_prime = self.phi_2_prime - self.state = OptimizerState.PINPOINTING - - self.alpha_p = interpolate(self.alpha_1, self.alpha_2, self.phi_1, self.phi_2, - self.phi_1_prime, self.phi_2_prime) - x_p = self.x_0 + self.alpha_p*self.p - return np.array([x_p, (x_p + self.h*self.p)]) - - # Optimized - if abs(self.phi_2_prime) <= -self.mu_2*self.phi_0_prime: - self.x_0 = self.x_0 + self.alpha_2*self.p - self.phi_0 = self.phi_2 - self.state = OptimizerState.FINDING_JACOBEAN - self.alpha = self.alpha_2 - return np.tile(self.x_0, (self.x_0.shape[0], 1)) \ - + np.identity(self.x_0.shape[0]) * self.h - - # Needs Pinpointing - elif self.phi_2_prime >= 0: - self.alpha_low = self.alpha_2 - self.alpha_high = self.alpha_1 - self.phi_low = self.phi_2 - self.phi_low_prime = self.phi_2_prime - self.phi_high = self.phi_1 - self.phi_high_prime = self.phi_1_prime - self.state = OptimizerState.PINPOINTING - - self.alpha_p = interpolate(self.alpha_1, self.alpha_2, self.phi_1, self.phi_2, - self.phi_1_prime, self.phi_2_prime) - x_p = self.x_0 + self.alpha_p*self.p - return np.array([x_p, (x_p + self.h*self.p)]) - - # Needs more Bracketing - else: - # Set x_2 to x_1 and increase alpha_2 - self.alpha_1 = self.alpha_2 - self.alpha_2 = self.sigma*self.alpha_2 - self.phi_1 = self.phi_2 - self.phi_1_prime = self.phi_2_prime - - # Find new x_2 - x_2 = self.x_0 + self.alpha_2*self.p - return np.array([x_2, (x_2 + self.h*self.p)]) - - def pinpointing(self): - """ - This function conducts the pinpointing part of the optimization. - """ - - if (self.phi_p > self.phi_0 + self.mu_1*self.alpha_p*self.phi_0_prime) \ - or (self.phi_p > self.phi_low): - # Set interpolated point to high, continue - self.alpha_high = self.alpha_p - self.phi_high = self.phi_p - self.phi_high_prime = self.phi_p_prime - else: - if abs(self.phi_p_prime) <= -self.mu_2*self.phi_0_prime: - # Pinpointing complete, set interpolated point to x_0 and find new jacobian - self.x_0 = self.x_0 + self.alpha_p*self.p - self.phi_0 = self.phi_p - self.state = OptimizerState.FINDING_JACOBEAN - self.alpha = self.alpha_p - return np.tile(self.x_0, (self.x_0.shape[0], 1)) \ - + np.identity(self.x_0.shape[0]) * self.h - - elif self.phi_p_prime*(self.alpha_high - self.alpha_low) >= 0: - # Set high to low (and low to interpolated) - self.alpha_high = self.alpha_low - self.phi_high = self.phi_low - self.phi_high_prime = self.phi_low_prime - - # Set low to interpolated - self.alpha_low = self.alpha_p - self.phi_low = self.phi_p - self.phi_low_prime = self.phi_p_prime - - # Find value and gradient at interpolated point - self.alpha_p = interpolate(self.alpha_low, self.alpha_high, self.phi_low, self.phi_high, - self.phi_low_prime, self.phi_high_prime) - x_p = self.x_0 + self.alpha_p*self.p - return np.array([x_p, (x_p + self.h*self.p)]) - - -def interpolate(alpha_1, alpha_2, phi_1, phi_2, phi_1_prime, phi_2_prime): - """ - This function performs quadratic interpolation between two points to find the minimum, - given their function values and derivatives. - - Parameters: - alpha_1 (float): Function input 1. - alpha_2 (float): Function input 2. - phi_1 (float): Function value at alpha_1. - phi_2 (float): Function value at alpha_2. 
-    phi_1_prime (float): Function derivative at alpha_1.
-    phi_2_prime (float): Function derivative at alpha_2.
-
-    Returns:
-    float: The function input (alpha_p) at the interpolated minimum.
-    """
+            return self.x.reshape(1, 2)

-    beta_1 = phi_1_prime + phi_2_prime - 3*(phi_1 - phi_2) / (alpha_1 - alpha_2)
-    beta_2 = np.sign(alpha_2 - alpha_1) * np.sqrt(beta_1**2 - phi_1_prime*phi_2_prime)
-    return alpha_2 - (alpha_2 - alpha_1) \
-        * (phi_2_prime + beta_2 - beta_1) / (phi_2_prime - phi_1_prime + 2*beta_2)

diff --git a/rosplane_tuning/src/autotune/optimizer_tester.py b/rosplane_tuning/src/autotune/optimizer_tester.py
index 072baff..fa123b6 100644
--- a/rosplane_tuning/src/autotune/optimizer_tester.py
+++ b/rosplane_tuning/src/autotune/optimizer_tester.py
@@ -24,14 +24,10 @@ def FakeRollController(x, include_noise):

 # Initialize optimizer
 curr_points = np.array([[0.06, 0.04]])  # Initial point
-optimization_params = {'mu_1': 1e-4,
-                       'mu_2': 0.5,
-                       'sigma': 1.5,
-                       'alpha': 0.05,
+optimization_params = {'alpha': 0.1,
                        'tau': 0.1,
-                       'h': 0.01}
+                       'h': 0.03}
 optimizer = Optimizer(curr_points[0], optimization_params)
-num_repeats = 3

 # Print initial point and value
 print('Initial point: {}'.format(curr_points[0]))
@@ -39,15 +35,12 @@ def FakeRollController(x, include_noise):

 # Run optimization
 all_points = []
-x_0_points = []
+x_points = []
 while not optimizer.optimization_terminated():
-    # Print status
-    print(optimizer.get_optimization_status())
     # Calculate error for current points
     error = []
     for point in curr_points:
-        error.append(np.average([FakeRollController(point, True) for _ in range(num_repeats)]))
+        error.append(FakeRollController(point, True))
     error = np.array(error)
     # Pass points to optimizer
     curr_points = optimizer.get_next_parameter_set(error)
@@ -55,16 +48,15 @@ def FakeRollController(x, include_noise):
     # Store points
     for point in curr_points:
         all_points.append(point)
-    x_0_points.append(optimizer.x_0)
+    x_points.append(optimizer.x)

     # End interation step
 all_points = np.array(all_points)
-x_0_points = np.array(x_0_points)
+x_points = np.array(x_points)

 print('Optimization terminated with status: {}'.format(optimizer.get_optimization_status()))
-print('Total number of points tested: {}'.format(len(all_points) * num_repeats))
-print('Final point: {}'.format(curr_points[0]))
 print('Final value: {}'.format(FakeRollController(curr_points[0], False)))
+print('Total number of points tested: {}'.format(len(all_points)))


 # Plot the function with the optimization path
@@ -73,7 +65,7 @@ def FakeRollController(x, include_noise):
 X, Y = np.meshgrid(x, y)
 Z = FakeRollController([X, Y], False)
 plt.contour(X, Y, Z, 50)
-plt.plot(x_0_points[:, 0], x_0_points[:, 1], color='b', marker='o')
+plt.plot(x_points[:, 0], x_points[:, 1], color='b', marker='o')
 # Lock the x and y axis to be equal
 plt.axis('equal')
 plt.show()
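
For reference before the next patch replaces it, the momentum-and-damping update introduced above is equivalent to this standalone editorial sketch (generic objective f, hypothetical driver; the constants mirror optimizer.py, the values are illustrative):

    import numpy as np

    def momentum_descent(f, x, alpha=0.05, h=0.01, inertia=0.8, drag=0.9, max_k=20):
        # Finite-difference gradient descent with momentum and drag,
        # mirroring the update rule in the rewritten optimizer.py.
        v = np.zeros_like(x)
        for _ in range(max_k):
            f0 = f(x)
            # Forward-difference gradient: step each gain by h individually.
            J = np.array([(f(x + h*e) - f0) / h for e in np.identity(x.shape[0])])
            v = inertia*v + (1 - inertia)*(-J / np.linalg.norm(J))  # momentum
            v = v * drag                                            # damping
            x = x + v*alpha
        return x

    print(momentum_descent(lambda x: x[0]**2 + x[1]**2, np.array([1.0, 1.0])))
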
From 1b14e37a60ee8905805db630529dd1d69c6176a1 Mon Sep 17 00:00:00 2001
From: Brandon Sutherland
Date: Thu, 11 Apr 2024 19:06:28 -0600
Subject: [PATCH 48/52] got final optimization working?

---
 rosplane_tuning/src/autotune/optimizer.py | 45 +++++--------------
 .../src/autotune/optimizer_tester.py      |  7 ++-
 2 files changed, 15 insertions(+), 37 deletions(-)

diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py
index 3daa692..86e8169 100644
--- a/rosplane_tuning/src/autotune/optimizer.py
+++ b/rosplane_tuning/src/autotune/optimizer.py
@@ -26,19 +26,12 @@ def __init__(self, initial_point, optimization_params):
         initial_point (np.array, size num_gains, dtype float): The starting point for the
             gains to be optimized (x_0). [gain1, gain2, ...]
         optimization_params (dictionary): Parameters needed for the optimization operation.
-            - alpha (float): The initial step size. The ideal value for this is very
-              application-specific.
-            - tau (float): The convergence tolerance. The optimization is considered converged
-              when the infinity norm is less than tau.
-            - h (float): Finite difference step size. This is used to calculate the gradient
-              by stepping slightly away from a point and then calculating the slope of the
-              line between the two points. Ideally this value will be small, but it is sensitive
-              to noise and can't get too small.
+            - alpha (float): The step size.
+            - h (float): Finite difference step size, used to calculate the gradient.
         """

         # Save passed optimization parameters. See above for parameters details.
         self.alpha = optimization_params['alpha']
-        self.tau = optimization_params['tau']
         self.h = optimization_params['h']

         # The current state of the optimization process.
@@ -49,17 +42,8 @@ def __init__(self, initial_point, optimization_params):

         # Initialize the gains to the initial point
         self.x = initial_point
-        self.f_initial = None
+        self.prev_error = None

-        # Keep track of the number of iterations
-        self.k = 0
-        self.max_k = 20
-
-        # Initialize the velocity
-        self.v = np.zeros([self.x.shape[0]]).T
-
-        self.inertia = 0.8
-        self.drag = 0.9

     def optimization_terminated(self):
         """
@@ -103,15 +87,10 @@ def get_next_parameter_set(self, error):
             given at initialization.
             [[gain1_1, gain2_1, ...], [gain1_2, gain2_2, ...], ...]
         """
-        if self.k >= self.max_k:
-            self.reason = "Interations complete."
-            self.state = OptimizerState.TERMINATED
-            return self.x.reshape(1, 2)
-
         if self.state == OptimizerState.UNINITIALIZED:
             # Find the gradient at the new point
             self.state = OptimizerState.FIRST_ITERATION
-            self.f_initial = error.item(0)
+            self.prev_error = error.item(0)

             # Find the values for finite difference, where each dimension is individually
             # stepped by h
@@ -119,23 +98,23 @@ def get_next_parameter_set(self, error):
                 + np.identity(self.x.shape[0]) * self.h

         if self.state == OptimizerState.FIRST_ITERATION or self.state == OptimizerState.OPTIMIZING:
-            self.k += 1
             # Calculate the jacobean at the current point
             if self.state == OptimizerState.FIRST_ITERATION:
-                J = (error - self.f_initial) / self.h
+                J = (error - self.prev_error) / self.h
                 self.state = OptimizerState.OPTIMIZING
             else:
                 J = (error[1:] - error[0]) / self.h
-            J_norm = J / np.linalg.norm(J)

-            # Update the velocity
-            self.v = self.inertia * self.v + (1 - self.inertia) * -J_norm
+            # Check if any of the new points are better
+            if np.min(error) > self.prev_error:
+                self.reason = "Current iteration worst than previous."
+                self.state = OptimizerState.TERMINATED
+                return self.x.reshape(1, 2)
+            self.prev_error = error.item(0)

-            # Slow the velocity down with drag
-            self.v = self.v * self.drag
             # Take a step in the search direction
-            self.x = self.x + self.v * self.alpha
+            self.x = self.x - J * self.alpha

             # Find the gradient at the new point
             points = np.tile(self.x, (self.x.shape[0], 1)) \

diff --git a/rosplane_tuning/src/autotune/optimizer_tester.py b/rosplane_tuning/src/autotune/optimizer_tester.py
index fa123b6..ca41315 100644
--- a/rosplane_tuning/src/autotune/optimizer_tester.py
+++ b/rosplane_tuning/src/autotune/optimizer_tester.py
@@ -24,9 +24,8 @@ def FakeRollController(x, include_noise):

 # Initialize optimizer
 curr_points = np.array([[0.06, 0.04]])  # Initial point
-optimization_params = {'alpha': 0.1,
-                       'tau': 0.1,
-                       'h': 0.03}
+optimization_params = {'alpha': 0.01,
+                       'h': 0.04}
 optimizer = Optimizer(curr_points[0], optimization_params)

 # Print initial point and value
@@ -61,7 +60,7 @@ def FakeRollController(x, include_noise):

 # Plot the function with the optimization path
 x = np.linspace(0., 0.4, 100)
-y = np.linspace(-0.2, 0.3, 100)
+y = np.linspace(-0.1, 0.2, 100)
 X, Y = np.meshgrid(x, y)
 Z = FakeRollController([X, Y], False)
 plt.contour(X, Y, Z, 50)

From 6fe80027f3a461c1741ec6e12c1452035de0092f Mon Sep 17 00:00:00 2001
From: Brandon Sutherland
Date: Fri, 12 Apr 2024 11:42:52 -0600
Subject: [PATCH 49/52] update autotune gains based on roll surrogate

---
 rosplane_tuning/src/autotune/autotune.py | 40 ++++++------------------
 1 file changed, 10 insertions(+), 30 deletions(-)

diff --git a/rosplane_tuning/src/autotune/autotune.py b/rosplane_tuning/src/autotune/autotune.py
index 9aa7fdd..9ded5b4 100644
--- a/rosplane_tuning/src/autotune/autotune.py
+++ b/rosplane_tuning/src/autotune/autotune.py
@@ -157,40 +157,20 @@ def __init__(self):
         # As we conduct optimizations, these parameters will be changed to be more
         # efficient to optimize the specific gains, based on the design spaces.
         if self.current_autopilot == CurrentAutopilot.ROLL:
-            self.optimization_params = {'mu_1': 1e-3,
-                                        'mu_2': 0.5,
-                                        'sigma': 2.0,
-                                        'alpha': 0.05,
-                                        'tau': 1e-2,
-                                        'h': 1e-2}
+            self.optimization_params = {'alpha': 0.01,
+                                        'h': 0.04}
         elif self.current_autopilot == CurrentAutopilot.PITCH:
-            self.optimization_params = {'mu_1': 1e-3,
-                                        'mu_2': 0.5,
-                                        'sigma': 2.0,
-                                        'alpha': 0.05,
-                                        'tau': 1e-2,
-                                        'h': 1e-2}
+            self.optimization_params = {'alpha': 0.01,
+                                        'h': -0.04}
         elif self.current_autopilot == CurrentAutopilot.COURSE:
-            self.optimization_params = {'mu_1': 1e-3,
-                                        'mu_2': 0.5,
-                                        'sigma': 2.0,
-                                        'alpha': 0.05,
-                                        'tau': 1e-2,
-                                        'h': 1e-2}
+            self.optimization_params = {'alpha': 0.01,
+                                        'h': 0.04}
         elif self.current_autopilot == CurrentAutopilot.ALTITUDE:
-            self.optimization_params = {'mu_1': 1e-3,
-                                        'mu_2': 0.5,
-                                        'sigma': 2.0,
-                                        'alpha': 0.05,
-                                        'tau': 1e-2,
-                                        'h': 1e-2}
+            self.optimization_params = {'alpha': 0.01,
+                                        'h': 0.04}
         elif self.current_autopilot == CurrentAutopilot.AIRSPEED:
-            self.optimization_params = {'mu_1': 1e-3,
-                                        'mu_2': 0.5,
-                                        'sigma': 2.0,
-                                        'alpha': 0.05,
-                                        'tau': 1e-2,
-                                        'h': 1e-2}
+            self.optimization_params = {'alpha': 0.01,
+                                        'h': 0.04}
         else:
             raise ValueError(self.get_parameter('current_tuning_autopilot').value +
                              ' is not a valid value for current_tuning_autopilot.' +
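
Together, the last two patches reduce a tuning iteration to a fixed-step finite-difference descent; note the negative finite-difference step h for the pitch autopilot above, presumably because its gains are tuned in the negative direction. A condensed editorial sketch of one iteration (hypothetical names, update rule from the hunks above):

    import numpy as np

    def autotune_step(x, error, prev_error, alpha=0.01, h=0.04):
        # error[0] is the objective at x, error[1:] at x + h*e_i for each gain.
        if np.min(error) > prev_error:
            return x, True                    # all candidates worse: terminate
        J = (error[1:] - error[0]) / h        # forward-difference gradient
        return x - alpha * J, False           # fixed-size gradient step

    x = np.array([0.06, 0.04])
    err = np.array([1.00, 1.02, 0.95])        # illustrative measurements
    print(autotune_step(x, err, prev_error=1.10))
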
From 9d633f36eb0ed2a2a51229cf2ba189ff72a49cea Mon Sep 17 00:00:00 2001
From: Brandon Sutherland
Date: Fri, 12 Apr 2024 11:54:14 -0600
Subject: [PATCH 50/52] fixed bug with get_optimization_status

---
 rosplane_tuning/src/autotune/optimizer.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py
index 86e8169..30129f5 100644
--- a/rosplane_tuning/src/autotune/optimizer.py
+++ b/rosplane_tuning/src/autotune/optimizer.py
@@ -66,9 +66,11 @@ def get_optimization_status(self):
         """
         if self.state == OptimizerState.UNINITIALIZED:
             return "Optimizer awaiting first iteration."
+        elif self.state == OptimizerState.FIRST_ITERATION:
+            return "First iteration."
         elif self.state == OptimizerState.OPTIMIZING:
             return "Optimizing."
-        elif self.state == OptimizerState.TERMINATED:
+        else:  # self.state == OptimizerState.TERMINATED:
             return str(self.reason) + " Final gains: " + str(self.x)

     def get_next_parameter_set(self, error):

From 1bbe1db719585804893ff259f3360486e37adff6 Mon Sep 17 00:00:00 2001
From: Eero
Date: Tue, 16 Apr 2024 09:01:52 -0600
Subject: [PATCH 51/52] Spelling error

---
 rosplane_tuning/src/autotune/optimizer.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/rosplane_tuning/src/autotune/optimizer.py b/rosplane_tuning/src/autotune/optimizer.py
index 30129f5..bf2e8a3 100644
--- a/rosplane_tuning/src/autotune/optimizer.py
+++ b/rosplane_tuning/src/autotune/optimizer.py
@@ -109,7 +109,7 @@ def get_next_parameter_set(self, error):

             # Check if any of the new points are better
             if np.min(error) > self.prev_error:
-                self.reason = "Current iteration worst than previous."
+                self.reason = "Current iteration worse than previous."
                 self.state = OptimizerState.TERMINATED
                 return self.x.reshape(1, 2)
@@ -126,4 +126,3 @@ def get_next_parameter_set(self, error):

         else:  # Process Terminated
             return self.x.reshape(1, 2)
-

From 490e1f569eb50487c750eec229e2540eedfa39bb Mon Sep 17 00:00:00 2001
From: Brandon Sutherland
Date: Fri, 17 May 2024 15:15:45 -0600
Subject: [PATCH 52/52] misc changes

---
 rosplane_tuning/launch/altitude_autotune.launch.py | 2 +-
 rosplane_tuning/launch/course_autotune.launch.py   | 2 +-
 rosplane_tuning/src/autotune/autotune.py           | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/rosplane_tuning/launch/altitude_autotune.launch.py b/rosplane_tuning/launch/altitude_autotune.launch.py
index 64cf1fa..72619aa 100644
--- a/rosplane_tuning/launch/altitude_autotune.launch.py
+++ b/rosplane_tuning/launch/altitude_autotune.launch.py
@@ -10,7 +10,7 @@ def generate_launch_description():
     stabilize_period = LaunchConfiguration('stabilize_period')
     stabilize_period_launch_arg = DeclareLaunchArgument(
         'stabilize_period',
-        default_value=TextSubstitution(text='5.0'),
+        default_value=TextSubstitution(text='10.0'),
         description='Whether to run the tuning sequence continuously or to wait for manual input'
     )
     continuous_tuning = LaunchConfiguration('continuous_tuning')

diff --git a/rosplane_tuning/launch/course_autotune.launch.py b/rosplane_tuning/launch/course_autotune.launch.py
index e4adab7..bcdb0b5 100644
--- a/rosplane_tuning/launch/course_autotune.launch.py
+++ b/rosplane_tuning/launch/course_autotune.launch.py
@@ -10,7 +10,7 @@ def generate_launch_description():
     stabilize_period = LaunchConfiguration('stabilize_period')
     stabilize_period_launch_arg = DeclareLaunchArgument(
         'stabilize_period',
-        default_value=TextSubstitution(text='5.0'),
+        default_value=TextSubstitution(text='10.0'),
         description='Whether to run the tuning sequence continuously or to wait for manual input'
     )
     continuous_tuning = LaunchConfiguration('continuous_tuning')

diff --git a/rosplane_tuning/src/autotune/autotune.py b/rosplane_tuning/src/autotune/autotune.py
index 9ded5b4..fb182f5 100644
--- a/rosplane_tuning/src/autotune/autotune.py
+++ b/rosplane_tuning/src/autotune/autotune.py
@@ -3,7 +3,7 @@
 from rosplane_msgs.msg import ControllerCommands
 from rosplane_msgs.msg import ControllerInternalsDebug
 from rosplane_msgs.msg import State
-from optimizer import Optimizer
+from plot_util import Optimizer

 import rclpy
 from rclpy.executors import MultiThreadedExecutor
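
To close, a minimal smoke test in the style of optimizer_tester.py for the final optimizer API (an editorial sketch; the objective is a simplified, noise-free stand-in for the roll surrogate):

    import numpy as np
    from optimizer import Optimizer  # note: the last hunk above switches autotune.py to import from plot_util

    def objective(x):
        # Simplified stand-in for FakeRollController, minimum at (0.2, 0.03).
        return 15*(x[0] - 0.2)**2 + 3*(x[1] - 0.03)**2

    points = np.array([[0.06, 0.04]])
    opt = Optimizer(points[0], {'alpha': 0.01, 'h': 0.04})
    while not opt.optimization_terminated():
        error = np.array([objective(p) for p in points])
        points = opt.get_next_parameter_set(error)
    print(opt.get_optimization_status())
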