diff --git a/vehicle/learning_based_accel_brake_map_calibrator/CMakeLists.txt b/vehicle/learning_based_accel_brake_map_calibrator/CMakeLists.txt new file mode 100644 index 0000000000000..3698faad4068c --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/CMakeLists.txt @@ -0,0 +1,50 @@ +cmake_minimum_required(VERSION 3.8) +project(learning_based_vehicle_calibration) + +# find dependencies +find_package(ament_cmake_auto REQUIRED) +ament_auto_find_build_dependencies() +find_package(std_msgs REQUIRED) +find_package(rosidl_default_generators REQUIRED) +# find_package(builtin_interfaces REQUIRED) + +install( + PROGRAMS + scripts/data_collection.py + scripts/data_monitor.py + scripts/neural_network_brake.py + scripts/neural_network_throttle.py + scripts/throttle_data_visualization.py + scripts/rosbag_collection.sh + scripts_steering/data_collection_steer.py + scripts_steering/neural_network_steer1.py + scripts_steering/neural_network_steer2.py + scripts_steering/neural_network_steer3.py + scripts_steering/neural_network_steer4.py + scripts_steering/neural_network_steer5.py + scripts_steering/data_monitor_steer.py + + + DESTINATION lib/${PROJECT_NAME} + +) + +rosidl_generate_interfaces(${PROJECT_NAME} + "msg/LongitudinalProgress.msg" + "msg/SteeringProgress.msg" + "msg/LongitudinalProcesses.msg" + "msg/SteeringProcesses.msg" + DEPENDENCIES std_msgs + + ) + + + +# ament_export_dependencies(rosidl_default_runtime) + + +# install +ament_auto_package( + INSTALL_TO_SHARE + launch +) diff --git a/vehicle/learning_based_accel_brake_map_calibrator/README.md b/vehicle/learning_based_accel_brake_map_calibrator/README.md new file mode 100644 index 0000000000000..3b00c8bac4a4d --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/README.md @@ -0,0 +1,387 @@ +# Learning Based Vehicle Calibration + +## Getting Started + +### 1. Installation + +Download the repository and move inside the folder: + +```bash +git clone https://github.com/pixmoving-moveit/learning_based_vehicle_calibration.git && cd learning_based_vehicle_calibration +``` + +Install the required libraries: + +```bash +pip install -r requirements.txt +``` + +Make sure to place this package correcly inside your workspace, colcon build it and source it. + +### 2. Longitudinal Dynamics + +To start collecting data, launch in your workspace: + +```bash +ros2 launch learning_based_vehicle_calibration calibration_launch.py +``` + +Inside this launch file there is a variable called 'Recovery_Mode', set to False by default. If while you were collecting data the software stopped for some reasons or something happened causing the interruption of your collection process, you can collect data recovering from previous breaking points by setting the variable to True. This way it will update the csv tables you have already started to collect without the need to start from scratch. + +You can visualize the collection process from the terminal. 
+
+![data_collection1](./imgs/data_collection1.png)
+
+Otherwise, we built some custom messages representing the progress, which are published on the following topics:
+
+```bash
+/scenarios_collection_longitudinal_throttling
+
+/scenarios_collection_longitudinal_braking
+```
+
+Once you have collected the data, train and build your black box models, for both the throttling and braking scenarios, by launching:
+
+```bash
+ros2 launch learning_based_vehicle_calibration neural_network_launch.py
+```
+
+You will obtain the acceleration and braking calibration maps and visualize how the neural network fits the data:
+
+![NN_throttle](./imgs/NN_throttle.png)
+
+### 3. Steering Dynamics
+
+To start collecting data, launch in your workspace:
+
+```bash
+ros2 launch learning_based_vehicle_calibration calibration_steering_launch.py
+```
+
+Also in this case, you can set the 'Recovery_Mode' variable to True if you need to resume from a previous interruption.
+
+You can visualize the collection process through the terminal:
+
+![data_collection_steer](./imgs/data_collection_steer.png)
+
+Here as well, we built custom messages representing the progress, which are published on the following topic:
+
+```bash
+/scenarios_collection_steering_progress
+```
+
+To train and build your models, launch:
+
+```bash
+ros2 launch learning_based_vehicle_calibration neural_network_steering_launch.py
+```
+
+You will obtain 5 different acceleration calibration maps, divided according to steering thresholds. Moreover, you will visualize the 5 neural networks trained for the different steering thresholds along with the real data.
+
+Here is an example of the visualization for the steering range 5% - 20%:
+
+![NNsteer0](./imgs/NN_steer0.png)
+
+Instead, the model for the steering range 60% - 80% looks like this:
+
+![NNsteer](./imgs/NN_steer.png)
+
+## Overview
+
+Here we present the software structure, data collection, data preprocessing, and neural network training and visualization for the end-to-end calibration of a vehicle in two different scenarios:
+
+- **Normal driving conditions (longitudinal dynamics);**
+- **Parking conditions (steering dynamics).**
+
+## Input Data Software
+
+Launch Autoware as follows:
+
+```bash
+./autoware.sh
+```
+
+It is recommended to record the topics we need in order to train our model.
The data we need to record are the pitch angle, the linear acceleration, the velocity, the braking and throttling values, and the steering angle (make sure to modify the topic names according to your vehicle):
+
+```bash
+ros2 bag record /sensing/combination_navigation/chc/pitch /vehicle/status/actuation_status /vehicle/status/steering_status /vehicle/status/velocity_status /vehicle/status/imu
+```
+
+- **data_field: the rosbag should contain brake_paddle, throttle_paddle, velocity, steering, imu and pitch**
+
+The data values and units of measure are as follows:
+
+```bash
+# brake pedal value (from 0 to 1, float)
+/vehicle/status/actuation_status -> status.brake_status
+# throttle pedal value (from 0 to 1, float)
+/vehicle/status/actuation_status -> status.accel_status
+# velocity value (unit is m/s, float)
+/vehicle/status/velocity_status -> longitudinal_velocity
+# steering value (from 0 to 0.5 [radians], float)
+/vehicle/status/steering_status -> steering_tire_angle
+# imu data (linear acceleration, unit is m/s^2)
+/vehicle/status/imu -> linear_acceleration.x
+# pitch angle (unit is degrees)
+/sensing/combination_navigation/chc/pitch -> data
+```
+
+When you run the calibration, the data_monitor script is launched automatically.
+
+Thanks to the data_monitor script, we can make sure that all the topics are received correctly, without delays or other problems.
+
+You can have a look at the following examples:
+
+- if all the topics are published correctly, the script prints the following text to the terminal once per second
+
+![data_monitor_normal](./imgs/data_monitor_normal.png)
+
+- if instead one or more topics are not published or received correctly, or are delayed, the script warns you by printing, for example, the following text
+
+![data_monitor_error](./imgs/data_monitor_error.png)
+
+## 1. Longitudinal Dynamics
+
+## Record Data Software Case
+
+Since our goal is to build a data-driven model, the first step is to collect enough data suitable for training a neural network model.
+
+Each data point is classified according to its speed and throttling/braking information. This way, we can check whether we have collected enough data for every case scenario. To do so, we built software with a simple if-else logic that detects and classifies each recorded sample into its scenario (a sketch of this logic follows the lists below). This stage is very important because it ensures a balanced dataset with enough data to train our neural network model for every condition. We start by collecting 3000 triplets (velocity, acceleration, throttling/braking) per scenario. For each timestamp, if the steering angle is greater than a threshold (2 degrees), that sample is discarded and not classified (since at this stage we are only interested in the longitudinal dynamics, steering should be avoided):
+
+- **LOW SPEED SCENARIO (0 - 10km/h)**: in this scenario we have 8 different throttling/braking conditions.
+
+0. Brake 0 - deadzone
+1. Brake deadzone - 15%
+2. Brake 15% - 25%
+3. Brake > 25%
+4. Throttle 0 - deadzone
+5. Throttle deadzone - 30%
+6. Throttle 30% - 55%
+7. Throttle > 55%
+
+- **HIGH SPEED SCENARIO ( > 10km/h)**: in this scenario we have 8 different throttling/braking conditions.
+
+0. Brake 0 - deadzone
+1. Brake deadzone - 15%
+2. Brake 15% - 25%
+3. Brake > 25%
+4. Throttle 0 - deadzone
+5. Throttle deadzone - 30%
+6. Throttle 30% - 55%
+7. Throttle > 55%
+
+As already stated, if the steering angle is greater than 2 degrees, data will not be collected. But there are other conditions under which we discard data:
+
+1. If the velocity is higher than 35km/h, we do not collect data;
+2. If the velocity is equal to 0km/h, we do not collect data;
+3. We need to ensure data consistency: we check the values of two consecutive throttle/brake commands and, if their difference is greater than a certain threshold, we do not collect those samples.
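+
+Below is a minimal Python sketch of the binning and discarding logic described above. The function signature and variable names are illustrative, and the numeric values mirror the ones quoted in this README; the actual node reads its thresholds from the launch parameters.
+
+```python
+# Illustrative sketch of the scenario classification; values mirror the README text,
+# while the real node takes its thresholds from launch parameters.
+import math
+
+SPEED_SPLIT = 10 / 3.6          # 10 km/h in m/s: low/high speed boundary
+MAX_VELOCITY = 35 / 3.6         # discard samples above 35 km/h
+STEERING_MAX = math.radians(2)  # discard samples with more than 2 degrees of steering
+CONSISTENCY = 20                # max difference between consecutive pedal commands (%)
+THROTTLE_LEVELS = [5, 30, 55]   # deadzone, threshold1, threshold2 (%)
+BRAKE_LEVELS = [5, 15, 25]      # deadzone, threshold1, threshold2 (%)
+
+
+def classify(velocity, throttle, brake, steering, prev_cmd):
+    """Return (pedal, speed_bin, level_bin) or None if the sample is discarded."""
+    if velocity <= 0.0 or velocity > MAX_VELOCITY or abs(steering) > STEERING_MAX:
+        return None
+    if brake > 0:
+        pedal, value, levels = "brake", brake, BRAKE_LEVELS
+    else:
+        pedal, value, levels = "throttle", throttle, THROTTLE_LEVELS
+    if abs(value - prev_cmd) > CONSISTENCY:  # consecutive commands must be consistent
+        return None
+    speed_bin = "low" if velocity < SPEED_SPLIT else "high"
+    level_bin = sum(value > t for t in levels)  # 0..3, matching the lists above
+    return pedal, speed_bin, level_bin
+```
+
+In the actual node, a sample that passes these checks is appended to the csv table of its scenario and counted toward the MAX_DATA limit of that scenario.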
+
+This is how you will visualize the data that are being recorded. This visualization tool updates itself in real time based on the data we are collecting:
+
+![data_collection1](./imgs/data_collection1.png)
+![data_collection2](./imgs/data_collection2.png)
+![data_collection3](./imgs/data_collection3.png)
+
+This way, while manually driving the vehicle, we can see which maneuvers are still needed to fill the entire dataset. Once a case scenario reaches the MAX_DATA threshold, data for that scenario are no longer recorded.
+
+Another important consideration is pitch compensation: the longitudinal acceleration we measure from the IMU is not the real longitudinal acceleration. Even if we choose a flat road to collect data, it is almost impossible to avoid ground irregularities which create small bumps in the vehicle. These bumps introduce a gravitational acceleration component which disturbs the longitudinal one. Luckily, by measuring the pitch angle, we can remove this disturbance according to the following formula:
+
+real_acc = acc_imu - g \* sin(pitch_angle)
+
+where real_acc is the real longitudinal acceleration we want to collect, acc_imu is the longitudinal acceleration measured by the IMU sensor, g is the gravitational acceleration and pitch_angle is the pitch angle measured by the sensor, converted into radians.
+
+These are the rules we adopted for collecting data in an efficient way. But raw data are often affected by noise, so before the training stage we should preprocess them.
+
+## Data Preprocessing
+
+Since raw data collected by a human driver are noisy and non-uniform, preprocessing is an essential step to ensure high-quality data.
+
+First, we apply a mean filter with a window size of 20 (which can be tuned) to smooth the data.
+
+After the data have been collected and saved in csv format, you can visualize them by launching the following scripts:
+
+```bash
+python3 throttle_data_visualization.py
+
+python3 brake_data_visualization.py
+```
+
+This gives an idea of the distribution of our data.
+
+![throttle_data](./imgs/throttle_data.png)
+
+![brake_data](./imgs/brake_data.png)
+
+As expected, the data are noisy and contain several outliers, so we standardize and normalize them to remove outliers and prepare them for the training stage.
+
+## Neural Network Training and Visualization
+
+In order to find the relationship between velocity, acceleration and throttle/brake commands, we first need to define our neural network structure and its inputs and output.
+
+Notice that the throttling and braking models are trained separately.
+
+Since we want to generate a calibration table which takes throttle/brake and speed as inputs and outputs acceleration, we can follow the same structure for the neural network.
+
+So the fully connected neural network will have 2 nodes in the input layer and 1 node in the output layer.
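+
+As a reference, here is a minimal sketch of such a model in PyTorch; the hidden-layer sizes and learning rate are illustrative placeholders and do not necessarily match the layout used by the package scripts.
+
+```python
+import torch.nn as nn
+import torch.optim as optim
+
+
+# Sketch only: 2 inputs (command, velocity) -> 1 output (acceleration).
+# Hidden sizes are placeholders; the actual scripts may use a different layout.
+class CalibrationNet(nn.Module):
+    def __init__(self, hidden=64):
+        super().__init__()
+        self.net = nn.Sequential(
+            nn.Linear(2, hidden),
+            nn.ReLU(),
+            nn.Linear(hidden, hidden),
+            nn.ReLU(),
+            nn.Linear(hidden, 1),
+        )
+
+    def forward(self, x):
+        return self.net(x)
+
+
+model = CalibrationNet()
+criterion = nn.MSELoss()                              # mean square error cost function
+optimizer = optim.Adam(model.parameters(), lr=1e-3)   # Adam optimizer
+```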
+
+Regarding the hidden layers, we cannot know in advance how many layers and parameters are needed to fit our data best. So, based on the performance measured by metrics such as MSE, MAE, RMSE and R2, we can adjust the structure and the number of trainable parameters to optimize those metrics.
+
+Now we can split the dataset into a training set (80%) and a testing set (20%), and we are ready for the training stage.
+
+We chose ReLU as the activation function, mean square error as the cost function and the Adam optimizer, all in PyTorch.
+
+To start the training of the throttling and braking models and visualize their shapes and metrics, we can launch:
+
+```bash
+ros2 launch learning_based_vehicle_calibration neural_network_launch.py
+```
+
+![NN_throttle](./imgs/NN_throttle.png)
+
+![NN_brake](./imgs/NN_brake.png)
+
+When you launch these scripts, the training stage begins; when it is done, the software automatically saves the trained model in a zip file.
+
+This way, when you want to use the neural network model, you do not have to train it from scratch every time: you can train it once and then simply load it. The same scripts will also create the acceleration and braking offline tables.
+
+Finally, the script will also plot the distribution of velocity, throttling/braking levels and acceleration data:
+
+![dioss](./imgs/dioss.png)
+
+## Evaluation and Results
+
+In order to evaluate our calibration table for the longitudinal dynamics, we are going to check the speed error of the vehicle while driving in autonomous driving mode.
+
+We have tested the vehicle in different road conditions with different slopes, and these are the results we have obtained:
+
+![evaluation](./imgs/evaluation.png)
+
+The blue line is the speed error [m/s] and the red line is the pitch angle [degrees].
+
+As you can see, the speed error is bounded within ±0.3m/s, which is good considering that the pitch angle is not constant during the test.
+
+We can also visualize the setpoint (blue line) compared to the measured velocity (red line):
+
+![evaluation1](./imgs/evaluation1.png)
+
+We are now ready for the steering calibration tables.
+
+## 2. Steering Dynamics
+
+Similarly to the Longitudinal Dynamics case, in this module we collect data and train a neural network model to map velocity, throttle command and steering angle into acceleration. Thus, we have an additional input, the steering angle.
+
+The goal is to calibrate the vehicle's dynamics at low speeds and large steering angles (typical of parking scenarios), and to obtain a model that can compensate for the friction force between the tyres and the road.
+
+More precisely, when the vehicle moves at low speed (we assume less than 5km/h), the larger the steering angle, the higher the friction force, so a higher throttle command is needed to move the vehicle, as you can see from the following 3D plot:
+
+![steermap](./imgs/steermap.png)
+
+For this reason, in these conditions we cannot rely on the calibration map obtained from the Longitudinal Dynamics case, but need to build new maps that take this friction compensation into account.
+
+Here we propose our methods and results.
+
+The idea is to obtain different calibration tables according to steering angle ranges. In the Longitudinal Dynamics case we obtained one calibration table for the throttling scenario and one for the braking scenario. Here, instead, we will obtain 5 calibration tables for the throttling scenario, one per steering range. For example, the first calibration table is valid for steering angles between 5% and 20%, the second one for steering angles between 20% and 40%, and so on, as sketched below.
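+
+As an illustration of how such a set of range-specific tables could be selected at runtime, here is a small sketch; the file names and the exact boundary handling are placeholders, not the package's actual output names.
+
+```python
+# Sketch only: file names and range boundaries are illustrative placeholders.
+# Each table maps (velocity, throttle) -> acceleration for one steering range.
+STEERING_TABLES = [
+    ((5.0, 20.0), "steering_map_05_20.csv"),
+    ((20.0, 40.0), "steering_map_20_40.csv"),
+    ((40.0, 60.0), "steering_map_40_60.csv"),
+    ((60.0, 80.0), "steering_map_60_80.csv"),
+    ((80.0, 100.0), "steering_map_80_100.csv"),
+]
+
+
+def table_for_steering(steering_percent):
+    """Return the calibration table covering the given steering percentage."""
+    for (low, high), path in STEERING_TABLES:
+        if low <= steering_percent <= high:
+            return path
+    return None  # below 5%: the longitudinal calibration map applies
+```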
+
+## Record Data Software Case (Steering)
+
+Here we follow the same logic as in the Longitudinal Dynamics case. The difference is in the scenarios:
+
+- **LOW THROTTLE SCENARIO ( <= 12 )**: in this scenario we have 5 different steering conditions.
+
+0. Steering: 5% - 20%
+1. Steering: 20% - 40%
+2. Steering: 40% - 60%
+3. Steering: 60% - 80%
+4. Steering: 80% - 100%
+
+- **HIGH THROTTLE SCENARIO ( > 12 )**: in this scenario we have 5 different steering conditions.
+
+0. Steering: 5% - 20%
+1. Steering: 20% - 40%
+2. Steering: 40% - 60%
+3. Steering: 60% - 80%
+4. Steering: 80% - 100%
+
+We do not collect data in the following cases:
+
+1. If the velocity is higher than 5km/h;
+2. If the velocity is equal to 0km/h;
+3. If the steering angle is less than 5%.
+
+To launch the script:
+
+```bash
+ros2 launch learning_based_vehicle_calibration calibration_steering_launch.py
+```
+
+This is how you will visualize the data that are being recorded. This visualization tool updates itself in real time based on the data we are collecting:
+
+![data_collection_steer](./imgs/data_collection_steer.png)
+
+The considerations made for the longitudinal dynamics case are still valid here.
+
+At the end of the recording process, the script will save our data in 5 different csv files, according to the 5 different steering ranges.
+
+## Data Preprocessing (Steering)
+
+First, let's visualize our raw data by launching the script (make sure to read the right csv file):
+
+```bash
+python3 steering_data_visualization.py
+```
+
+![rawdatasteer](./imgs/raw_data_steer.png)
+
+These are the data for the 60% - 80% steering condition.
+
+The preprocessing step consists of a mean filter with window size 11 (tunable) and the removal of outliers. Then we standardize and normalize the data to make them ready for the training stage.
+
+## Neural Network Training and Visualization (Steering)
+
+As in the Longitudinal Dynamics case, here we find the relationship between velocity, acceleration and throttle command.
+
+We will train 5 different models according to the steering range. The inputs of the neural network model are velocity and throttle command, and the output is the acceleration.
+
+As before, we chose ReLU as the activation function, mean square error as the cost function and the Adam optimizer, all in PyTorch.
+
+To start the training stage, visualize the models and save the calibration tables in csv format, we can launch:
+
+```bash
+ros2 launch learning_based_vehicle_calibration neural_network_steering_launch.py
+```
+
+As stated above, we train 5 different models, so you will obtain 5 different calibration tables, one per steering range.
+
+Here is an example of the visualization for the steering range 5% - 20%:
+
+![NNsteer0](./imgs/NN_steer0.png)
+
+Instead, the model for the steering range 60% - 80% looks like this:
+
+![NNsteer](./imgs/NN_steer.png)
+
+As you can see, for the same throttle value, the acceleration is higher in the calibration table for the steering range 5% - 20%.
This happens because as already stated, the higher the steering angle, the higher the friction force, so we need a higher throttle command to output the same acceleration value. + +## Evaluation and Results (Steering) + +In order to evaluate the performances of our tables in steering conditions at low speeds, we are going to plot the speed error while driving in autonomous driving mode. + +First, we will show the speed error of the calibration table without friction compensation: + +![err2](./imgs/err2.png) + +As you can notice, the error is pretty large, and most important thing, the velocity of the vehicle cannot reach the setpoint. Indeed, the speed error has a bias of around 0.15m/s and cannot reach 0. This happens because this table is not able to compensate the force generated by the friction between the tyres and the ground with large steering angles, so the throttle output is not enough to reach the desired speed. + +Let's now visualize the speed error of the calibration tables with friction compensation: + +![err4](./imgs/err4.png) + +The results are outstanding! As you can see, the speed error is bounded between ± 0.02m/s in large steering conditions at low speeds (such as in parking scenarios). + +So, we can conclude that these tables can compensate the friction force and can track the desired speed without problems. diff --git a/vehicle/learning_based_accel_brake_map_calibrator/imgs/NN_brake.png b/vehicle/learning_based_accel_brake_map_calibrator/imgs/NN_brake.png new file mode 100755 index 0000000000000..e69ece9589d8b Binary files /dev/null and b/vehicle/learning_based_accel_brake_map_calibrator/imgs/NN_brake.png differ diff --git a/vehicle/learning_based_accel_brake_map_calibrator/imgs/NN_steer.png b/vehicle/learning_based_accel_brake_map_calibrator/imgs/NN_steer.png new file mode 100755 index 0000000000000..01b385e445607 Binary files /dev/null and b/vehicle/learning_based_accel_brake_map_calibrator/imgs/NN_steer.png differ diff --git a/vehicle/learning_based_accel_brake_map_calibrator/imgs/NN_steer0.png b/vehicle/learning_based_accel_brake_map_calibrator/imgs/NN_steer0.png new file mode 100755 index 0000000000000..ebf6f6efba24e Binary files /dev/null and b/vehicle/learning_based_accel_brake_map_calibrator/imgs/NN_steer0.png differ diff --git a/vehicle/learning_based_accel_brake_map_calibrator/imgs/NN_throttle.png b/vehicle/learning_based_accel_brake_map_calibrator/imgs/NN_throttle.png new file mode 100755 index 0000000000000..51140227933aa Binary files /dev/null and b/vehicle/learning_based_accel_brake_map_calibrator/imgs/NN_throttle.png differ diff --git a/vehicle/learning_based_accel_brake_map_calibrator/imgs/brake_data.png b/vehicle/learning_based_accel_brake_map_calibrator/imgs/brake_data.png new file mode 100755 index 0000000000000..aa3000032c742 Binary files /dev/null and b/vehicle/learning_based_accel_brake_map_calibrator/imgs/brake_data.png differ diff --git a/vehicle/learning_based_accel_brake_map_calibrator/imgs/data_collection1.png b/vehicle/learning_based_accel_brake_map_calibrator/imgs/data_collection1.png new file mode 100755 index 0000000000000..b6f32cb656253 Binary files /dev/null and b/vehicle/learning_based_accel_brake_map_calibrator/imgs/data_collection1.png differ diff --git a/vehicle/learning_based_accel_brake_map_calibrator/imgs/data_collection2.png b/vehicle/learning_based_accel_brake_map_calibrator/imgs/data_collection2.png new file mode 100755 index 0000000000000..8649a78f72b21 Binary files /dev/null and 
b/vehicle/learning_based_accel_brake_map_calibrator/imgs/data_collection2.png differ diff --git a/vehicle/learning_based_accel_brake_map_calibrator/imgs/data_collection3.png b/vehicle/learning_based_accel_brake_map_calibrator/imgs/data_collection3.png new file mode 100755 index 0000000000000..365d57d138a25 Binary files /dev/null and b/vehicle/learning_based_accel_brake_map_calibrator/imgs/data_collection3.png differ diff --git a/vehicle/learning_based_accel_brake_map_calibrator/imgs/data_collection_steer.png b/vehicle/learning_based_accel_brake_map_calibrator/imgs/data_collection_steer.png new file mode 100755 index 0000000000000..80d2e9d707cd4 Binary files /dev/null and b/vehicle/learning_based_accel_brake_map_calibrator/imgs/data_collection_steer.png differ diff --git a/vehicle/learning_based_accel_brake_map_calibrator/imgs/data_monitor_error.png b/vehicle/learning_based_accel_brake_map_calibrator/imgs/data_monitor_error.png new file mode 100755 index 0000000000000..c66b018f90456 Binary files /dev/null and b/vehicle/learning_based_accel_brake_map_calibrator/imgs/data_monitor_error.png differ diff --git a/vehicle/learning_based_accel_brake_map_calibrator/imgs/data_monitor_normal.png b/vehicle/learning_based_accel_brake_map_calibrator/imgs/data_monitor_normal.png new file mode 100755 index 0000000000000..6b21b0ba76651 Binary files /dev/null and b/vehicle/learning_based_accel_brake_map_calibrator/imgs/data_monitor_normal.png differ diff --git a/vehicle/learning_based_accel_brake_map_calibrator/imgs/dioss.png b/vehicle/learning_based_accel_brake_map_calibrator/imgs/dioss.png new file mode 100644 index 0000000000000..804626e343456 Binary files /dev/null and b/vehicle/learning_based_accel_brake_map_calibrator/imgs/dioss.png differ diff --git a/vehicle/learning_based_accel_brake_map_calibrator/imgs/err1.png b/vehicle/learning_based_accel_brake_map_calibrator/imgs/err1.png new file mode 100755 index 0000000000000..d63b77d57e777 Binary files /dev/null and b/vehicle/learning_based_accel_brake_map_calibrator/imgs/err1.png differ diff --git a/vehicle/learning_based_accel_brake_map_calibrator/imgs/err2.png b/vehicle/learning_based_accel_brake_map_calibrator/imgs/err2.png new file mode 100755 index 0000000000000..c3f4d98a1f723 Binary files /dev/null and b/vehicle/learning_based_accel_brake_map_calibrator/imgs/err2.png differ diff --git a/vehicle/learning_based_accel_brake_map_calibrator/imgs/err3.png b/vehicle/learning_based_accel_brake_map_calibrator/imgs/err3.png new file mode 100755 index 0000000000000..ca5c41f2df919 Binary files /dev/null and b/vehicle/learning_based_accel_brake_map_calibrator/imgs/err3.png differ diff --git a/vehicle/learning_based_accel_brake_map_calibrator/imgs/err4.png b/vehicle/learning_based_accel_brake_map_calibrator/imgs/err4.png new file mode 100755 index 0000000000000..cae0e79ab5421 Binary files /dev/null and b/vehicle/learning_based_accel_brake_map_calibrator/imgs/err4.png differ diff --git a/vehicle/learning_based_accel_brake_map_calibrator/imgs/evaluation.png b/vehicle/learning_based_accel_brake_map_calibrator/imgs/evaluation.png new file mode 100755 index 0000000000000..4cb1f6de9a35e Binary files /dev/null and b/vehicle/learning_based_accel_brake_map_calibrator/imgs/evaluation.png differ diff --git a/vehicle/learning_based_accel_brake_map_calibrator/imgs/evaluation1.png b/vehicle/learning_based_accel_brake_map_calibrator/imgs/evaluation1.png new file mode 100755 index 0000000000000..81532ff93fb30 Binary files /dev/null and 
b/vehicle/learning_based_accel_brake_map_calibrator/imgs/evaluation1.png differ diff --git a/vehicle/learning_based_accel_brake_map_calibrator/imgs/raw_data_steer.png b/vehicle/learning_based_accel_brake_map_calibrator/imgs/raw_data_steer.png new file mode 100755 index 0000000000000..eb8b98c96d9e5 Binary files /dev/null and b/vehicle/learning_based_accel_brake_map_calibrator/imgs/raw_data_steer.png differ diff --git a/vehicle/learning_based_accel_brake_map_calibrator/imgs/steermap.png b/vehicle/learning_based_accel_brake_map_calibrator/imgs/steermap.png new file mode 100755 index 0000000000000..c69f78abff92d Binary files /dev/null and b/vehicle/learning_based_accel_brake_map_calibrator/imgs/steermap.png differ diff --git a/vehicle/learning_based_accel_brake_map_calibrator/imgs/throttle_data.png b/vehicle/learning_based_accel_brake_map_calibrator/imgs/throttle_data.png new file mode 100755 index 0000000000000..c36ffb1a8a64b Binary files /dev/null and b/vehicle/learning_based_accel_brake_map_calibrator/imgs/throttle_data.png differ diff --git a/vehicle/learning_based_accel_brake_map_calibrator/launch/calibration_launch.py b/vehicle/learning_based_accel_brake_map_calibrator/launch/calibration_launch.py new file mode 100644 index 0000000000000..88d3a37af3402 --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/launch/calibration_launch.py @@ -0,0 +1,208 @@ +import os + +import launch +import launch_ros.actions +from launch.actions import OpaqueFunction +import launch.substitutions + + +def launch_data_monitor(context): + # Open a new terminal and run data_monitor.py + os.system( + "gnome-terminal -- /bin/bash -c 'ros2 run learning_based_vehicle_calibration data_monitor.py; exec bash'" + ) + + +def generate_launch_description(): + return launch.LaunchDescription( + [ + launch.actions.DeclareLaunchArgument( + name="max_data", + default_value="1500", + description="Max number of data to collect for each scenario", + ), + launch.actions.DeclareLaunchArgument( + name="num_of_queue", + default_value="20", + description="Window size of mean filter used to smooth data", + ), + launch.actions.DeclareLaunchArgument( + name="speed_threshold", + default_value="2.8", + description="Threshold between low and high speeds in m/s", + ), + launch.actions.DeclareLaunchArgument( + name="steering_threshold", + default_value="0.03490658503988659", + description="Steering radians value which corresponds to 2 degrees, over which we do not collect data", + ), + launch.actions.DeclareLaunchArgument( + name="throttle_deadzone", + default_value="5", + description="Percentage of throttle deadzone", + ), + launch.actions.DeclareLaunchArgument( + name="brake_deadzone", + default_value="5", + description="Percentage of break deadzone", + ), + launch.actions.DeclareLaunchArgument( + name="max_velocity", + default_value="11.1", + description="Max speed in m/s over which we do not collect data", + ), + launch.actions.DeclareLaunchArgument( + name="throttle_threshold1", + default_value="30", + description="Threshold throttle percentage 1", + ), + launch.actions.DeclareLaunchArgument( + name="throttle_threshold2", + default_value="55", + description="Threshold throttle percentage 2", + ), + launch.actions.DeclareLaunchArgument( + name="brake_threshold1", + default_value="15", + description="Threshold brake percentage 1", + ), + launch.actions.DeclareLaunchArgument( + name="brake_threshold2", + default_value="25", + description="Threshold brake percentage 2", + ), + launch.actions.DeclareLaunchArgument( + 
name="consistency_threshold", + default_value="20", + description="If 2 consecutive throttle or brake commands differ for more than 20, they are not consistent so we do not collect them", + ), + # Add launch arguments for topic names + launch.actions.DeclareLaunchArgument( + name="pitch_topic", + default_value="/sensing/gnss/chc/pitch", + description="Topic for pitch data", + ), + launch.actions.DeclareLaunchArgument( + name="actuation_status_topic", + default_value="/vehicle/status/actuation_status", + description="Topic for actuation status data (brake and acceleration)", + ), + launch.actions.DeclareLaunchArgument( + name="steering_status_topic", + default_value="/vehicle/status/steering_status", + description="Topic for steering status data", + ), + launch.actions.DeclareLaunchArgument( + name="velocity_status_topic", + default_value="/vehicle/status/velocity_status", + description="Topic for velocity status data", + ), + launch.actions.DeclareLaunchArgument( + name="imu_topic", + default_value="/sensing/gnss/chc/imu", + description="Topic for IMU data", + ), + launch.actions.DeclareLaunchArgument( + name="Recovery_Mode", + default_value="False", + description="If False, the node will create new csv tables from scratch. If True, it will recover the previous csv tables and will start to collect data from the previous indexes", + ), + OpaqueFunction( + function=launch_data_monitor, + ), + launch_ros.actions.Node( + package="learning_based_vehicle_calibration", + executable="data_collection.py", + name="data_collection", + output="screen", + parameters=[ + {"max_data": launch.substitutions.LaunchConfiguration("max_data")}, + { + "num_of_queue": launch.substitutions.LaunchConfiguration( + "num_of_queue" + ) + }, + { + "speed_threshold": launch.substitutions.LaunchConfiguration( + "speed_threshold" + ) + }, + { + "steering_threshold": launch.substitutions.LaunchConfiguration( + "steering_threshold" + ) + }, + { + "throttle_deadzone": launch.substitutions.LaunchConfiguration( + "throttle_deadzone" + ) + }, + { + "brake_deadzone": launch.substitutions.LaunchConfiguration( + "brake_deadzone" + ) + }, + { + "max_velocity": launch.substitutions.LaunchConfiguration( + "max_velocity" + ) + }, + { + "throttle_threshold1": launch.substitutions.LaunchConfiguration( + "throttle_threshold1" + ) + }, + { + "throttle_threshold2": launch.substitutions.LaunchConfiguration( + "throttle_threshold2" + ) + }, + { + "brake_threshold1": launch.substitutions.LaunchConfiguration( + "brake_threshold1" + ) + }, + { + "brake_threshold2": launch.substitutions.LaunchConfiguration( + "brake_threshold2" + ) + }, + { + "consistency_threshold": launch.substitutions.LaunchConfiguration( + "consistency_threshold" + ) + }, + { + "pitch_topic": launch.substitutions.LaunchConfiguration( + "pitch_topic" + ) + }, + { + "actuation_status_topic": launch.substitutions.LaunchConfiguration( + "actuation_status_topic" + ) + }, + { + "steering_status_topic": launch.substitutions.LaunchConfiguration( + "steering_status_topic" + ) + }, + { + "velocity_status_topic": launch.substitutions.LaunchConfiguration( + "velocity_status_topic" + ) + }, + { + "imu_topic": launch.substitutions.LaunchConfiguration( + "imu_topic" + ) + }, + { + "Recovery_Mode": launch.substitutions.LaunchConfiguration( + "Recovery_Mode" + ) + }, + ], + ), + ] + ) diff --git a/vehicle/learning_based_accel_brake_map_calibrator/launch/calibration_steering_launch.py b/vehicle/learning_based_accel_brake_map_calibrator/launch/calibration_steering_launch.py new file mode 
100644 index 0000000000000..5bfc81e111444 --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/launch/calibration_steering_launch.py @@ -0,0 +1,167 @@ +import os + +import launch +import launch_ros.actions +from launch.actions import OpaqueFunction + + +def launch_data_monitor_steer(context): + # Open a new terminal and run data_monitor.py + os.system( + "gnome-terminal -- /bin/bash -c 'ros2 run learning_based_vehicle_calibration data_monitor_steer.py; exec bash'" + ) + + +def generate_launch_description(): + return launch.LaunchDescription( + [ + launch.actions.DeclareLaunchArgument( + "max_data", + default_value="10000", + description="Max number of data to collect for each scenario", + ), + launch.actions.DeclareLaunchArgument( + "max_velocity", + default_value="1.95", + description="Max speed in m/s over which we do not collect data", + ), + launch.actions.DeclareLaunchArgument( + "throttle_threshold", + default_value="12", + description="Threshold between low and high throttle command", + ), + launch.actions.DeclareLaunchArgument( + "steering_threshold_1", + default_value="0.04", + description="Radians value", + ), + launch.actions.DeclareLaunchArgument( + "steering_threshold_2", + default_value="0.10", + description="Radians value", + ), + launch.actions.DeclareLaunchArgument( + "steering_threshold_3", + default_value="0.20", + description="Radians value", + ), + launch.actions.DeclareLaunchArgument( + "steering_threshold_4", + default_value="0.30", + description="Radians value", + ), + launch.actions.DeclareLaunchArgument( + "steering_threshold_5", + default_value="0.40", + description="Radians value", + ), + # Add launch arguments for topic names + launch.actions.DeclareLaunchArgument( + name="pitch_topic", + default_value="/sensing/gnss/chc/pitch", + description="Topic for pitch data", + ), + launch.actions.DeclareLaunchArgument( + name="actuation_status_topic", + default_value="/vehicle/status/actuation_status", + description="Topic for actuation status data (brake and acceleration)", + ), + launch.actions.DeclareLaunchArgument( + name="steering_status_topic", + default_value="/vehicle/status/steering_status", + description="Topic for steering status data", + ), + launch.actions.DeclareLaunchArgument( + name="velocity_status_topic", + default_value="/vehicle/status/velocity_status", + description="Topic for velocity status data", + ), + launch.actions.DeclareLaunchArgument( + name="imu_topic", + default_value="/sensing/gnss/chc/imu", + description="Topic for IMU data", + ), + launch.actions.DeclareLaunchArgument( + "Recovery_Mode", + default_value="False", + description="If False, the node will create new csv tables from scratch. 
If True, it will recover the previous csv tables and will start to collect data from the previous indexes", + ), + OpaqueFunction( + function=launch_data_monitor_steer, + ), + launch_ros.actions.Node( + package="learning_based_vehicle_calibration", + executable="data_collection_steer.py", + name="data_collection_steer", + output="screen", + parameters=[ + {"max_data": launch.substitutions.LaunchConfiguration("max_data")}, + { + "max_velocity": launch.substitutions.LaunchConfiguration( + "max_velocity" + ) + }, + { + "throttle_threshold": launch.substitutions.LaunchConfiguration( + "throttle_threshold" + ) + }, + { + "steering_threshold_1": launch.substitutions.LaunchConfiguration( + "steering_threshold_1" + ) + }, + { + "steering_threshold_2": launch.substitutions.LaunchConfiguration( + "steering_threshold_2" + ) + }, + { + "steering_threshold_3": launch.substitutions.LaunchConfiguration( + "steering_threshold_3" + ) + }, + { + "steering_threshold_4": launch.substitutions.LaunchConfiguration( + "steering_threshold_4" + ) + }, + { + "steering_threshold_5": launch.substitutions.LaunchConfiguration( + "steering_threshold_5" + ) + }, + { + "pitch_topic": launch.substitutions.LaunchConfiguration( + "pitch_topic" + ) + }, + { + "actuation_status_topic": launch.substitutions.LaunchConfiguration( + "actuation_status_topic" + ) + }, + { + "steering_status_topic": launch.substitutions.LaunchConfiguration( + "steering_status_topic" + ) + }, + { + "velocity_status_topic": launch.substitutions.LaunchConfiguration( + "velocity_status_topic" + ) + }, + { + "imu_topic": launch.substitutions.LaunchConfiguration( + "imu_topic" + ) + }, + { + "Recovery_Mode": launch.substitutions.LaunchConfiguration( + "Recovery_Mode" + ) + }, + ], + ), + ] + ) diff --git a/vehicle/learning_based_accel_brake_map_calibrator/launch/neural_network_launch.py b/vehicle/learning_based_accel_brake_map_calibrator/launch/neural_network_launch.py new file mode 100644 index 0000000000000..17fe57ac34c22 --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/launch/neural_network_launch.py @@ -0,0 +1,81 @@ +import launch +import launch_ros.actions + + +def generate_launch_description(): + return launch.LaunchDescription( + [ + launch.actions.DeclareLaunchArgument( + "filter_vel_throttle", + default_value="10.0", + description="Outliers threshold removal for velocity data in neural_network_throttle model", + ), + launch.actions.DeclareLaunchArgument( + "filter_cmd_throttle", + default_value="10.0", + description="Outliers threshold removal for throttle commands data in neural_network_throttle model", + ), + launch.actions.DeclareLaunchArgument( + "filter_acc_throttle", + default_value="10.0", + description="Outliers threshold removal for acceleration data in neural_network_throttle model", + ), + launch.actions.DeclareLaunchArgument( + "filter_vel_brake", + default_value="1.5", + description="Outliers threshold removal for velocity data in neural_network_brake model", + ), + launch.actions.DeclareLaunchArgument( + "filter_cmd_brake", + default_value="10.0", + description="Outliers threshold removal for brake commands data in neural_network_brake model", + ), + launch.actions.DeclareLaunchArgument( + "filter_acc_brake", + default_value="10.0", + description="Outliers threshold removal for acceleration data in neural_network_brake model", + ), + launch_ros.actions.Node( + package="learning_based_vehicle_calibration", + executable="neural_network_throttle.py", + name="neural_network_throttle", + output="screen", + parameters=[ 
+ { + "filter_vel_throttle": launch.substitutions.LaunchConfiguration( + "filter_vel_throttle" + ), + "filter_cmd_throttle": launch.substitutions.LaunchConfiguration( + "filter_cmd_throttle" + ), + "filter_acc_throttle": launch.substitutions.LaunchConfiguration( + "filter_acc_throttle" + ), + } + ], + ), + launch_ros.actions.Node( + package="learning_based_vehicle_calibration", + executable="neural_network_brake.py", + name="neural_network_brake", + output="screen", + parameters=[ + { + "filter_vel_brake": launch.substitutions.LaunchConfiguration( + "filter_vel_brake" + ) + }, + { + "filter_cmd_brake": launch.substitutions.LaunchConfiguration( + "filter_cmd_brake" + ) + }, + { + "filter_acc_brake": launch.substitutions.LaunchConfiguration( + "filter_acc_brake" + ) + }, + ], + ), + ] + ) diff --git a/vehicle/learning_based_accel_brake_map_calibrator/launch/neural_network_steering_launch.py b/vehicle/learning_based_accel_brake_map_calibrator/launch/neural_network_steering_launch.py new file mode 100644 index 0000000000000..b71096004a1d8 --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/launch/neural_network_steering_launch.py @@ -0,0 +1,139 @@ +import launch +import launch_ros.actions + + +def generate_launch_description(): + return launch.LaunchDescription( + [ + launch.actions.DeclareLaunchArgument( + "filter_vel", + default_value="10.0", + description="Outliers threshold removal for velocity data in neural_network_throttle model", + ), + launch.actions.DeclareLaunchArgument( + "filter_cmd", + default_value="10.0", + description="Outliers threshold removal for throttle commands data in neural_network_throttle model", + ), + launch.actions.DeclareLaunchArgument( + "filter_acc", + default_value="10.0", + description="Outliers threshold removal for acceleration data in neural_network_throttle model", + ), + launch.actions.DeclareLaunchArgument( + "mean_filter_size", + default_value="21", + description="Window size of mean filter applied to data", + ), + launch_ros.actions.Node( + package="learning_based_vehicle_calibration", + executable="neural_network_steer1.py", + name="neural_network_steer1", + output="screen", + parameters=[ + { + "filter_vel": launch.substitutions.LaunchConfiguration( + "filter_vel" + ), + "filter_cmd": launch.substitutions.LaunchConfiguration( + "filter_cmd" + ), + "filter_acc": launch.substitutions.LaunchConfiguration( + "filter_acc" + ), + "mean_filter_size": launch.substitutions.LaunchConfiguration( + "mean_filter_size" + ), + } + ], + ), + launch_ros.actions.Node( + package="learning_based_vehicle_calibration", + executable="neural_network_steer2.py", + name="neural_network_steer2", + output="screen", + parameters=[ + { + "filter_vel": launch.substitutions.LaunchConfiguration( + "filter_vel" + ), + "filter_cmd": launch.substitutions.LaunchConfiguration( + "filter_cmd" + ), + "filter_acc": launch.substitutions.LaunchConfiguration( + "filter_acc" + ), + "mean_filter_size": launch.substitutions.LaunchConfiguration( + "mean_filter_size" + ), + } + ], + ), + launch_ros.actions.Node( + package="learning_based_vehicle_calibration", + executable="neural_network_steer3.py", + name="neural_network_steer3", + output="screen", + parameters=[ + { + "filter_vel": launch.substitutions.LaunchConfiguration( + "filter_vel" + ), + "filter_cmd": launch.substitutions.LaunchConfiguration( + "filter_cmd" + ), + "filter_acc": launch.substitutions.LaunchConfiguration( + "filter_acc" + ), + "mean_filter_size": launch.substitutions.LaunchConfiguration( + 
"mean_filter_size" + ), + } + ], + ), + launch_ros.actions.Node( + package="learning_based_vehicle_calibration", + executable="neural_network_steer4.py", + name="neural_network_steer4", + output="screen", + parameters=[ + { + "filter_vel": launch.substitutions.LaunchConfiguration( + "filter_vel" + ), + "filter_cmd": launch.substitutions.LaunchConfiguration( + "filter_cmd" + ), + "filter_acc": launch.substitutions.LaunchConfiguration( + "filter_acc" + ), + "mean_filter_size": launch.substitutions.LaunchConfiguration( + "mean_filter_size" + ), + } + ], + ), + launch_ros.actions.Node( + package="learning_based_vehicle_calibration", + executable="neural_network_steer5.py", + name="neural_network_steer5", + output="screen", + parameters=[ + { + "filter_vel": launch.substitutions.LaunchConfiguration( + "filter_vel" + ), + "filter_cmd": launch.substitutions.LaunchConfiguration( + "filter_cmd" + ), + "filter_acc": launch.substitutions.LaunchConfiguration( + "filter_acc" + ), + "mean_filter_size": launch.substitutions.LaunchConfiguration( + "mean_filter_size" + ), + } + ], + ), + ] + ) diff --git a/vehicle/learning_based_accel_brake_map_calibrator/msg/LongitudinalProcesses.msg b/vehicle/learning_based_accel_brake_map_calibrator/msg/LongitudinalProcesses.msg new file mode 100644 index 0000000000000..b81f561f23aef --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/msg/LongitudinalProcesses.msg @@ -0,0 +1,2 @@ +std_msgs/Header header +learning_based_vehicle_calibration/LongitudinalProgress[] processes diff --git a/vehicle/learning_based_accel_brake_map_calibrator/msg/LongitudinalProgress.msg b/vehicle/learning_based_accel_brake_map_calibrator/msg/LongitudinalProgress.msg new file mode 100644 index 0000000000000..7a2074339d34c --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/msg/LongitudinalProgress.msg @@ -0,0 +1,6 @@ +int64 pedal_value_start +int64 pedal_value_end +float64 velocity_start +float64 velocity_end +int64 data_count +int64 progress diff --git a/vehicle/learning_based_accel_brake_map_calibrator/msg/SteeringProcesses.msg b/vehicle/learning_based_accel_brake_map_calibrator/msg/SteeringProcesses.msg new file mode 100644 index 0000000000000..f9d62a1e5e35f --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/msg/SteeringProcesses.msg @@ -0,0 +1,2 @@ +std_msgs/Header header +learning_based_vehicle_calibration/SteeringProgress[] processes diff --git a/vehicle/learning_based_accel_brake_map_calibrator/msg/SteeringProgress.msg b/vehicle/learning_based_accel_brake_map_calibrator/msg/SteeringProgress.msg new file mode 100644 index 0000000000000..3b7362efc2ffc --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/msg/SteeringProgress.msg @@ -0,0 +1,7 @@ +int64 pedal_value_start +int64 pedal_value_end +float64 steering_value_start +float64 steering_value_end +float64 velocity_max +int64 data_count +int64 progress diff --git a/vehicle/learning_based_accel_brake_map_calibrator/package.xml b/vehicle/learning_based_accel_brake_map_calibrator/package.xml new file mode 100644 index 0000000000000..3517723d6e53b --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/package.xml @@ -0,0 +1,27 @@ + + + + learning_based_vehicle_calibration + 2.0.0 + E2E vehicle dynamics calibration using neural network + Cristian Gariboldi + + Apache License 2.0 + + ament_cmake_auto + rosidl_default_generators + + tier4_vehicle_msgs + sensor_msgs + rclpy + can_msgs + std_msgs + ros2launch + rosidl_default_runtime + + rosidl_interface_packages + 
+ + ament_cmake + + diff --git a/vehicle/learning_based_accel_brake_map_calibrator/requirements.txt b/vehicle/learning_based_accel_brake_map_calibrator/requirements.txt new file mode 100644 index 0000000000000..e1b2f377bc137 --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/requirements.txt @@ -0,0 +1,8 @@ +# ros2==humble +# Python==3.10.12 +torch==2.0.1 +scikit-learn==1.3.2 +numpy==1.21.5 +matplotlib==3.5.1 +pandas==1.3.5 +tqdm==4.66.1 diff --git a/vehicle/learning_based_accel_brake_map_calibrator/scripts/brake_data_visualization.py b/vehicle/learning_based_accel_brake_map_calibrator/scripts/brake_data_visualization.py new file mode 100644 index 0000000000000..82943b1da4aeb --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/scripts/brake_data_visualization.py @@ -0,0 +1,51 @@ +import numpy as np +import matplotlib.pyplot as plt +import pandas as pd +from sklearn.linear_model import LinearRegression + +# Read the data +columns = ["Velocity", "Braking", "Acceleration_measured"] +df = pd.read_csv("braking.csv", usecols=columns) + +xdata = df.Velocity +ydata = df.Braking +zdata = df.Acceleration_measured + +# Fit a linear regression model +X = np.column_stack((xdata, ydata)) +model = LinearRegression().fit(X, zdata) + + +x_grid, y_grid = np.meshgrid( + np.linspace(xdata.min(), xdata.max(), 100), + np.linspace(ydata.min(), ydata.max(), 100), +) + + +z_grid = model.predict(np.column_stack((x_grid.ravel(), y_grid.ravel()))) +z_grid = z_grid.reshape(x_grid.shape) + + +fig = plt.figure() +ax = plt.axes(projection="3d") + + +cmap = plt.get_cmap("Greens") +normalize = plt.Normalize(zdata.min(), zdata.max()) +sc = plt.cm.ScalarMappable(cmap=cmap, norm=normalize) +sc.set_array([]) + +scatter = ax.scatter3D(xdata, ydata, zdata, c=zdata, cmap=cmap, marker="o") + + +ax.plot_surface(x_grid, y_grid, z_grid, alpha=0.5, cmap=cmap) + + +ax.set_xlabel("Velocity") +ax.set_zlabel("Acceleration") +ax.set_ylabel("Braking Output") + + +cbar = plt.colorbar(sc, ax=ax, label="Acceleration_measured") + +plt.show() diff --git a/vehicle/learning_based_accel_brake_map_calibrator/scripts/data_collection.py b/vehicle/learning_based_accel_brake_map_calibrator/scripts/data_collection.py new file mode 100644 index 0000000000000..f986b9a1dd5eb --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/scripts/data_collection.py @@ -0,0 +1,894 @@ +#! 
/usr/bin/env python3 +import math + +from collections import deque +from statistics import mean + +import rclpy +import rclpy.node +import pandas as pd + +from tier4_vehicle_msgs.msg import ActuationStatusStamped +from autoware_auto_vehicle_msgs.msg import VelocityReport +from autoware_auto_vehicle_msgs.msg import SteeringReport +from sensor_msgs.msg import Imu +from std_msgs.msg import Float32 + + +from tqdm import tqdm + +from learning_based_accel_brake_map_calibrator.msg import LongitudinalProgress +from learning_based_accel_brake_map_calibrator.msg import LongitudinalProcesses + + +class primotest(rclpy.node.Node): + + def __init__(self): + + super().__init__("primo_test") + + # data sharing member variables + self.braking = 0.0 + self.braking_prec = 0.0 + self.throttling = 0.0 + self.throttling_prec = 0.0 + self.steering = 0.0 + self.acceleration = 0.0 + self.velocity = 0.0 + self.pitch_angle = 0.0 + self.flag = 0 + self.g = 9.80665 + + # launch params initialization to default_values + self.declare_parameter("max_data", 1500) + self.declare_parameter("num_of_queue", 20) + self.declare_parameter("speed_threshold", 2.8) + self.declare_parameter("steering_threshold", 0.03490658503988659) + self.declare_parameter("throttle_deadzone", 5) + self.declare_parameter("brake_deadzone", 5) + self.declare_parameter("max_velocity", 11.1) + self.declare_parameter("throttle_threshold1", 30) + self.declare_parameter("throttle_threshold2", 55) + self.declare_parameter("brake_threshold1", 15) + self.declare_parameter("brake_threshold2", 25) + self.declare_parameter("consistency_threshold", 20) + self.declare_parameter("pitch_topic", "/sensing/gnss/chc/pitch") + self.declare_parameter( + "actuation_status_topic", "/vehicle/status/actuation_status" + ) + self.declare_parameter( + "steering_status_topic", "/vehicle/status/steering_status" + ) + self.declare_parameter( + "velocity_status_topic", "/vehicle/status/velocity_status" + ) + self.declare_parameter("imu_topic", "/sensing/gnss/chc/imu") + + self.declare_parameter("Recovery_Mode", False) + + # Load params from launch file + + self.MAX_DATA = ( + self.get_parameter("max_data").get_parameter_value().integer_value + ) + self.NUM_OF_QUEUE = ( + self.get_parameter("num_of_queue").get_parameter_value().integer_value + ) + self.SPEED_THRESHOLD = ( + self.get_parameter("speed_threshold").get_parameter_value().double_value + ) + self.STEERING_THRESHOLD = ( + self.get_parameter("steering_threshold").get_parameter_value().double_value + ) + self.THROTTLE_DEADZONE = ( + self.get_parameter("throttle_deadzone").get_parameter_value().integer_value + ) + self.BRAKE_DEADZONE = ( + self.get_parameter("brake_deadzone").get_parameter_value().integer_value + ) + self.MAX_VELOCITY = ( + self.get_parameter("max_velocity").get_parameter_value().double_value + ) + self.THROTTLE_THRESHOLD1 = ( + self.get_parameter("throttle_threshold1") + .get_parameter_value() + .integer_value + ) + self.THROTTLE_THRESHOLD2 = ( + self.get_parameter("throttle_threshold2") + .get_parameter_value() + .integer_value + ) + self.BRAKE_THRESHOLD1 = ( + self.get_parameter("brake_threshold1").get_parameter_value().integer_value + ) + self.BRAKE_THRESHOLD2 = ( + self.get_parameter("brake_threshold2").get_parameter_value().integer_value + ) + self.CONSISTENCY_THRESHOLD = ( + self.get_parameter("consistency_threshold") + .get_parameter_value() + .integer_value + ) + # Get topic names from parameters + self.pitch_topic = ( + self.get_parameter("pitch_topic").get_parameter_value().string_value + ) + 
self.actuation_status_topic = ( + self.get_parameter("actuation_status_topic") + .get_parameter_value() + .string_value + ) + self.steering_status_topic = ( + self.get_parameter("steering_status_topic") + .get_parameter_value() + .string_value + ) + self.velocity_status_topic = ( + self.get_parameter("velocity_status_topic") + .get_parameter_value() + .string_value + ) + self.imu_topic = ( + self.get_parameter("imu_topic").get_parameter_value().string_value + ) + + self.RECOVERY_MODE = ( + self.get_parameter("Recovery_Mode").get_parameter_value().bool_value + ) + + if self.RECOVERY_MODE: + df_existing1 = pd.read_csv("throttling.csv") + df_existing2 = pd.read_csv("braking.csv") + + self.k = int(df_existing1["Low_V_0_deadzone"].iloc[0]) + self.i = int(df_existing1["Low_V_deadzone_thr1"].iloc[0]) + self.j = int(df_existing1["Low_V_thr1_thr2"].iloc[0]) + self.h = int(df_existing1["Low_V_thr2_max"].iloc[0]) + self.d = int(df_existing1["High_V_0_deadzone"].iloc[0]) + self.a = int(df_existing1["High_V_deadzone_thr1"].iloc[0]) + self.b = int(df_existing1["High_V_thr1_thr2"].iloc[0]) + self.c = int(df_existing1["High_V_thr2_max"].iloc[0]) + self.vel = df_existing1["Velocity"].tolist() + self.cmd = df_existing1["Throttling"].tolist() + self.acc = df_existing1["Acceleration_with_pitch_comp"].tolist() + self.acc2 = df_existing1["Acceleration_measured"].tolist() + self.pitch = df_existing1["Pitch_angle"].tolist() + + self.kk = int(df_existing2["Low_V_0_deadzone"].iloc[0]) + self.ii = int(df_existing2["Low_V_deadzone_thr1"].iloc[0]) + self.jj = int(df_existing2["Low_V_thr1_thr2"].iloc[0]) + self.hh = int(df_existing2["Low_V_thr2_max"].iloc[0]) + self.dd = int(df_existing2["High_V_0_deadzone"].iloc[0]) + self.aa = int(df_existing2["High_V_deadzone_thr1"].iloc[0]) + self.bb = int(df_existing2["High_V_thr1_thr2"].iloc[0]) + self.cc = int(df_existing2["High_V_thr2_max"].iloc[0]) + self.velb = df_existing2["Velocity"].tolist() + self.cmdb = df_existing2["Braking"].tolist() + self.accb = df_existing2["Acceleration_with_pitch_comp"].tolist() + self.accb2 = df_existing2["Acceleration_measured"].tolist() + self.pitch2 = df_existing2["Pitch_angle"].tolist() + + else: + self.i = self.j = self.h = self.k = self.a = self.b = self.c = self.d = ( + self.d + ) = self.ii = self.jj = self.hh = self.kk = self.aa = self.bb = self.cc = ( + self.dd + ) = 0 + self.vel = [] + self.cmd = [] + self.acc = [] + self.acc2 = [] + self.velb = [] + self.cmdb = [] + self.accb = [] + self.accb2 = [] + self.pitch = [] + self.pitch2 = [] + + # custom messages definitions and initialization + self.long_progress_throttle_msg = [LongitudinalProgress() for _ in range(8)] + self.long_progress_brake_msg = [LongitudinalProgress() for _ in range(8)] + self.long_processes_throttle_msg = LongitudinalProcesses() + self.long_processes_brake_msg = LongitudinalProcesses() + self.long_processes_throttle_msg.processes = [ + LongitudinalProgress() for _ in range(8) + ] + self.long_processes_brake_msg.processes = [ + LongitudinalProgress() for _ in range(8) + ] + + self.long_processes_throttle_msg.header.frame_id = "Throttle scenario" + self.long_processes_brake_msg.header.frame_id = "Brake scenario" + + self.long_progress_throttle_msg[0].pedal_value_start = 0 + self.long_progress_throttle_msg[0].pedal_value_end = self.THROTTLE_DEADZONE + self.long_progress_throttle_msg[0].velocity_start = 0.0 + self.long_progress_throttle_msg[0].velocity_end = self.SPEED_THRESHOLD + self.long_processes_throttle_msg.processes[0] = self.long_progress_throttle_msg[ + 0 + ] + 
self.long_progress_brake_msg[0].pedal_value_start = 0 + self.long_progress_brake_msg[0].pedal_value_end = self.BRAKE_DEADZONE + self.long_progress_brake_msg[0].velocity_start = 0.0 + self.long_progress_brake_msg[0].velocity_end = self.SPEED_THRESHOLD + self.long_processes_brake_msg.processes[0] = self.long_progress_brake_msg[0] + self.long_progress_throttle_msg[0].data_count = self.k + self.long_progress_throttle_msg[0].progress = int(self.k * 100 / self.MAX_DATA) + self.long_progress_brake_msg[0].data_count = self.kk + self.long_progress_brake_msg[0].progress = int(self.kk * 100 / self.MAX_DATA) + + self.long_progress_throttle_msg[1].pedal_value_start = self.THROTTLE_DEADZONE + self.long_progress_throttle_msg[1].pedal_value_end = self.THROTTLE_THRESHOLD1 + self.long_progress_throttle_msg[1].velocity_start = 0.0 + self.long_progress_throttle_msg[1].velocity_end = self.SPEED_THRESHOLD + self.long_processes_throttle_msg.processes[1] = self.long_progress_throttle_msg[ + 1 + ] + self.long_progress_brake_msg[1].pedal_value_start = self.BRAKE_DEADZONE + self.long_progress_brake_msg[1].pedal_value_end = self.BRAKE_THRESHOLD1 + self.long_progress_brake_msg[1].velocity_start = 0.0 + self.long_progress_brake_msg[1].velocity_end = self.SPEED_THRESHOLD + self.long_processes_brake_msg.processes[1] = self.long_progress_brake_msg[1] + self.long_progress_throttle_msg[1].data_count = self.i + self.long_progress_throttle_msg[1].progress = int(self.i * 100 / self.MAX_DATA) + self.long_progress_brake_msg[1].data_count = self.ii + self.long_progress_brake_msg[1].progress = int(self.ii * 100 / self.MAX_DATA) + + self.long_progress_throttle_msg[2].pedal_value_start = self.THROTTLE_THRESHOLD1 + self.long_progress_throttle_msg[2].pedal_value_end = self.THROTTLE_THRESHOLD2 + self.long_progress_throttle_msg[2].velocity_start = 0.0 + self.long_progress_throttle_msg[2].velocity_end = self.SPEED_THRESHOLD + self.long_processes_throttle_msg.processes[2] = self.long_progress_throttle_msg[ + 2 + ] + self.long_progress_brake_msg[2].pedal_value_start = self.BRAKE_THRESHOLD1 + self.long_progress_brake_msg[2].pedal_value_end = self.BRAKE_THRESHOLD2 + self.long_progress_brake_msg[2].velocity_start = 0.0 + self.long_progress_brake_msg[2].velocity_end = self.SPEED_THRESHOLD + self.long_processes_brake_msg.processes[2] = self.long_progress_brake_msg[2] + self.long_progress_throttle_msg[2].data_count = self.j + self.long_progress_throttle_msg[2].progress = int(self.j * 100 / self.MAX_DATA) + self.long_progress_brake_msg[2].data_count = self.jj + self.long_progress_brake_msg[2].progress = int(self.jj * 100 / self.MAX_DATA) + + self.long_progress_throttle_msg[3].pedal_value_start = self.THROTTLE_THRESHOLD2 + self.long_progress_throttle_msg[3].pedal_value_end = 100 + self.long_progress_throttle_msg[3].velocity_start = 0.0 + self.long_progress_throttle_msg[3].velocity_end = self.SPEED_THRESHOLD + self.long_processes_throttle_msg.processes[3] = self.long_progress_throttle_msg[ + 3 + ] + self.long_progress_brake_msg[3].pedal_value_start = self.BRAKE_THRESHOLD2 + self.long_progress_brake_msg[3].pedal_value_end = 100 + self.long_progress_brake_msg[3].velocity_start = 0.0 + self.long_progress_brake_msg[3].velocity_end = self.SPEED_THRESHOLD + self.long_processes_brake_msg.processes[3] = self.long_progress_brake_msg[3] + self.long_progress_throttle_msg[3].data_count = self.h + self.long_progress_throttle_msg[3].progress = int(self.h * 100 / self.MAX_DATA) + self.long_progress_brake_msg[3].data_count = self.hh + 
self.long_progress_brake_msg[3].progress = int(self.hh * 100 / self.MAX_DATA) + + self.long_progress_throttle_msg[4].pedal_value_start = 0 + self.long_progress_throttle_msg[4].pedal_value_end = self.THROTTLE_DEADZONE + self.long_progress_throttle_msg[4].velocity_start = self.SPEED_THRESHOLD + self.long_progress_throttle_msg[4].velocity_end = self.MAX_VELOCITY + self.long_processes_throttle_msg.processes[4] = self.long_progress_throttle_msg[ + 4 + ] + self.long_progress_brake_msg[4].pedal_value_start = 0 + self.long_progress_brake_msg[4].pedal_value_end = self.BRAKE_DEADZONE + self.long_progress_brake_msg[4].velocity_start = self.SPEED_THRESHOLD + self.long_progress_brake_msg[4].velocity_end = self.MAX_VELOCITY + self.long_processes_brake_msg.processes[4] = self.long_progress_brake_msg[4] + self.long_progress_throttle_msg[4].data_count = self.d + self.long_progress_throttle_msg[4].progress = int(self.d * 100 / self.MAX_DATA) + self.long_progress_brake_msg[4].data_count = self.dd + self.long_progress_brake_msg[4].progress = int(self.dd * 100 / self.MAX_DATA) + + self.long_progress_throttle_msg[5].pedal_value_start = self.THROTTLE_DEADZONE + self.long_progress_throttle_msg[5].pedal_value_end = self.THROTTLE_THRESHOLD1 + self.long_progress_throttle_msg[5].velocity_start = self.SPEED_THRESHOLD + self.long_progress_throttle_msg[5].velocity_end = self.MAX_VELOCITY + self.long_processes_throttle_msg.processes[5] = self.long_progress_throttle_msg[ + 5 + ] + self.long_progress_brake_msg[5].pedal_value_start = self.BRAKE_DEADZONE + self.long_progress_brake_msg[5].pedal_value_end = self.BRAKE_THRESHOLD1 + self.long_progress_brake_msg[5].velocity_start = self.SPEED_THRESHOLD + self.long_progress_brake_msg[5].velocity_end = self.MAX_VELOCITY + self.long_processes_brake_msg.processes[5] = self.long_progress_brake_msg[5] + self.long_progress_throttle_msg[5].data_count = self.a + self.long_progress_throttle_msg[5].progress = int(self.a * 100 / self.MAX_DATA) + self.long_progress_brake_msg[5].data_count = self.aa + self.long_progress_brake_msg[5].progress = int(self.aa * 100 / self.MAX_DATA) + + self.long_progress_throttle_msg[6].pedal_value_start = self.THROTTLE_THRESHOLD1 + self.long_progress_throttle_msg[6].pedal_value_end = self.THROTTLE_THRESHOLD2 + self.long_progress_throttle_msg[6].velocity_start = self.SPEED_THRESHOLD + self.long_progress_throttle_msg[6].velocity_end = self.MAX_VELOCITY + self.long_processes_throttle_msg.processes[6] = self.long_progress_throttle_msg[ + 6 + ] + self.long_progress_brake_msg[6].pedal_value_start = self.BRAKE_THRESHOLD1 + self.long_progress_brake_msg[6].pedal_value_end = self.BRAKE_THRESHOLD2 + self.long_progress_brake_msg[6].velocity_start = self.SPEED_THRESHOLD + self.long_progress_brake_msg[6].velocity_end = self.MAX_VELOCITY + self.long_processes_brake_msg.processes[6] = self.long_progress_brake_msg[6] + self.long_progress_throttle_msg[6].data_count = self.b + self.long_progress_throttle_msg[6].progress = int(self.b * 100 / self.MAX_DATA) + self.long_progress_brake_msg[6].data_count = self.bb + self.long_progress_brake_msg[6].progress = int(self.bb * 100 / self.MAX_DATA) + + self.long_progress_throttle_msg[7].pedal_value_start = self.THROTTLE_THRESHOLD2 + self.long_progress_throttle_msg[7].pedal_value_end = 100 + self.long_progress_throttle_msg[7].velocity_start = self.SPEED_THRESHOLD + self.long_progress_throttle_msg[7].velocity_end = self.MAX_VELOCITY + self.long_processes_throttle_msg.processes[7] = self.long_progress_throttle_msg[ + 7 + ] + 
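+ # index 7 covers pedal commands above threshold 2 in the high-speed band; the brake progress message below uses the same velocity bounds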
self.long_progress_brake_msg[7].pedal_value_start = self.BRAKE_THRESHOLD2 + self.long_progress_brake_msg[7].pedal_value_end = 100 + self.long_progress_brake_msg[7].velocity_start = self.SPEED_THRESHOLD + self.long_progress_brake_msg[7].velocity_end = self.MAX_VELOCITY + self.long_processes_brake_msg.processes[7] = self.long_progress_brake_msg[7] + self.long_progress_throttle_msg[7].data_count = self.c + self.long_progress_throttle_msg[7].progress = int(self.c * 100 / self.MAX_DATA) + self.long_progress_brake_msg[7].data_count = self.cc + self.long_progress_brake_msg[7].progress = int(self.cc * 100 / self.MAX_DATA) +
+ self.progress_bar0 = tqdm( + initial=self.k, + total=self.MAX_DATA, + desc=" Low speed: 0 - Throttle deadzone ", + dynamic_ncols=True, + )
+ self.progress_bar1 = tqdm( + initial=self.i, + total=self.MAX_DATA, + desc=" Low speed: Throttle deadzone - " + + str(self.THROTTLE_THRESHOLD1) + + " ", + dynamic_ncols=True, + )
+ self.progress_bar2 = tqdm( + initial=self.j, + total=self.MAX_DATA, + desc=" Low speed: Throttle " + + str(self.THROTTLE_THRESHOLD1) + + " - " + + str(self.THROTTLE_THRESHOLD2) + + " ", + dynamic_ncols=True, + )
+ self.progress_bar3 = tqdm( + initial=self.h, + total=self.MAX_DATA, + desc=" Low speed: Throttle > " + + str(self.THROTTLE_THRESHOLD2) + + " ", + dynamic_ncols=True, + )
+ self.progress_bar4 = tqdm( + initial=self.d, + total=self.MAX_DATA, + desc=" High speed: 0 - Throttle deadzone ", + dynamic_ncols=True, + )
+ self.progress_bar5 = tqdm( + initial=self.a, + total=self.MAX_DATA, + desc=" High speed: Throttle deadzone - " + + str(self.THROTTLE_THRESHOLD1) + + " ", + dynamic_ncols=True, + )
+ self.progress_bar6 = tqdm( + initial=self.b, + total=self.MAX_DATA, + desc=" High speed: Throttle " + + str(self.THROTTLE_THRESHOLD1) + + " - " + + str(self.THROTTLE_THRESHOLD2) + + " ", + dynamic_ncols=True, + )
+ self.progress_bar7 = tqdm( + initial=self.c, + total=self.MAX_DATA, + desc=" High speed: Throttle > " + + str(self.THROTTLE_THRESHOLD2) + + " ", + dynamic_ncols=True, + )
+ self.progress_bar8 = tqdm( + initial=self.kk, + total=self.MAX_DATA, + desc=" Low speed: 0 - Brake deadzone ", + dynamic_ncols=True, + )
+ self.progress_bar9 = tqdm( + initial=self.ii, + total=self.MAX_DATA, + desc=" Low speed: Brake deadzone - " + + str(self.BRAKE_THRESHOLD1) + + " ", + dynamic_ncols=True, + )
+ self.progress_bar10 = tqdm( + initial=self.jj, + total=self.MAX_DATA, + desc=" Low speed: Brake " + + str(self.BRAKE_THRESHOLD1) + + " - " + + str(self.BRAKE_THRESHOLD2) + + " ", + dynamic_ncols=True, + )
+ self.progress_bar11 = tqdm( + initial=self.hh, + total=self.MAX_DATA, + desc=" Low speed: Brake > " + + str(self.BRAKE_THRESHOLD2) + + " ", + dynamic_ncols=True, + )
+ self.progress_bar12 = tqdm( + initial=self.dd, + total=self.MAX_DATA, + desc=" High speed: 0 - Brake deadzone ", + dynamic_ncols=True, + )
+ self.progress_bar13 = tqdm( + initial=self.aa, + total=self.MAX_DATA, + desc=" High speed: Brake deadzone - " + + str(self.BRAKE_THRESHOLD1) + + " ", + dynamic_ncols=True, + )
+ self.progress_bar14 = tqdm( + initial=self.bb, + total=self.MAX_DATA, + desc=" High speed: Brake " + + str(self.BRAKE_THRESHOLD1) + + " - " + + str(self.BRAKE_THRESHOLD2) + + " ", + dynamic_ncols=True, + )
+ self.progress_bar15 = tqdm( + initial=self.cc, + total=self.MAX_DATA, + desc=" High speed: Brake > " + + str(self.BRAKE_THRESHOLD2) + + " ", + dynamic_ncols=True, + )
+
+ self.create_subscription( + Float32, self.pitch_topic, self.pitch_topic_callback, 1 + ) + self.create_subscription( +
ActuationStatusStamped, + self.actuation_status_topic, + self.actuation_topic_callback, + 1, + )
+ self.create_subscription( + SteeringReport, self.steering_status_topic, self.steer_topic_callback, 1 + )
+ self.create_subscription( + VelocityReport, self.velocity_status_topic, self.velocity_topic_callback, 1 + )
+ self.create_subscription(Imu, self.imu_topic, self.imu_topic_callback, 1)
+ self.progress_throttle = self.create_publisher( + LongitudinalProcesses, "/scenarios_collection_longitudinal_throttling", 10 + )
+ self.progress_brake = self.create_publisher( + LongitudinalProcesses, "/scenarios_collection_longitudinal_braking", 10 + )
+
+ self.timer = self.create_timer(0.02, self.test_callback) + self.timer1 = self.create_timer(0.5, self.throttle_message_callback) + self.timer2 = self.create_timer(0.5, self.brake_message_callback)
+
+ self.queue_velocity = deque() + self.queue_acceleration = deque() + self.queue_acceleration_mov_avg = deque() + self.queue_pitch_angle = deque() + self.queue_pitch_angle_mov_avg = deque() + self.queue_throttle = deque() + self.queue_braking = deque()
+
+ def pitch_topic_callback(self, msg): + + self.pitch_angle = float(msg.data) + # apply a mean filter over the last NUM_OF_QUEUE samples (sliding window) + if len(self.queue_pitch_angle) < self.NUM_OF_QUEUE: + self.queue_pitch_angle.append(self.pitch_angle) + else: + self.queue_pitch_angle.popleft() + self.queue_pitch_angle.append(self.pitch_angle)
+
+ def velocity_topic_callback(self, msg): + self.velocity = float(msg.longitudinal_velocity) + if len(self.queue_velocity) < self.NUM_OF_QUEUE: + self.queue_velocity.append(self.velocity) + else: + self.queue_velocity.popleft() + self.queue_velocity.append(self.velocity)
+
+ def actuation_topic_callback(self, msg): + + self.braking = float(msg.status.brake_status) * 100.0 + if len(self.queue_braking) < self.NUM_OF_QUEUE: + self.queue_braking.append(self.braking) + else: + self.queue_braking.popleft() + self.queue_braking.append(self.braking) + self.throttling = float(msg.status.accel_status) * 100.0 + if len(self.queue_throttle) < self.NUM_OF_QUEUE: + self.queue_throttle.append(self.throttling) + else: + self.queue_throttle.popleft() + self.queue_throttle.append(self.throttling)
+
+ def steer_topic_callback(self, msg): + + self.steering = float(msg.steering_tire_angle)
+
+ def imu_topic_callback(self, msg): + + self.acceleration = float(msg.linear_acceleration.x) + if len(self.queue_acceleration) < self.NUM_OF_QUEUE: + self.queue_acceleration.append(self.acceleration) + else: + self.queue_acceleration.popleft() + self.queue_acceleration.append(self.acceleration)
+
+ def collection_throttling(self): + + self.vel.append(abs(mean(self.queue_velocity))) + self.cmd.append(mean(self.queue_throttle)) + if mean(self.queue_velocity) < 0: + self.acc.append( + -1 * mean(self.queue_acceleration) + - self.g * math.sin(math.radians(-1 * mean(self.queue_pitch_angle))) + ) + self.acc2.append(-1 * mean(self.queue_acceleration)) + self.pitch.append(-1 * mean(self.queue_pitch_angle)) + else: + self.acc.append( + mean(self.queue_acceleration) + - self.g * math.sin(math.radians(mean(self.queue_pitch_angle))) + ) + self.acc2.append(mean(self.queue_acceleration)) + self.pitch.append(mean(self.queue_pitch_angle))
+
+ # save data in csv file + dict1 = { + "Velocity": self.vel, + "Throttling": self.cmd, + "Acceleration_with_pitch_comp": self.acc, + "Acceleration_measured": self.acc2, + "Pitch_angle": self.pitch, + "Low_V_0_deadzone": self.k, + "Low_V_deadzone_thr1": self.i, + "Low_V_thr1_thr2": self.j, + "Low_V_thr2_max": self.h, + "High_V_0_deadzone": self.d, + "High_V_deadzone_thr1": self.a, + "High_V_thr1_thr2": self.b, + "High_V_thr2_max": self.c, + } + df1 = pd.DataFrame(dict1) + df1.to_csv("throttling.csv")
+
+ self.throttling_prec = mean(self.queue_throttle)
+
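+ # collection_braking below mirrors collection_throttling: it averages the queued signals, compensates acceleration for the road pitch and appends the samples to braking.csv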
+ def collection_braking(self): + + self.velb.append(abs(mean(self.queue_velocity))) + self.cmdb.append(mean(self.queue_braking)) + if mean(self.queue_velocity) < 0: + self.accb.append( + -1 * mean(self.queue_acceleration) + - self.g * math.sin(math.radians(-1 * mean(self.queue_pitch_angle))) + ) + self.accb2.append(-1 * mean(self.queue_acceleration)) + self.pitch2.append(-1 * mean(self.queue_pitch_angle)) + else: + self.accb.append( + mean(self.queue_acceleration) + - self.g * math.sin(math.radians(mean(self.queue_pitch_angle))) + ) + self.accb2.append(mean(self.queue_acceleration)) + self.pitch2.append(mean(self.queue_pitch_angle))
+
+ dict2 = { + "Velocity": self.velb, + "Braking": self.cmdb, + "Acceleration_with_pitch_comp": self.accb, + "Acceleration_measured": self.accb2, + "Pitch_angle": self.pitch2, + "Low_V_0_deadzone": self.kk, + "Low_V_deadzone_thr1": self.ii, + "Low_V_thr1_thr2": self.jj, + "Low_V_thr2_max": self.hh, + "High_V_0_deadzone": self.dd, + "High_V_deadzone_thr1": self.aa, + "High_V_thr1_thr2": self.bb, + "High_V_thr2_max": self.cc, + } + df2 = pd.DataFrame(dict2) + df2.to_csv("braking.csv")
+
+ self.braking_prec = mean(self.queue_braking)
+
+ def throttle_message_publish(self, count: int, index: int): + + self.long_progress_throttle_msg[index].data_count = count + self.long_progress_throttle_msg[index].progress = int( + count * 100 / self.MAX_DATA + ) + self.long_processes_throttle_msg.header.stamp = self.get_clock().now().to_msg() + self.long_processes_throttle_msg.processes[index] = ( + self.long_progress_throttle_msg[index] + )
+
+ def brake_message_publish(self, count: int, index: int): + + self.long_progress_brake_msg[index].data_count = count + self.long_progress_brake_msg[index].progress = int(count * 100 / self.MAX_DATA) + self.long_processes_brake_msg.header.stamp = self.get_clock().now().to_msg() + self.long_processes_brake_msg.processes[index] = self.long_progress_brake_msg[ + index + ]
+
+ def test_callback(self): + + if len(self.queue_throttle) >= self.NUM_OF_QUEUE and ( + len(self.queue_braking) >= self.NUM_OF_QUEUE + ):
+
+ # THROTTLING SCENARIO to train throttling model + if ( + self.braking == 0 + and abs(self.steering) < self.STEERING_THRESHOLD + and abs(self.throttling_prec - mean(self.queue_throttle)) + <= self.CONSISTENCY_THRESHOLD + ):
+
+ # low velocity scenario
+
+ if 0 < abs(self.velocity) <= self.SPEED_THRESHOLD:
+
+ if ( + 0 <= self.throttling <= self.THROTTLE_DEADZONE + and self.k < self.MAX_DATA + and self.flag == 0 + ):
+
+ self.collection_throttling() + self.progress_bar0.update(1) + self.k += 1
+
+ self.throttle_message_publish(self.k, 0)
+
+ elif ( + self.THROTTLE_DEADZONE + < self.throttling + <= self.THROTTLE_THRESHOLD1 + and self.i < self.MAX_DATA + ):
+
+ self.collection_throttling() + self.progress_bar1.update(1) + self.flag = 0 + self.i += 1
+
+ self.throttle_message_publish(self.i, 1)
+
+ elif ( + self.THROTTLE_THRESHOLD1 + < self.throttling + <= self.THROTTLE_THRESHOLD2 + and self.j < self.MAX_DATA + ):
+
+ self.collection_throttling() + self.progress_bar2.update(1) + self.flag = 0 + self.j += 1
+
+ self.throttle_message_publish(self.j, 2)
+
+ elif ( + self.throttling > self.THROTTLE_THRESHOLD2 + and self.h < self.MAX_DATA + ):
+
+ self.collection_throttling() + self.progress_bar3.update(1) + self.flag = 0 + self.h += 1
+
+ self.throttle_message_publish(self.h, 3)
+
+ # high velocity scenario
+
+ elif self.SPEED_THRESHOLD < abs(self.velocity) <= self.MAX_VELOCITY:
+
+ if ( + 0 <= self.throttling <= self.THROTTLE_DEADZONE + and
self.d < self.MAX_DATA + and self.flag == 0 + ): + + self.collection_throttling() + self.progress_bar4.update(1) + self.d += 1 + + self.throttle_message_publish(self.d, 4) + + elif ( + self.THROTTLE_DEADZONE + < self.throttling + <= self.THROTTLE_THRESHOLD1 + and self.a < self.MAX_DATA + ): + + self.collection_throttling() + self.progress_bar5.update(1) + self.flag = 0 + self.a += 1 + + self.throttle_message_publish(self.a, 5) + + elif ( + self.THROTTLE_THRESHOLD1 + < self.throttling + <= self.THROTTLE_THRESHOLD2 + and self.b < self.MAX_DATA + ): + + self.collection_throttling() + self.progress_bar6.update(1) + self.flag = 0 + self.b += 1 + + self.throttle_message_publish(self.b, 6) + + elif ( + self.throttling > self.THROTTLE_THRESHOLD2 + and self.c < self.MAX_DATA + ): + + self.collection_throttling() + self.progress_bar7.update(1) + self.flag = 0 + self.c += 1 + + self.throttle_message_publish(self.c, 7) + + # BRAKING SCENARIO to train braking model + + if ( + self.throttling == 0 + and abs(self.steering) < self.STEERING_THRESHOLD + and abs(self.braking_prec - mean(self.queue_braking)) + <= self.CONSISTENCY_THRESHOLD + ): + + # low velocity scenario + + if 0 < abs(self.velocity) <= self.SPEED_THRESHOLD: + + if ( + 0 <= self.braking <= self.BRAKE_DEADZONE + and self.kk < self.MAX_DATA + and self.flag == 1 + ): + + self.collection_braking() + self.progress_bar8.update(1) + self.kk += 1 + + self.brake_message_publish(self.kk, 0) + + elif ( + self.BRAKE_DEADZONE < self.braking <= self.BRAKE_THRESHOLD1 + and self.ii < self.MAX_DATA + ): + + self.collection_braking() + self.progress_bar9.update(1) + self.flag = 1 + self.ii += 1 + + self.brake_message_publish(self.ii, 1) + + elif ( + self.BRAKE_THRESHOLD1 < self.braking <= self.BRAKE_THRESHOLD2 + and self.jj < self.MAX_DATA + ): + + self.collection_braking() + self.progress_bar10.update(1) + self.flag = 1 + self.jj += 1 + + self.brake_message_publish(self.jj, 2) + + elif ( + self.braking > self.BRAKE_THRESHOLD2 and self.hh < self.MAX_DATA + ): + + self.collection_braking() + self.progress_bar11.update(1) + self.flag = 1 + self.hh += 1 + + self.brake_message_publish(self.hh, 3) + + # high velocity scenario + + elif self.SPEED_THRESHOLD < abs(self.velocity) <= self.MAX_VELOCITY: + + if ( + 0 <= self.braking <= self.BRAKE_DEADZONE + and self.dd < self.MAX_DATA + and self.flag == 1 + ): + + self.collection_braking() + self.progress_bar12.update(1) + self.dd += 1 + + self.brake_message_publish(self.dd, 4) + + elif ( + self.BRAKE_DEADZONE < self.braking <= self.BRAKE_THRESHOLD1 + and self.aa < self.MAX_DATA + ): + + self.collection_braking() + self.progress_bar13.update(1) + self.flag = 1 + self.aa += 1 + + self.brake_message_publish(self.aa, 5) + + elif ( + self.BRAKE_THRESHOLD1 < self.braking <= self.BRAKE_THRESHOLD2 + and self.bb < self.MAX_DATA + ): + + self.collection_braking() + self.progress_bar14.update(1) + self.flag = 1 + self.bb += 1 + + self.brake_message_publish(self.bb, 6) + + elif ( + self.braking > self.BRAKE_THRESHOLD2 and self.cc < self.MAX_DATA + ): + + self.collection_braking() + self.progress_bar15.update(1) + self.flag = 1 + self.cc += 1 + + self.brake_message_publish(self.cc, 7) + + def throttle_message_callback(self): + self.progress_throttle.publish(self.long_processes_throttle_msg) + + def brake_message_callback(self): + self.progress_brake.publish(self.long_processes_brake_msg) + + +def main(): + rclpy.init() + node = primotest() + rclpy.spin(node) + + +if __name__ == "__main__": + main() diff --git 
a/vehicle/learning_based_accel_brake_map_calibrator/scripts/data_monitor.py b/vehicle/learning_based_accel_brake_map_calibrator/scripts/data_monitor.py new file mode 100644 index 0000000000000..9adce5b0e18ac --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/scripts/data_monitor.py @@ -0,0 +1,160 @@ +#! /usr/bin/env python3 +import rclpy +import rclpy.node +from tier4_vehicle_msgs.msg import ActuationStatusStamped +from autoware_auto_vehicle_msgs.msg import VelocityReport +from autoware_auto_vehicle_msgs.msg import SteeringReport +from std_msgs.msg import Float32 +from sensor_msgs.msg import Imu + + +class DataMonitor(rclpy.node.Node): + def __init__(self): + self.brake_timestamp = 0.0 + self.throttle_timestamp = 0.0 + self.velocity_timestamp = 0.0 + self.steering_timestamp = 0.0 + self.pitch_timestamp = 0.0 + self.imu_timestamp = 0.0 + self.can_timestamp = 0.0 + super().__init__("data_monitor") + + self.declare_parameter("pitch_topic", "/sensing/gnss/chc/pitch") + self.declare_parameter( + "actuation_status_topic", "/vehicle/status/actuation_status" + ) + self.declare_parameter( + "steering_status_topic", "/vehicle/status/steering_status" + ) + self.declare_parameter( + "velocity_status_topic", "/vehicle/status/velocity_status" + ) + self.declare_parameter("imu_topic", "/sensing/gnss/chc/imu") + + # Get topic names from parameters + self.pitch_topic = ( + self.get_parameter("pitch_topic").get_parameter_value().string_value + ) + self.actuation_status_topic = ( + self.get_parameter("actuation_status_topic") + .get_parameter_value() + .string_value + ) + self.steering_status_topic = ( + self.get_parameter("steering_status_topic") + .get_parameter_value() + .string_value + ) + self.velocity_status_topic = ( + self.get_parameter("velocity_status_topic") + .get_parameter_value() + .string_value + ) + self.imu_topic = ( + self.get_parameter("imu_topic").get_parameter_value().string_value + ) + + self.timer = self.create_timer(1, self.timer_callback) + self.create_subscription( + ActuationStatusStamped, + self.actuation_status_topic, + self.brake_topic_callback, + 10, + ) + self.create_subscription( + ActuationStatusStamped, + self.actuation_status_topic, + self.drive_topic_callback, + 10, + ) + self.create_subscription( + SteeringReport, self.steering_status_topic, self.steer_topic_callback, 10 + ) + self.create_subscription( + VelocityReport, self.velocity_status_topic, self.velocity_topic_callback, 10 + ) + self.create_subscription( + Float32, self.pitch_topic, self.pitch_topic_callback, 10 + ) + self.create_subscription(Imu, self.imu_topic, self.imu_topic_callback, 10) + + def brake_topic_callback(self, msg): + self.brake_timestamp = int(self.get_clock().now().nanoseconds / 1000000) + + def drive_topic_callback(self, msg): + self.throttle_timestamp = int(self.get_clock().now().nanoseconds / 1000000) + + def velocity_topic_callback(self, msg): + self.velocity_timestamp = int(self.get_clock().now().nanoseconds / 1000000) + + def steer_topic_callback(self, msg): + self.steering_timestamp = int(self.get_clock().now().nanoseconds / 1000000) + + def pitch_topic_callback(self, msg): + self.pitch_timestamp = int(self.get_clock().now().nanoseconds / 1000000) + + def imu_topic_callback(self, msg): + self.imu_timestamp = int(self.get_clock().now().nanoseconds / 1000000) + + def timer_callback(self): + self.get_logger().info("data monitor checking") + timestamp = int(self.get_clock().now().nanoseconds / 1000000) + + brake_timegap = timestamp - self.brake_timestamp + throttle_timegap = 
timestamp - self.throttle_timestamp + steering_timegap = timestamp - self.steering_timestamp + pitch_timegap = timestamp - self.pitch_timestamp + imu_timegap = timestamp - self.imu_timestamp + velocity_timegap = timestamp - self.velocity_timestamp
+
+ if self.brake_timestamp == 0: + self.get_logger().error("brake topic is not published") + elif brake_timegap > 1000: + self.get_logger().error("brake topic is not alive") + else: + self.get_logger().debug("brake topic is good")
+
+ if self.throttle_timestamp == 0: + self.get_logger().error("throttle topic is not published") + elif throttle_timegap > 1000: + self.get_logger().error("throttle topic is not alive") + else: + self.get_logger().debug("throttle topic is good")
+
+ if self.steering_timestamp == 0: + self.get_logger().error("steering topic is not published") + elif steering_timegap > 1000: + self.get_logger().error("steering topic is not alive") + else: + self.get_logger().debug("steering topic is good")
+
+ if self.pitch_timestamp == 0: + self.get_logger().error("pitch topic is not published") + elif pitch_timegap > 1000: + self.get_logger().error("pitch topic is not alive") + else: + self.get_logger().debug("pitch topic is good")
+
+ if self.imu_timestamp == 0: + self.get_logger().error("imu topic is not published") + elif imu_timegap > 1000: + self.get_logger().error("imu topic is not alive") + else: + self.get_logger().debug("imu topic is good")
+
+ if self.velocity_timestamp == 0: + self.get_logger().error("velocity topic is not published") + elif velocity_timegap > 1000: + self.get_logger().error("velocity topic is not alive") + else: + self.get_logger().debug("velocity topic is good")
+
+
+def main(): + rclpy.init() + node = DataMonitor() + rclpy.spin(node)
+
+
+if __name__ == "__main__": + main() diff --git a/vehicle/learning_based_accel_brake_map_calibrator/scripts/neural_network_brake.py b/vehicle/learning_based_accel_brake_map_calibrator/scripts/neural_network_brake.py new file mode 100644 index 0000000000000..649891ae06efb --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/scripts/neural_network_brake.py @@ -0,0 +1,237 @@ +#!
/usr/bin/python3 +import pandas as pd +import numpy as np +import torch +import torch.nn as nn +import torch.optim as optim +from sklearn.model_selection import train_test_split +import matplotlib.pyplot as plt +from sklearn.metrics import mean_squared_error +from sklearn.metrics import mean_absolute_error +from sklearn.metrics import r2_score +import rclpy +from rclpy.node import Node + + +class NeuralNetworkBrake(Node): + class NeuralNetwork(nn.Module): + def __init__(self): + super(NeuralNetworkBrake.NeuralNetwork, self).__init__() + self.fc1 = nn.Linear( + 2, 128 + ) # Input layer with 2 neurons, hidden layer with n neurons + self.relu1 = nn.ReLU() + self.fc2 = nn.Linear(128, 32) + self.relu2 = nn.ReLU() + self.fc3 = nn.Linear(32, 1) # Output layer with 1 neuron + + def forward(self, x): + x = self.fc1(x) + x = self.relu1(x) + x = self.fc2(x) + x = self.relu2(x) + x = self.fc3(x) + + return x + + def __init__(self): + + super().__init__("neural_network_brake") + + self.model = self.NeuralNetwork() + + data = pd.read_csv("braking.csv") + dataa = pd.read_csv("braking.csv") + ush = pd.read_csv("braking.csv") + + # declare params from launch file + self.declare_parameter("filter_vel_brake", 1.5) + self.declare_parameter("filter_cmd_brake", 10.0) + self.declare_parameter("filter_acc_brake", 10.0) + + # Load params from launch file + self.FILTER_VEL_BRAKE = ( + self.get_parameter("filter_vel_brake").get_parameter_value().double_value + ) + self.FILTER_CMD_BRAKE = ( + self.get_parameter("filter_cmd_brake").get_parameter_value().double_value + ) + self.FILTER_ACC_BRAKE = ( + self.get_parameter("filter_acc_brake").get_parameter_value().double_value + ) + + mean0 = data["Velocity"].mean() + std0 = data["Velocity"].std() + data["Velocity"] = (data["Velocity"] - mean0) / std0 + dataa["Velocity"] = (dataa["Velocity"] - mean0) / std0 + + data = data[abs(data["Velocity"] - mean0) <= std0 * self.FILTER_VEL_BRAKE] + dataa = dataa[abs(dataa["Velocity"] - mean0) <= std0 * self.FILTER_VEL_BRAKE] + + mean1 = data["Braking"].mean() + std1 = data["Braking"].std() + data["Braking"] = (data["Braking"] - mean1) / std1 + dataa["Braking"] = (dataa["Braking"] - mean1) / std1 + + data = data[abs(data["Braking"] - mean1) <= std1 * self.FILTER_CMD_BRAKE] + dataa = dataa[abs(dataa["Braking"] - mean1) <= std1 * self.FILTER_CMD_BRAKE] + + mean2 = data["Acceleration_measured"].mean() + std2 = data["Acceleration_measured"].std() + data["Acceleration_measured"] = (data["Acceleration_measured"] - mean2) / std2 + dataa["Acceleration_measured"] = (dataa["Acceleration_measured"] - mean2) / std2 + + data = data[ + abs(data["Acceleration_measured"] - mean2) <= std2 * self.FILTER_ACC_BRAKE + ] + dataa = dataa[ + abs(dataa["Acceleration_measured"] - mean2) <= std2 * self.FILTER_ACC_BRAKE + ] + + # Split the data into input features (velocity and braking) and target (acceleration) + + X = data[["Velocity", "Braking"]].values + y = data["Acceleration_measured"].values + + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=0.2, random_state=42 + ) + + # Convert NumPy arrays to PyTorch tensors + X_train = torch.tensor(X_train, dtype=torch.float32) + y_train = torch.tensor(y_train, dtype=torch.float32) + X_test = torch.tensor(X_test, dtype=torch.float32) + y_test = torch.tensor(y_test, dtype=torch.float32) + + criterion = nn.MSELoss() + optimizer = optim.Adam(self.model.parameters(), lr=0.001) + + # Training loop + num_epochs = 100 + for epoch in range(num_epochs): + # Forward pass + outputs = self.model(X_train) + 
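+ # full-batch training step: the MSE loss below compares the predicted and measured acceleration before backpropagation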
+ loss = criterion(outputs, y_train.view(-1, 1)) + + # Backpropagation and optimization + optimizer.zero_grad() + loss.backward() + optimizer.step() + + # Evaluate the model on the test data + with torch.no_grad(): + test_outputs = self.model(X_test) + # test_loss = criterion(test_outputs, y_test.view(-1, 1)) + # print(f"Mean Squared Error on Test Data: {test_loss.item()}") + + # Visualization + + # velocity_range = np.linspace((X[:, 0]*std0+mean0).min(), (X[:, 0]*std0+mean0).max(), 20) + # braking_range = np.linspace((X[:, 1]*std1+mean1).min(), (X[:, 1]*std1+mean1).max(), 20) + + velocity_range = np.linspace(0, (X[:, 0] * std0 + mean0).max(), 20) + braking_range = np.linspace((X[:, 1] * std1 + mean1).min(), 80, 20) + V, A = np.meshgrid(velocity_range, braking_range) + + input_grid = np.column_stack( + ((V.flatten() - mean0) / std0, (A.flatten() - mean1) / std1) + ) + input_grid = torch.tensor(input_grid, dtype=torch.float32) + + with torch.no_grad(): + commands = self.model(input_grid).reshape(V.shape) + + commands_new = commands * std2 + mean2 + + # Save the trained model + # torch.save(self.model.state_dict(), 'trained_brake.pth') + + # evaluation + mse = mean_squared_error(y_test, test_outputs.view(-1).numpy()) + self.get_logger().info(f"Mean Squared Error on Test Data: {mse}") + + mae = mean_absolute_error(y_test, test_outputs.view(-1).numpy()) + self.get_logger().info(f"Mean Absolute Error on Test Data: {mae}") + + rmse = np.sqrt(mse) + self.get_logger().info(f"Root Mean Squared Error on Test Data: {rmse}") + + r2 = r2_score(y_test, test_outputs.view(-1).numpy()) + self.get_logger().info(f"R-squared (R2) Score on Test Data: {r2}") + + # Save NN model in csv correct format for testing in the real vehicle + + velocity_headers = ["{:.2f}".format(v) for v in velocity_range] + + # we normalize braking values from 0 to 1 + braking_range /= 100 + + headers = [""] + velocity_headers + + # Add braking values to the commands_new matrix as the first column + commands_new_with_braking = np.column_stack((braking_range, commands_new)) + + csv_filename = "brake_map.csv" + np.savetxt( + csv_filename, + commands_new_with_braking, + delimiter=",", + header=",".join(headers), + comments="", + ) + + # visualize raw data with the NN model for comparison + xdata = dataa.Velocity * std0 + mean0 + ydata = dataa.Braking * std1 + mean1 + zdata = dataa.Acceleration_measured * std2 + mean2 + + fig = plt.figure() + ax = fig.add_subplot(111, projection="3d") + + ax.scatter3D(xdata, ydata, zdata, c=zdata, marker="o") + surf = ax.plot_surface(V, A, commands_new, cmap="viridis") + + ax.set_xlabel("Velocity") + ax.set_zlabel("Acceleration") + ax.set_ylabel("Braking Output") + ax.set_title("Neural Network Output vs. 
Velocity and Braking") + + plt.figure(figsize=(10, 6)) + plt.subplot(3, 1, 1) + plt.hist(ush["Velocity"], bins=20, color="skyblue", edgecolor="black") + plt.title("Distribution of Velocity") + plt.xlabel("Velocity") + plt.ylabel("Frequency") + + # Plot the distribution of 'Throttling' + plt.subplot(3, 1, 2) + plt.hist(ush["Braking"], bins=20, color="salmon", edgecolor="black") + plt.title("Distribution of Braking") + plt.xlabel("Braking") + plt.ylabel("Frequency") + + # Plot the distribution of 'Acceleration_measured' + plt.subplot(3, 1, 3) + plt.hist( + ush["Acceleration_measured"], bins=20, color="lightgreen", edgecolor="black" + ) + plt.title("Distribution of Acceleration") + plt.xlabel("Acceleration") + plt.ylabel("Frequency") + + plt.tight_layout() + + fig.colorbar(surf) + + plt.show() + + +def main(): + rclpy.init() + neural_network_brake = NeuralNetworkBrake() + rclpy.spin(neural_network_brake) + + +if __name__ == "__main__": + main() diff --git a/vehicle/learning_based_accel_brake_map_calibrator/scripts/neural_network_throttle.py b/vehicle/learning_based_accel_brake_map_calibrator/scripts/neural_network_throttle.py new file mode 100644 index 0000000000000..8f658e9b450fa --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/scripts/neural_network_throttle.py @@ -0,0 +1,241 @@ +#! /usr/bin/python3 +import math + +import pandas as pd +import numpy as np +import torch +import torch.nn as nn +import torch.optim as optim +from sklearn.model_selection import train_test_split +import matplotlib.pyplot as plt +from sklearn.metrics import mean_squared_error +from sklearn.metrics import mean_absolute_error +from sklearn.metrics import r2_score +import rclpy +from rclpy.node import Node + + +class NeuralNetworkThrottle(Node): + class NeuralNetwork(nn.Module): + def __init__(self): + super(NeuralNetworkThrottle.NeuralNetwork, self).__init__() + self.fc1 = nn.Linear( + 2, 128 + ) # Input layer with 2 neurons, hidden layer with n neurons + self.relu1 = nn.ReLU() + self.fc2 = nn.Linear(128, 32) + self.relu2 = nn.ReLU() + self.fc3 = nn.Linear(32, 1) # Output layer with 1 neuron + + def forward(self, x): + x = self.fc1(x) + x = self.relu1(x) + x = self.fc2(x) + x = self.relu2(x) + x = self.fc3(x) + + return x + + def __init__(self): + + super().__init__("neural_network_throttle") + + self.model = self.NeuralNetwork() + + data = pd.read_csv("throttling.csv") + dataa = pd.read_csv("throttling.csv") + ush = pd.read_csv("throttling.csv") + + # Declare params from launch file + self.declare_parameter("filter_vel_throttle", 10.0) + self.declare_parameter("filter_cmd_throttle", 10.0) + self.declare_parameter("filter_acc_throttle", 10.0) + + # Load params from launch file + self.FILTER_VEL_THROTTLE = ( + self.get_parameter("filter_vel_throttle").get_parameter_value().double_value + ) + self.FILTER_CMD_THROTTLE = ( + self.get_parameter("filter_cmd_throttle").get_parameter_value().double_value + ) + self.FILTER_ACC_THROTTLE = ( + self.get_parameter("filter_acc_throttle").get_parameter_value().double_value + ) + + mean0 = data["Velocity"].mean() + std0 = data["Velocity"].std() + data["Velocity"] = (data["Velocity"] - mean0) / std0 + dataa["Velocity"] = (dataa["Velocity"] - mean0) / std0 + + data = data[abs(data["Velocity"] - mean0) <= std0 * self.FILTER_VEL_THROTTLE] + dataa = dataa[abs(dataa["Velocity"] - mean0) <= std0 * self.FILTER_VEL_THROTTLE] + + mean1 = data["Throttling"].mean() + std1 = data["Throttling"].std() + data["Throttling"] = (data["Throttling"] - mean1) / std1 + 
dataa["Throttling"] = (dataa["Throttling"] - mean1) / std1 + + data = data[abs(data["Throttling"] - mean1) <= std1 * self.FILTER_CMD_THROTTLE] + dataa = dataa[ + abs(dataa["Throttling"] - mean1) <= std1 * self.FILTER_CMD_THROTTLE + ] + + mean2 = data["Acceleration_measured"].mean() + std2 = data["Acceleration_measured"].std() + data["Acceleration_measured"] = (data["Acceleration_measured"] - mean2) / std2 + dataa["Acceleration_measured"] = (dataa["Acceleration_measured"] - mean2) / std2 + + data = data[ + abs(data["Acceleration_measured"] - mean2) + <= std2 * self.FILTER_ACC_THROTTLE + ] + dataa = dataa[ + abs(dataa["Acceleration_measured"] - mean2) + <= std2 * self.FILTER_ACC_THROTTLE + ] + + # Split the data into input features (velocity and throttle) and target (acceleration) and test/train + + X = data[["Velocity", "Throttling"]].values + y = data["Acceleration_measured"].values + + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=0.2, random_state=42 + ) + + # Convert NumPy arrays to PyTorch tensors + X_train = torch.tensor(X_train, dtype=torch.float32) + y_train = torch.tensor(y_train, dtype=torch.float32) + X_test = torch.tensor(X_test, dtype=torch.float32) + y_test = torch.tensor(y_test, dtype=torch.float32) + + criterion = nn.MSELoss() + optimizer = optim.Adam( + self.model.parameters(), lr=0.001 + ) # , weight_decay=0.001) + + # Training loop + num_epochs = 100 + for epoch in range(num_epochs): + # Forward pass + outputs = self.model(X_train) + + loss = criterion(outputs, y_train.view(-1, 1)) + + # Backpropagation and optimization + optimizer.zero_grad() + loss.backward() + optimizer.step() + + with torch.no_grad(): + test_outputs = self.model(X_test) + # test_loss = criterion(test_outputs, y_test.view(-1, 1)) + # print(f"Mean Squared Error on Test Data: {test_loss.item()}") + + # Visualization + + velocity_range = np.linspace(0, (X[:, 0] * std0 + mean0).max(), 20) + # throttling_range = np.linspace((X[:, 1]*std1+mean1).min(), (X[:, 1]*std1+mean1).max(), 20) + throttling_range = np.linspace(0, (X[:, 1] * std1 + mean1).max(), 20) + V, A = np.meshgrid(velocity_range, throttling_range) + + input_grid = np.column_stack( + ((V.flatten() - mean0) / std0, (A.flatten() - mean1) / std1) + ) + input_grid = torch.tensor(input_grid, dtype=torch.float32) + + with torch.no_grad(): + commands = self.model(input_grid).reshape(V.shape) + + commands_new = commands * std2 + mean2 + + # Save the trained model + # torch.save(self.model.state_dict(), 'trained_throttle.pth') + + # evaluation + mse = mean_squared_error(y_test, test_outputs.view(-1).numpy()) + self.get_logger().info(f"Mean Squared Error on Test Data: {mse}") + + mae = mean_absolute_error(y_test, test_outputs.view(-1).numpy()) + self.get_logger().info(f"Mean Absolute Error on Test Data: {mae}") + + rmse = math.sqrt(mse) + self.get_logger().info(f"Root Mean Squared Error on Test Data: {rmse}") + + r2 = r2_score(y_test, test_outputs.view(-1).numpy()) + self.get_logger().info(f"R-squared (R2) Score on Test Data: {r2}") + + # Save NN model in csv correct format for testing in the real vehicle + + velocity_headers = ["{:.2f}".format(v) for v in velocity_range] + + # we normalize throttling values between 0 and 1 + throttling_range /= 100 + + headers = [""] + velocity_headers + + commands_new_with_throttling = np.column_stack((throttling_range, commands_new)) + + csv_filename = "accel_map.csv" + np.savetxt( + csv_filename, + commands_new_with_throttling, + delimiter=",", + header=",".join(headers), + comments="", + ) + + # 
3D Visualization (plot) + xdata = dataa.Velocity * std0 + mean0 + ydata = dataa.Throttling * std1 + mean1 + zdata = dataa.Acceleration_measured * std2 + mean2 + + fig = plt.figure() + ax = fig.add_subplot(111, projection="3d") + + ax.scatter3D(xdata, ydata, zdata, c=zdata, marker="o") + surf = ax.plot_surface(V, A, commands_new, cmap="viridis") + + ax.set_xlabel("Velocity") + ax.set_zlabel("Acceleration") + ax.set_ylabel("Throttling Output") + ax.set_title("Neural Network Output vs. Velocity and Throttling") + + plt.figure(figsize=(10, 6)) + plt.subplot(3, 1, 1) + plt.hist(ush["Velocity"], bins=20, color="skyblue", edgecolor="black") + plt.title("Distribution of Velocity") + plt.xlabel("Velocity") + plt.ylabel("Frequency") + + # Plot the distribution of 'Throttling' + plt.subplot(3, 1, 2) + plt.hist(ush["Throttling"], bins=20, color="salmon", edgecolor="black") + plt.title("Distribution of Throttling") + plt.xlabel("Throttling") + plt.ylabel("Frequency") + + # Plot the distribution of 'Acceleration_measured' + plt.subplot(3, 1, 3) + plt.hist( + ush["Acceleration_measured"], bins=20, color="lightgreen", edgecolor="black" + ) + plt.title("Distribution of Acceleration") + plt.xlabel("Acceleration") + plt.ylabel("Frequency") + + plt.tight_layout() + + fig.colorbar(surf) + + plt.show() + + +def main(): + rclpy.init() + neural_network_throttle = NeuralNetworkThrottle() + rclpy.spin(neural_network_throttle) + + +if __name__ == "__main__": + main() diff --git a/vehicle/learning_based_accel_brake_map_calibrator/scripts/throttle_data_visualization.py b/vehicle/learning_based_accel_brake_map_calibrator/scripts/throttle_data_visualization.py new file mode 100644 index 0000000000000..5077162f23126 --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/scripts/throttle_data_visualization.py @@ -0,0 +1,46 @@ +import numpy as np +import matplotlib.pyplot as plt +import pandas as pd +from sklearn.linear_model import LinearRegression + +# Read the data +columns = ["Velocity", "Throttling", "Acceleration_measured"] +df = pd.read_csv("throttling.csv", usecols=columns) + +xdata = df.Velocity +ydata = df.Throttling +zdata = df.Acceleration_measured + +# Fit a linear regression model +X = np.column_stack((xdata, ydata)) +model = LinearRegression().fit(X, zdata) + +x_grid, y_grid = np.meshgrid( + np.linspace(xdata.min(), xdata.max(), 100), + np.linspace(ydata.min(), ydata.max(), 100), +) + +z_grid = model.predict(np.column_stack((x_grid.ravel(), y_grid.ravel()))) +z_grid = z_grid.reshape(x_grid.shape) + + +fig = plt.figure() +ax = plt.axes(projection="3d") + + +cmap = plt.get_cmap("Greens") +normalize = plt.Normalize(zdata.min(), zdata.max()) +sc = plt.cm.ScalarMappable(cmap=cmap, norm=normalize) +sc.set_array([]) + +scatter = ax.scatter3D(xdata, ydata, zdata, c=zdata, cmap=cmap, marker="o") + +ax.plot_surface(x_grid, y_grid, z_grid, alpha=0.5, cmap=cmap) + +ax.set_xlabel("Velocity") +ax.set_zlabel("Acceleration") +ax.set_ylabel("Throttling Output") + +cbar = plt.colorbar(sc, ax=ax, label="Acceleration_measured") + +plt.show() diff --git a/vehicle/learning_based_accel_brake_map_calibrator/scripts_steering/data_collection_steer.py b/vehicle/learning_based_accel_brake_map_calibrator/scripts_steering/data_collection_steer.py new file mode 100644 index 0000000000000..f7a2924239e42 --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/scripts_steering/data_collection_steer.py @@ -0,0 +1,742 @@ +#! 
/usr/bin/env python3 +import math + +import rclpy +import rclpy.node +import pandas as pd + +from tier4_vehicle_msgs.msg import ActuationStatusStamped +from autoware_auto_vehicle_msgs.msg import VelocityReport +from autoware_auto_vehicle_msgs.msg import SteeringReport +from sensor_msgs.msg import Imu +from std_msgs.msg import Float32 + +from tqdm import tqdm + +from learning_based_accel_brake_map_calibrator.msg import SteeringProgress +from learning_based_accel_brake_map_calibrator.msg import SteeringProcesses + + +class primotest(rclpy.node.Node): + + def __init__(self): + + super().__init__("primo_test") + + # data sharing member variables + + self.throttling = 0.0 + self.throttling_prec = 0.0 + self.steering = 0.0 + self.acceleration = 0.0 + self.velocity = 0.0 + self.pitch_angle = 0.0 + + self.g = 9.80665 + + # launch params initialization to default values + self.declare_parameter("max_data", 10000) + self.declare_parameter("max_velocity", 1.95) + self.declare_parameter("throttle_threshold", 12) + self.declare_parameter("steering_threshold_1", 0.04) + self.declare_parameter("steering_threshold_2", 0.10) + self.declare_parameter("steering_threshold_3", 0.20) + self.declare_parameter("steering_threshold_4", 0.30) + self.declare_parameter("steering_threshold_5", 0.40) + self.declare_parameter("pitch_topic", "/sensing/gnss/chc/pitch") + self.declare_parameter( + "actuation_status_topic", "/vehicle/status/actuation_status" + ) + self.declare_parameter( + "steering_status_topic", "/vehicle/status/steering_status" + ) + self.declare_parameter( + "velocity_status_topic", "/vehicle/status/velocity_status" + ) + self.declare_parameter("imu_topic", "/sensing/gnss/chc/imu") + + self.declare_parameter("Recovery_Mode", False) + + # Load params from launch file + + self.MAX_DATA = ( + self.get_parameter("max_data").get_parameter_value().integer_value + ) + self.MAX_VELOCITY = ( + self.get_parameter("max_velocity").get_parameter_value().double_value + ) + self.THROTTLE_THRESHOLD = ( + self.get_parameter("throttle_threshold").get_parameter_value().integer_value + ) + self.STEERING_THR1 = ( + self.get_parameter("steering_threshold_1") + .get_parameter_value() + .double_value + ) + self.STEERING_THR2 = ( + self.get_parameter("steering_threshold_2") + .get_parameter_value() + .double_value + ) + self.STEERING_THR3 = ( + self.get_parameter("steering_threshold_3") + .get_parameter_value() + .double_value + ) + self.STEERING_THR4 = ( + self.get_parameter("steering_threshold_4") + .get_parameter_value() + .double_value + ) + self.STEERING_THR5 = ( + self.get_parameter("steering_threshold_5") + .get_parameter_value() + .double_value + ) + # Get topic names from parameters + self.pitch_topic = ( + self.get_parameter("pitch_topic").get_parameter_value().string_value + ) + self.actuation_status_topic = ( + self.get_parameter("actuation_status_topic") + .get_parameter_value() + .string_value + ) + self.steering_status_topic = ( + self.get_parameter("steering_status_topic") + .get_parameter_value() + .string_value + ) + self.velocity_status_topic = ( + self.get_parameter("velocity_status_topic") + .get_parameter_value() + .string_value + ) + self.imu_topic = ( + self.get_parameter("imu_topic").get_parameter_value().string_value + ) + + self.RECOVERY_MODE = ( + self.get_parameter("Recovery_Mode").get_parameter_value().bool_value + ) + + if self.RECOVERY_MODE: + df_existing1 = pd.read_csv("steering_01.csv") + df_existing2 = pd.read_csv("steering_02.csv") + df_existing3 = pd.read_csv("steering_03.csv") + df_existing4 
= pd.read_csv("steering_04.csv") + df_existing5 = pd.read_csv("steering_05.csv") + + self.k = int(df_existing1["Index_low_cmd"].iloc[0]) + self.kk = int(df_existing1["Index_high_cmd"].iloc[0]) + self.vel1 = df_existing1["Velocity"].tolist() + self.cmd1 = df_existing1["Throttling"].tolist() + self.acc1 = df_existing1["Acceleration_with_pitch_comp"].tolist() + self.accp1 = df_existing1["Acceleration_measured"].tolist() + self.pitch1 = df_existing1["Pitch_angle"].tolist() + self.steer1 = df_existing1["Steering"].tolist() + + self.i = int(df_existing2["Index_low_cmd"].iloc[0]) + self.ii = int(df_existing2["Index_high_cmd"].iloc[0]) + self.vel2 = df_existing2["Velocity"].tolist() + self.cmd2 = df_existing2["Throttling"].tolist() + self.acc2 = df_existing2["Acceleration_with_pitch_comp"].tolist() + self.accp2 = df_existing2["Acceleration_measured"].tolist() + self.pitch2 = df_existing2["Pitch_angle"].tolist() + self.steer2 = df_existing2["Steering"].tolist() + + self.j = int(df_existing3["Index_low_cmd"].iloc[0]) + self.jj = int(df_existing3["Index_high_cmd"].iloc[0]) + self.vel3 = df_existing3["Velocity"].tolist() + self.cmd3 = df_existing3["Throttling"].tolist() + self.acc3 = df_existing3["Acceleration_with_pitch_comp"].tolist() + self.accp3 = df_existing3["Acceleration_measured"].tolist() + self.pitch3 = df_existing3["Pitch_angle"].tolist() + self.steer3 = df_existing3["Steering"].tolist() + + self.h = int(df_existing4["Index_low_cmd"].iloc[0]) + self.hh = int(df_existing4["Index_high_cmd"].iloc[0]) + self.vel4 = df_existing4["Velocity"].tolist() + self.cmd4 = df_existing4["Throttling"].tolist() + self.acc4 = df_existing4["Acceleration_with_pitch_comp"].tolist() + self.accp4 = df_existing4["Acceleration_measured"].tolist() + self.pitch4 = df_existing4["Pitch_angle"].tolist() + self.steer4 = df_existing4["Steering"].tolist() + + self.a = int(df_existing5["Index_low_cmd"].iloc[0]) + self.aa = int(df_existing5["Index_high_cmd"].iloc[0]) + self.vel5 = df_existing5["Velocity"].tolist() + self.cmd5 = df_existing5["Throttling"].tolist() + self.acc5 = df_existing5["Acceleration_with_pitch_comp"].tolist() + self.accp5 = df_existing5["Acceleration_measured"].tolist() + self.pitch5 = df_existing5["Pitch_angle"].tolist() + self.steer5 = df_existing5["Steering"].tolist() + + else: + self.i = self.j = self.h = self.k = self.a = self.ii = self.jj = self.hh = ( + self.kk + ) = self.aa = 0 + self.vel1 = [] + self.cmd1 = [] + self.acc1 = [] + self.accp1 = [] + self.pitch1 = [] + self.steer1 = [] + self.vel2 = [] + self.cmd2 = [] + self.acc2 = [] + self.accp2 = [] + self.pitch2 = [] + self.steer2 = [] + self.vel3 = [] + self.cmd3 = [] + self.acc3 = [] + self.accp3 = [] + self.pitch3 = [] + self.steer3 = [] + self.vel4 = [] + self.cmd4 = [] + self.acc4 = [] + self.accp4 = [] + self.pitch4 = [] + self.steer4 = [] + self.vel5 = [] + self.cmd5 = [] + self.acc5 = [] + self.accp5 = [] + self.pitch5 = [] + self.steer5 = [] + + # custom messages definitions and iniialization + self.steer_progress_msg = [SteeringProgress() for _ in range(10)] + self.steer_processes_msg = SteeringProcesses() + self.steer_processes_msg.processes = [SteeringProgress() for _ in range(10)] + + self.steer_processes_msg.header.frame_id = "Steering scenario" + + self.steer_progress_msg[0].pedal_value_start = 0 + self.steer_progress_msg[0].pedal_value_end = self.THROTTLE_THRESHOLD + self.steer_progress_msg[0].steering_value_start = self.STEERING_THR1 + self.steer_progress_msg[0].steering_value_end = self.STEERING_THR2 + 
self.steer_progress_msg[0].velocity_max = self.MAX_VELOCITY + self.steer_processes_msg.processes[0] = self.steer_progress_msg[0] + self.steer_progress_msg[0].data_count = self.k + self.steer_progress_msg[0].progress = int(self.k * 100 / self.MAX_DATA) + + self.steer_progress_msg[1].pedal_value_start = 0 + self.steer_progress_msg[1].pedal_value_end = self.THROTTLE_THRESHOLD + self.steer_progress_msg[1].steering_value_start = self.STEERING_THR2 + self.steer_progress_msg[1].steering_value_end = self.STEERING_THR3 + self.steer_progress_msg[1].velocity_max = self.MAX_VELOCITY + self.steer_processes_msg.processes[1] = self.steer_progress_msg[1] + self.steer_progress_msg[1].data_count = self.i + self.steer_progress_msg[1].progress = int(self.i * 100 / self.MAX_DATA) + + self.steer_progress_msg[2].pedal_value_start = 0 + self.steer_progress_msg[2].pedal_value_end = self.THROTTLE_THRESHOLD + self.steer_progress_msg[2].steering_value_start = self.STEERING_THR3 + self.steer_progress_msg[2].steering_value_end = self.STEERING_THR4 + self.steer_progress_msg[2].velocity_max = self.MAX_VELOCITY + self.steer_processes_msg.processes[2] = self.steer_progress_msg[2] + self.steer_progress_msg[2].data_count = self.j + self.steer_progress_msg[2].progress = int(self.j * 100 / self.MAX_DATA) + + self.steer_progress_msg[3].pedal_value_start = 0 + self.steer_progress_msg[3].pedal_value_end = self.THROTTLE_THRESHOLD + self.steer_progress_msg[3].steering_value_start = self.STEERING_THR4 + self.steer_progress_msg[3].steering_value_end = self.STEERING_THR5 + self.steer_progress_msg[3].velocity_max = self.MAX_VELOCITY + self.steer_processes_msg.processes[3] = self.steer_progress_msg[3] + self.steer_progress_msg[3].data_count = self.h + self.steer_progress_msg[3].progress = int(self.h * 100 / self.MAX_DATA) + + self.steer_progress_msg[4].pedal_value_start = 0 + self.steer_progress_msg[4].pedal_value_end = self.THROTTLE_THRESHOLD + self.steer_progress_msg[4].steering_value_start = self.STEERING_THR5 + self.steer_progress_msg[4].steering_value_end = 0.50 + self.steer_progress_msg[4].velocity_max = self.MAX_VELOCITY + self.steer_processes_msg.processes[4] = self.steer_progress_msg[4] + self.steer_progress_msg[4].data_count = self.a + self.steer_progress_msg[4].progress = int(self.a * 100 / self.MAX_DATA) + + self.steer_progress_msg[5].pedal_value_start = self.THROTTLE_THRESHOLD + self.steer_progress_msg[5].pedal_value_end = 100 + self.steer_progress_msg[5].steering_value_start = self.STEERING_THR1 + self.steer_progress_msg[5].steering_value_end = self.STEERING_THR2 + self.steer_progress_msg[5].velocity_max = self.MAX_VELOCITY + self.steer_processes_msg.processes[5] = self.steer_progress_msg[5] + self.steer_progress_msg[5].data_count = self.kk + self.steer_progress_msg[5].progress = int(self.kk * 100 / self.MAX_DATA) + + self.steer_progress_msg[6].pedal_value_start = self.THROTTLE_THRESHOLD + self.steer_progress_msg[6].pedal_value_end = 100 + self.steer_progress_msg[6].steering_value_start = self.STEERING_THR2 + self.steer_progress_msg[6].steering_value_end = self.STEERING_THR3 + self.steer_progress_msg[6].velocity_max = self.MAX_VELOCITY + self.steer_processes_msg.processes[6] = self.steer_progress_msg[6] + self.steer_progress_msg[6].data_count = self.ii + self.steer_progress_msg[6].progress = int(self.ii * 100 / self.MAX_DATA) + + self.steer_progress_msg[7].pedal_value_start = self.THROTTLE_THRESHOLD + self.steer_progress_msg[7].pedal_value_end = 100 + self.steer_progress_msg[7].steering_value_start = self.STEERING_THR3 + 
self.steer_progress_msg[7].steering_value_end = self.STEERING_THR4 + self.steer_progress_msg[7].velocity_max = self.MAX_VELOCITY + self.steer_processes_msg.processes[7] = self.steer_progress_msg[7] + self.steer_progress_msg[7].data_count = self.jj + self.steer_progress_msg[7].progress = int(self.jj * 100 / self.MAX_DATA) + + self.steer_progress_msg[8].pedal_value_start = self.THROTTLE_THRESHOLD + self.steer_progress_msg[8].pedal_value_end = 100 + self.steer_progress_msg[8].steering_value_start = self.STEERING_THR4 + self.steer_progress_msg[8].steering_value_end = self.STEERING_THR5 + self.steer_progress_msg[8].velocity_max = self.MAX_VELOCITY + self.steer_processes_msg.processes[8] = self.steer_progress_msg[8] + self.steer_progress_msg[8].data_count = self.hh + self.steer_progress_msg[8].progress = int(self.hh * 100 / self.MAX_DATA) + + self.steer_progress_msg[9].pedal_value_start = self.THROTTLE_THRESHOLD + self.steer_progress_msg[9].pedal_value_end = 100 + self.steer_progress_msg[9].steering_value_start = self.STEERING_THR5 + self.steer_progress_msg[9].steering_value_end = 0.50 + self.steer_progress_msg[9].velocity_max = self.MAX_VELOCITY + self.steer_processes_msg.processes[9] = self.steer_progress_msg[9] + self.steer_progress_msg[9].data_count = self.aa + self.steer_progress_msg[9].progress = int(self.aa * 100 / self.MAX_DATA) + + self.progress_bar0 = tqdm( + initial=self.k, + total=self.MAX_DATA, + desc=" Low throttle | Steering: " + + str(self.STEERING_THR1) + + " - " + + str(self.STEERING_THR2), + dynamic_ncols=True, + ) + self.progress_bar1 = tqdm( + initial=self.i, + total=self.MAX_DATA, + desc=" Low throttle | Steering: " + + str(self.STEERING_THR2) + + " - " + + str(self.STEERING_THR3), + dynamic_ncols=True, + ) + self.progress_bar2 = tqdm( + initial=self.j, + total=self.MAX_DATA, + desc=" Low throttle | Steering: " + + str(self.STEERING_THR3) + + " - " + + str(self.STEERING_THR4), + dynamic_ncols=True, + ) + self.progress_bar3 = tqdm( + initial=self.h, + total=self.MAX_DATA, + desc=" Low throttle | Steering: " + + str(self.STEERING_THR4) + + " - " + + str(self.STEERING_THR5), + dynamic_ncols=True, + ) + self.progress_bar4 = tqdm( + initial=self.a, + total=self.MAX_DATA, + desc=" Low throttle | Steering: " + + str(self.STEERING_THR5) + + " - max", + dynamic_ncols=True, + ) + self.progress_bar10 = tqdm( + initial=self.kk, + total=self.MAX_DATA, + desc=" High throttle | Steering: " + + str(self.STEERING_THR1) + + " - " + + str(self.STEERING_THR2), + dynamic_ncols=True, + ) + self.progress_bar11 = tqdm( + initial=self.ii, + total=self.MAX_DATA, + desc=" High throttle | Steering: " + + str(self.STEERING_THR2) + + " - " + + str(self.STEERING_THR3), + dynamic_ncols=True, + ) + self.progress_bar12 = tqdm( + initial=self.jj, + total=self.MAX_DATA, + desc=" High throttle | Steering: " + + str(self.STEERING_THR3) + + " - " + + str(self.STEERING_THR4), + dynamic_ncols=True, + ) + self.progress_bar13 = tqdm( + initial=self.hh, + total=self.MAX_DATA, + desc=" High throttle | Steering: " + + str(self.STEERING_THR4) + + " - " + + str(self.STEERING_THR5), + dynamic_ncols=True, + ) + self.progress_bar14 = tqdm( + initial=self.aa, + total=self.MAX_DATA, + desc=" High throttle | Steering: " + + str(self.STEERING_THR5) + + " - max", + dynamic_ncols=True, + ) + + self.create_subscription( + Float32, self.pitch_topic, self.pitch_topic_callback, 1 + ) + self.create_subscription( + ActuationStatusStamped, + self.actuation_status_topic, + self.actuation_topic_callback, + 1, + ) + 
self.create_subscription( + SteeringReport, self.steering_status_topic, self.steer_topic_callback, 1 + ) + self.create_subscription( + VelocityReport, self.velocity_status_topic, self.velocity_topic_callback, 1 + ) + self.create_subscription(Imu, self.imu_topic, self.imu_topic_callback, 1) + self.progress = self.create_publisher( + SteeringProcesses, "/scenarios_collection_steering_progress", 10 + ) + self.timer = self.create_timer(0.02, self.test_callback) + self.timer1 = self.create_timer(0.5, self.steering_message_callback) + + # WE WILL FILTER THE DATA DURING THE POST-PROCESSING, SO IN THIS CASE WE DON'T USE DE QUEUE + + def pitch_topic_callback(self, msg): + + self.pitch_angle = float(msg.data) + + def velocity_topic_callback(self, msg): + self.velocity = float(msg.longitudinal_velocity) + + def actuation_topic_callback(self, msg): + + self.throttling = float(msg.status.accel_status) * 100.0 + + def steer_topic_callback(self, msg): + + self.steering = float(msg.steering_tire_angle) + + def imu_topic_callback(self, msg): + + self.acceleration = float(msg.linear_acceleration.x) + + def collection_steering1(self): + + self.vel1.append(abs(self.velocity)) + self.cmd1.append(self.throttling) + self.steer1.append(abs(self.steering)) + if self.velocity < 0: + self.acc1.append( + -1 * self.acceleration + - self.g * math.sin(math.radians(-1 * self.pitch_angle)) + ) + self.accp1.append(-1 * self.acceleration) + self.pitch1.append(-1 * self.pitch_angle) + else: + self.acc1.append( + self.acceleration - self.g * math.sin(math.radians(self.pitch_angle)) + ) + self.accp1.append(self.acceleration) + self.pitch1.append(self.pitch_angle) + + # save data in csv file + dict1 = { + "Velocity": self.vel1, + "Throttling": self.cmd1, + "Steering": self.steer1, + "Acceleration_with_pitch_comp": self.acc1, + "Acceleration_measured": self.accp1, + "Pitch_angle": self.pitch1, + "Index_low_cmd": self.k, + "Index_high_cmd": self.kk, + } + df1 = pd.DataFrame(dict1) + df1.to_csv("steering_01.csv") + + def collection_steering2(self): + + self.vel2.append(abs(self.velocity)) + self.cmd2.append(self.throttling) + self.steer2.append(abs(self.steering)) + if self.velocity < 0: + self.acc2.append( + -1 * self.acceleration + - self.g * math.sin(math.radians(-1 * self.pitch_angle)) + ) + self.accp2.append(-1 * self.acceleration) + self.pitch2.append(-1 * self.pitch_angle) + else: + self.acc2.append( + self.acceleration - self.g * math.sin(math.radians(self.pitch_angle)) + ) + self.accp2.append(self.acceleration) + self.pitch2.append(self.pitch_angle) + + # save data in csv file + dict2 = { + "Velocity": self.vel2, + "Throttling": self.cmd2, + "Steering": self.steer2, + "Acceleration_with_pitch_comp": self.acc2, + "Acceleration_measured": self.accp2, + "Pitch_angle": self.pitch2, + "Index_low_cmd": self.i, + "Index_high_cmd": self.ii, + } + df2 = pd.DataFrame(dict2) + df2.to_csv("steering_02.csv") + + def collection_steering3(self): + + self.vel3.append(abs(self.velocity)) + self.cmd3.append(self.throttling) + self.steer3.append(abs(self.steering)) + if self.velocity < 0: + self.acc3.append( + -1 * self.acceleration + - self.g * math.sin(math.radians(-1 * self.pitch_angle)) + ) + self.accp3.append(-1 * self.acceleration) + self.pitch3.append(-1 * self.pitch_angle) + else: + self.acc3.append( + self.acceleration - self.g * math.sin(math.radians(self.pitch_angle)) + ) + self.accp3.append(self.acceleration) + self.pitch3.append(self.pitch_angle) + + # save data in csv file + dict3 = { + "Velocity": self.vel3, + "Throttling": 
self.cmd3, + "Steering": self.steer3, + "Acceleration_with_pitch_comp": self.acc3, + "Acceleration_measured": self.accp3, + "Pitch_angle": self.pitch3, + "Index_low_cmd": self.j, + "Index_high_cmd": self.jj, + } + df3 = pd.DataFrame(dict3) + df3.to_csv("steering_03.csv") + + def collection_steering4(self): + + self.vel4.append(abs(self.velocity)) + self.cmd4.append(self.throttling) + self.steer4.append(abs(self.steering)) + if self.velocity < 0: + self.acc4.append( + -1 * self.acceleration + - self.g * math.sin(math.radians(-1 * self.pitch_angle)) + ) + self.accp4.append(-1 * self.acceleration) + self.pitch4.append(-1 * self.pitch_angle) + else: + self.acc4.append( + self.acceleration - self.g * math.sin(math.radians(self.pitch_angle)) + ) + self.accp4.append(self.acceleration) + self.pitch4.append(self.pitch_angle) + + # save data in csv file + dict4 = { + "Velocity": self.vel4, + "Throttling": self.cmd4, + "Steering": self.steer4, + "Acceleration_with_pitch_comp": self.acc4, + "Acceleration_measured": self.accp4, + "Pitch_angle": self.pitch4, + "Index_low_cmd": self.h, + "Index_high_cmd": self.hh, + } + df4 = pd.DataFrame(dict4) + df4.to_csv("steering_04.csv") + + def collection_steering5(self): + + self.vel5.append(abs(self.velocity)) + self.cmd5.append(self.throttling) + self.steer5.append(abs(self.steering)) + if self.velocity < 0: + self.acc5.append( + -1 * self.acceleration + - self.g * math.sin(math.radians(-1 * self.pitch_angle)) + ) + self.accp5.append(-1 * self.acceleration) + self.pitch5.append(-1 * self.pitch_angle) + else: + self.acc5.append( + self.acceleration - self.g * math.sin(math.radians(self.pitch_angle)) + ) + self.accp5.append(self.acceleration) + self.pitch5.append(self.pitch_angle) + + # save data in csv file + dict5 = { + "Velocity": self.vel5, + "Throttling": self.cmd5, + "Steering": self.steer5, + "Acceleration_with_pitch_comp": self.acc5, + "Acceleration_measured": self.accp5, + "Pitch_angle": self.pitch5, + "Index_low_cmd": self.a, + "Index_high_cmd": self.aa, + } + df5 = pd.DataFrame(dict5) + df5.to_csv("steering_05.csv") + + def steering_message_publish(self, count: int, index: int): + + self.steer_progress_msg[index].data_count = count + self.steer_progress_msg[index].progress = int(count * 100 / self.MAX_DATA) + self.steer_processes_msg.header.stamp = self.get_clock().now().to_msg() + self.steer_processes_msg.processes[index] = self.steer_progress_msg[index] + + def test_callback(self): + + if 0 < abs(self.velocity) <= self.MAX_VELOCITY: + + # low throttling scenario + + if self.throttling <= self.THROTTLE_THRESHOLD: + + if ( + self.STEERING_THR1 <= abs(self.steering) < self.STEERING_THR2 + and self.k < self.MAX_DATA + ): + + self.collection_steering1() + self.progress_bar0.update(1) + self.k += 1 + + self.steering_message_publish(self.k, 0) + + elif ( + self.STEERING_THR2 <= abs(self.steering) < self.STEERING_THR3 + and self.i < self.MAX_DATA + ): + + self.collection_steering2() + self.progress_bar1.update(1) + self.i += 1 + + self.steering_message_publish(self.i, 1) + + elif ( + self.STEERING_THR3 <= abs(self.steering) < self.STEERING_THR4 + and self.j < self.MAX_DATA + ): + + self.collection_steering3() + self.progress_bar2.update(1) + self.j += 1 + + self.steering_message_publish(self.j, 2) + + elif ( + self.STEERING_THR4 <= abs(self.steering) < self.STEERING_THR5 + and self.h < self.MAX_DATA + ): + + self.collection_steering4() + self.progress_bar3.update(1) + self.h += 1 + + self.steering_message_publish(self.h, 3) + + elif ( + abs(self.steering) >= 
self.STEERING_THR5 and self.a < self.MAX_DATA + ): + + self.collection_steering5() + self.progress_bar4.update(1) + self.a += 1 + + self.steering_message_publish(self.a, 4) + + # high throttling scenario + + elif self.throttling > self.THROTTLE_THRESHOLD: + + if ( + self.STEERING_THR1 <= abs(self.steering) < self.STEERING_THR2 + and self.kk < self.MAX_DATA + ): + + self.collection_steering1() + self.progress_bar10.update(1) + self.kk += 1 + + self.steering_message_publish(self.kk, 5) + + elif ( + self.STEERING_THR2 <= abs(self.steering) < self.STEERING_THR3 + and self.ii < self.MAX_DATA + ): + + self.collection_steering2() + self.progress_bar11.update(1) + self.ii += 1 + + self.steering_message_publish(self.ii, 6) + + elif ( + self.STEERING_THR3 <= abs(self.steering) < self.STEERING_THR4 + and self.jj < self.MAX_DATA + ): + + self.collection_steering3() + self.progress_bar12.update(1) + self.jj += 1 + + self.steering_message_publish(self.jj, 7) + + elif ( + self.STEERING_THR4 <= abs(self.steering) < self.STEERING_THR5 + and self.hh < self.MAX_DATA + ): + + self.collection_steering4() + self.progress_bar13.update(1) + self.hh += 1 + + self.steering_message_publish(self.hh, 8) + + elif ( + abs(self.steering) >= self.STEERING_THR5 and self.aa < self.MAX_DATA + ): + + self.collection_steering5() + self.progress_bar14.update(1) + self.aa += 1 + + self.steering_message_publish(self.aa, 9) + + def steering_message_callback(self): + self.progress.publish(self.steer_processes_msg) + + +def main(): + rclpy.init() + node = primotest() + rclpy.spin(node) + + +if __name__ == "__main__": + main() diff --git a/vehicle/learning_based_accel_brake_map_calibrator/scripts_steering/data_monitor_steer.py b/vehicle/learning_based_accel_brake_map_calibrator/scripts_steering/data_monitor_steer.py new file mode 100644 index 0000000000000..96d071c7a3acf --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/scripts_steering/data_monitor_steer.py @@ -0,0 +1,144 @@ +#! 
/usr/bin/env python3
+import rclpy
+import rclpy.node
+from tier4_vehicle_msgs.msg import ActuationStatusStamped
+from autoware_auto_vehicle_msgs.msg import VelocityReport
+from autoware_auto_vehicle_msgs.msg import SteeringReport
+from std_msgs.msg import Float32
+from sensor_msgs.msg import Imu
+
+
+class DataMonitor(rclpy.node.Node):
+    def __init__(self):
+        self.brake_timestamp = 0.0
+        self.throttle_timestamp = 0.0
+        self.velocity_timestamp = 0.0
+        self.steering_timestamp = 0.0
+        self.pitch_timestamp = 0.0
+        self.imu_timestamp = 0.0
+        self.can_timestamp = 0.0
+        super().__init__("data_monitor")
+
+        self.declare_parameter("pitch_topic", "/sensing/gnss/chc/pitch")
+        self.declare_parameter(
+            "actuation_status_topic", "/vehicle/status/actuation_status"
+        )
+        self.declare_parameter(
+            "steering_status_topic", "/vehicle/status/steering_status"
+        )
+        self.declare_parameter(
+            "velocity_status_topic", "/vehicle/status/velocity_status"
+        )
+        self.declare_parameter("imu_topic", "/sensing/gnss/chc/imu")
+
+        # Get topic names from parameters
+        self.pitch_topic = (
+            self.get_parameter("pitch_topic").get_parameter_value().string_value
+        )
+        self.actuation_status_topic = (
+            self.get_parameter("actuation_status_topic")
+            .get_parameter_value()
+            .string_value
+        )
+        self.steering_status_topic = (
+            self.get_parameter("steering_status_topic")
+            .get_parameter_value()
+            .string_value
+        )
+        self.velocity_status_topic = (
+            self.get_parameter("velocity_status_topic")
+            .get_parameter_value()
+            .string_value
+        )
+        self.imu_topic = (
+            self.get_parameter("imu_topic").get_parameter_value().string_value
+        )
+
+        self.timer = self.create_timer(1, self.timer_callback)
+
+        self.create_subscription(
+            ActuationStatusStamped,
+            self.actuation_status_topic,
+            self.drive_topic_callback,
+            10,
+        )
+        self.create_subscription(
+            SteeringReport, self.steering_status_topic, self.steer_topic_callback, 10
+        )
+        self.create_subscription(
+            VelocityReport, self.velocity_status_topic, self.velocity_topic_callback, 10
+        )
+        self.create_subscription(
+            Float32, self.pitch_topic, self.pitch_topic_callback, 10
+        )
+        self.create_subscription(Imu, self.imu_topic, self.imu_topic_callback, 10)
+
+    def drive_topic_callback(self, msg):
+        self.throttle_timestamp = int(self.get_clock().now().nanoseconds / 1000000)
+
+    def velocity_topic_callback(self, msg):
+        self.velocity_timestamp = int(self.get_clock().now().nanoseconds / 1000000)
+
+    def steer_topic_callback(self, msg):
+        self.steering_timestamp = int(self.get_clock().now().nanoseconds / 1000000)
+
+    def pitch_topic_callback(self, msg):
+        self.pitch_timestamp = int(self.get_clock().now().nanoseconds / 1000000)
+
+    def imu_topic_callback(self, msg):
+        self.imu_timestamp = int(self.get_clock().now().nanoseconds / 1000000)
+
+    def timer_callback(self):
+        self.get_logger().info("data monitor checking")
+        timestamp = int(self.get_clock().now().nanoseconds / 1000000)
+
+        throttle_timegap = timestamp - self.throttle_timestamp
+        steering_timegap = timestamp - self.steering_timestamp
+        pitch_timegap = timestamp - self.pitch_timestamp
+        imu_timegap = timestamp - self.imu_timestamp
+        velocity_timegap = timestamp - self.velocity_timestamp
+
+        if self.throttle_timestamp == 0:
+            self.get_logger().error("throttle topic is not published")
+        elif throttle_timegap > 1000:
+            self.get_logger().error("throttle topic is not alive")
+        else:
+            self.get_logger().debug("throttle topic is good")
+
+        if self.steering_timestamp == 0:
+            self.get_logger().error("steering topic is not published")
+        elif steering_timegap > 1000:
+            self.get_logger().error("steering topic is not alive")
+        else:
+            self.get_logger().debug("steering topic is good")
+
+        if self.pitch_timestamp == 0:
+            self.get_logger().error("pitch topic is not published")
+        elif pitch_timegap > 1000:
+            self.get_logger().error("pitch topic is not alive")
+        else:
+            self.get_logger().debug("pitch topic is good")
+
+        if self.imu_timestamp == 0:
+            self.get_logger().error("imu topic is not published")
+        elif imu_timegap > 1000:
+            self.get_logger().error("imu topic is not alive")
+        else:
+            self.get_logger().debug("imu topic is good")
+
+        if self.velocity_timestamp == 0:
+            self.get_logger().error("velocity topic is not published")
+        elif velocity_timegap > 1000:
+            self.get_logger().error("velocity topic is not alive")
+        else:
+            self.get_logger().debug("velocity topic is good")
+
+
+def main():
+    rclpy.init()
+    node = DataMonitor()
+    rclpy.spin(node)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/vehicle/learning_based_accel_brake_map_calibrator/scripts_steering/neural_network_steer1.py b/vehicle/learning_based_accel_brake_map_calibrator/scripts_steering/neural_network_steer1.py
new file mode 100644
index 0000000000000..f7b27a22071a4
--- /dev/null
+++ b/vehicle/learning_based_accel_brake_map_calibrator/scripts_steering/neural_network_steer1.py
@@ -0,0 +1,251 @@
+#! /usr/bin/env python3
+import math
+
+import pandas as pd
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.optim as optim
+from sklearn.model_selection import train_test_split
+import matplotlib.pyplot as plt
+from sklearn.metrics import mean_squared_error
+from sklearn.metrics import mean_absolute_error
+from sklearn.metrics import r2_score
+from scipy.signal import medfilt
+import rclpy
+from rclpy.node import Node
+
+
+class NeuralNetworkSteering1(Node):
+    class NeuralNetwork(nn.Module):
+        def __init__(self):
+            super(NeuralNetworkSteering1.NeuralNetwork, self).__init__()
+            self.fc1 = nn.Linear(2, 128)
+            self.relu1 = nn.ReLU()
+            self.fc2 = nn.Linear(128, 64)
+            self.relu2 = nn.ReLU()
+            self.fc3 = nn.Linear(64, 1)
+
+        def forward(self, x):
+            x = self.fc1(x)
+            x = self.relu1(x)
+            x = self.fc2(x)
+            x = self.relu2(x)
+            x = self.fc3(x)
+
+            return x
+
+    def __init__(self):
+        super().__init__("neural_network_steering1")
+
+        self.model = self.NeuralNetwork()
+
+        data = pd.read_csv("steering_01.csv")
+        dataa = pd.read_csv("steering_01.csv")
+        ush = pd.read_csv("steering_01.csv")
+
+        columns = ["Velocity", "Throttling", "Acceleration_measured"]
+
+        # Apply a median filter (kernel size set by the mean_filter_size parameter) to each column
+        self.declare_parameter("mean_filter_size", 21)
+        self.filter_size = (
+            self.get_parameter("mean_filter_size").get_parameter_value().integer_value
+        )
+
+        for col in columns:
+            data[col] = medfilt(data[col], kernel_size=self.filter_size)
+            dataa[col] = medfilt(dataa[col], kernel_size=self.filter_size)
+
+        # declare params from launch file to default values
+        self.declare_parameter("filter_vel", 10.0)
+        self.declare_parameter("filter_cmd", 10.0)
+        self.declare_parameter("filter_acc", 10.0)
+
+        # Load params from launch file
+        self.FILTER_VEL = (
+            self.get_parameter("filter_vel").get_parameter_value().double_value
+        )
+        self.FILTER_CMD = (
+            self.get_parameter("filter_cmd").get_parameter_value().double_value
+        )
+        self.FILTER_ACC = (
+            self.get_parameter("filter_acc").get_parameter_value().double_value
+        )
+
+        mean0 = data["Velocity"].mean()
+        std0 = data["Velocity"].std()
+        data["Velocity"] = (data["Velocity"] - mean0) / std0
+        dataa["Velocity"] = (dataa["Velocity"] - mean0) / std0
+
+
data = data[abs(data["Velocity"] - mean0) <= std0 * self.FILTER_VEL] + dataa = dataa[abs(dataa["Velocity"] - mean0) <= std0 * self.FILTER_VEL] + + mean1 = data["Throttling"].mean() + std1 = data["Throttling"].std() + data["Throttling"] = (data["Throttling"] - mean1) / std1 + dataa["Throttling"] = (dataa["Throttling"] - mean1) / std1 + + data = data[abs(data["Throttling"] - mean1) <= std1 * self.FILTER_CMD] + dataa = dataa[abs(dataa["Throttling"] - mean1) <= std1 * self.FILTER_CMD] + + mean2 = data["Acceleration_measured"].mean() + std2 = data["Acceleration_measured"].std() + data["Acceleration_measured"] = (data["Acceleration_measured"] - mean2) / std2 + dataa["Acceleration_measured"] = (dataa["Acceleration_measured"] - mean2) / std2 + + data = data[ + abs(data["Acceleration_measured"] - mean2) <= std2 * self.FILTER_ACC + ] + dataa = dataa[ + abs(dataa["Acceleration_measured"] - mean2) <= std2 * self.FILTER_ACC + ] + + # Split the data into input features (velocity and throttle) and target (acceleration) and test/train + + X = data[["Velocity", "Throttling"]].values + y = data["Acceleration_measured"].values + + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=0.2, random_state=42 + ) + + # Convert NumPy arrays to PyTorch tensors + X_train = torch.tensor(X_train, dtype=torch.float32) + y_train = torch.tensor(y_train, dtype=torch.float32) + X_test = torch.tensor(X_test, dtype=torch.float32) + y_test = torch.tensor(y_test, dtype=torch.float32) + + criterion = nn.MSELoss() + optimizer = optim.Adam( + self.model.parameters(), lr=0.001 + ) # , weight_decay=0.001) + + # Training loop + num_epochs = 100 + for epoch in range(num_epochs): + # Forward pass + outputs = self.model(X_train) + + loss = criterion(outputs, y_train.view(-1, 1)) + + # Backpropagation and optimization + optimizer.zero_grad() + loss.backward() + optimizer.step() + + with torch.no_grad(): + test_outputs = self.model(X_test) + # test_loss = criterion(test_outputs, y_test.view(-1, 1)) + # print(f"Mean Squared Error on Test Data: {test_loss.item()}") + + # Visualization (you can modify the range based on your needs) + + velocity_range = np.linspace(0, (X[:, 0] * std0 + mean0).max(), 20) + # throttling_range = np.linspace(0, 50, 20) + throttling_range = np.linspace(0, (X[:, 1] * std1 + mean1).max(), 20) + V, A = np.meshgrid(velocity_range, throttling_range) + + input_grid = np.column_stack( + ((V.flatten() - mean0) / std0, (A.flatten() - mean1) / std1) + ) + input_grid = torch.tensor(input_grid, dtype=torch.float32) + + with torch.no_grad(): + commands = self.model(input_grid).reshape(V.shape) + + commands_new = commands * std2 + mean2 + + # Save the trained model + # torch.save(model.state_dict(), 'trained_throttle.pth') + + # evaluation + mse = mean_squared_error(y_test, test_outputs.view(-1).numpy()) + self.get_logger().info(f"Mean Squared Error on Test Data: {mse}") + + mae = mean_absolute_error(y_test, test_outputs.view(-1).numpy()) + self.get_logger().info(f"Mean Absolute Error on Test Data: {mae}") + + rmse = math.sqrt(mse) + self.get_logger().info(f"Root Mean Squared Error on Test Data: {rmse}") + + r2 = r2_score(y_test, test_outputs.view(-1).numpy()) + self.get_logger().info(f"R-squared (R2) Score on Test Data: {r2}") + + # Save NN model in csv correct format for testing in the real vehicle + + velocity_headers = ["{:.2f}".format(v) for v in velocity_range] + + # we normalize throttling values between 0 and 1 + throttling_range /= 100 + + headers = [""] + velocity_headers + + 
commands_new_with_throttling = np.column_stack((throttling_range, commands_new)) + + # WHEN YOU READ ANOTHER CSV FIRE FOR ANOTHER STEERING CONDITION, RENAME THE FOLLOWING CSV FILE AS WELL!!! + + csv_filename = "steer_map_1.csv" + np.savetxt( + csv_filename, + commands_new_with_throttling, + delimiter=",", + header=",".join(headers), + comments="", + ) + + xdata = dataa.Velocity * std0 + mean0 + ydata = dataa.Throttling * std1 + mean1 + zdata = dataa.Acceleration_measured * std2 + mean2 + + fig = plt.figure() + ax = fig.add_subplot(111, projection="3d") + + ax.scatter3D(xdata, ydata, zdata, c=zdata, marker="o") + surf = ax.plot_surface(V, A, commands_new, cmap="viridis") + + ax.set_xlabel("Velocity") + ax.set_zlabel("Acceleration") + ax.set_ylabel("Throttling Output") + + # CHANGE THE TITLE OF THE PLOT DEPENDING ON THE CSV TABLE YOU ARE VISUALIZING + + ax.set_title("Neural Network Output vs. Velocity and Throttling | Steering 1") + + plt.figure(figsize=(10, 6)) + plt.subplot(3, 1, 1) + plt.hist(ush["Velocity"], bins=20, color="skyblue", edgecolor="black") + plt.title("Distribution of Velocity - Table 1") + plt.xlabel("Velocity") + plt.ylabel("Frequency") + + # Plot the distribution of 'Throttling' + plt.subplot(3, 1, 2) + plt.hist(ush["Throttling"], bins=20, color="salmon", edgecolor="black") + plt.title("Distribution of Throttling - Table 1") + plt.xlabel("Throttling") + plt.ylabel("Frequency") + + # Plot the distribution of 'Acceleration_measured' + plt.subplot(3, 1, 3) + plt.hist( + ush["Acceleration_measured"], bins=20, color="lightgreen", edgecolor="black" + ) + plt.title("Distribution of Acceleration - Table 1") + plt.xlabel("Acceleration") + plt.ylabel("Frequency") + + plt.tight_layout() + + fig.colorbar(surf) + + plt.show() + + +def main(): + rclpy.init() + neural_network_steering1 = NeuralNetworkSteering1() + rclpy.spin(neural_network_steering1) + + +if __name__ == "__main__": + main() diff --git a/vehicle/learning_based_accel_brake_map_calibrator/scripts_steering/neural_network_steer2.py b/vehicle/learning_based_accel_brake_map_calibrator/scripts_steering/neural_network_steer2.py new file mode 100644 index 0000000000000..2294dd4ff9568 --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/scripts_steering/neural_network_steer2.py @@ -0,0 +1,251 @@ +#! 
/usr/bin/env python3 +import math + +import pandas as pd +import numpy as np +import torch +import torch.nn as nn +import torch.optim as optim +from sklearn.model_selection import train_test_split +import matplotlib.pyplot as plt +from sklearn.metrics import mean_squared_error +from sklearn.metrics import mean_absolute_error +from sklearn.metrics import r2_score +from scipy.signal import medfilt +import rclpy +from rclpy.node import Node + + +class NeuralNetworkSteering2(Node): + class NeuralNetwork(nn.Module): + def __init__(self): + super(NeuralNetworkSteering2.NeuralNetwork, self).__init__() + self.fc1 = nn.Linear(2, 128) + self.relu1 = nn.ReLU() + self.fc2 = nn.Linear(128, 64) + self.relu2 = nn.ReLU() + self.fc3 = nn.Linear(64, 1) + + def forward(self, x): + x = self.fc1(x) + x = self.relu1(x) + x = self.fc2(x) + x = self.relu2(x) + x = self.fc3(x) + + return x + + def __init__(self): + super().__init__("neural_network_steering2") + + self.model = self.NeuralNetwork() + + data = pd.read_csv("steering_02.csv") + dataa = pd.read_csv("steering_02.csv") + ush = pd.read_csv("steering_02.csv") + + columns = ["Velocity", "Throttling", "Acceleration_measured"] + + # Apply a median filter with a window size of 11 to each column + self.declare_parameter("mean_filter_size", 21) + self.filter_size = ( + self.get_parameter("mean_filter_size").get_parameter_value().integer_value + ) + + for col in columns: + data[col] = medfilt(data[col], kernel_size=self.filter_size) + dataa[col] = medfilt(dataa[col], kernel_size=self.filter_size) + + # declare params from launch file to default values + self.declare_parameter("filter_vel", 10.0) + self.declare_parameter("filter_cmd", 10.0) + self.declare_parameter("filter_acc", 10.0) + + # Load params from launch file + self.FILTER_VEL = ( + self.get_parameter("filter_vel").get_parameter_value().double_value + ) + self.FILTER_CMD = ( + self.get_parameter("filter_cmd").get_parameter_value().double_value + ) + self.FILTER_ACC = ( + self.get_parameter("filter_acc").get_parameter_value().double_value + ) + + mean0 = data["Velocity"].mean() + std0 = data["Velocity"].std() + data["Velocity"] = (data["Velocity"] - mean0) / std0 + dataa["Velocity"] = (dataa["Velocity"] - mean0) / std0 + + data = data[abs(data["Velocity"] - mean0) <= std0 * self.FILTER_VEL] + dataa = dataa[abs(dataa["Velocity"] - mean0) <= std0 * self.FILTER_VEL] + + mean1 = data["Throttling"].mean() + std1 = data["Throttling"].std() + data["Throttling"] = (data["Throttling"] - mean1) / std1 + dataa["Throttling"] = (dataa["Throttling"] - mean1) / std1 + + data = data[abs(data["Throttling"] - mean1) <= std1 * self.FILTER_CMD] + dataa = dataa[abs(dataa["Throttling"] - mean1) <= std1 * self.FILTER_CMD] + + mean2 = data["Acceleration_measured"].mean() + std2 = data["Acceleration_measured"].std() + data["Acceleration_measured"] = (data["Acceleration_measured"] - mean2) / std2 + dataa["Acceleration_measured"] = (dataa["Acceleration_measured"] - mean2) / std2 + + data = data[ + abs(data["Acceleration_measured"] - mean2) <= std2 * self.FILTER_ACC + ] + dataa = dataa[ + abs(dataa["Acceleration_measured"] - mean2) <= std2 * self.FILTER_ACC + ] + + # Split the data into input features (velocity and throttle) and target (acceleration) and test/train + + X = data[["Velocity", "Throttling"]].values + y = data["Acceleration_measured"].values + + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=0.2, random_state=42 + ) + + # Convert NumPy arrays to PyTorch tensors + X_train = torch.tensor(X_train, 
dtype=torch.float32) + y_train = torch.tensor(y_train, dtype=torch.float32) + X_test = torch.tensor(X_test, dtype=torch.float32) + y_test = torch.tensor(y_test, dtype=torch.float32) + + criterion = nn.MSELoss() + optimizer = optim.Adam( + self.model.parameters(), lr=0.001 + ) # , weight_decay=0.001) + + # Training loop + num_epochs = 100 + for epoch in range(num_epochs): + # Forward pass + outputs = self.model(X_train) + + loss = criterion(outputs, y_train.view(-1, 1)) + + # Backpropagation and optimization + optimizer.zero_grad() + loss.backward() + optimizer.step() + + with torch.no_grad(): + test_outputs = self.model(X_test) + # test_loss = criterion(test_outputs, y_test.view(-1, 1)) + # print(f"Mean Squared Error on Test Data: {test_loss.item()}") + + # Visualization (you can modify the range based on your needs) + + velocity_range = np.linspace(0, (X[:, 0] * std0 + mean0).max(), 20) + # throttling_range = np.linspace(0, 50, 20) + throttling_range = np.linspace(0, (X[:, 1] * std1 + mean1).max(), 20) + V, A = np.meshgrid(velocity_range, throttling_range) + + input_grid = np.column_stack( + ((V.flatten() - mean0) / std0, (A.flatten() - mean1) / std1) + ) + input_grid = torch.tensor(input_grid, dtype=torch.float32) + + with torch.no_grad(): + commands = self.model(input_grid).reshape(V.shape) + + commands_new = commands * std2 + mean2 + + # Save the trained model + # torch.save(model.state_dict(), 'trained_throttle.pth') + + # evaluation + mse = mean_squared_error(y_test, test_outputs.view(-1).numpy()) + self.get_logger().info(f"Mean Squared Error on Test Data: {mse}") + + mae = mean_absolute_error(y_test, test_outputs.view(-1).numpy()) + self.get_logger().info(f"Mean Absolute Error on Test Data: {mae}") + + rmse = math.sqrt(mse) + self.get_logger().info(f"Root Mean Squared Error on Test Data: {rmse}") + + r2 = r2_score(y_test, test_outputs.view(-1).numpy()) + self.get_logger().info(f"R-squared (R2) Score on Test Data: {r2}") + + # Save NN model in csv correct format for testing in the real vehicle + + velocity_headers = ["{:.2f}".format(v) for v in velocity_range] + + # we normalize throttling values between 0 and 1 + throttling_range /= 100 + + headers = [""] + velocity_headers + + commands_new_with_throttling = np.column_stack((throttling_range, commands_new)) + + # WHEN YOU READ ANOTHER CSV FIRE FOR ANOTHER STEERING CONDITION, RENAME THE FOLLOWING CSV FILE AS WELL!!! + + csv_filename = "steer_map_2.csv" + np.savetxt( + csv_filename, + commands_new_with_throttling, + delimiter=",", + header=",".join(headers), + comments="", + ) + + xdata = dataa.Velocity * std0 + mean0 + ydata = dataa.Throttling * std1 + mean1 + zdata = dataa.Acceleration_measured * std2 + mean2 + + fig = plt.figure() + ax = fig.add_subplot(111, projection="3d") + + ax.scatter3D(xdata, ydata, zdata, c=zdata, marker="o") + surf = ax.plot_surface(V, A, commands_new, cmap="viridis") + + ax.set_xlabel("Velocity") + ax.set_zlabel("Acceleration") + ax.set_ylabel("Throttling Output") + + # CHANGE THE TITLE OF THE PLOT DEPENDING ON THE CSV TABLE YOU ARE VISUALIZING + + ax.set_title("Neural Network Output vs. 
Velocity and Throttling | Steering 2") + + plt.figure(figsize=(10, 6)) + plt.subplot(3, 1, 1) + plt.hist(ush["Velocity"], bins=20, color="skyblue", edgecolor="black") + plt.title("Distribution of Velocity - Table 2") + plt.xlabel("Velocity") + plt.ylabel("Frequency") + + # Plot the distribution of 'Throttling' + plt.subplot(3, 1, 2) + plt.hist(ush["Throttling"], bins=20, color="salmon", edgecolor="black") + plt.title("Distribution of Throttling - Table 2") + plt.xlabel("Throttling") + plt.ylabel("Frequency") + + # Plot the distribution of 'Acceleration_measured' + plt.subplot(3, 1, 3) + plt.hist( + ush["Acceleration_measured"], bins=20, color="lightgreen", edgecolor="black" + ) + plt.title("Distribution of Acceleration - Table 2") + plt.xlabel("Acceleration") + plt.ylabel("Frequency") + + plt.tight_layout() + + fig.colorbar(surf) + + plt.show() + + +def main(): + rclpy.init() + neural_network_steering2 = NeuralNetworkSteering2() + rclpy.spin(neural_network_steering2) + + +if __name__ == "__main__": + main() diff --git a/vehicle/learning_based_accel_brake_map_calibrator/scripts_steering/neural_network_steer3.py b/vehicle/learning_based_accel_brake_map_calibrator/scripts_steering/neural_network_steer3.py new file mode 100644 index 0000000000000..cce696d5157fa --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/scripts_steering/neural_network_steer3.py @@ -0,0 +1,251 @@ +#! /usr/bin/env python3 +import math + +import pandas as pd +import numpy as np +import torch +import torch.nn as nn +import torch.optim as optim +from sklearn.model_selection import train_test_split +import matplotlib.pyplot as plt +from sklearn.metrics import mean_squared_error +from sklearn.metrics import mean_absolute_error +from sklearn.metrics import r2_score +from scipy.signal import medfilt +import rclpy +from rclpy.node import Node + + +class NeuralNetworkSteering3(Node): + class NeuralNetwork(nn.Module): + def __init__(self): + super(NeuralNetworkSteering3.NeuralNetwork, self).__init__() + self.fc1 = nn.Linear(2, 128) + self.relu1 = nn.ReLU() + self.fc2 = nn.Linear(128, 64) + self.relu2 = nn.ReLU() + self.fc3 = nn.Linear(64, 1) + + def forward(self, x): + x = self.fc1(x) + x = self.relu1(x) + x = self.fc2(x) + x = self.relu2(x) + x = self.fc3(x) + + return x + + def __init__(self): + super().__init__("neural_network_steering3") + + self.model = self.NeuralNetwork() + + data = pd.read_csv("steering_03.csv") + dataa = pd.read_csv("steering_03.csv") + ush = pd.read_csv("steering_03.csv") + + columns = ["Velocity", "Throttling", "Acceleration_measured"] + + # Apply a median filter with a window size of 11 to each column + self.declare_parameter("mean_filter_size", 21) + self.filter_size = ( + self.get_parameter("mean_filter_size").get_parameter_value().integer_value + ) + + for col in columns: + data[col] = medfilt(data[col], kernel_size=self.filter_size) + dataa[col] = medfilt(dataa[col], kernel_size=self.filter_size) + + # declare params from launch file to default values + self.declare_parameter("filter_vel", 10.0) + self.declare_parameter("filter_cmd", 10.0) + self.declare_parameter("filter_acc", 10.0) + + # Load params from launch file + self.FILTER_VEL = ( + self.get_parameter("filter_vel").get_parameter_value().double_value + ) + self.FILTER_CMD = ( + self.get_parameter("filter_cmd").get_parameter_value().double_value + ) + self.FILTER_ACC = ( + self.get_parameter("filter_acc").get_parameter_value().double_value + ) + + mean0 = data["Velocity"].mean() + std0 = data["Velocity"].std() + 
data["Velocity"] = (data["Velocity"] - mean0) / std0 + dataa["Velocity"] = (dataa["Velocity"] - mean0) / std0 + + data = data[abs(data["Velocity"] - mean0) <= std0 * self.FILTER_VEL] + dataa = dataa[abs(dataa["Velocity"] - mean0) <= std0 * self.FILTER_VEL] + + mean1 = data["Throttling"].mean() + std1 = data["Throttling"].std() + data["Throttling"] = (data["Throttling"] - mean1) / std1 + dataa["Throttling"] = (dataa["Throttling"] - mean1) / std1 + + data = data[abs(data["Throttling"] - mean1) <= std1 * self.FILTER_CMD] + dataa = dataa[abs(dataa["Throttling"] - mean1) <= std1 * self.FILTER_CMD] + + mean2 = data["Acceleration_measured"].mean() + std2 = data["Acceleration_measured"].std() + data["Acceleration_measured"] = (data["Acceleration_measured"] - mean2) / std2 + dataa["Acceleration_measured"] = (dataa["Acceleration_measured"] - mean2) / std2 + + data = data[ + abs(data["Acceleration_measured"] - mean2) <= std2 * self.FILTER_ACC + ] + dataa = dataa[ + abs(dataa["Acceleration_measured"] - mean2) <= std2 * self.FILTER_ACC + ] + + # Split the data into input features (velocity and throttle) and target (acceleration) and test/train + + X = data[["Velocity", "Throttling"]].values + y = data["Acceleration_measured"].values + + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=0.2, random_state=42 + ) + + # Convert NumPy arrays to PyTorch tensors + X_train = torch.tensor(X_train, dtype=torch.float32) + y_train = torch.tensor(y_train, dtype=torch.float32) + X_test = torch.tensor(X_test, dtype=torch.float32) + y_test = torch.tensor(y_test, dtype=torch.float32) + + criterion = nn.MSELoss() + optimizer = optim.Adam( + self.model.parameters(), lr=0.001 + ) # , weight_decay=0.001) + + # Training loop + num_epochs = 100 + for epoch in range(num_epochs): + # Forward pass + outputs = self.model(X_train) + + loss = criterion(outputs, y_train.view(-1, 1)) + + # Backpropagation and optimization + optimizer.zero_grad() + loss.backward() + optimizer.step() + + with torch.no_grad(): + test_outputs = self.model(X_test) + # test_loss = criterion(test_outputs, y_test.view(-1, 1)) + # print(f"Mean Squared Error on Test Data: {test_loss.item()}") + + # Visualization (you can modify the range based on your needs) + + velocity_range = np.linspace(0, (X[:, 0] * std0 + mean0).max(), 20) + # throttling_range = np.linspace(0, 50, 20) + throttling_range = np.linspace(0, (X[:, 1] * std1 + mean1).max(), 20) + V, A = np.meshgrid(velocity_range, throttling_range) + + input_grid = np.column_stack( + ((V.flatten() - mean0) / std0, (A.flatten() - mean1) / std1) + ) + input_grid = torch.tensor(input_grid, dtype=torch.float32) + + with torch.no_grad(): + commands = self.model(input_grid).reshape(V.shape) + + commands_new = commands * std2 + mean2 + + # Save the trained model + # torch.save(model.state_dict(), 'trained_throttle.pth') + + # evaluation + mse = mean_squared_error(y_test, test_outputs.view(-1).numpy()) + self.get_logger().info(f"Mean Squared Error on Test Data: {mse}") + + mae = mean_absolute_error(y_test, test_outputs.view(-1).numpy()) + self.get_logger().info(f"Mean Absolute Error on Test Data: {mae}") + + rmse = math.sqrt(mse) + self.get_logger().info(f"Root Mean Squared Error on Test Data: {rmse}") + + r2 = r2_score(y_test, test_outputs.view(-1).numpy()) + self.get_logger().info(f"R-squared (R2) Score on Test Data: {r2}") + + # Save NN model in csv correct format for testing in the real vehicle + + velocity_headers = ["{:.2f}".format(v) for v in velocity_range] + + # we normalize throttling 
values between 0 and 1 + throttling_range /= 100 + + headers = [""] + velocity_headers + + commands_new_with_throttling = np.column_stack((throttling_range, commands_new)) + + # WHEN YOU READ ANOTHER CSV FIRE FOR ANOTHER STEERING CONDITION, RENAME THE FOLLOWING CSV FILE AS WELL!!! + + csv_filename = "steer_map_3.csv" + np.savetxt( + csv_filename, + commands_new_with_throttling, + delimiter=",", + header=",".join(headers), + comments="", + ) + + xdata = dataa.Velocity * std0 + mean0 + ydata = dataa.Throttling * std1 + mean1 + zdata = dataa.Acceleration_measured * std2 + mean2 + + fig = plt.figure() + ax = fig.add_subplot(111, projection="3d") + + ax.scatter3D(xdata, ydata, zdata, c=zdata, marker="o") + surf = ax.plot_surface(V, A, commands_new, cmap="viridis") + + ax.set_xlabel("Velocity") + ax.set_zlabel("Acceleration") + ax.set_ylabel("Throttling Output") + + # CHANGE THE TITLE OF THE PLOT DEPENDING ON THE CSV TABLE YOU ARE VISUALIZING + + ax.set_title("Neural Network Output vs. Velocity and Throttling | Steering 3") + + plt.figure(figsize=(10, 6)) + plt.subplot(3, 1, 1) + plt.hist(ush["Velocity"], bins=20, color="skyblue", edgecolor="black") + plt.title("Distribution of Velocity - Table 3") + plt.xlabel("Velocity") + plt.ylabel("Frequency") + + # Plot the distribution of 'Throttling' + plt.subplot(3, 1, 2) + plt.hist(ush["Throttling"], bins=20, color="salmon", edgecolor="black") + plt.title("Distribution of Throttling - Table 3") + plt.xlabel("Throttling") + plt.ylabel("Frequency") + + # Plot the distribution of 'Acceleration_measured' + plt.subplot(3, 1, 3) + plt.hist( + ush["Acceleration_measured"], bins=20, color="lightgreen", edgecolor="black" + ) + plt.title("Distribution of Acceleration - Table 3") + plt.xlabel("Acceleration") + plt.ylabel("Frequency") + + plt.tight_layout() + + fig.colorbar(surf) + + plt.show() + + +def main(): + rclpy.init() + neural_network_steering3 = NeuralNetworkSteering3() + rclpy.spin(neural_network_steering3) + + +if __name__ == "__main__": + main() diff --git a/vehicle/learning_based_accel_brake_map_calibrator/scripts_steering/neural_network_steer4.py b/vehicle/learning_based_accel_brake_map_calibrator/scripts_steering/neural_network_steer4.py new file mode 100644 index 0000000000000..d732d59230c46 --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/scripts_steering/neural_network_steer4.py @@ -0,0 +1,251 @@ +#! 
/usr/bin/env python3 +import math + +import pandas as pd +import numpy as np +import torch +import torch.nn as nn +import torch.optim as optim +from sklearn.model_selection import train_test_split +import matplotlib.pyplot as plt +from sklearn.metrics import mean_squared_error +from sklearn.metrics import mean_absolute_error +from sklearn.metrics import r2_score +from scipy.signal import medfilt +import rclpy +from rclpy.node import Node + + +class NeuralNetworkSteering4(Node): + class NeuralNetwork(nn.Module): + def __init__(self): + super(NeuralNetworkSteering4.NeuralNetwork, self).__init__() + self.fc1 = nn.Linear(2, 128) + self.relu1 = nn.ReLU() + self.fc2 = nn.Linear(128, 64) + self.relu2 = nn.ReLU() + self.fc3 = nn.Linear(64, 1) + + def forward(self, x): + x = self.fc1(x) + x = self.relu1(x) + x = self.fc2(x) + x = self.relu2(x) + x = self.fc3(x) + + return x + + def __init__(self): + super().__init__("neural_network_steering4") + + self.model = self.NeuralNetwork() + + data = pd.read_csv("steering_04.csv") + dataa = pd.read_csv("steering_04.csv") + ush = pd.read_csv("steering_04.csv") + + columns = ["Velocity", "Throttling", "Acceleration_measured"] + + # Apply a median filter with a window size of 11 to each column + self.declare_parameter("mean_filter_size", 21) + self.filter_size = ( + self.get_parameter("mean_filter_size").get_parameter_value().integer_value + ) + + for col in columns: + data[col] = medfilt(data[col], kernel_size=self.filter_size) + dataa[col] = medfilt(dataa[col], kernel_size=self.filter_size) + + # declare params from launch file to default values + self.declare_parameter("filter_vel", 10.0) + self.declare_parameter("filter_cmd", 10.0) + self.declare_parameter("filter_acc", 10.0) + + # Load params from launch file + self.FILTER_VEL = ( + self.get_parameter("filter_vel").get_parameter_value().double_value + ) + self.FILTER_CMD = ( + self.get_parameter("filter_cmd").get_parameter_value().double_value + ) + self.FILTER_ACC = ( + self.get_parameter("filter_acc").get_parameter_value().double_value + ) + + mean0 = data["Velocity"].mean() + std0 = data["Velocity"].std() + data["Velocity"] = (data["Velocity"] - mean0) / std0 + dataa["Velocity"] = (dataa["Velocity"] - mean0) / std0 + + data = data[abs(data["Velocity"] - mean0) <= std0 * self.FILTER_VEL] + dataa = dataa[abs(dataa["Velocity"] - mean0) <= std0 * self.FILTER_VEL] + + mean1 = data["Throttling"].mean() + std1 = data["Throttling"].std() + data["Throttling"] = (data["Throttling"] - mean1) / std1 + dataa["Throttling"] = (dataa["Throttling"] - mean1) / std1 + + data = data[abs(data["Throttling"] - mean1) <= std1 * self.FILTER_CMD] + dataa = dataa[abs(dataa["Throttling"] - mean1) <= std1 * self.FILTER_CMD] + + mean2 = data["Acceleration_measured"].mean() + std2 = data["Acceleration_measured"].std() + data["Acceleration_measured"] = (data["Acceleration_measured"] - mean2) / std2 + dataa["Acceleration_measured"] = (dataa["Acceleration_measured"] - mean2) / std2 + + data = data[ + abs(data["Acceleration_measured"] - mean2) <= std2 * self.FILTER_ACC + ] + dataa = dataa[ + abs(dataa["Acceleration_measured"] - mean2) <= std2 * self.FILTER_ACC + ] + + # Split the data into input features (velocity and throttle) and target (acceleration) and test/train + + X = data[["Velocity", "Throttling"]].values + y = data["Acceleration_measured"].values + + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=0.2, random_state=42 + ) + + # Convert NumPy arrays to PyTorch tensors + X_train = torch.tensor(X_train, 
dtype=torch.float32) + y_train = torch.tensor(y_train, dtype=torch.float32) + X_test = torch.tensor(X_test, dtype=torch.float32) + y_test = torch.tensor(y_test, dtype=torch.float32) + + criterion = nn.MSELoss() + optimizer = optim.Adam( + self.model.parameters(), lr=0.001 + ) # , weight_decay=0.001) + + # Training loop + num_epochs = 100 + for epoch in range(num_epochs): + # Forward pass + outputs = self.model(X_train) + + loss = criterion(outputs, y_train.view(-1, 1)) + + # Backpropagation and optimization + optimizer.zero_grad() + loss.backward() + optimizer.step() + + with torch.no_grad(): + test_outputs = self.model(X_test) + # test_loss = criterion(test_outputs, y_test.view(-1, 1)) + # print(f"Mean Squared Error on Test Data: {test_loss.item()}") + + # Visualization (you can modify the range based on your needs) + + velocity_range = np.linspace(0, (X[:, 0] * std0 + mean0).max(), 20) + # throttling_range = np.linspace(0, 50, 20) + throttling_range = np.linspace(0, (X[:, 1] * std1 + mean1).max(), 20) + V, A = np.meshgrid(velocity_range, throttling_range) + + input_grid = np.column_stack( + ((V.flatten() - mean0) / std0, (A.flatten() - mean1) / std1) + ) + input_grid = torch.tensor(input_grid, dtype=torch.float32) + + with torch.no_grad(): + commands = self.model(input_grid).reshape(V.shape) + + commands_new = commands * std2 + mean2 + + # Save the trained model + # torch.save(model.state_dict(), 'trained_throttle.pth') + + # evaluation + mse = mean_squared_error(y_test, test_outputs.view(-1).numpy()) + self.get_logger().info(f"Mean Squared Error on Test Data: {mse}") + + mae = mean_absolute_error(y_test, test_outputs.view(-1).numpy()) + self.get_logger().info(f"Mean Absolute Error on Test Data: {mae}") + + rmse = math.sqrt(mse) + self.get_logger().info(f"Root Mean Squared Error on Test Data: {rmse}") + + r2 = r2_score(y_test, test_outputs.view(-1).numpy()) + self.get_logger().info(f"R-squared (R2) Score on Test Data: {r2}") + + # Save NN model in csv correct format for testing in the real vehicle + + velocity_headers = ["{:.2f}".format(v) for v in velocity_range] + + # we normalize throttling values between 0 and 1 + throttling_range /= 100 + + headers = [""] + velocity_headers + + commands_new_with_throttling = np.column_stack((throttling_range, commands_new)) + + # WHEN YOU READ ANOTHER CSV FIRE FOR ANOTHER STEERING CONDITION, RENAME THE FOLLOWING CSV FILE AS WELL!!! + + csv_filename = "steer_map_4.csv" + np.savetxt( + csv_filename, + commands_new_with_throttling, + delimiter=",", + header=",".join(headers), + comments="", + ) + + xdata = dataa.Velocity * std0 + mean0 + ydata = dataa.Throttling * std1 + mean1 + zdata = dataa.Acceleration_measured * std2 + mean2 + + fig = plt.figure() + ax = fig.add_subplot(111, projection="3d") + + ax.scatter3D(xdata, ydata, zdata, c=zdata, marker="o") + surf = ax.plot_surface(V, A, commands_new, cmap="viridis") + + ax.set_xlabel("Velocity") + ax.set_zlabel("Acceleration") + ax.set_ylabel("Throttling Output") + + # CHANGE THE TITLE OF THE PLOT DEPENDING ON THE CSV TABLE YOU ARE VISUALIZING + + ax.set_title("Neural Network Output vs. 
Velocity and Throttling | Steering 4") + + plt.figure(figsize=(10, 6)) + plt.subplot(3, 1, 1) + plt.hist(ush["Velocity"], bins=20, color="skyblue", edgecolor="black") + plt.title("Distribution of Velocity - Table 4") + plt.xlabel("Velocity") + plt.ylabel("Frequency") + + # Plot the distribution of 'Throttling' + plt.subplot(3, 1, 2) + plt.hist(ush["Throttling"], bins=20, color="salmon", edgecolor="black") + plt.title("Distribution of Throttling - Table 4") + plt.xlabel("Throttling") + plt.ylabel("Frequency") + + # Plot the distribution of 'Acceleration_measured' + plt.subplot(3, 1, 3) + plt.hist( + ush["Acceleration_measured"], bins=20, color="lightgreen", edgecolor="black" + ) + plt.title("Distribution of Acceleration - Table 4") + plt.xlabel("Acceleration") + plt.ylabel("Frequency") + + plt.tight_layout() + + fig.colorbar(surf) + + plt.show() + + +def main(): + rclpy.init() + neural_network_steering4 = NeuralNetworkSteering4() + rclpy.spin(neural_network_steering4) + + +if __name__ == "__main__": + main() diff --git a/vehicle/learning_based_accel_brake_map_calibrator/scripts_steering/neural_network_steer5.py b/vehicle/learning_based_accel_brake_map_calibrator/scripts_steering/neural_network_steer5.py new file mode 100644 index 0000000000000..48a849ed8a9e6 --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/scripts_steering/neural_network_steer5.py @@ -0,0 +1,251 @@ +#! /usr/bin/env python3 +import math + +import pandas as pd +import numpy as np +import torch +import torch.nn as nn +import torch.optim as optim +from sklearn.model_selection import train_test_split +import matplotlib.pyplot as plt +from sklearn.metrics import mean_squared_error +from sklearn.metrics import mean_absolute_error +from sklearn.metrics import r2_score +from scipy.signal import medfilt +import rclpy +from rclpy.node import Node + + +class NeuralNetworkSteering5(Node): + class NeuralNetwork(nn.Module): + def __init__(self): + super(NeuralNetworkSteering5.NeuralNetwork, self).__init__() + self.fc1 = nn.Linear(2, 128) + self.relu1 = nn.ReLU() + self.fc2 = nn.Linear(128, 64) + self.relu2 = nn.ReLU() + self.fc3 = nn.Linear(64, 1) + + def forward(self, x): + x = self.fc1(x) + x = self.relu1(x) + x = self.fc2(x) + x = self.relu2(x) + x = self.fc3(x) + + return x + + def __init__(self): + super().__init__("neural_network_steering5") + + self.model = self.NeuralNetwork() + + data = pd.read_csv("steering_05.csv") + dataa = pd.read_csv("steering_05.csv") + ush = pd.read_csv("steering_05.csv") + + columns = ["Velocity", "Throttling", "Acceleration_measured"] + + # Apply a median filter with a window size of 11 to each column + self.declare_parameter("mean_filter_size", 21) + self.filter_size = ( + self.get_parameter("mean_filter_size").get_parameter_value().integer_value + ) + + for col in columns: + data[col] = medfilt(data[col], kernel_size=self.filter_size) + dataa[col] = medfilt(dataa[col], kernel_size=self.filter_size) + + # declare params from launch file to default values + self.declare_parameter("filter_vel", 10.0) + self.declare_parameter("filter_cmd", 10.0) + self.declare_parameter("filter_acc", 10.0) + + # Load params from launch file + self.FILTER_VEL = ( + self.get_parameter("filter_vel").get_parameter_value().double_value + ) + self.FILTER_CMD = ( + self.get_parameter("filter_cmd").get_parameter_value().double_value + ) + self.FILTER_ACC = ( + self.get_parameter("filter_acc").get_parameter_value().double_value + ) + + mean0 = data["Velocity"].mean() + std0 = data["Velocity"].std() + 
data["Velocity"] = (data["Velocity"] - mean0) / std0 + dataa["Velocity"] = (dataa["Velocity"] - mean0) / std0 + + data = data[abs(data["Velocity"] - mean0) <= std0 * self.FILTER_VEL] + dataa = dataa[abs(dataa["Velocity"] - mean0) <= std0 * self.FILTER_VEL] + + mean1 = data["Throttling"].mean() + std1 = data["Throttling"].std() + data["Throttling"] = (data["Throttling"] - mean1) / std1 + dataa["Throttling"] = (dataa["Throttling"] - mean1) / std1 + + data = data[abs(data["Throttling"] - mean1) <= std1 * self.FILTER_CMD] + dataa = dataa[abs(dataa["Throttling"] - mean1) <= std1 * self.FILTER_CMD] + + mean2 = data["Acceleration_measured"].mean() + std2 = data["Acceleration_measured"].std() + data["Acceleration_measured"] = (data["Acceleration_measured"] - mean2) / std2 + dataa["Acceleration_measured"] = (dataa["Acceleration_measured"] - mean2) / std2 + + data = data[ + abs(data["Acceleration_measured"] - mean2) <= std2 * self.FILTER_ACC + ] + dataa = dataa[ + abs(dataa["Acceleration_measured"] - mean2) <= std2 * self.FILTER_ACC + ] + + # Split the data into input features (velocity and throttle) and target (acceleration) and test/train + + X = data[["Velocity", "Throttling"]].values + y = data["Acceleration_measured"].values + + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=0.2, random_state=42 + ) + + # Convert NumPy arrays to PyTorch tensors + X_train = torch.tensor(X_train, dtype=torch.float32) + y_train = torch.tensor(y_train, dtype=torch.float32) + X_test = torch.tensor(X_test, dtype=torch.float32) + y_test = torch.tensor(y_test, dtype=torch.float32) + + criterion = nn.MSELoss() + optimizer = optim.Adam( + self.model.parameters(), lr=0.001 + ) # , weight_decay=0.001) + + # Training loop + num_epochs = 100 + for epoch in range(num_epochs): + # Forward pass + outputs = self.model(X_train) + + loss = criterion(outputs, y_train.view(-1, 1)) + + # Backpropagation and optimization + optimizer.zero_grad() + loss.backward() + optimizer.step() + + with torch.no_grad(): + test_outputs = self.model(X_test) + # test_loss = criterion(test_outputs, y_test.view(-1, 1)) + # print(f"Mean Squared Error on Test Data: {test_loss.item()}") + + # Visualization (you can modify the range based on your needs) + + velocity_range = np.linspace(0, (X[:, 0] * std0 + mean0).max(), 20) + # throttling_range = np.linspace(0, 50, 20) + throttling_range = np.linspace(0, (X[:, 1] * std1 + mean1).max(), 20) + V, A = np.meshgrid(velocity_range, throttling_range) + + input_grid = np.column_stack( + ((V.flatten() - mean0) / std0, (A.flatten() - mean1) / std1) + ) + input_grid = torch.tensor(input_grid, dtype=torch.float32) + + with torch.no_grad(): + commands = self.model(input_grid).reshape(V.shape) + + commands_new = commands * std2 + mean2 + + # Save the trained model + # torch.save(model.state_dict(), 'trained_throttle.pth') + + # evaluation + mse = mean_squared_error(y_test, test_outputs.view(-1).numpy()) + self.get_logger().info(f"Mean Squared Error on Test Data: {mse}") + + mae = mean_absolute_error(y_test, test_outputs.view(-1).numpy()) + self.get_logger().info(f"Mean Absolute Error on Test Data: {mae}") + + rmse = math.sqrt(mse) + self.get_logger().info(f"Root Mean Squared Error on Test Data: {rmse}") + + r2 = r2_score(y_test, test_outputs.view(-1).numpy()) + self.get_logger().info(f"R-squared (R2) Score on Test Data: {r2}") + + # Save NN model in csv correct format for testing in the real vehicle + + velocity_headers = ["{:.2f}".format(v) for v in velocity_range] + + # we normalize throttling 
values between 0 and 1 + throttling_range /= 100 + + headers = [""] + velocity_headers + + commands_new_with_throttling = np.column_stack((throttling_range, commands_new)) + + # WHEN YOU READ ANOTHER CSV FIRE FOR ANOTHER STEERING CONDITION, RENAME THE FOLLOWING CSV FILE AS WELL!!! + + csv_filename = "steer_map_5.csv" + np.savetxt( + csv_filename, + commands_new_with_throttling, + delimiter=",", + header=",".join(headers), + comments="", + ) + + xdata = dataa.Velocity * std0 + mean0 + ydata = dataa.Throttling * std1 + mean1 + zdata = dataa.Acceleration_measured * std2 + mean2 + + fig = plt.figure() + ax = fig.add_subplot(111, projection="3d") + + ax.scatter3D(xdata, ydata, zdata, c=zdata, marker="o") + surf = ax.plot_surface(V, A, commands_new, cmap="viridis") + + ax.set_xlabel("Velocity") + ax.set_zlabel("Acceleration") + ax.set_ylabel("Throttling Output") + + # CHANGE THE TITLE OF THE PLOT DEPENDING ON THE CSV TABLE YOU ARE VISUALIZING + + ax.set_title("Neural Network Output vs. Velocity and Throttling | Steering 5") + + plt.figure(figsize=(10, 6)) + plt.subplot(3, 1, 1) + plt.hist(ush["Velocity"], bins=20, color="skyblue", edgecolor="black") + plt.title("Distribution of Velocity - Table 5") + plt.xlabel("Velocity") + plt.ylabel("Frequency") + + # Plot the distribution of 'Throttling' + plt.subplot(3, 1, 2) + plt.hist(ush["Throttling"], bins=20, color="salmon", edgecolor="black") + plt.title("Distribution of Throttling - Table 5") + plt.xlabel("Throttling") + plt.ylabel("Frequency") + + # Plot the distribution of 'Acceleration_measured' + plt.subplot(3, 1, 3) + plt.hist( + ush["Acceleration_measured"], bins=20, color="lightgreen", edgecolor="black" + ) + plt.title("Distribution of Acceleration - Table 5") + plt.xlabel("Acceleration") + plt.ylabel("Frequency") + + plt.tight_layout() + + fig.colorbar(surf) + + plt.show() + + +def main(): + rclpy.init() + neural_network_steering5 = NeuralNetworkSteering5() + rclpy.spin(neural_network_steering5) + + +if __name__ == "__main__": + main() diff --git a/vehicle/learning_based_accel_brake_map_calibrator/scripts_steering/steering_data_visualization.py b/vehicle/learning_based_accel_brake_map_calibrator/scripts_steering/steering_data_visualization.py new file mode 100644 index 0000000000000..295bfab2a9cf7 --- /dev/null +++ b/vehicle/learning_based_accel_brake_map_calibrator/scripts_steering/steering_data_visualization.py @@ -0,0 +1,49 @@ +import numpy as np +import matplotlib.pyplot as plt +import pandas as pd +from sklearn.linear_model import LinearRegression + +# Read the data +columns = ["Velocity", "Throttling", "Acceleration_with_pitch_comp"] + +# WRITE HERE THE NAME OF THE CSV FILE YOU WANT TO VISUALIZE + +df = pd.read_csv("steering_01.csv", usecols=columns) + +xdata = df.Velocity +ydata = df.Throttling +zdata = df.Acceleration_with_pitch_comp + +# Fit a linear regression model +X = np.column_stack((xdata, ydata)) +model = LinearRegression().fit(X, zdata) + +x_grid, y_grid = np.meshgrid( + np.linspace(xdata.min(), xdata.max(), 100), + np.linspace(ydata.min(), ydata.max(), 100), +) + +z_grid = model.predict(np.column_stack((x_grid.ravel(), y_grid.ravel()))) +z_grid = z_grid.reshape(x_grid.shape) + + +fig = plt.figure() +ax = plt.axes(projection="3d") + + +cmap = plt.get_cmap("Greens") +normalize = plt.Normalize(zdata.min(), zdata.max()) +sc = plt.cm.ScalarMappable(cmap=cmap, norm=normalize) +sc.set_array([]) + +scatter = ax.scatter3D(xdata, ydata, zdata, c=zdata, cmap=cmap, marker="o") + +ax.plot_surface(x_grid, y_grid, z_grid, alpha=0.5, 
cmap=cmap)
+
+ax.set_xlabel("Velocity")
+ax.set_zlabel("Acceleration")
+ax.set_ylabel("Throttling Output")
+
+cbar = plt.colorbar(sc, ax=ax, label="Acceleration_with_pitch_comp")
+
+plt.show()
diff --git a/vehicle/learning_based_accel_brake_map_calibrator/setup.py b/vehicle/learning_based_accel_brake_map_calibrator/setup.py
new file mode 100644
index 0000000000000..b9f4b90a34b75
--- /dev/null
+++ b/vehicle/learning_based_accel_brake_map_calibrator/setup.py
@@ -0,0 +1,35 @@
+import os
+from glob import glob
+
+from setuptools import setup, find_packages
+
+package_name = "learning_based_vehicle_calibration"
+
+
+setup(
+    name=package_name,
+    version="0.0.0",
+    packages=find_packages(exclude=["test"]),
+    data_files=[
+        ("share/ament_index/resource_index/packages", ["resource/" + package_name]),
+        ("share/" + package_name, ["package.xml"]),
+        # (os.path.join('share', package_name, 'launch'), glob(os.path.join('launch', '*.py'))),
+        (
+            os.path.join("share", package_name, "launch"),
+            glob(os.path.join("launch", "*launch.[pxy][yma]*")),
+        ),
+        # (f"share/{package_name}/launch", [f"launch/neural_network_launch.py"]),
+    ],
+    install_requires=["setuptools"],
+    zip_safe=True,
+    maintainer="Cristian Gariboldi",
+    maintainer_email="gariboldicristian@gmail.com",
+    description="Black box model of longitudinal and steering dynamics",
+    license="Apache License 2.0",
+    tests_require=["pytest"],
+    entry_points={
+        "console_scripts": [],
+    },
+)
+
+# ('share/' + package_name + '/launch', ['launch/neural_network_throttle_launch.py']),