|
61 | 61 | //! _data,
|
62 | 62 | //! })
|
63 | 63 | //! }
|
64 |
| -//!}``` |
| 64 | +//!} |
| 65 | +//! ``` |
65 | 66 | //! Next, add a main function to launch it:
|
66 | 67 |
|
67 | 68 | //! ```rust
|
|
73 | 74 | //! .spin(SpinOptions::default())
|
74 | 75 | //! .first_error()
|
75 | 76 | //! .map_err(|err| err.into())
|
76 |
| -//! }``` |
| 77 | +//! } |
| 78 | +//! ``` |
77 | 79 |
|
78 | 80 | //! ## Run the node
|
79 | 81 | //! You should now be able to run this node with `cargo build` and then `cargo run`. However, the subscription callback still has a `todo!` in it, so it will exit with an error when it receives a message.
|
|
105 | 107 | //! 4. Make the closure `move`, and inside it, lock the `Mutex` and store the message
|
106 | 108 |
|
107 | 109 | //! ```rust
|
| 110 | +//! use rclrs::*; |
108 | 111 | //! use std::sync::{Arc, Mutex}; // (1)
|
109 | 112 | //! use std_msgs::msg::String as StringMsg;
|
110 |
| -//! use rclrs::*; |
111 | 113 |
|
112 | 114 | //! struct RepublisherNode {
|
113 | 115 | //! _node: Arc<rclrs::Node>,
|
|
175 | 177 | //! let context = Context::default_from_env()?;
|
176 | 178 | //! let mut executor = context.create_basic_executor();
|
177 | 179 | //! let _republisher = RepublisherNode::new(&executor)?;
|
178 |
| -//! std::thread::spawn(move || -> Result<(), rclrs::RclrsError> { |
| 180 | +//! std::thread::spawn(|| -> Result<(), rclrs::RclrsError> { |
179 | 181 | //! loop {
|
180 | 182 | //! use std::time::Duration;
|
181 | 183 | //! std::thread::sleep(Duration::from_millis(1000));
|
|
194 | 196 | //! because the function that the variable is coming from might return before the thread that borrows the variable ends.
|
195 | 197 | //! > 💡 Of course, you could argue that this cannot really happen here, because returning from `main()` will also terminate the other threads, but Rust isn't that smart.
|
196 | 198 | //!
|
197 |
| -//! The solution is also the same as above: Shared ownership with `Arc`. Only this time, `Mutex` isn't needed since both the `rclcpp::spin()` |
| 199 | +//! The solution is also the same as above: Shared ownership with `Arc`. Only this time, `Mutex` isn't needed since both the `Executor::spin()` |
198 | 200 | //! and the `republish()` function only require a shared reference:
|
199 | 201 | //! ```rust
|
200 | 202 | //! fn main() -> Result<(), rclrs::RclrsError> {
|
|
216 | 218 | //! ```
|
217 | 219 |
|
218 | 220 | //! ## Try it out
|
219 |
| -//! In separate terminals, run `cargo run --bin first_rclrs_node` if running the current node or `cargo run` otherwise and `ros2 topic echo /out_topic`. Nothing will be shown yet, since our node hasn't received any data yet. |
220 |
| -//! |
| 221 | +//! ### Terminal 1: |
| 222 | +//! In the first terminal, from the workspace root, run: |
| 223 | +//! 1. `colcon build --packages-select examples_rclrs_minimal_pub_sub` to build the node. |
| 224 | +//! 2. `ros2 run examples_rclrs_minimal_pub_sub first_rclrs_node` to run the node. |
| 225 | +//! ### Terminal 2: |
| 226 | +//! In another terminal, run `ros2 topic echo /out_topic`. Nothing will be shown yet, since our node hasn't received any data. |
| 227 | +//! ### Terminal 3: |
221 | 228 | //! In another terminal, publish a single message with `ros2 topic pub /in_topic std_msgs/msg/String '{data: "Bonjour"}' -1`.
|
222 | 229 | //! The terminal with `ros2 topic echo` should now receive a new `Bonjour` message every second.
|
223 | 230 | //!
|
|
0 commit comments