8
8
The pose is estimated by finding matching keypoints between the query and
9
9
reference images and then solving the resulting :term:`PnP` problem.
10
10
"""
11
- from typing import Optional , Tuple , Union , cast
11
+ from typing import Final , Optional , Tuple , Union , cast
12
12
13
13
import cv2
14
14
import numpy as np
18
18
import torch
19
19
from builtin_interfaces .msg import Time
20
20
from cv_bridge import CvBridge
21
- from geometry_msgs .msg import PoseWithCovariance , PoseWithCovarianceStamped
21
+ from geometry_msgs .msg import (
22
+ PoseWithCovariance ,
23
+ PoseWithCovarianceStamped ,
24
+ TwistWithCovarianceStamped ,
25
+ )
22
26
from kornia .feature import DISK , LightGlueMatcher , laf_from_center_scale_ori
23
27
from rclpy .node import Node
24
28
from rclpy .qos import QoSPresetProfiles
25
29
from robot_localization .srv import SetPose
30
+ from scipy .interpolate import interp1d
26
31
from sensor_msgs .msg import CameraInfo , Image , TimeReference
27
32
28
33
from gisnav_msgs .msg import ( # type: ignore[attr-defined]
38
43
ROS_TOPIC_CAMERA_INFO ,
39
44
ROS_TOPIC_RELATIVE_POSE ,
40
45
ROS_TOPIC_RELATIVE_POSE_IMAGE ,
41
- ROS_TOPIC_RELATIVE_QUERY_POSE ,
46
+ ROS_TOPIC_RELATIVE_QUERY_TWIST ,
42
47
ROS_TOPIC_RELATIVE_TWIST_IMAGE ,
43
48
STEREO_NODE_NAME ,
44
49
FrameID ,
@@ -74,6 +79,39 @@ class PoseNode(Node):
74
79
MIN_MATCHES = 30
75
80
"""Minimum number of keypoint matches before attempting pose estimation"""
76
81
82
class ScalingBuffer:
    """Maintains timestamped query-frame-to-world-frame scaling in a sliding
    window buffer so that shallow matching (VO) pose can be scaled to meters
    using scaling information obtained from deep matching.
    """

    # Maximum number of (timestamp, scaling) samples retained in the window
    _WINDOW_LENGTH: Final = 100

    def __init__(self) -> None:
        # np.array([]) creates a genuinely empty 1-D array. The previous
        # np.ndarray([]) allocated an *uninitialized* 0-d array whose
        # garbage value would poison the interpolation window.
        self._scaling_arr: np.ndarray = np.array([])
        self._timestamp_arr: np.ndarray = np.array([])

    def append(self, timestamp_usec: int, scaling: float) -> None:
        """Append a scaling sample and trim the window to its maximum length.

        :param timestamp_usec: Sample timestamp in microseconds
        :param scaling: Query frame to world frame scaling factor
        """
        self._scaling_arr = np.append(self._scaling_arr, scaling)
        self._timestamp_arr = np.append(self._timestamp_arr, timestamp_usec)

        # Slice results must be assigned back — a bare slice expression is
        # a no-op and previously left the buffers growing without bound
        if self._scaling_arr.size > self._WINDOW_LENGTH:
            self._scaling_arr = self._scaling_arr[-self._WINDOW_LENGTH :]
        if self._timestamp_arr.size > self._WINDOW_LENGTH:
            self._timestamp_arr = self._timestamp_arr[-self._WINDOW_LENGTH :]

    def interpolate(self, timestamp_usec: int) -> Optional[float]:
        """Linearly interpolate (or extrapolate) scaling at the given timestamp.

        :param timestamp_usec: Timestamp in microseconds to interpolate at
        :return: Interpolated scaling as a plain float, or None if fewer
            than two samples are buffered
        """
        if self._scaling_arr.size < 2:
            return None

        interp_function = interp1d(
            self._timestamp_arr,
            self._scaling_arr,
            kind="linear",
            fill_value="extrapolate",
        )
        # Cast the numpy scalar returned by interp1d to a Python float
        return float(interp_function(timestamp_usec))
114
+
77
115
def __init__ (self , * args , ** kwargs ):
78
116
"""Class initializer
79
117
@@ -114,7 +152,7 @@ def __init__(self, *args, **kwargs):
114
152
# initialize publishers (for launch tests)
115
153
self .pose
116
154
# TODO method does not support none input
117
- self .camera_optical_pose_in_query_frame (None )
155
+ self .camera_optical_twist_in_camera_optical_frame (None )
118
156
119
157
self ._tf_buffer = tf2_ros .Buffer ()
120
158
self ._tf_listener = tf2_ros .TransformListener (self ._tf_buffer , self )
@@ -127,6 +165,8 @@ def __init__(self, *args, **kwargs):
127
165
self ._set_pose_request = SetPose .Request ()
128
166
self ._pose_sent = False
129
167
168
+ self ._scaling_buffer = self .ScalingBuffer ()
169
+
130
170
def _set_initial_pose (self , pose ):
131
171
if not self ._pose_sent :
132
172
self ._set_pose_request .pose = pose
@@ -160,7 +200,7 @@ def _pose_image_cb(self, msg: Image) -> None:
160
200
161
201
def _twist_image_cb(self, msg: Image) -> None:
    """Callback for :attr:`.twist_image` message

    Triggers shallow-matching (VO) twist estimation on the latest
    :attr:`.twist_image`.
    """
    stereo_image = self.twist_image
    self.camera_optical_twist_in_camera_optical_frame(stereo_image)
164
204
165
205
@property
166
206
@ROS .publish (
@@ -202,6 +242,7 @@ def _get_pose(
202
242
r , t = pose
203
243
204
244
r_inv = r .T
245
+
205
246
camera_optical_position_in_world = - r_inv @ t
206
247
207
248
tf_ .visualize_camera_position (
@@ -212,15 +253,38 @@ def _get_pose(
212
253
f"{ 'shallow' if shallow_inference else 'deep' } inference" ,
213
254
)
214
255
256
+ if isinstance (msg , MonocularStereoImage ):
257
+ scaling = self ._scaling_buffer .interpolate (
258
+ tf_ .usec_from_header (msg .query .header )
259
+ )
260
+ if scaling is not None :
261
+ camera_optical_position_in_world = (
262
+ camera_optical_position_in_world * scaling
263
+ )
264
+ else :
265
+ self .get_logger ().debug (
266
+ "No scaling availble for VO - will not return VO pose"
267
+ )
268
+ return None
269
+
215
270
pose = tf_ .create_pose_msg (
216
271
msg .query .header .stamp ,
217
- cast (FrameID , "earth" ),
272
+ cast (FrameID , "earth" )
273
+ if isinstance (msg , OrthoStereoImage )
274
+ else cast (FrameID , "camera_optical" ),
218
275
r_inv ,
219
276
camera_optical_position_in_world ,
220
277
)
221
278
222
279
if isinstance (msg , OrthoStereoImage ):
223
280
affine = tf_ .proj_to_affine (msg .crs .data )
281
+
282
+ # use z scaling value
283
+ usec = tf_ .usec_from_header (msg .query .header )
284
+ scaling = np .abs (affine [2 , 2 ])
285
+ if usec is not None and scaling is not None :
286
+ self ._scaling_buffer .append (usec , scaling )
287
+
224
288
t_wgs84 = affine @ np .append (camera_optical_position_in_world , 1 )
225
289
x , y , z = tf_ .wgs84_to_ecef (* t_wgs84 .tolist ())
226
290
pose .pose .position .x = x
@@ -254,16 +318,33 @@ def _get_pose(
254
318
return pose_with_covariance
255
319
256
320
@ROS.publish(
    ROS_TOPIC_RELATIVE_QUERY_TWIST,
    QoSPresetProfiles.SENSOR_DATA.value,
)
# @ROS.transform("camera_optical")  # TODO: enable after scaling to meters
@narrow_types
def camera_optical_twist_in_camera_optical_frame(
    self, msg: MonocularStereoImage
) -> Optional[TwistWithCovarianceStamped]:
    """Camera twist in camera optical frame, derived from the visual odometry
    pose relative to the previous query frame and scaled to meters.

    :param msg: Shallow-matching (VO) stereo image couple
    :return: Twist with covariance, or None if no scaling information or no
        pose estimate is available
    """
    usec = tf_.usec_from_header(msg.query.header)
    if usec is None:
        # Guard against a missing timestamp — interp1d would raise if the
        # interpolation point were None (same guard as used in _get_pose)
        return None

    scaling = self._scaling_buffer.interpolate(usec)
    if scaling is None:
        # No deep-matching scaling available yet, so the VO pose cannot be
        # expressed in meters
        return None

    x, y, z = (
        scaling * 320.0,
        scaling * 180.0,
        scaling * -205.0,
    )  # todo do not hard code
    previous_pose = tf_.create_identity_pose_stamped(x, y, z)
    previous_pose.header = msg.reference.header

    current_pose = self._get_pose(msg)
    if current_pose is None:
        return None

    return tf_.poses_to_twist(current_pose, previous_pose)
267
348
268
349
@property
269
350
# @ROS.max_delay_ms(DELAY_DEFAULT_MS)
0 commit comments