 
 # __all__ should order by constants, event classes, other classes, functions.
 __all__ = ["EvtNewRawCameraImage", "EvtNewCameraImage",
-           "CameraComponent", "CameraImage"]
+           "CameraComponent", "CameraConfig", "CameraImage"]
 
 import asyncio
 from concurrent.futures import CancelledError
@@ -137,6 +137,116 @@ def annotate_image(self, scale: float = None, fit_size: tuple = None, resample_m
                                               resample_mode=resample_mode)
 
 
+class CameraConfig:
+    """The fixed properties for Vector's camera.
+
+    A full 3x3 calibration matrix for doing 3D reasoning based on the camera
+    images would look like:
+
+    +--------------+--------------+---------------+
+    |focal_length.x|      0       |   center.x    |
+    +--------------+--------------+---------------+
+    |      0       |focal_length.y|   center.y    |
+    +--------------+--------------+---------------+
+    |      0       |      0       |       1       |
+    +--------------+--------------+---------------+
+
+    .. testcode::
+
+        import anki_vector
+
+        with anki_vector.Robot() as robot:
+            min = robot.camera.config.min_gain
+            max = robot.camera.config.max_gain
+            print(f"Robot camera allowable exposure gain range is from {min} to {max}")
+    """
+
+    def __init__(self,
+                 focal_length_x: float,
+                 focal_length_y: float,
+                 center_x: float,
+                 center_y: float,
+                 fov_x: float,
+                 fov_y: float,
+                 min_exposure_time_ms: int,
+                 max_exposure_time_ms: int,
+                 min_gain: float,
+                 max_gain: float):
+        self._focal_length = util.Vector2(focal_length_x, focal_length_y)
+        self._center = util.Vector2(center_x, center_y)
+        self._fov_x = util.degrees(fov_x)
+        self._fov_y = util.degrees(fov_y)
+        self._min_exposure_ms = min_exposure_time_ms
+        self._max_exposure_ms = max_exposure_time_ms
+        self._min_gain = min_gain
+        self._max_gain = max_gain
+
+    @classmethod
+    def create_from_message(cls, msg: protocol.CameraConfigResponse):
| 186 | + """Create camera configuration based on Vector's camera configuration from the message sent from the Robot """ |
+        return cls(msg.focal_length_x,
+                   msg.focal_length_y,
+                   msg.center_x,
+                   msg.center_y,
+                   msg.fov_x,
+                   msg.fov_y,
+                   msg.min_camera_exposure_time_ms,
+                   msg.max_camera_exposure_time_ms,
+                   msg.min_camera_gain,
+                   msg.max_camera_gain)
+
+    @property
+    def min_gain(self) -> float:
+        """The minimum supported camera gain."""
+        return self._min_gain
+
+    @property
+    def max_gain(self) -> float:
+        """The maximum supported camera gain."""
+        return self._max_gain
+
+    @property
+    def min_exposure_time_ms(self) -> int:
+        """The minimum supported exposure time in milliseconds."""
+        return self._min_exposure_ms
+
+    @property
+    def max_exposure_time_ms(self) -> int:
+        """The maximum supported exposure time in milliseconds."""
+        return self._max_exposure_ms
+
+    @property
+    def focal_length(self):
+        """:class:`anki_vector.util.Vector2`: The focal length of the camera.
+
+        This is focal length combined with pixel skew (as the pixels aren't
+        perfectly square), so there are subtly different values for x and y.
+        It is in floating point pixel values e.g. <288.87, 288.36>.
+        """
+        return self._focal_length
+
+    @property
+    def center(self):
+        """:class:`anki_vector.util.Vector2`: The focal center of the camera.
+
+        This is the position of the optical center of projection within the
+        image. It will be close to the center of the image, but adjusted based
+        on the calibration of the lens. It is in floating point pixel values
+        e.g. <155.11, 111.40>.
+        """
+        return self._center
+
+    @property
+    def fov_x(self):
+        """:class:`anki_vector.util.Angle`: The x (horizontal) field of view."""
+        return self._fov_x
+
+    @property
+    def fov_y(self):
+        """:class:`anki_vector.util.Angle`: The y (vertical) field of view."""
+        return self._fov_y
+
+
 class CameraComponent(util.Component):
     """Represents Vector's camera.
 
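A note on using the class added above: the calibration matrix shown in the CameraConfig docstring can be assembled directly from the focal_length and center properties. The following is a minimal sketch, assuming numpy is installed and a robot connection is available; the variable names are illustrative only.

    import anki_vector
    import numpy as np

    with anki_vector.Robot() as robot:
        config = robot.camera.config
        # 3x3 camera calibration (intrinsic) matrix, laid out as in the docstring above.
        calibration = np.array([
            [config.focal_length.x, 0.0, config.center.x],
            [0.0, config.focal_length.y, config.center.y],
            [0.0, 0.0, 1.0],
        ])
        print(calibration)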
@@ -169,6 +279,55 @@ def __init__(self, robot):
         self._latest_image_id: int = None
         self._camera_feed_task: asyncio.Task = None
         self._enabled = False
+        self._config = None  # type: CameraConfig
+        self._gain = 0.0
+        self._exposure_ms = 0
+        self._auto_exposure_enabled = True
+
+    def set_config(self, message: protocol.CameraConfigResponse):
+        """Update Vector's camera configuration from the config message sent by the robot."""
+        self._config = CameraConfig.create_from_message(message)
+
+    @connection.on_connection_thread(requires_control=False)
+    async def get_camera_config(self) -> protocol.CameraConfigResponse:
| 293 | + """ Get Vector's camera configuration |
| 294 | +
|
| 295 | + Retrieves the calibrated camera settings. This is called during the Robot connection initialization, SDK |
| 296 | + users should use the `config` property in most instances. |
| 297 | +
|
| 298 | + :return: |
| 299 | + """ |
+        request = protocol.CameraConfigRequest()
+        return await self.conn.grpc_interface.GetCameraConfig(request)
+
+    @property
+    def config(self) -> CameraConfig:
+        """:class:`anki_vector.camera.CameraConfig`: The read-only config/calibration for the camera."""
+        return self._config
+
+    @property
+    def is_auto_exposure_enabled(self) -> bool:
+        """bool: True if auto exposure is currently enabled.
+
+        If auto exposure is enabled, the `gain` and `exposure_ms`
+        values will constantly be updated by Vector.
+        """
+        return self._auto_exposure_enabled
+
+    @property
+    def gain(self) -> float:
+        """float: The current camera gain setting."""
+        return self._gain
+
+    @property
+    def exposure_ms(self) -> int:
+        """int: The current camera exposure setting in milliseconds."""
+        return self._exposure_ms
+
+    def update_state(self, _robot, _event_type, msg):
+        """Update the cached gain, exposure and auto exposure values from an incoming event message."""
+        self._gain = msg.gain
+        self._exposure_ms = msg.exposure_ms
+        self._auto_exposure_enabled = msg.auto_exposure_enabled
 
     @property
     @util.block_while_none()
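With the additions above, the camera component keeps a cached copy of the current exposure state. A minimal sketch of reading it, assuming a connected robot; the values stay at their 0.0 / 0 ms defaults until the first state update arrives.

    import anki_vector

    with anki_vector.Robot() as robot:
        camera = robot.camera
        # These read the cached values maintained by update_state().
        print(f"auto exposure enabled: {camera.is_auto_exposure_enabled}")
        print(f"gain: {camera.gain}, exposure: {camera.exposure_ms} ms")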
@@ -365,6 +524,71 @@ async def capture_single_image(self, enable_high_resolution: bool = False) -> Ca
 
         self.logger.error('Failed to capture a single image')
 
+    @connection.on_connection_thread()
+    async def enable_auto_exposure(self, enable_auto_exposure=True) -> protocol.SetCameraSettingsResponse:
+        """Enable auto exposure on Vector's Camera.
+
+        Enable auto exposure on Vector's camera to constantly update the exposure
+        time and gain values based on the recent images. This is the default mode
+        when any SDK program starts.
+
+        .. testcode::
+
+            import time
+            import anki_vector
+            with anki_vector.Robot() as robot:
+                robot.camera.enable_auto_exposure(False)
+                time.sleep(5)
+
+        :param enable_auto_exposure: whether the camera should automatically adjust exposure
+        """
+
+        set_camera_settings_request = protocol.SetCameraSettingsRequest(enable_auto_exposure=enable_auto_exposure)
+        result = await self.conn.grpc_interface.SetCameraSettings(set_camera_settings_request)
+        self._auto_exposure_enabled = enable_auto_exposure
+        return result
+
+    @connection.on_connection_thread()
+    async def set_manual_exposure(self, exposure_ms: int, gain: float) -> protocol.SetCameraSettingsResponse:
+        """Set manual exposure values for Vector's Camera.
+
+        This will disable auto exposure on Vector's camera and force the specified exposure
+        time and gain values.
+
+        .. testcode::
+
+            import time
+            import anki_vector
+            with anki_vector.Robot() as robot:
+                robot.camera.set_manual_exposure(1, 0.25)
+                time.sleep(5)
+
+        :param exposure_ms: The desired exposure time in milliseconds.
+            Must be within the robot's exposure range from :attr:`CameraConfig.min_exposure_time_ms` to
+            :attr:`CameraConfig.max_exposure_time_ms`
+        :param gain: The desired gain value.
+            Must be within the robot's gain range from :attr:`CameraConfig.min_gain` to
+            :attr:`CameraConfig.max_gain`
+
+        Raises:
+            :class:`ValueError` if supplied an out-of-range exposure or gain.
+        """
+
+        if exposure_ms < self._config.min_exposure_time_ms \
+                or exposure_ms > self._config.max_exposure_time_ms \
+                or gain < self._config.min_gain \
+                or gain > self._config.max_gain:
+            raise ValueError("Exposure settings out of range")
+
+        set_camera_settings_request = protocol.SetCameraSettingsRequest(gain=gain,
+                                                                        exposure_ms=exposure_ms,
+                                                                        enable_auto_exposure=False)
+        result = await self.conn.grpc_interface.SetCameraSettings(set_camera_settings_request)
+        self._gain = gain
+        self._exposure_ms = exposure_ms
+        self._auto_exposure_enabled = False
+        return result
+
 
 class EvtNewRawCameraImage:  # pylint: disable=too-few-public-methods
     """Dispatched when a new raw image is received from the robot's camera.
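Since set_manual_exposure raises ValueError for out-of-range values, callers can clamp their requested settings against the config limits before applying them. A minimal sketch, assuming a connected robot; the target values of 67 ms and 2.0 are arbitrary examples.

    import anki_vector

    with anki_vector.Robot() as robot:
        config = robot.camera.config
        # Clamp arbitrary example targets (67 ms, 2.0) into the supported range.
        exposure_ms = max(config.min_exposure_time_ms, min(67, config.max_exposure_time_ms))
        gain = max(config.min_gain, min(2.0, config.max_gain))
        robot.camera.set_manual_exposure(exposure_ms, gain)
        # ... capture images with the fixed settings here ...
        robot.camera.enable_auto_exposure()  # restore the default auto exposure behavior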