
Commit 27fd015

BruceVonKmsintov authored and committed
VIC-12583 SDK set camera settings (#178)
Adapted from the Cozmo codebase. The SDK queries the robot for its camera configuration and streams the robot's exposure settings in the event stream. Includes a pytest and a tutorial.
1 parent e275b9b commit 27fd015

File tree: 8 files changed (+361, -1 lines)
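
For orientation, the sketch below (not part of the commit) shows how the new camera-settings API introduced in the camera.py diff further down is intended to be used from a synchronous SDK script. It assumes a reachable robot configured for the SDK in the usual way; all method and property names come from the diff itself.

    import time

    import anki_vector

    with anki_vector.Robot() as robot:
        # The config is fetched and cached during Robot.connect() (see the robot.py hunk).
        config = robot.camera.config
        print(f"exposure range: {config.min_exposure_time_ms}-{config.max_exposure_time_ms} ms, "
              f"gain range: {config.min_gain}-{config.max_gain}")

        # Force values chosen from inside the reported ranges, then observe them.
        robot.camera.set_manual_exposure(config.min_exposure_time_ms, config.max_gain)
        time.sleep(2)
        print(f"now: exposure={robot.camera.exposure_ms} ms, gain={robot.camera.gain}")

        # Hand control back to the robot's auto exposure (the default mode).
        robot.camera.enable_auto_exposure()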

anki_vector/animation.py

Lines changed: 2 additions & 0 deletions
@@ -190,6 +190,7 @@ async def load_animation_trigger_list(self):
         """
         return await self._load_animation_trigger_list()
 
+    # TODO: add return type hint
     @connection.on_connection_thread()
     async def play_animation_trigger(self, anim_trigger: str, loop_count: int = 1, use_lift_safe: bool = False, ignore_body_track: bool = False, ignore_head_track: bool = False, ignore_lift_track: bool = False):  # START
         """Starts an animation trigger playing on a robot.
@@ -227,6 +228,7 @@ async def play_animation_trigger(self, anim_trigger: str, loop_count: int = 1, u
                                                     ignore_lift_track=ignore_lift_track)
         return await self.grpc_interface.PlayAnimationTrigger(req)
 
+    # TODO: add return type hint
     @connection.on_connection_thread()
     async def play_animation(self, anim: str, loop_count: int = 1, ignore_body_track: bool = False, ignore_head_track: bool = False, ignore_lift_track: bool = False):
         """Starts an animation playing on a robot.

anki_vector/camera.py

Lines changed: 225 additions & 1 deletion
@@ -26,7 +26,7 @@
 
 # __all__ should order by constants, event classes, other classes, functions.
 __all__ = ["EvtNewRawCameraImage", "EvtNewCameraImage",
-           "CameraComponent", "CameraImage"]
+           "CameraComponent", "CameraConfig", "CameraImage"]
 
 import asyncio
 from concurrent.futures import CancelledError
@@ -137,6 +137,116 @@ def annotate_image(self, scale: float = None, fit_size: tuple = None, resample_m
                                                  resample_mode=resample_mode)
 
 
+class CameraConfig:
+    """The fixed properties for Vector's camera.
+
+    A full 3x3 calibration matrix for doing 3D reasoning based on the camera
+    images would look like:
+
+    +--------------+--------------+---------------+
+    |focal_length.x|       0      |    center.x   |
+    +--------------+--------------+---------------+
+    |       0      |focal_length.y|    center.y   |
+    +--------------+--------------+---------------+
+    |       0      |       0      |       1       |
+    +--------------+--------------+---------------+
+
+    .. testcode::
+
+        import anki_vector
+
+        with anki_vector.Robot() as robot:
+            min = robot.camera.config.min_gain
+            max = robot.camera.config.max_gain
+            print(f"Robot camera allowable exposure gain range is from {min} to {max}")
+    """
+
+    def __init__(self,
+                 focal_length_x: float,
+                 focal_length_y: float,
+                 center_x: float,
+                 center_y: float,
+                 fov_x: float,
+                 fov_y: float,
+                 min_exposure_time_ms: int,
+                 max_exposure_time_ms: int,
+                 min_gain: float,
+                 max_gain: float):
+        self._focal_length = util.Vector2(focal_length_x, focal_length_y)
+        self._center = util.Vector2(center_x, center_y)
+        self._fov_x = util.degrees(fov_x)
+        self._fov_y = util.degrees(fov_y)
+        self._min_exposure_ms = min_exposure_time_ms
+        self._max_exposure_ms = max_exposure_time_ms
+        self._min_gain = min_gain
+        self._max_gain = max_gain
+
+    @classmethod
+    def create_from_message(cls, msg: protocol.CameraConfigResponse):
+        """Create a camera configuration object from the message sent by the robot."""
+        return cls(msg.focal_length_x,
+                   msg.focal_length_y,
+                   msg.center_x,
+                   msg.center_y,
+                   msg.fov_x,
+                   msg.fov_y,
+                   msg.min_camera_exposure_time_ms,
+                   msg.max_camera_exposure_time_ms,
+                   msg.min_camera_gain,
+                   msg.max_camera_gain)
+
+    @property
+    def min_gain(self) -> float:
+        """The minimum supported camera gain."""
+        return self._min_gain
+
+    @property
+    def max_gain(self) -> float:
+        """The maximum supported camera gain."""
+        return self._max_gain
+
+    @property
+    def min_exposure_time_ms(self) -> int:
+        """The minimum supported exposure time in milliseconds."""
+        return self._min_exposure_ms
+
+    @property
+    def max_exposure_time_ms(self) -> int:
+        """The maximum supported exposure time in milliseconds."""
+        return self._max_exposure_ms
+
+    @property
+    def focal_length(self):
+        """:class:`anki_vector.util.Vector2`: The focal length of the camera.
+
+        This is focal length combined with pixel skew (as the pixels aren't
+        perfectly square), so there are subtly different values for x and y.
+        It is in floating point pixel values e.g. <288.87, 288.36>.
+        """
+        return self._focal_length
+
+    @property
+    def center(self):
+        """:class:`anki_vector.util.Vector2`: The focal center of the camera.
+
+        This is the position of the optical center of projection within the
+        image. It will be close to the center of the image, but adjusted based
+        on the calibration of the lens. It is in floating point pixel values
+        e.g. <155.11, 111.40>.
+        """
+        return self._center
+
+    @property
+    def fov_x(self):
+        """:class:`anki_vector.util.Angle`: The x (horizontal) field of view."""
+        return self._fov_x
+
+    @property
+    def fov_y(self):
+        """:class:`anki_vector.util.Angle`: The y (vertical) field of view."""
+        return self._fov_y
+
+
 class CameraComponent(util.Component):
     """Represents Vector's camera.
 
@@ -169,6 +279,55 @@ def __init__(self, robot):
         self._latest_image_id: int = None
         self._camera_feed_task: asyncio.Task = None
         self._enabled = False
+        self._config = None  # type: CameraConfig
+        self._gain = 0.0
+        self._exposure_ms = 0
+        self._auto_exposure_enabled = True
+
+    def set_config(self, message: protocol.CameraConfigResponse):
+        """Update Vector's camera configuration from the message sent by the robot."""
+        self._config = CameraConfig.create_from_message(message)
+
+    @connection.on_connection_thread(requires_control=False)
+    async def get_camera_config(self) -> protocol.CameraConfigResponse:
+        """Get Vector's camera configuration.
+
+        Retrieves the calibrated camera settings. This is called during the Robot connection
+        initialization; SDK users should use the `config` property in most instances.
+
+        :return: The calibrated camera configuration sent by the robot.
+        """
+        request = protocol.CameraConfigRequest()
+        return await self.conn.grpc_interface.GetCameraConfig(request)
+
+    @property
+    def config(self) -> CameraConfig:
+        """:class:`anki_vector.camera.CameraConfig`: The read-only config/calibration for the camera."""
+        return self._config
+
+    @property
+    def is_auto_exposure_enabled(self) -> bool:
+        """bool: True if auto exposure is currently enabled.
+
+        If auto exposure is enabled, the `gain` and `exposure_ms`
+        values will constantly be updated by Vector.
+        """
+        return self._auto_exposure_enabled
+
+    @property
+    def gain(self) -> float:
+        """float: The current camera gain setting."""
+        return self._gain
+
+    @property
+    def exposure_ms(self) -> int:
+        """int: The current camera exposure setting in milliseconds."""
+        return self._exposure_ms
+
+    def update_state(self, _robot, _event_type, msg):
+        self._gain = msg.gain
+        self._exposure_ms = msg.exposure_ms
+        self._auto_exposure_enabled = msg.auto_exposure_enabled
 
     @property
     @util.block_while_none()
@@ -365,6 +524,71 @@ async def capture_single_image(self, enable_high_resolution: bool = False) -> Ca
 
         self.logger.error('Failed to capture a single image')
 
+    @connection.on_connection_thread()
+    async def enable_auto_exposure(self, enable_auto_exposure=True) -> protocol.SetCameraSettingsResponse:
+        """Enable auto exposure on Vector's camera.
+
+        Enable auto exposure on Vector's camera to constantly update the exposure
+        time and gain values based on the recent images. This is the default mode
+        when any SDK program starts.
+
+        .. testcode::
+
+            import time
+            import anki_vector
+            with anki_vector.Robot() as robot:
+                robot.camera.enable_auto_exposure(False)
+                time.sleep(5)
+
+        :param enable_auto_exposure: whether the camera should automatically adjust exposure
+        """
+
+        set_camera_settings_request = protocol.SetCameraSettingsRequest(enable_auto_exposure=enable_auto_exposure)
+        result = await self.conn.grpc_interface.SetCameraSettings(set_camera_settings_request)
+        self._auto_exposure_enabled = enable_auto_exposure
+        return result
+
+    @connection.on_connection_thread()
+    async def set_manual_exposure(self, exposure_ms: int, gain: float) -> protocol.SetCameraSettingsResponse:
+        """Set manual exposure values for Vector's camera.
+
+        This will disable auto exposure on Vector's camera and force the specified exposure
+        time and gain values.
+
+        .. testcode::
+
+            import time
+            import anki_vector
+            with anki_vector.Robot() as robot:
+                robot.camera.set_manual_exposure(1, 0.25)
+                time.sleep(5)
+
+        :param exposure_ms: The desired exposure time in milliseconds.
+            Must be within the robot's exposure range from :attr:`CameraConfig.min_exposure_time_ms` to
+            :attr:`CameraConfig.max_exposure_time_ms`
+        :param gain: The desired gain value.
+            Must be within the robot's gain range from :attr:`CameraConfig.min_gain` to
+            :attr:`CameraConfig.max_gain`
+        Raises:
+            :class:`ValueError` if supplied an out-of-range exposure or gain
+
+        """
+
+        if exposure_ms < self._config.min_exposure_time_ms \
+                or exposure_ms > self._config.max_exposure_time_ms \
+                or gain < self._config.min_gain \
+                or gain > self._config.max_gain:
+            raise ValueError("Exposure settings out of range")
+
+        set_camera_settings_request = protocol.SetCameraSettingsRequest(gain=gain,
+                                                                        exposure_ms=exposure_ms,
+                                                                        enable_auto_exposure=False)
+        result = await self.conn.grpc_interface.SetCameraSettings(set_camera_settings_request)
+        self._gain = gain
+        self._exposure_ms = exposure_ms
+        self._auto_exposure_enabled = False
+        return result
+
 
 class EvtNewRawCameraImage:  # pylint: disable=too-few-public-methods
     """Dispatched when a new raw image is received from the robot's camera.

anki_vector/events.py

Lines changed: 1 addition & 0 deletions
@@ -37,6 +37,7 @@ class Events(Enum):
     robot_state = "robot_state"  #: Robot event containing changes to the robot's state.
     mirror_mode_disabled = "mirror_mode_disabled"  #: Robot event triggered when mirror mode (camera feed displayed on robot's face) is automatically disabled due to SDK no longer having control of the robot.
     vision_modes_auto_disabled = "vision_modes_auto_disabled"  #: Robot event triggered when all vision modes are automatically disabled due to the SDK no longer having control of the robot.
+    camera_settings_update = "camera_settings_update"  #: Robot event triggered when the camera exposure settings change.
 
     # Objects
     object_available = "object_available"  #: After the ConnectCube process is started, all available light cubes in range will broadcast an availability message through the Robot.
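The new camera_settings_update event can also be observed directly from user code, not only by the internal CameraComponent.update_state callback wired up in robot.py. A minimal sketch (not part of the commit), assuming the event payload carries the same gain, exposure_ms, and auto_exposure_enabled fields that update_state reads:

    import time

    import anki_vector
    from anki_vector.events import Events

    def on_camera_settings_update(robot, event_type, event):
        # Fields mirror what CameraComponent.update_state consumes.
        print(f"gain={event.gain} exposure_ms={event.exposure_ms} "
              f"auto={event.auto_exposure_enabled}")

    with anki_vector.Robot() as robot:
        robot.events.subscribe(on_camera_settings_update, Events.camera_settings_update)
        time.sleep(10)  # watch the settings stream in as auto exposure adjusts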

anki_vector/objects.py

Lines changed: 1 addition & 0 deletions
@@ -493,6 +493,7 @@ def teardown(self):
         self.robot.events.unsubscribe(self._on_object_connection_lost,
                                       Events.cube_connection_lost)
 
+    # TODO: add return type hint
     @connection.on_connection_thread()
     async def set_light_corners(self,
                                 light1: lights.Light,

anki_vector/robot.py

Lines changed: 11 additions & 0 deletions
@@ -684,6 +684,17 @@ def connect(self, timeout: int = 10) -> None:
                               events.Events.robot_state,
                               _on_connection_thread=True)
 
+        # get the camera configuration from the robot
+        response = self._camera.get_camera_config()
+        if isinstance(response, concurrent.futures.Future):
+            response = response.result()
+        self._camera.set_config(response)
+
+        # Subscribe to a callback for camera exposure settings
+        self.events.subscribe(self._camera.update_state,
+                              events.Events.camera_settings_update,
+                              _on_connection_thread=True)
+
         # access the pose to prove it has gotten back from the event stream once
         try:
             if not self.pose:
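Because get_camera_config is decorated with connection.on_connection_thread, the value handed back to connect() may be a concurrent.futures.Future rather than the response itself (for example when the call is dispatched asynchronously, as with AsyncRobot), which is why the hunk above resolves it with .result() before calling set_config. A hedged sketch of the same defensive pattern in user code:

    import concurrent.futures

    import anki_vector

    with anki_vector.Robot() as robot:
        response = robot.camera.get_camera_config()
        # Mirror connect(): resolve a Future if the call was dispatched asynchronously.
        if isinstance(response, concurrent.futures.Future):
            response = response.result()
        # Field names taken from CameraConfig.create_from_message.
        print(f"camera gain range: {response.min_camera_gain}-{response.max_camera_gain}")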

anki_vector/vision.py

Lines changed: 3 additions & 0 deletions
@@ -91,6 +91,7 @@ async def disable_all_vision_modes(self):
         if self.display_camera_feed_on_face:
             await self.enable_display_camera_feed_on_face(False)
 
+    # TODO: add return type hint
     @connection.on_connection_thread()
     async def enable_custom_object_detection(self, detect_custom_objects: bool = True):
         """Enable custom object detection on the robot's camera.
@@ -113,6 +114,7 @@ async def enable_custom_object_detection(self, detect_custom_objects: bool = Tru
         enable_marker_detection_request = protocol.EnableMarkerDetectionRequest(enable=detect_custom_objects)
         return await self.grpc_interface.EnableMarkerDetection(enable_marker_detection_request)
 
+    # TODO: add return type hint
     @connection.on_connection_thread()
     async def enable_face_detection(
             self,
@@ -177,6 +179,7 @@ def on_robot_observed_motion(robot, event_type, event):
         enable_motion_detection_request = protocol.EnableMotionDetectionRequest(enable=detect_motion)
         return await self.grpc_interface.EnableMotionDetection(enable_motion_detection_request)
 
+    # TODO: add return type hint
     @connection.on_connection_thread()
     async def enable_display_camera_feed_on_face(self, display_camera_feed_on_face: bool = True):
         """Display the robot's camera feed on its face along with any detections (if enabled)

anki_vector/world.py

Lines changed: 1 addition & 0 deletions
@@ -788,6 +788,7 @@ def create_custom_fixed_object(self,
         self._objects[fixed_custom_object.object_id] = fixed_custom_object
         return fixed_custom_object
 
+    # TODO: add return type hint
     @connection.on_connection_thread(requires_control=False)
     async def _create_custom_fixed_object(self,
                                           pose: util.Pose,
