-
Notifications
You must be signed in to change notification settings - Fork 59
Open
Description
When initializing a modified Quickstart Python script to generate both RGB and DEPTH streams and applying the align filter to the depth field, align_filter.process(frames) returns None. All similar examples involving align_filter.process lead to the same error.
[orbbec femto bolt]
`
import cv2
import numpy as np
import time
from pyorbbecsdk import *
from utils import frame_to_bgr_image
ESC_KEY = 27
MIN_DEPTH = 20 # 20mm
MAX_DEPTH = 10000 # 10000mm
def main():
pipeline = Pipeline()
config = Config()
# Enable color and depth streams
color_profiles = pipeline.get_stream_profile_list(OBSensorType.COLOR_SENSOR)
color_profile = color_profiles.get_default_video_stream_profile()
config.enable_stream(color_profile)
depth_profiles = pipeline.get_stream_profile_list(OBSensorType.DEPTH_SENSOR)
depth_profile = depth_profiles.get_default_video_stream_profile()
config.enable_stream(depth_profile)
pipeline.enable_frame_sync() # ensures frames are synchronized
pipeline.start(config)
print("Pipeline started successfully. Press 'q' or ESC to exit.")
# Create alignment filter (align depth to color)
align_filter = AlignFilter(align_to_stream=OBStreamType.COLOR_STREAM)
# Set window size
window_width = 1280
window_height = 720
cv2.namedWindow("QuickStart Viewer", cv2.WINDOW_NORMAL)
cv2.resizeWindow("QuickStart Viewer", window_width, window_height)
while True:
try:
frames = pipeline.wait_for_frames(100)
if frames is None:
continue
# Get color frame
color_frame = frames.get_color_frame()
if color_frame is None:
continue
color_image = frame_to_bgr_image(color_frame)
# Get depth frame
depth_frame = frames.get_depth_frame()
if depth_frame is None:
continue
if depth_frame.get_format() != OBFormat.Y16:
print("Depth format is not Y16")
continue
aligned_depth = align_filter.process(frames)
print(aligned_depth)
if aligned_depth is None:
continue
# Process depth data
width = depth_frame.get_width()
height = depth_frame.get_height()
scale = depth_frame.get_depth_scale()
depth_data = np.frombuffer(depth_frame.get_data(), dtype=np.uint16).reshape((height, width))
depth_data = depth_data.astype(np.float32) * scale
depth_data = np.where((depth_data > MIN_DEPTH) & (depth_data < MAX_DEPTH), depth_data, 0).astype(np.uint16)
# Create depth visualization
depth_image = cv2.normalize(depth_data, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
depth_image = cv2.applyColorMap(depth_image, cv2.COLORMAP_JET)
# Resize and combine images
color_image_resized = cv2.resize(color_image, (window_width // 2, window_height))
depth_image_resized = cv2.resize(depth_image, (window_width // 2, window_height))
combined_image = np.hstack((color_image_resized, depth_image_resized))
cv2.imshow("QuickStart Viewer", combined_image)
if cv2.waitKey(1) in [ord('q'), ESC_KEY]:
break
except KeyboardInterrupt:
break
cv2.destroyAllWindows()
pipeline.stop()
print("Pipeline stopped and all windows closed.")
if name == "main":
main()
`
Metadata
Metadata
Assignees
Labels
No labels