diff --git a/boards/arduino/nicla_vision/arduino_nicla_vision_stm32h747xx_m7.yaml b/boards/arduino/nicla_vision/arduino_nicla_vision_stm32h747xx_m7.yaml index 0310b5dce890..90e7c42ca08b 100644 --- a/boards/arduino/nicla_vision/arduino_nicla_vision_stm32h747xx_m7.yaml +++ b/boards/arduino/nicla_vision/arduino_nicla_vision_stm32h747xx_m7.yaml @@ -11,4 +11,5 @@ supported: - gpio - spi - i2c + - usbd vendor: arduino diff --git a/doc/build/dts/api/api.rst b/doc/build/dts/api/api.rst index 6fe1fb7fd1bc..ffa56e5ce5ae 100644 --- a/doc/build/dts/api/api.rst +++ b/doc/build/dts/api/api.rst @@ -388,6 +388,8 @@ device. - Sets UART device used for the Bluetooth monitor logging * - zephyr,bt-hci - Selects the HCI device used by the Bluetooth host stack + * - zephyr,camera + - Video input device, typically a camera. * - zephyr,canbus - Sets the default CAN controller * - zephyr,ccm diff --git a/doc/connectivity/usb/device/usb_device.rst b/doc/connectivity/usb/device/usb_device.rst index 37da1f464f61..6b982036beed 100644 --- a/doc/connectivity/usb/device/usb_device.rst +++ b/doc/connectivity/usb/device/usb_device.rst @@ -551,6 +551,8 @@ The following Product IDs are currently used: +----------------------------------------------------+--------+ | :zephyr:code-sample:`uac2-implicit-feedback` | 0x000F | +----------------------------------------------------+--------+ +| :zephyr:code-sample:`uvc` | 0x0011 | ++----------------------------------------------------+--------+ | :zephyr:code-sample:`usb-dfu` (DFU Mode) | 0xFFFF | +----------------------------------------------------+--------+ diff --git a/doc/connectivity/usb/device_next/usb_device.rst b/doc/connectivity/usb/device_next/usb_device.rst index 7e7762997a01..7ab2805dc4fa 100644 --- a/doc/connectivity/usb/device_next/usb_device.rst +++ b/doc/connectivity/usb/device_next/usb_device.rst @@ -32,6 +32,8 @@ Samples * :zephyr:code-sample:`uac2-implicit-feedback` +* :zephyr:code-sample:`uvc` + Samples ported to new USB device support ---------------------------------------- @@ -223,6 +225,8 @@ instance (``n``) and is used as an argument to the :c:func:`usbd_register_class` +-----------------------------------+-------------------------+-------------------------+ | Bluetooth HCI USB transport layer | :ref:`bt_hci_raw` | :samp:`bt_hci_{n}` | +-----------------------------------+-------------------------+-------------------------+ +| USB Video Class (UVC) | Video device | :samp:`uvc_{n}` | ++-----------------------------------+-------------------------+-------------------------+ CDC ACM UART ============ diff --git a/drivers/video/CMakeLists.txt b/drivers/video/CMakeLists.txt index 102a841648e9..1c603a5757a3 100644 --- a/drivers/video/CMakeLists.txt +++ b/drivers/video/CMakeLists.txt @@ -9,6 +9,7 @@ zephyr_library_sources(video_device.c) zephyr_library_sources_ifdef(CONFIG_VIDEO_MCUX_CSI video_mcux_csi.c) zephyr_library_sources_ifdef(CONFIG_VIDEO_MCUX_MIPI_CSI2RX video_mcux_mipi_csi2rx.c) zephyr_library_sources_ifdef(CONFIG_VIDEO_SW_GENERATOR video_sw_generator.c) +zephyr_library_sources_ifdef(CONFIG_VIDEO_SW_PIPELINE video_sw_pipeline.c) zephyr_library_sources_ifdef(CONFIG_VIDEO_MT9M114 mt9m114.c) zephyr_library_sources_ifdef(CONFIG_VIDEO_OV7725 ov7725.c) zephyr_library_sources_ifdef(CONFIG_VIDEO_OV2640 ov2640.c) diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index a3b163443628..e03b954e7b4b 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -58,6 +58,8 @@ source "drivers/video/Kconfig.mcux_mipi_csi2rx" source "drivers/video/Kconfig.sw_generator" +source "drivers/video/Kconfig.sw_pipeline" + source "drivers/video/Kconfig.mt9m114" source "drivers/video/Kconfig.ov7725" diff --git a/drivers/video/Kconfig.sw_generator b/drivers/video/Kconfig.sw_generator index 7675908b4bc1..5090a2f46cf0 100644 --- a/drivers/video/Kconfig.sw_generator +++ b/drivers/video/Kconfig.sw_generator @@ -1,12 +1,9 @@ -# MT9m114 - # Copyright (c) 2016 Linaro Limited # SPDX-License-Identifier: Apache-2.0 -DT_CHOSEN_ZEPHYR_CAMERA := zephyr,camera - config VIDEO_SW_GENERATOR bool "Video Software Generator" - depends on !$(dt_chosen_enabled,$(DT_CHOSEN_ZEPHYR_CAMERA)) + depends on DT_HAS_ZEPHYR_VIDEO_SW_GENERATOR_ENABLED + default y help Enable video pattern generator (for testing purposes). diff --git a/drivers/video/Kconfig.sw_pipeline b/drivers/video/Kconfig.sw_pipeline new file mode 100644 index 000000000000..da25a9b1e49f --- /dev/null +++ b/drivers/video/Kconfig.sw_pipeline @@ -0,0 +1,18 @@ +# Copyright (c) 2025 tinyVision.ai +# SPDX-License-Identifier: Apache-2.0 + +config VIDEO_SW_PIPELINE + bool "Video Software Pipeline" + depends on DT_HAS_ZEPHYR_VIDEO_SW_PIPELINE_ENABLED + default y + help + Enable video stream processing based on lib/pixel. + +config VIDEO_SW_PIPELINE_THREAD_PRIORITY + int "Video Software Pipeline thread priority" + default 2 + depends on VIDEO_SW_PIPELINE + help + The Video Software Pipeline has a thread in which video frames are processed. + This option sets the priority of that thread. + The default is an arbitrary value above 0. diff --git a/drivers/video/video_ctrls.c b/drivers/video/video_ctrls.c index 25906f13c31c..0636a33d68ad 100644 --- a/drivers/video/video_ctrls.c +++ b/drivers/video/video_ctrls.c @@ -17,17 +17,52 @@ LOG_MODULE_REGISTER(video_ctrls, CONFIG_VIDEO_LOG_LEVEL); static inline const char *const *video_get_std_menu_ctrl(uint32_t id) { - static const char *const camera_power_line_frequency[] = {"Disabled", "50 Hz", "60 Hz", - "Auto", NULL}; - static const char *const camera_exposure_auto[] = {"Auto Mode", "Manual Mode", - "Shutter Priority Mode", - "Aperture Priority Mode", NULL}; - switch (id) { + /* User control menus, static so the returned pointer stays valid after return */ case VIDEO_CID_POWER_LINE_FREQUENCY: { - return camera_power_line_frequency; + static const char *const menu[] = { + "Disabled", "50 Hz", "60 Hz", "Auto", NULL + }; + + return menu; + } + + /* Camera control menus */ case VIDEO_CID_EXPOSURE_AUTO: { - return camera_exposure_auto; + static const char *const menu[] = { + "Auto Mode", "Manual Mode", "Shutter Priority Mode", + "Aperture Priority Mode", NULL + }; + + return menu; + } + case VIDEO_CID_AUTO_FOCUS_RANGE: { + static const char *const menu[] = { + "Auto", "Normal", "Macro", "Infinity", NULL + }; + + return menu; + } + case VIDEO_CID_COLORFX: { + static const char *const menu[] = { + "None", "Black & White", "Sepia", "Negative", "Emboss", "Sketch", + "Sky Blue", "Grass Green", "Skin Whiten", "Vivid", "Aqua", "Art Freeze", + "Silhouette", "Solarization", "Antique", "Set Cb/Cr", NULL + }; + + return menu; + } + case VIDEO_CID_AUTO_N_PRESET_WHITE_BALANCE: { + static const char *const menu[] = { + "Manual", "Auto", "Incandescent", "Fluorescent", "Fluorescent H", "Horizon", + "Daylight", "Flash", "Cloudy", "Shade", "Greyworld", NULL + }; + + return menu; + } + case VIDEO_CID_ISO_SENSITIVITY_AUTO: { + static const char *const menu[] = { + "Manual", "Auto", NULL + }; + + return menu; + } + case VIDEO_CID_EXPOSURE_METERING: { + static const char *const menu[] = { + "Average", "Center Weighted", "Spot", "Matrix", NULL + }; + + return menu; + } + case VIDEO_CID_SCENE_MODE: { + static const char *const menu[] = { + "None", "Backlight", "Beach/Snow", "Candle Light", "Dusk/Dawn", + "Fall Colors", "Fireworks", "Landscape", "Night", "Party/Indoor", + "Portrait", "Sports", "Sunset", "Text", NULL + }; + + return menu; + } + case VIDEO_CID_CAMERA_ORIENTATION: { + static const char *const menu[] = { + "Front", "Back", "External", NULL + }; + + return menu; + } default: return NULL; } @@ -69,12 +104,35 @@ static inline void set_type_flag(uint32_t id, enum video_ctrl_type *type, uint32 *flags = 0; switch (id) { + case VIDEO_CID_AUTO_WHITE_BALANCE: + case VIDEO_CID_AUTOGAIN: case VIDEO_CID_HFLIP: case VIDEO_CID_VFLIP: + case VIDEO_CID_HUE_AUTO: + case VIDEO_CID_CHROMA_AGC: + case VIDEO_CID_COLOR_KILLER: + case VIDEO_CID_AUTOBRIGHTNESS: + case VIDEO_CID_ILLUMINATORS_1: + case VIDEO_CID_ILLUMINATORS_2: + case VIDEO_CID_EXPOSURE_AUTO_PRIORITY: + case VIDEO_CID_FOCUS_AUTO: + case VIDEO_CID_PRIVACY: + case VIDEO_CID_WIDE_DYNAMIC_RANGE: + case VIDEO_CID_IMAGE_STABILIZATION: *type = VIDEO_CTRL_TYPE_BOOLEAN; break; + case VIDEO_CID_POWER_LINE_FREQUENCY: + case VIDEO_CID_EXPOSURE_AUTO: + case VIDEO_CID_AUTO_FOCUS_RANGE: + case VIDEO_CID_COLORFX: + case VIDEO_CID_AUTO_N_PRESET_WHITE_BALANCE: + case VIDEO_CID_ISO_SENSITIVITY_AUTO: + case VIDEO_CID_EXPOSURE_METERING: + case VIDEO_CID_SCENE_MODE: case VIDEO_CID_TEST_PATTERN: + case VIDEO_CID_CAMERA_ORIENTATION: + case VIDEO_CID_HDR_SENSOR_MODE: *type = VIDEO_CTRL_TYPE_MENU; break; case VIDEO_CID_PIXEL_RATE: @@ -388,6 +446,16 @@ static inline const char *video_get_ctrl_name(uint32_t id) return "Saturation"; case VIDEO_CID_HUE: return "Hue"; + case VIDEO_CID_AUTO_WHITE_BALANCE: + return "White Balance, Automatic"; + case VIDEO_CID_DO_WHITE_BALANCE: + return "Do White Balance"; + case VIDEO_CID_RED_BALANCE: + return "Red Balance"; + case VIDEO_CID_BLUE_BALANCE: + return "Blue Balance"; + case VIDEO_CID_GAMMA: + return "Gamma"; case VIDEO_CID_EXPOSURE: return "Exposure"; case VIDEO_CID_AUTOGAIN: @@ -402,10 +470,114 @@ static inline const char *video_get_ctrl_name(uint32_t id) return "Vertical Flip"; case VIDEO_CID_POWER_LINE_FREQUENCY: return "Power Line Frequency"; + case VIDEO_CID_HUE_AUTO: + return "Hue, Automatic"; + case VIDEO_CID_WHITE_BALANCE_TEMPERATURE: + return "White Balance Temperature"; + case VIDEO_CID_SHARPNESS: + return "Sharpness"; + case VIDEO_CID_BACKLIGHT_COMPENSATION: + return "Backlight Compensation"; + case VIDEO_CID_CHROMA_AGC: + return "Chroma AGC"; + case VIDEO_CID_COLOR_KILLER: + return "Color Killer"; + case VIDEO_CID_COLORFX: + return "Color Effects"; + case VIDEO_CID_AUTOBRIGHTNESS: + return "Brightness, Automatic"; + case VIDEO_CID_BAND_STOP_FILTER: + return "Band-Stop Filter"; + case VIDEO_CID_ROTATE: + return "Rotate"; + case VIDEO_CID_BG_COLOR: + return "Background Color"; + case VIDEO_CID_CHROMA_GAIN: + return "Chroma Gain"; + case VIDEO_CID_ILLUMINATORS_1: + return "Illuminator 1"; + case VIDEO_CID_ILLUMINATORS_2: + return "Illuminator 2"; + case VIDEO_CID_ALPHA_COMPONENT: + return "Alpha Component"; + case VIDEO_CID_COLORFX_CBCR: + return "Color Effects, CbCr"; + case VIDEO_CID_COLORFX_RGB: + return "Color Effects, RGB"; /* Camera controls */ + case VIDEO_CID_EXPOSURE_AUTO: + return "Auto Exposure"; + case VIDEO_CID_EXPOSURE_ABSOLUTE: + return "Exposure Time, Absolute"; + case VIDEO_CID_EXPOSURE_AUTO_PRIORITY: + return "Exposure, Dynamic Framerate"; + case VIDEO_CID_PAN_RELATIVE: + return "Pan, Relative"; + case VIDEO_CID_TILT_RELATIVE: + return "Tilt, Relative"; + case VIDEO_CID_PAN_RESET: + return "Pan, Reset"; + case VIDEO_CID_TILT_RESET: + return "Tilt, Reset"; + case VIDEO_CID_PAN_ABSOLUTE: + return "Pan, Absolute"; + case VIDEO_CID_TILT_ABSOLUTE: + return "Tilt, Absolute"; +
case VIDEO_CID_FOCUS_ABSOLUTE: + return "Focus, Absolute"; + case VIDEO_CID_FOCUS_RELATIVE: + return "Focus, Relative"; + case VIDEO_CID_FOCUS_AUTO: + return "Focus, Automatic Continuous"; case VIDEO_CID_ZOOM_ABSOLUTE: return "Zoom, Absolute"; + case VIDEO_CID_ZOOM_RELATIVE: + return "Zoom, Relative"; + case VIDEO_CID_ZOOM_CONTINUOUS: + return "Zoom, Continuous"; + case VIDEO_CID_PRIVACY: + return "Privacy"; + case VIDEO_CID_IRIS_ABSOLUTE: + return "Iris, Absolute"; + case VIDEO_CID_IRIS_RELATIVE: + return "Iris, Relative"; + case VIDEO_CID_AUTO_EXPOSURE_BIAS: + return "Auto Exposure, Bias"; + case VIDEO_CID_AUTO_N_PRESET_WHITE_BALANCE: + return "White Balance, Auto & Preset"; + case VIDEO_CID_WIDE_DYNAMIC_RANGE: + return "Wide Dynamic Range"; + case VIDEO_CID_IMAGE_STABILIZATION: + return "Image Stabilization"; + case VIDEO_CID_ISO_SENSITIVITY: + return "ISO Sensitivity"; + case VIDEO_CID_ISO_SENSITIVITY_AUTO: + return "ISO Sensitivity, Auto"; + case VIDEO_CID_EXPOSURE_METERING: + return "Exposure, Metering Mode"; + case VIDEO_CID_SCENE_MODE: + return "Scene Mode"; + case VIDEO_CID_3A_LOCK: + return "3A Lock"; + case VIDEO_CID_AUTO_FOCUS_START: + return "Auto Focus, Start"; + case VIDEO_CID_AUTO_FOCUS_STOP: + return "Auto Focus, Stop"; + case VIDEO_CID_AUTO_FOCUS_STATUS: + return "Auto Focus, Status"; + case VIDEO_CID_AUTO_FOCUS_RANGE: + return "Auto Focus, Range"; + case VIDEO_CID_PAN_SPEED: + return "Pan, Speed"; + case VIDEO_CID_TILT_SPEED: + return "Tilt, Speed"; + case VIDEO_CID_CAMERA_ORIENTATION: + return "Camera Orientation"; + case VIDEO_CID_CAMERA_SENSOR_ROTATION: + return "Camera Sensor Rotation"; + case VIDEO_CID_HDR_SENSOR_MODE: + return "HDR Sensor Mode"; /* JPEG encoder controls */ case VIDEO_CID_JPEG_COMPRESSION_QUALITY: diff --git a/drivers/video/video_sw_generator.c b/drivers/video/video_sw_generator.c index ed8ade5270ca..d9e18bb02dee 100644 --- a/drivers/video/video_sw_generator.c +++ b/drivers/video/video_sw_generator.c @@ -4,7 +4,7 @@ * SPDX-License-Identifier: Apache-2.0 */ -#define DT_DRV_COMPAT zephyr_sw_generator +#define DT_DRV_COMPAT zephyr_video_sw_generator #include <zephyr/kernel.h> #include <zephyr/drivers/video.h> @@ -338,14 +338,6 @@ static DEVICE_API(video, video_sw_generator_driver_api) = { #endif }; -static struct video_sw_generator_data video_sw_generator_data_0 = { - .fmt.width = 320, - .fmt.height = 160, - .fmt.pitch = 320 * 2, - .fmt.pixelformat = VIDEO_PIX_FMT_RGB565, - .frame_rate = DEFAULT_FRAME_RATE, -}; - static int video_sw_generator_init_controls(const struct device *dev) { struct video_sw_generator_data *data = dev->data; @@ -366,8 +358,19 @@ static int video_sw_generator_init(const struct device *dev) return video_sw_generator_init_controls(dev); } -DEVICE_DEFINE(video_sw_generator, "VIDEO_SW_GENERATOR", &video_sw_generator_init, NULL, - &video_sw_generator_data_0, NULL, POST_KERNEL, CONFIG_VIDEO_INIT_PRIORITY, - &video_sw_generator_driver_api); - -VIDEO_DEVICE_DEFINE(video_sw_generator, DEVICE_GET(video_sw_generator), NULL); +#define VIDEO_SW_GENERATOR_DEFINE(n) \ + static struct video_sw_generator_data video_sw_generator_data_##n = { \ + .fmt.width = 320, \ + .fmt.height = 160, \ + .fmt.pitch = 320 * 2, \ + .fmt.pixelformat = VIDEO_PIX_FMT_RGB565, \ + .frame_rate = DEFAULT_FRAME_RATE, \ + }; \ + \ + DEVICE_DT_INST_DEFINE(n, &video_sw_generator_init, NULL, &video_sw_generator_data_##n, \ + NULL, POST_KERNEL, CONFIG_VIDEO_INIT_PRIORITY, \ + &video_sw_generator_driver_api); \ + \ + VIDEO_DEVICE_DEFINE(video_sw_generator_##n, DEVICE_DT_INST_GET(n), NULL); + +DT_INST_FOREACH_STATUS_OKAY(VIDEO_SW_GENERATOR_DEFINE)
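With the generator now instantiated from devicetree, an application can resolve it like any other video device. A minimal usage sketch follows, assuming a board overlay declares a zephyr,video-sw-generator node and points the new zephyr,camera chosen property (documented in api.rst above) at it; the 320x160 RGB565 default comes from the data initializer above:

#include <zephyr/device.h>
#include <zephyr/drivers/video.h>

int start_test_pattern(void)
{
	/* Resolved through the zephyr,camera chosen node */
	const struct device *cam = DEVICE_DT_GET(DT_CHOSEN(zephyr_camera));
	struct video_format fmt;

	if (!device_is_ready(cam)) {
		return -ENODEV;
	}

	/* Read back the driver default (320x160 RGB565) and confirm it */
	video_get_format(cam, VIDEO_EP_OUT, &fmt);
	video_set_format(cam, VIDEO_EP_OUT, &fmt);

	return video_stream_start(cam);
}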
diff --git a/drivers/video/video_sw_pipeline.c b/drivers/video/video_sw_pipeline.c new file mode 100644 index 000000000000..bf2dbe6b2993 --- /dev/null +++ b/drivers/video/video_sw_pipeline.c @@ -0,0 +1,366 @@ +/* + * Copyright (c) 2025 tinyVision.ai Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#define DT_DRV_COMPAT zephyr_video_sw_pipeline + +#include <zephyr/kernel.h> +#include <zephyr/drivers/video.h> +#include <zephyr/drivers/video/sw_pipeline.h> +#include <zephyr/logging/log.h> +#include <zephyr/sys/ring_buffer.h> + +LOG_MODULE_REGISTER(video_sw_pipeline, CONFIG_VIDEO_LOG_LEVEL); + +struct video_sw_pipeline_data { + struct k_fifo fifo_input_in; + struct k_fifo fifo_input_out; + struct k_fifo fifo_output_in; + struct k_fifo fifo_output_out; + struct k_poll_signal *sig; + struct pixel_stream step_root; + struct pixel_stream step_export; + struct k_mutex mutex; + struct video_format_cap fmts_in[2]; + struct video_format_cap fmts_out[2]; + void (*load)(struct pixel_stream *s, const uint8_t *b, size_t n); + const struct device *source_dev; +}; + +static void video_sw_pipeline_thread(void *p0, void *p1, void *p2) +{ + const struct device *dev = p0; + struct video_sw_pipeline_data *data = dev->data; + + while (true) { + struct video_buffer *ibuf; + struct video_buffer *obuf; + + ibuf = k_fifo_get(&data->fifo_input_in, K_FOREVER); + obuf = k_fifo_get(&data->fifo_output_in, K_FOREVER); + + /* Wait until the stream is started and unlock to not block the API calls */ + k_mutex_lock(&data->mutex, K_FOREVER); + k_mutex_unlock(&data->mutex); + + /* Load the output video buffer and run the stream */ + ring_buf_init(&data->step_export.ring, obuf->size, obuf->buffer); + data->load(data->step_root.next, ibuf->buffer, ibuf->bytesused); + + /* Fill in the metadata of the output buffer */ + obuf->timestamp = k_uptime_get_32(); + obuf->bytesused = ring_buf_size_get(&data->step_export.ring); + obuf->line_offset = 0; + + /* Move both buffers, already dequeued above, to their completion queues */ + k_fifo_put(&data->fifo_input_out, ibuf); + k_fifo_put(&data->fifo_output_out, obuf); + + if (IS_ENABLED(CONFIG_POLL) && data->sig != NULL) { + k_poll_signal_raise(data->sig, VIDEO_BUF_DONE); + } + } +} + +static int video_sw_pipeline_get_caps(const struct device *dev, enum video_endpoint_id ep, + struct video_caps *caps) +{ + struct video_sw_pipeline_data *data = dev->data; + const struct video_format_cap *fmts = (ep == VIDEO_EP_IN) ?
data->fmts_in : data->fmts_out; + + __ASSERT(data->fmts_in[0].pixelformat != 0, "Call video_sw_pipeline_set_stream() first"); + __ASSERT(data->fmts_out[0].pixelformat != 0, "Call video_sw_pipeline_set_stream() first"); + + if (ep != VIDEO_EP_IN && ep != VIDEO_EP_OUT) { + LOG_ERR("Need to specify input or output endpoint."); + return -EINVAL; + } + + caps->min_vbuf_count = 0; + caps->min_line_count = 1; + caps->max_line_count = LINE_COUNT_HEIGHT; + caps->format_caps = fmts; + + return 0; +} + +void video_sw_pipeline_set_source(const struct device *dev, const struct device *source_dev) +{ + struct video_sw_pipeline_data *data = dev->data; + + data->source_dev = source_dev; +} + +void video_sw_pipeline_set_loader(const struct device *dev, + void (*fn)(struct pixel_stream *s, const uint8_t *b, size_t n)) +{ + struct video_sw_pipeline_data *data = dev->data; + + data->load = fn; +} + +void video_sw_pipeline_set_pipeline(const struct device *dev, struct pixel_stream *strm, + uint32_t pixfmt_in, uint16_t width_in, uint16_t height_in, + uint32_t pixfmt_out, uint16_t width_out, uint16_t height_out) +{ + struct video_sw_pipeline_data *data = dev->data; + struct pixel_stream *step = &data->step_root; + + /* Set the driver input/output format according to the stream input/output format */ + data->fmts_in[0].pixelformat = pixfmt_in; + data->fmts_in[0].width_min = data->fmts_in[0].width_max = width_in; + data->fmts_in[0].height_min = data->fmts_in[0].height_max = height_in; + data->fmts_out[0].pixelformat = pixfmt_out; + data->fmts_out[0].width_min = data->fmts_out[0].width_max = width_out; + data->fmts_out[0].height_min = data->fmts_out[0].height_max = height_out; + + /* Find the first and last steps of the stream */ + data->step_root.next = strm; + while (step->next != NULL) { + step = step->next; + } + step = step->next = &data->step_export; + + /* Add an extra export step */ + data->step_export.name = "[export video_sw_pipeline]"; + data->step_export.width = width_out; + data->step_export.height = height_out; + data->step_export.pitch = width_out * video_bits_per_pixel(pixfmt_out) / BITS_PER_BYTE; + data->step_export.run = NULL; +} + +static int video_sw_pipeline_set_fmt(const struct device *dev, enum video_endpoint_id ep, + struct video_format *fmt) +{ + struct video_sw_pipeline_data *data = dev->data; + const struct video_format_cap *fmts = (ep == VIDEO_EP_IN) ? 
data->fmts_in : data->fmts_out; + struct video_format fmt_out = { + .width = data->fmts_in[0].width_min, + .height = data->fmts_in[0].height_min, + .pixelformat = data->fmts_in[0].pixelformat, + .pitch = data->fmts_in[0].width_min * + video_bits_per_pixel(data->fmts_in[0].pixelformat) / BITS_PER_BYTE, + }; + + __ASSERT(data->fmts_in[0].pixelformat != 0, "Call video_sw_pipeline_set_stream() first"); + __ASSERT(data->fmts_out[0].pixelformat != 0, "Call video_sw_pipeline_set_stream() first"); + + if (ep != VIDEO_EP_IN && ep != VIDEO_EP_OUT) { + LOG_ERR("Need to specify input or output endpoint."); + return -EINVAL; + } + + if (fmt->pixelformat != fmts[0].pixelformat || fmt->width != fmts[0].width_min || + fmt->height != fmts[0].height_min) { + LOG_ERR("Requested format %ux%u '%s' does not match the configured format", + fmt->width, fmt->height, VIDEO_FOURCC_TO_STR(fmt->pixelformat)); + return -EINVAL; + } + + /* Apply the format configured by video_sw_pipeline_set_pipeline() to the source device */ + return video_set_format(data->source_dev, VIDEO_EP_OUT, &fmt_out); +} + +static int video_sw_pipeline_get_fmt(const struct device *dev, enum video_endpoint_id ep, + struct video_format *fmt) +{ + struct video_sw_pipeline_data *data = dev->data; + const struct video_format_cap *fmts = (ep == VIDEO_EP_IN) ? data->fmts_in : data->fmts_out; + + if (ep != VIDEO_EP_IN && ep != VIDEO_EP_OUT) { + LOG_ERR("Need to specify input or output endpoint."); + return -EINVAL; + } + + fmt->pixelformat = fmts[0].pixelformat; + fmt->width = fmts[0].width_min; + fmt->height = fmts[0].height_min; + fmt->pitch = fmt->width * video_bits_per_pixel(fmt->pixelformat) / BITS_PER_BYTE; + + return 0; +} + +static int video_sw_pipeline_flush(const struct device *dev, enum video_endpoint_id ep, bool cancel) +{ + struct video_sw_pipeline_data *data = dev->data; + struct video_buffer *vbuf; + + if (cancel) { + /* Skip all the buffers of the input endpoint, unprocessed */ + while ((vbuf = k_fifo_get(&data->fifo_input_in, K_NO_WAIT)) != NULL) { + k_fifo_put(&data->fifo_input_out, vbuf); + if (IS_ENABLED(CONFIG_POLL) && data->sig) { + k_poll_signal_raise(data->sig, VIDEO_BUF_ABORTED); + } + } + /* Skip all the buffers of the output endpoint, unprocessed */ + while ((vbuf = k_fifo_get(&data->fifo_output_in, K_NO_WAIT)) != NULL) { + k_fifo_put(&data->fifo_output_out, vbuf); + if (IS_ENABLED(CONFIG_POLL) && data->sig) { + k_poll_signal_raise(data->sig, VIDEO_BUF_ABORTED); + } + } + } else { + /* Wait for all buffers to be processed on the input endpoint */ + while (!k_fifo_is_empty(&data->fifo_input_in)) { + k_sleep(K_MSEC(1)); + } + /* Wait for all buffers to be processed on the output endpoint */ + while (!k_fifo_is_empty(&data->fifo_output_in)) { + k_sleep(K_MSEC(1)); + } + } + + return 0; +} + +static int video_sw_pipeline_enqueue(const struct device *dev, enum video_endpoint_id ep, + struct video_buffer *vbuf) +{ + struct video_sw_pipeline_data *data = dev->data; + struct k_fifo *fifo = (ep == VIDEO_EP_IN) ?
&data->fifo_input_in : &data->fifo_output_in; + + __ASSERT(data->fmts_in[0].pixelformat != 0, "Call video_sw_pipeline_set_stream() first"); + __ASSERT(data->fmts_out[0].pixelformat != 0, "Call video_sw_pipeline_set_stream() first"); + + if (ep != VIDEO_EP_IN && ep != VIDEO_EP_OUT) { + LOG_ERR("Need to specify input or output endpoint."); + return -EINVAL; + } + + k_fifo_put(fifo, vbuf); + + return 0; +} + +static int video_sw_pipeline_dequeue(const struct device *dev, enum video_endpoint_id ep, + struct video_buffer **vbuf, k_timeout_t timeout) +{ + struct video_sw_pipeline_data *data = dev->data; + struct k_fifo *fifo = (ep == VIDEO_EP_IN) ? &data->fifo_input_out : &data->fifo_output_out; + + __ASSERT(data->fmts_in[0].pixelformat != 0, "Call video_sw_pipeline_set_stream() first"); + __ASSERT(data->fmts_out[0].pixelformat != 0, "Call video_sw_pipeline_set_stream() first"); + + if (ep != VIDEO_EP_IN && ep != VIDEO_EP_OUT) { + LOG_ERR("Need to specify input or output endpoint."); + return -EINVAL; + } + + *vbuf = k_fifo_get(fifo, timeout); + if (*vbuf == NULL) { + LOG_DBG("Failed to dequeue %s buffer", ep == VIDEO_EP_IN ? "input" : "output"); + return -EAGAIN; + } + + return 0; +} + +static int video_sw_pipeline_set_stream(const struct device *dev, bool enable) +{ + struct video_sw_pipeline_data *data = dev->data; + + if (enable) { + /* Release the stream processing thread */ + k_mutex_unlock(&data->mutex); + } else { + /* This will stop the stream thread without blocking this thread for long */ + k_mutex_lock(&data->mutex, K_FOREVER); + } + + return 0; +} + +static int video_sw_pipeline_set_frmival(const struct device *dev, enum video_endpoint_id ep, + struct video_frmival *frmival) +{ + struct video_sw_pipeline_data *data = dev->data; + + __ASSERT(data->source_dev != NULL, "Call video_sw_pipeline_set_source() first"); + + return video_set_frmival(data->source_dev, ep, frmival); +} + +static int video_sw_pipeline_get_frmival(const struct device *dev, enum video_endpoint_id ep, + struct video_frmival *frmival) +{ + struct video_sw_pipeline_data *data = dev->data; + + __ASSERT(data->source_dev != NULL, "Call video_sw_pipeline_set_source() first"); + + return video_get_frmival(data->source_dev, ep, frmival); +} + +static int video_sw_pipeline_enum_frmival(const struct device *dev, enum video_endpoint_id ep, + struct video_frmival_enum *fie) +{ + struct video_sw_pipeline_data *data = dev->data; + + __ASSERT(data->source_dev != NULL, "Call video_sw_pipeline_set_source() first"); + + return video_enum_frmival(data->source_dev, ep, fie); +} + +#ifdef CONFIG_POLL +static int video_sw_pipeline_set_signal(const struct device *dev, enum video_endpoint_id ep, + struct k_poll_signal *sig) +{ + struct video_sw_pipeline_data *data = dev->data; + + if (ep != VIDEO_EP_IN && ep != VIDEO_EP_OUT && ep != VIDEO_EP_ALL) { + return -EINVAL; + } + + data->sig = sig; + + return 0; +} +#endif + +static DEVICE_API(video, video_sw_pipeline_driver_api) = { + .set_format = video_sw_pipeline_set_fmt, + .get_format = video_sw_pipeline_get_fmt, + .flush = video_sw_pipeline_flush, + .enqueue = video_sw_pipeline_enqueue, + .dequeue = video_sw_pipeline_dequeue, + .get_caps = video_sw_pipeline_get_caps, + .set_stream = video_sw_pipeline_set_stream, + .set_frmival = video_sw_pipeline_set_frmival, + .get_frmival = video_sw_pipeline_get_frmival, + .enum_frmival = video_sw_pipeline_enum_frmival, +#ifdef CONFIG_POLL + .set_signal = video_sw_pipeline_set_signal, +#endif +}; + +static int video_sw_pipeline_init(const struct device 
*dev) +{ + struct video_sw_pipeline_data *data = dev->data; + + k_fifo_init(&data->fifo_input_in); + k_fifo_init(&data->fifo_input_out); + k_fifo_init(&data->fifo_output_in); + k_fifo_init(&data->fifo_output_out); + k_mutex_init(&data->mutex); + + LOG_DBG("Initial format %ux%u", data->fmts_in[0].width_min, data->fmts_in[0].height_min); + + return 0; +} + +#define VIDEO_SW_PIPELINE_DEFINE(n) \ + static struct video_sw_pipeline_data video_sw_pipeline_data_##n = { \ + .load = &pixel_stream_load, \ + }; \ + \ + DEVICE_DT_INST_DEFINE(n, &video_sw_pipeline_init, NULL, &video_sw_pipeline_data_##n, NULL, \ + POST_KERNEL, CONFIG_VIDEO_INIT_PRIORITY, \ + &video_sw_pipeline_driver_api); \ + \ + K_THREAD_DEFINE(video_sw_pipeline_##n, 1024, video_sw_pipeline_thread, DEVICE_DT_INST_GET(n), \ + NULL, NULL, CONFIG_VIDEO_SW_PIPELINE_THREAD_PRIORITY, 0, 0); + +DT_INST_FOREACH_STATUS_OKAY(VIDEO_SW_PIPELINE_DEFINE)
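Taken together, the driver's three setters suggest the following wiring sketch. The debayer step macro comes from the new include/zephyr/pixel/bayer.h further down in this patch; VIDEO_PIX_FMT_RGGB8/VIDEO_PIX_FMT_RGB24 and the 320x240 geometry are illustrative assumptions, as is the assumption that the PIXEL_*STREAM macros define a struct pixel_stream variable of the given name:

#include <zephyr/device.h>
#include <zephyr/drivers/video.h>
#include <zephyr/drivers/video/sw_pipeline.h>
#include <zephyr/pixel/bayer.h>

/* Assumed to define "struct pixel_stream step_debayer" for a 320x240 input */
PIXEL_RGGB8STREAM_TO_RGB24STREAM_3X3(step_debayer, 320, 240);

int setup_pipeline(const struct device *source)
{
	const struct device *pipe = DEVICE_DT_GET_ONE(zephyr_video_sw_pipeline);

	/* Frame interval calls on the pipeline are forwarded to this source */
	video_sw_pipeline_set_source(pipe, source);

	/* Single-step stream; longer chains are linked through ->next */
	video_sw_pipeline_set_pipeline(pipe, &step_debayer,
				       VIDEO_PIX_FMT_RGGB8, 320, 240,
				       VIDEO_PIX_FMT_RGB24, 320, 240);

	/* The default loader (pixel_stream_load, see the data initializer
	 * above) feeds frames into the stream, so no explicit
	 * video_sw_pipeline_set_loader() call is needed here.
	 */
	return 0;
}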
diff --git a/dts/bindings/usb/zephyr,uvc-device.yaml b/dts/bindings/usb/zephyr,uvc-device.yaml new file mode 100644 index 000000000000..4a4c36b373b1 --- /dev/null +++ b/dts/bindings/usb/zephyr,uvc-device.yaml @@ -0,0 +1,12 @@ +# Copyright (c) 2025 tinyVision.ai Inc. +# SPDX-License-Identifier: Apache-2.0 + +description: | + Declare a USB Video Class (UVC) device instance. + + Each UVC instance added to the USB Device Controller (UDC) node will be visible + as a new camera from the host point of view. + +compatible: "zephyr,uvc-device" + +include: base.yaml diff --git a/dts/bindings/video/zephyr,video-sw-generator.yaml b/dts/bindings/video/zephyr,video-sw-generator.yaml new file mode 100644 index 000000000000..0fd6f232ece2 --- /dev/null +++ b/dts/bindings/video/zephyr,video-sw-generator.yaml @@ -0,0 +1,8 @@ +# Copyright 2025 tinyVision.ai Inc. +# SPDX-License-Identifier: Apache-2.0 + +description: Emulated Video test pattern generator + +compatible: "zephyr,video-sw-generator" + +include: base.yaml diff --git a/dts/bindings/video/zephyr,video-sw-pipeline.yaml b/dts/bindings/video/zephyr,video-sw-pipeline.yaml new file mode 100644 index 000000000000..476f81a0a7e5 --- /dev/null +++ b/dts/bindings/video/zephyr,video-sw-pipeline.yaml @@ -0,0 +1,12 @@ +# Copyright 2025 tinyVision.ai Inc. +# SPDX-License-Identifier: Apache-2.0 + +description: Emulated Video test pattern pipeline + +compatible: "zephyr,video-sw-pipeline" + +include: base.yaml + +child-binding: + child-binding: + include: video-interfaces.yaml diff --git a/include/zephyr/drivers/video-controls.h b/include/zephyr/drivers/video-controls.h index 4304316c1818..877ffce315b7 100644 --- a/include/zephyr/drivers/video-controls.h +++ b/include/zephyr/drivers/video-controls.h @@ -54,7 +54,26 @@ extern "C" { /** Shift in the tint of every colors, clockwise in a RGB color wheel */ #define VIDEO_CID_HUE (VIDEO_CID_BASE + 3) -/** Amount of time an image sensor is exposed to light, affecting the brightness */ +/** Automatic white balance (cameras). */ +#define VIDEO_CID_AUTO_WHITE_BALANCE (VIDEO_CID_BASE + 12) + +/** When set the device will do a white balance and then hold the current setting. + * This is an action control that acts as a trigger, the value is ignored. + * Unlike @ref VIDEO_CID_AUTO_WHITE_BALANCE that continuously adjusts the white balance when on, + * this control performs a single white balance adjustment cycle whenever set to any value. + */ +#define VIDEO_CID_DO_WHITE_BALANCE (VIDEO_CID_BASE + 13) + +/** Red chroma balance, as a ratio to the green channel. */ +#define VIDEO_CID_RED_BALANCE (VIDEO_CID_BASE + 14) + +/** Blue chroma balance, as a ratio to the green channel. */ +#define VIDEO_CID_BLUE_BALANCE (VIDEO_CID_BASE + 15) + +/** Gamma adjust. */ +#define VIDEO_CID_GAMMA (VIDEO_CID_BASE + 16) + +/** Image sensor exposure time. */ #define VIDEO_CID_EXPOSURE (VIDEO_CID_BASE + 17) /** Automatic gain control */ @@ -81,9 +100,110 @@ enum video_power_line_frequency { VIDEO_CID_POWER_LINE_FREQUENCY_AUTO = 3, }; -/** Balance of colors in direction of blue (cold) or red (warm) */ +/** Enables automatic hue control by the device. + * Setting @ref VIDEO_CID_HUE while automatic hue control is enabled is undefined. + * Drivers should ignore such requests. + */ +#define VIDEO_CID_HUE_AUTO (VIDEO_CID_BASE + 25) + +/** White balance settings as a color temperature in Kelvin. + * A driver should have a minimum range of 2800 (incandescent) to 6500 (daylight). + */ #define VIDEO_CID_WHITE_BALANCE_TEMPERATURE (VIDEO_CID_BASE + 26) +/** Adjusts the sharpness filters in a camera. + * The minimum value disables the filters, higher values give a sharper picture. + */ +#define VIDEO_CID_SHARPNESS (VIDEO_CID_BASE + 27) + +/** Adjusts the backlight compensation in a camera. + * The minimum value disables backlight compensation. + */ +#define VIDEO_CID_BACKLIGHT_COMPENSATION (VIDEO_CID_BASE + 28) + +/** Chroma automatic gain control. */ +#define VIDEO_CID_CHROMA_AGC (VIDEO_CID_BASE + 29) + +/** Enable the color killer, i.e. force a black and white image in case of a weak video signal. */ +#define VIDEO_CID_COLOR_KILLER (VIDEO_CID_BASE + 30) + +/** Selects a color effect. */ +#define VIDEO_CID_COLORFX (VIDEO_CID_BASE + 31) +enum video_colorfx { + VIDEO_COLORFX_NONE = 0, + VIDEO_COLORFX_BW = 1, + VIDEO_COLORFX_SEPIA = 2, + VIDEO_COLORFX_NEGATIVE = 3, + VIDEO_COLORFX_EMBOSS = 4, + VIDEO_COLORFX_SKETCH = 5, + VIDEO_COLORFX_SKY_BLUE = 6, + VIDEO_COLORFX_GRASS_GREEN = 7, + VIDEO_COLORFX_SKIN_WHITEN = 8, + VIDEO_COLORFX_VIVID = 9, + VIDEO_COLORFX_AQUA = 10, + VIDEO_COLORFX_ART_FREEZE = 11, + VIDEO_COLORFX_SILHOUETTE = 12, + VIDEO_COLORFX_SOLARIZATION = 13, + VIDEO_COLORFX_ANTIQUE = 14, + VIDEO_COLORFX_SET_CBCR = 15, + VIDEO_COLORFX_SET_RGB = 16, +}; + +/** Enable automatic brightness adjustment. */ +#define VIDEO_CID_AUTOBRIGHTNESS (VIDEO_CID_BASE + 32) + +/** Switch the band-stop filter of a camera sensor on or off, or specify its strength. + * Such band-stop filters can be used, for example, to filter out the fluorescent light component. + */ +#define VIDEO_CID_BAND_STOP_FILTER (VIDEO_CID_BASE + 33) + +/** Rotates the image by the specified angle. + * Common angles are 90, 180 and 270. Rotating by 90 or 270 swaps the width and + * height of the display window. + */ +#define VIDEO_CID_ROTATE (VIDEO_CID_BASE + 34) + +/** Sets the background color on the current output device. + * The 32 bits of the supplied value are interpreted as follows: + * - bits [7:0] as Red component, + * - bits [15:8] as Green component, + * - bits [23:16] as Blue component, + * - bits [31:24] must be zero. + */ +#define VIDEO_CID_BG_COLOR (VIDEO_CID_BASE + 35) + +/** Adjusts the Chroma gain control (for use when chroma AGC is disabled). */ +#define VIDEO_CID_CHROMA_GAIN (VIDEO_CID_BASE + 36) + +/** Switch on or off the illuminator 1 of the device (usually a microscope). */ +#define VIDEO_CID_ILLUMINATORS_1 (VIDEO_CID_BASE + 37) + +/** Switch on or off the illuminator 2 of the device (usually a microscope). */ +#define VIDEO_CID_ILLUMINATORS_2 (VIDEO_CID_BASE + 38) + +/** Sets the alpha color component. + * Some devices produce data with a user-controllable alpha component. Set the value applied to + * the alpha channel of every pixel produced. + */ +#define VIDEO_CID_ALPHA_COMPONENT (VIDEO_CID_BASE + 41) + +/** Determines the Cb and Cr coefficients for the @ref VIDEO_COLORFX_SET_CBCR color effect. + * The 32 bits of the supplied value are interpreted as follows: + * - bits [7:0] as Cr component, + * - bits [15:8] as Cb component, + * - bits [31:16] must be zero. + */ +#define VIDEO_CID_COLORFX_CBCR (VIDEO_CID_BASE + 42) + +/** Determines the Red, Green, and Blue coefficients for @ref VIDEO_COLORFX_SET_RGB color effect. + * The 32 bits of the supplied value are interpreted as follows: + * - bits [7:0] as Blue component, + * - bits [15:8] as Green component, + * - bits [23:16] as Red component, + * - bits [31:24] must be zero. + */ +#define VIDEO_CID_COLORFX_RGB (VIDEO_CID_BASE + 43) +
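These packed-integer layouts are easy to get wrong, so here are two helpers matching the bit positions documented above; the function names are illustrative, not part of the API:

#include <stdint.h>

/* VIDEO_CID_BG_COLOR: red in bits [7:0], green in [15:8], blue in [23:16] */
static inline uint32_t video_bg_color(uint8_t red, uint8_t green, uint8_t blue)
{
	return ((uint32_t)blue << 16) | ((uint32_t)green << 8) | (uint32_t)red;
}

/* VIDEO_CID_COLORFX_RGB: blue in bits [7:0], green in [15:8], red in [23:16] */
static inline uint32_t video_colorfx_rgb(uint8_t red, uint8_t green, uint8_t blue)
{
	return ((uint32_t)red << 16) | ((uint32_t)green << 8) | (uint32_t)blue;
}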
/** Last base CID + 1 */ #define VIDEO_CID_LASTP1 (VIDEO_CID_BASE + 44) @@ -107,18 +227,307 @@ */ #define VIDEO_CID_CAMERA_CLASS_BASE 0x009a0900 -/** Adjustments of exposure time and/or iris aperture. */ +/** Enables automatic adjustments of the exposure time and/or iris aperture. + * Manually changing the exposure time or iris aperture while this control is not set to + * @ref VIDEO_EXPOSURE_MANUAL is undefined. + * Drivers should ignore such requests. + */ #define VIDEO_CID_EXPOSURE_AUTO (VIDEO_CID_CAMERA_CLASS_BASE + 1) -enum video_exposure_auto_type { +enum video_exposure_auto { VIDEO_EXPOSURE_AUTO = 0, VIDEO_EXPOSURE_MANUAL = 1, VIDEO_EXPOSURE_SHUTTER_PRIORITY = 2, VIDEO_EXPOSURE_APERTURE_PRIORITY = 3 }; -/** Amount of optical zoom applied through to the camera optics */ +/** Determines the exposure time of the camera sensor. + * The exposure time is limited by the frame interval. Drivers should interpret the values as + * 100 µs units, where the value 1 stands for 1/10000th of a second, 10000 for 1 second and 100000 + * for 10 seconds. + */ +#define VIDEO_CID_EXPOSURE_ABSOLUTE (VIDEO_CID_CAMERA_CLASS_BASE + 2) + +/** Whether the device may dynamically vary the frame rate under the effect of auto-exposure. + * Applicable when @ref VIDEO_CID_EXPOSURE_AUTO is set to @ref VIDEO_EXPOSURE_AUTO or + * @ref VIDEO_EXPOSURE_APERTURE_PRIORITY. Disabled by default: the frame rate must remain constant. + */ +#define VIDEO_CID_EXPOSURE_AUTO_PRIORITY (VIDEO_CID_CAMERA_CLASS_BASE + 3) + +/** This write-only control turns the camera horizontally by the specified amount. + * The unit is undefined. A positive value moves the camera to the right (clockwise when viewed + * from above), a negative value to the left. A value of zero does not cause motion. + */ +#define VIDEO_CID_PAN_RELATIVE (VIDEO_CID_CAMERA_CLASS_BASE + 4) + +/** This write-only control turns the camera vertically by the specified amount. + * The unit is undefined. A positive value moves the camera up, a negative value down. + * A value of zero does not cause motion. + */ +#define VIDEO_CID_TILT_RELATIVE (VIDEO_CID_CAMERA_CLASS_BASE + 5) + +/** When this control is set, the camera moves horizontally to the default position. */ +#define VIDEO_CID_PAN_RESET (VIDEO_CID_CAMERA_CLASS_BASE + 6) + +/** When this control is set, the camera moves vertically to the default position. */ +#define VIDEO_CID_TILT_RESET (VIDEO_CID_CAMERA_CLASS_BASE + 7) + +/** This control turns the camera horizontally to the specified position.
+ * Positive values move the camera to the right (clockwise when viewed from above), negative + * values to the left. Drivers should interpret the values as arc seconds, with valid values + * between -180 * 3600 and +180 * 3600 inclusive. + */ +#define VIDEO_CID_PAN_ABSOLUTE (VIDEO_CID_CAMERA_CLASS_BASE + 8) + +/** This control turns the camera vertically to the specified position. + * Positive values move the camera up, negative values down. Drivers should interpret the values as + * arc seconds, with valid values between -180 * 3600 and +180 * 3600 inclusive. + */ +#define VIDEO_CID_TILT_ABSOLUTE (VIDEO_CID_CAMERA_CLASS_BASE + 9) + +/** This control sets the focal point of the camera to the specified position. + * The unit is undefined. Positive values set the focus closer to the camera, negative values + * towards infinity. + */ +#define VIDEO_CID_FOCUS_ABSOLUTE (VIDEO_CID_CAMERA_CLASS_BASE + 10) + +/** This write-only control moves the focal point of the camera by the specified amount. + * The unit is undefined. Positive values move the focus closer to the camera, negative values + * towards infinity. + */ +#define VIDEO_CID_FOCUS_RELATIVE (VIDEO_CID_CAMERA_CLASS_BASE + 11) + +/** Enables continuous automatic focus adjustments. + * Manual focus adjustments while this control is on (set to 1) is undefined. + * Drivers should ignore such requests. + */ +#define VIDEO_CID_FOCUS_AUTO (VIDEO_CID_CAMERA_CLASS_BASE + 12) + +/** Specify the objective lens focal length as an absolute value. + * The zoom unit is driver-specific and its value should be a positive integer. + */ #define VIDEO_CID_ZOOM_ABSOLUTE (VIDEO_CID_CAMERA_CLASS_BASE + 13) +/** This write-only control sets the objective lens focal length relatively to the current value. + * Positive values move the zoom lens group towards the telephoto direction, negative values + * towards the wide-angle direction. The zoom unit is driver-specific. + */ +#define VIDEO_CID_ZOOM_RELATIVE (VIDEO_CID_CAMERA_CLASS_BASE + 14) + +/** Start a continuous zoom movement. + * Move the objective lens group at the specified speed until it reaches physical device limits or + * until an explicit request to stop the movement. A positive value moves the zoom lens group + * towards the telephoto direction. A value of zero stops the zoom lens group movement. + * A negative value moves the zoom lens group towards the wide-angle direction. + * The zoom speed unit is driver-specific. + */ +#define VIDEO_CID_ZOOM_CONTINUOUS (VIDEO_CID_CAMERA_CLASS_BASE + 15) + +/** Prevent video from being acquired by the camera. + * When this control is set to TRUE (1), no image can be captured by the camera. Common means to + * enforce privacy are mechanical obturation of the sensor and firmware image processing, + * but the device is not restricted to these methods. Devices that implement the privacy control + * must support read access and may support write access. + */ +#define VIDEO_CID_PRIVACY (VIDEO_CID_CAMERA_CLASS_BASE + 16) + +/** This control sets the camera's aperture to the specified value. + * The unit is undefined. Larger values open the iris wider, smaller values close it. + */ +#define VIDEO_CID_IRIS_ABSOLUTE (VIDEO_CID_CAMERA_CLASS_BASE + 17) + +/** This write-only control modifies the camera's aperture by the specified amount. + * The unit is undefined. Positive values open the iris one step further, negative values close + * it one step further. 
+ */ +#define VIDEO_CID_IRIS_RELATIVE (VIDEO_CID_CAMERA_CLASS_BASE + 18) + +/** Determines the automatic exposure compensation. + * It is effective only when @ref VIDEO_CID_EXPOSURE_AUTO control is set to + * @ref VIDEO_EXPOSURE_AUTO, @ref VIDEO_EXPOSURE_SHUTTER_PRIORITY or + * @ref VIDEO_EXPOSURE_APERTURE_PRIORITY. It is expressed in terms of Exposure Value (EV). + * Drivers should interpret the values as 0.001 EV units, where the value 1000 stands for +1 EV. + * Increasing the exposure compensation value is equivalent to decreasing the EV and will + * increase the amount of light at the image sensor. The camera performs the exposure + * compensation by adjusting absolute exposure time and/or aperture. + */ +#define VIDEO_CID_AUTO_EXPOSURE_BIAS (VIDEO_CID_CAMERA_CLASS_BASE + 19) + +/** Sets white balance to automatic, manual or a preset. + * The presets determine color temperature of the light as a hint to the camera for white balance + * adjustments resulting in most accurate color representation. The following white balance presets + * are listed in order of increasing color temperature. + */ +#define VIDEO_CID_AUTO_N_PRESET_WHITE_BALANCE (VIDEO_CID_CAMERA_CLASS_BASE + 20) +enum video_auto_n_preset_white_balance { + VIDEO_WHITE_BALANCE_MANUAL = 0, + VIDEO_WHITE_BALANCE_AUTO = 1, + VIDEO_WHITE_BALANCE_INCANDESCENT = 2, + VIDEO_WHITE_BALANCE_FLUORESCENT = 3, + VIDEO_WHITE_BALANCE_FLUORESCENT_H = 4, + VIDEO_WHITE_BALANCE_HORIZON = 5, + VIDEO_WHITE_BALANCE_DAYLIGHT = 6, + VIDEO_WHITE_BALANCE_FLASH = 7, + VIDEO_WHITE_BALANCE_CLOUDY = 8, + VIDEO_WHITE_BALANCE_SHADE = 9, + VIDEO_WHITE_BALANCE_GREYWORLD = 10, +}; + +/** Enables or disables the camera's wide dynamic range feature. + * This feature makes it possible to obtain clear images in situations where intensity of the + * illumination varies significantly throughout the scene, i.e. there are simultaneously very + * dark and very bright areas. It is most commonly realized in cameras by combining two subsequent + * frames with different exposure times. + */ +#define VIDEO_CID_WIDE_DYNAMIC_RANGE (VIDEO_CID_CAMERA_CLASS_BASE + 21) + +/** Enables or disables image stabilization. */ +#define VIDEO_CID_IMAGE_STABILIZATION (VIDEO_CID_CAMERA_CLASS_BASE + 22) + +/** Determines the ISO equivalent of an image sensor indicating the sensor's sensitivity to light. + * The numbers are expressed in linear scale. Applications should interpret the values as standard + * ISO values multiplied by 1000, e.g. control value 800 stands for ISO 0.8. Drivers will usually + * support only a subset of standard ISO values. + * Setting this control when @ref VIDEO_CID_ISO_SENSITIVITY_AUTO is not + * @ref VIDEO_ISO_SENSITIVITY_MANUAL is undefined. + * Drivers should ignore such requests. + */ +#define VIDEO_CID_ISO_SENSITIVITY (VIDEO_CID_CAMERA_CLASS_BASE + 23) + +/** Enables or disables automatic ISO sensitivity adjustments. */ +#define VIDEO_CID_ISO_SENSITIVITY_AUTO (VIDEO_CID_CAMERA_CLASS_BASE + 24) +enum video_iso_sensitivity_auto { + VIDEO_ISO_SENSITIVITY_MANUAL = 0, + VIDEO_ISO_SENSITIVITY_AUTO = 1, +};
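The unit conventions above translate into control values as follows. This sketch assumes the struct video_control {id, val} form of video_set_ctrl() from the control framework touched earlier in this patch:

#include <zephyr/drivers/video.h>
#include <zephyr/drivers/video-controls.h>

int apply_manual_iso(const struct device *dev)
{
	/* +1 EV of exposure bias, expressed in 0.001 EV units */
	struct video_control bias = {.id = VIDEO_CID_AUTO_EXPOSURE_BIAS, .val = 1000};
	/* ISO 100, expressed as the standard ISO value multiplied by 1000 */
	struct video_control iso = {.id = VIDEO_CID_ISO_SENSITIVITY, .val = 100 * 1000};
	/* Manual ISO is honored only once automatic ISO is switched off */
	struct video_control iso_auto = {
		.id = VIDEO_CID_ISO_SENSITIVITY_AUTO,
		.val = VIDEO_ISO_SENSITIVITY_MANUAL,
	};
	int ret;

	ret = video_set_ctrl(dev, &iso_auto);
	if (ret == 0) {
		ret = video_set_ctrl(dev, &iso);
	}
	if (ret == 0) {
		ret = video_set_ctrl(dev, &bias);
	}
	return ret;
}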
+ +/** Determines how the camera measures the amount of light available for the frame exposure. */ +#define VIDEO_CID_EXPOSURE_METERING (VIDEO_CID_CAMERA_CLASS_BASE + 25) +enum video_exposure_metering { + VIDEO_EXPOSURE_METERING_AVERAGE = 0, + VIDEO_EXPOSURE_METERING_CENTER_WEIGHTED = 1, + VIDEO_EXPOSURE_METERING_SPOT = 2, + VIDEO_EXPOSURE_METERING_MATRIX = 3, +}; + +/** This control selects scene programs optimized for common shooting scenes. + * Within these modes the camera determines the best exposure, aperture, focusing, light metering, + * white balance and equivalent sensitivity. The controls of those parameters are influenced by + * the scene mode control. An exact behavior in each mode is subject to the camera specification. + * When the scene mode feature is not used, this control should be set to + * @ref VIDEO_SCENE_MODE_NONE to make sure the other possibly related controls are accessible. + */ +#define VIDEO_CID_SCENE_MODE (VIDEO_CID_CAMERA_CLASS_BASE + 26) +enum video_scene_mode { + VIDEO_SCENE_MODE_NONE = 0, + VIDEO_SCENE_MODE_BACKLIGHT = 1, + VIDEO_SCENE_MODE_BEACH_SNOW = 2, + VIDEO_SCENE_MODE_CANDLE_LIGHT = 3, + VIDEO_SCENE_MODE_DAWN_DUSK = 4, + VIDEO_SCENE_MODE_FALL_COLORS = 5, + VIDEO_SCENE_MODE_FIREWORKS = 6, + VIDEO_SCENE_MODE_LANDSCAPE = 7, + VIDEO_SCENE_MODE_NIGHT = 8, + VIDEO_SCENE_MODE_PARTY_INDOOR = 9, + VIDEO_SCENE_MODE_PORTRAIT = 10, + VIDEO_SCENE_MODE_SPORTS = 11, + VIDEO_SCENE_MODE_SUNSET = 12, + VIDEO_SCENE_MODE_TEXT = 13, +}; + +/** This control locks or unlocks the automatic focus, exposure and white balance. + * The automatic adjustments can be paused independently by setting their lock bit to 1. + * The camera then retains the settings until the lock bit is cleared. + * When a given algorithm is not enabled, drivers return no error. + * An example might be an application setting bit @ref VIDEO_LOCK_WHITE_BALANCE when the + * @ref VIDEO_CID_AUTO_WHITE_BALANCE control is off (set to 0). + * The value of this control may be changed by exposure, white balance or focus controls. + */ +#define VIDEO_CID_3A_LOCK (VIDEO_CID_CAMERA_CLASS_BASE + 27) +enum video_3a_lock { + VIDEO_LOCK_EXPOSURE = BIT(0), + VIDEO_LOCK_WHITE_BALANCE = BIT(1), + VIDEO_LOCK_FOCUS = BIT(2), +};
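A sketch of the bitmask semantics: pausing exposure and white balance while leaving focus running, under the same video_set_ctrl() assumption as the previous example:

int freeze_exposure_and_wb(const struct device *dev)
{
	struct video_control lock = {
		.id = VIDEO_CID_3A_LOCK,
		.val = VIDEO_LOCK_EXPOSURE | VIDEO_LOCK_WHITE_BALANCE,
	};

	/* Clearing the bits later (val = 0) resumes the paused algorithms */
	return video_set_ctrl(dev, &lock);
}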
+ +/** Starts a single auto focus process. + * Setting this control when @ref VIDEO_CID_FOCUS_AUTO is on (set to 1) is undefined. + * Drivers should ignore such requests. + */ +#define VIDEO_CID_AUTO_FOCUS_START (VIDEO_CID_CAMERA_CLASS_BASE + 28) + +/** Aborts automatic focusing started with @ref VIDEO_CID_AUTO_FOCUS_START control. + * Setting this control when @ref VIDEO_CID_FOCUS_AUTO is on (set to 1) is undefined. + * Drivers should ignore such requests. + */ +#define VIDEO_CID_AUTO_FOCUS_STOP (VIDEO_CID_CAMERA_CLASS_BASE + 29) + +/** The automatic focus status. + * This is a read-only control. + * Setting @ref VIDEO_LOCK_FOCUS lock bit of the @ref VIDEO_CID_3A_LOCK control may stop updates + * of the @ref VIDEO_CID_AUTO_FOCUS_STATUS control value. + */ +#define VIDEO_CID_AUTO_FOCUS_STATUS (VIDEO_CID_CAMERA_CLASS_BASE + 30) +enum video_auto_focus_status { + VIDEO_AUTO_FOCUS_STATUS_IDLE = 0, + VIDEO_AUTO_FOCUS_STATUS_BUSY = BIT(0), + VIDEO_AUTO_FOCUS_STATUS_REACHED = BIT(1), + VIDEO_AUTO_FOCUS_STATUS_FAILED = BIT(2), +}; + +/** Determines auto focus distance range for which lens may be adjusted. */ +#define VIDEO_CID_AUTO_FOCUS_RANGE (VIDEO_CID_CAMERA_CLASS_BASE + 31) +enum video_auto_focus_range { + VIDEO_AUTO_FOCUS_RANGE_AUTO = 0, + VIDEO_AUTO_FOCUS_RANGE_NORMAL = 1, + VIDEO_AUTO_FOCUS_RANGE_MACRO = 2, + VIDEO_AUTO_FOCUS_RANGE_INFINITY = 3, +}; + +/** This control turns the camera horizontally at the specified speed. + * The unit is undefined. A positive value moves the camera to the right (clockwise when viewed + * from above), a negative value to the left. A value of zero stops the motion if one is in + * progress and has no effect otherwise. + */ +#define VIDEO_CID_PAN_SPEED (VIDEO_CID_CAMERA_CLASS_BASE + 32) + +/** This control turns the camera vertically at the specified speed. + * The unit is undefined. A positive value moves the camera up, a negative value down. + * A value of zero stops the motion if one is in progress and has no effect otherwise. + */ +#define VIDEO_CID_TILT_SPEED (VIDEO_CID_CAMERA_CLASS_BASE + 33) + +/** This read-only control describes the camera position on the device. + * It reports where the camera is installed, i.e. its mounting position on the device. + * This control is particularly meaningful for devices which have a well defined orientation, + * such as phones, laptops and portable devices since the control is expressed as a position + * relative to the device's intended usage orientation. + * Camera sensors not directly attached to the device, or that can move freely, are said to have + * the @ref VIDEO_CAMERA_ORIENTATION_EXTERNAL orientation. + */ +#define VIDEO_CID_CAMERA_ORIENTATION (VIDEO_CID_CAMERA_CLASS_BASE + 34) +enum video_camera_orientation { + /** Camera installed on the user-facing side of a phone/tablet/laptop device */ + VIDEO_CAMERA_ORIENTATION_FRONT = 0, + /** Camera installed on the opposite side of the user */ + VIDEO_CAMERA_ORIENTATION_BACK = 1, + /** Camera sensors not directly attached to the device or that can move freely */ + VIDEO_CAMERA_ORIENTATION_EXTERNAL = 2, +}; + +/** This read-only control describes the orientation of the sensor in the device. + * The value is the rotation correction in degrees in the counter-clockwise direction to be + * applied to the captured images once stored in memory to compensate for the camera sensor + * mounting rotation. + */ +#define VIDEO_CID_CAMERA_SENSOR_ROTATION (VIDEO_CID_CAMERA_CLASS_BASE + 35) + +/** Change the sensor HDR mode. + * An HDR picture is obtained by merging two captures of the same scene using two different + * exposure periods. HDR mode describes the way these two captures are merged in the sensor. + * As modes differ for each sensor, menu items are not standardized by this control and have + * hardware-specific values. + */ +#define VIDEO_CID_HDR_SENSOR_MODE (VIDEO_CID_CAMERA_CLASS_BASE + 36) + /** * @} */
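The two orientation controls are read-only; a consumer such as a display path might query the rotation correction as sketched below, assuming a video_get_ctrl() counterpart with the same struct video_control {id, val} layout:

int get_rotation_correction(const struct device *dev, int32_t *degrees)
{
	struct video_control rot = {.id = VIDEO_CID_CAMERA_SENSOR_ROTATION};
	int ret;

	ret = video_get_ctrl(dev, &rot);
	if (ret == 0) {
		/* Counter-clockwise correction to apply to the captured images */
		*degrees = rot.val;
	}
	return ret;
}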
diff --git a/include/zephyr/drivers/video/sw_pipeline.h b/include/zephyr/drivers/video/sw_pipeline.h new file mode 100644 index 000000000000..a08c3f6df686 --- /dev/null +++ b/include/zephyr/drivers/video/sw_pipeline.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2025 tinyVision.ai Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_INCLUDE_DRIVERS_VIDEO_SW_PIPELINE_H_ +#define ZEPHYR_INCLUDE_DRIVERS_VIDEO_SW_PIPELINE_H_ + +#include <zephyr/device.h> +#include <zephyr/pixel/stream.h> + +/** + * @brief Set the source device that feeds data into the pipeline. + * + * This submits a video device to use as source, required to make API + * calls such as @ref video_enum_frmival() work. + * + * @param dev The video-sw-pipeline device that is fed from a source device. + * @param source_dev The video device that feeds the data. + */ +void video_sw_pipeline_set_source(const struct device *dev, const struct device *source_dev); + +/** + * @brief Configure the zephyr,video-sw-pipeline device. + * + * The input format will be applied to the device preceding it as configured in the devicetree. + * The input and output formats must match those of the pixel pipeline. + * + * @param dev The video-sw-pipeline device that runs this pipeline. + * @param strm Pixel stream structs chained together as produced by @ref pixel_stream(). + * @param pixfmt_in Input video pixel format. + * @param width_in Input width. + * @param height_in Input height. + * @param pixfmt_out Output video pixel format. + * @param width_out Output width. + * @param height_out Output height. + */ +void video_sw_pipeline_set_pipeline(const struct device *dev, struct pixel_stream *strm, + uint32_t pixfmt_in, uint16_t width_in, uint16_t height_in, + uint32_t pixfmt_out, uint16_t width_out, uint16_t height_out); + +/** + * @brief Set the loader function that pushes data from the video frame to the device. + * + * The pixel library performs operations on typically small intermediate buffers of just one or a + * few lines, but some video operations require the full frame or most of it. The first element + * of the pipeline is a special case as it has the entire input frame available. + * + * This setter submits such a loader function performing this initial frame-to-stream operation. + * + * @param dev The video-sw-pipeline device that runs this loader function. + * @param fn The function that has a full frame available and loads data into the stream + * line-by-line. + */ +void video_sw_pipeline_set_loader(const struct device *dev, + void (*fn)(struct pixel_stream *s, const uint8_t *b, size_t n)); + +#endif /* ZEPHYR_INCLUDE_DRIVERS_VIDEO_SW_PIPELINE_H_ */ diff --git a/include/zephyr/pixel/bayer.h b/include/zephyr/pixel/bayer.h new file mode 100644 index 000000000000..08754987b328 --- /dev/null +++ b/include/zephyr/pixel/bayer.h @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2025 tinyVision.ai Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_INCLUDE_PIXEL_BAYER_H_ +#define ZEPHYR_INCLUDE_PIXEL_BAYER_H_ + +#include +#include + +#include + +/** + * @brief Convert a line from RGGB8 to RGB24 with 3x3 method + * + * @param i0 Buffer of the input row number 0 in bayer format (1 byte per pixel). + * @param i1 Buffer of the input row number 1 in bayer format (1 byte per pixel). + * @param i2 Buffer of the input row number 2 in bayer format (1 byte per pixel). + * @param rgb24 Buffer of the output row in RGB24 format (3 bytes per pixel). + * @param width Width of the lines in number of pixels. + */ +void pixel_rggb8line_to_rgb24line_3x3(const uint8_t *i0, const uint8_t *i1, const uint8_t *i2, + uint8_t *rgb24, uint16_t width); +/** + * @brief Convert a line from GRBG8 to RGB24 with 3x3 method + * @copydetails pixel_rggb8line_to_rgb24line_3x3() + */ +void pixel_grbg8line_to_rgb24line_3x3(const uint8_t *i0, const uint8_t *i1, const uint8_t *i2, + uint8_t *rgb24, uint16_t width); +/** + * @brief Convert a line from BGGR8 to RGB24 with 3x3 method + * @copydetails pixel_rggb8line_to_rgb24line_3x3() + */ +void pixel_bggr8line_to_rgb24line_3x3(const uint8_t *i0, const uint8_t *i1, const uint8_t *i2, + uint8_t *rgb24, uint16_t width); +/** + * @brief Convert a line from GBRG8 to RGB24 with 3x3 method + * @copydetails pixel_rggb8line_to_rgb24line_3x3() + */ +void pixel_gbrg8line_to_rgb24line_3x3(const uint8_t *i0, const uint8_t *i1, const uint8_t *i2, + uint8_t *rgb24, uint16_t width); + +/** + * @brief Convert a line from RGGB8 to RGB24 with 2x2 method + * + * @param i0 Buffer of the input row number 0 in bayer format (1 byte per pixel). + * @param i1 Buffer of the input row number 1 in bayer format (1 byte per pixel). + * @param rgb24 Buffer of the output row in RGB24 format (3 bytes per pixel). + * @param width Width of the lines in number of pixels.
+ */ +void pixel_rggb8line_to_rgb24line_2x2(const uint8_t *i0, const uint8_t *i1, uint8_t *rgb24, + uint16_t width); +/** + * @brief Convert a line from GBRG8 to RGB24 with 2x2 method + * @copydetails pixel_rggb8line_to_rgb24line_2x2() + */ +void pixel_gbrg8line_to_rgb24line_2x2(const uint8_t *i0, const uint8_t *i1, uint8_t *rgb24, + uint16_t width); +/** + * @brief Convert a line from BGGR8 to RGB24 with 2x2 method + * @copydetails pixel_rggb8line_to_rgb24line_2x2() + */ +void pixel_bggr8line_to_rgb24line_2x2(const uint8_t *i0, const uint8_t *i1, uint8_t *rgb24, + uint16_t width); +/** + * @brief Convert a line from GRBG8 to RGB24 with 2x2 method + * @copydetails pixel_rggb8line_to_rgb24line_2x2() + */ +void pixel_grbg8line_to_rgb24line_2x2(const uint8_t *i0, const uint8_t *i1, uint8_t *rgb24, + uint16_t width); + +/** + * @brief Define a stream converter from RGGB8 to RGB24 with the 3x3 method + * + * @param name The symbol of the @ref pixel_stream that will be defined. + * @param width The total width of the input frame in number of pixels. + * @param height The total height of the input frame in number of pixels. + */ +#define PIXEL_RGGB8STREAM_TO_RGB24STREAM_3X3(name, width, height) \ + PIXEL_BAYER_DEFINE(name, pixel_rggb8stream_to_rgb24stream_3x3, (width), (height), 3) +/** + * @brief Define a stream converter from GRBG8 to RGB24 with the 3x3 method + * @copydetails PIXEL_RGGB8STREAM_TO_RGB24STREAM_3X3() + */ +#define PIXEL_GRBG8STREAM_TO_RGB24STREAM_3X3(name, width, height) \ + PIXEL_BAYER_DEFINE(name, pixel_grbg8stream_to_rgb24stream_3x3, (width), (height), 3) +/** + * @brief Define a stream converter from BGGR8 to RGB24 with the 3x3 method + * @copydetails PIXEL_RGGB8STREAM_TO_RGB24STREAM_3X3() + */ +#define PIXEL_BGGR8STREAM_TO_RGB24STREAM_3X3(name, width, height) \ + PIXEL_BAYER_DEFINE(name, pixel_bggr8stream_to_rgb24stream_3x3, (width), (height), 3) +/** + * @brief Define a stream converter from GBRG8 to RGB24 with the 3x3 method + * @copydetails PIXEL_RGGB8STREAM_TO_RGB24STREAM_3X3() + */ +#define PIXEL_GBRG8STREAM_TO_RGB24STREAM_3X3(name, width, height) \ + PIXEL_BAYER_DEFINE(name, pixel_gbrg8stream_to_rgb24stream_3x3, (width), (height), 3) +/** + * @brief Define a stream converter from RGGB8 to RGB24 with the 2x2 method + * @copydetails PIXEL_RGGB8STREAM_TO_RGB24STREAM_3X3() + */ +#define PIXEL_RGGB8STREAM_TO_RGB24STREAM_2X2(name, width, height) \ + PIXEL_BAYER_DEFINE(name, pixel_rggb8stream_to_rgb24stream_2x2, (width), (height), 2) +/** + * @brief Define a stream converter from GBRG8 to RGB24 with the 2x2 method + * @copydetails PIXEL_RGGB8STREAM_TO_RGB24STREAM_3X3() + */ +#define PIXEL_GBRG8STREAM_TO_RGB24STREAM_2X2(name, width, height) \ + PIXEL_BAYER_DEFINE(name, pixel_gbrg8stream_to_rgb24stream_2x2, (width), (height), 2) +/** + * @brief Define a stream converter from BGGR8 to RGB24 with the 2x2 method + * @copydetails PIXEL_RGGB8STREAM_TO_RGB24STREAM_3X3() + */ +#define PIXEL_BGGR8STREAM_TO_RGB24STREAM_2X2(name, width, height) \ + PIXEL_BAYER_DEFINE(name, pixel_bggr8stream_to_rgb24stream_2x2, (width), (height), 2) +/** + * @brief Define a stream converter from GRBG8 to RGB24 with the 2x2 method + * @copydetails PIXEL_RGGB8STREAM_TO_RGB24STREAM_3X3() + */ +#define PIXEL_GRBG8STREAM_TO_RGB24STREAM_2X2(name, width, height) \ + PIXEL_BAYER_DEFINE(name, pixel_grbg8stream_to_rgb24stream_2x2, (width), (height), 2) + +/** @cond INTERNAL_HIDDEN */ +#define PIXEL_BAYER_DEFINE(name, fn, width, height, window_height) \ + PIXEL_STREAM_DEFINE((name), (fn), (width), (height), (width), 
(window_height) * (width)) +void pixel_rggb8stream_to_rgb24stream_3x3(struct pixel_stream *strm); +void pixel_grbg8stream_to_rgb24stream_3x3(struct pixel_stream *strm); +void pixel_bggr8stream_to_rgb24stream_3x3(struct pixel_stream *strm); +void pixel_gbrg8stream_to_rgb24stream_3x3(struct pixel_stream *strm); +void pixel_rggb8stream_to_rgb24stream_2x2(struct pixel_stream *strm); +void pixel_gbrg8stream_to_rgb24stream_2x2(struct pixel_stream *strm); +void pixel_bggr8stream_to_rgb24stream_2x2(struct pixel_stream *strm); +void pixel_grbg8stream_to_rgb24stream_2x2(struct pixel_stream *strm); +/** @endcond */ + +#endif /* ZEPHYR_INCLUDE_PIXEL_BAYER_H_ */ diff --git a/include/zephyr/pixel/formats.h b/include/zephyr/pixel/formats.h new file mode 100644 index 000000000000..a7ac8afe396e --- /dev/null +++ b/include/zephyr/pixel/formats.h @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2025 tinyVision.ai Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_INCLUDE_PIXEL_FORMATS_H_ +#define ZEPHYR_INCLUDE_PIXEL_FORMATS_H_ + +#include +#include + +#include +#include +#include + +/** + * @brief Get the luminance (luma channel) of an RGB24 pixel. + * + * @param rgb24 Pointer to an RGB24 pixel: red, green, blue channels. + */ +uint8_t pixel_rgb24_get_luma_bt709(const uint8_t rgb24[3]); + +/** + * @brief Convert a line of pixel data from RGB332 to RGB24. + * + * See @ref video_pixel_formats for the definition of the input and output formats. + * + * @param src Buffer of the input line in the format, @c XXX in @c pixel_XXXline_to_YYYline(). + * @param dst Buffer of the output line in the format, @c YYY in @c pixel_XXXline_to_YYYline(). + * @param width Width of the lines in number of pixels. + */ +void pixel_rgb332line_to_rgb24line(const uint8_t *src, uint8_t *dst, uint16_t width); +/** + * @brief Convert a line of pixel data from RGB24 to RGB332 little-endian. + * @copydetails pixel_rgb332line_to_rgb24line() + */ +void pixel_rgb24line_to_rgb332line(const uint8_t *src, uint8_t *dst, uint16_t width); +/** + * @brief Convert a line of pixel data from RGB565 little-endian to RGB24. + * @copydetails pixel_rgb332line_to_rgb24line() + */ +void pixel_rgb565leline_to_rgb24line(const uint8_t *src, uint8_t *dst, uint16_t width); +/** + * @brief Convert a line of pixel data from RGB565 big-endian to RGB24. + * @copydetails pixel_rgb332line_to_rgb24line() + */ +void pixel_rgb565beline_to_rgb24line(const uint8_t *src, uint8_t *dst, uint16_t width); +/** + * @brief Convert a line of pixel data from RGB24 to RGB565 little-endian. + * @copydetails pixel_rgb332line_to_rgb24line() + */ +void pixel_rgb24line_to_rgb565leline(const uint8_t *src, uint8_t *dst, uint16_t width); +/** + * @brief Convert a line of pixel data from RGB24 to RGB565 big-endian. + * @copydetails pixel_rgb332line_to_rgb24line() + */ +void pixel_rgb24line_to_rgb565beline(const uint8_t *src, uint8_t *dst, uint16_t width); +/** + * @brief Convert a line of pixel data from YUYV to RGB24 (BT.709 coefficients). + * @copydetails pixel_rgb332line_to_rgb24line() + */ +void pixel_yuyvline_to_rgb24line_bt709(const uint8_t *src, uint8_t *dst, uint16_t width); +/** + * @brief Convert a line of pixel data from RGB24 to YUYV (BT.709 coefficients). + * @copydetails pixel_rgb332line_to_rgb24line() + */ +void pixel_rgb24line_to_yuyvline_bt709(const uint8_t *src, uint8_t *dst, uint16_t width); +/** + * @brief Convert a line of pixel data from RGB24 to YUV24 (BT.709 coefficients). 
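+ *
+ * A minimal usage sketch (the buffer contents and the 4-pixel width are illustrative):
+ * @code{.c}
+ * uint8_t rgb24[4 * 3] = {255, 0, 0, 0, 255, 0, 0, 0, 255, 255, 255, 255};
+ * uint8_t yuv24[4 * 3];
+ *
+ * pixel_rgb24line_to_yuv24line_bt709(rgb24, yuv24, 4);
+ * @endcode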
+ * @copydetails pixel_rgb332line_to_rgb24line() + */ +void pixel_rgb24line_to_yuv24line_bt709(const uint8_t *src, uint8_t *dst, uint16_t width); +/** + * @brief Convert a line of pixel data from YUV24 to RGB24 (BT.709 coefficients). + * @copydetails pixel_rgb332line_to_rgb24line() + */ +void pixel_yuv24line_to_rgb24line_bt709(const uint8_t *src, uint8_t *dst, uint16_t width); +/** + * @brief Convert a line of pixel data from YUYV to YUV24 + * @copydetails pixel_rgb332line_to_rgb24line() + */ +void pixel_yuyvline_to_yuv24line(const uint8_t *src, uint8_t *dst, uint16_t width); +/** + * @brief Convert a line of pixel data from YUV24 to YUYV + * @copydetails pixel_rgb332line_to_rgb24line() + */ +void pixel_yuv24line_to_yuyvline(const uint8_t *src, uint8_t *dst, uint16_t width); + +/** + * @brief Convert a stream of pixel data from RGB332 to RGB24. + * + * @param name The symbol of the @ref pixel_stream that will be defined. + * @param width The total width of the input frame in number of pixels. + * @param height The total height of the input frame in number of pixels. + */ +#define PIXEL_RGB332STREAM_TO_RGB24STREAM(name, width, height) \ + PIXEL_FORMAT_DEFINE(name, pixel_rgb332stream_to_rgb24stream, (width), (height), 1) +/** + * @brief Convert a stream of pixel data from RGB24 to RGB332 + * @copydetails PIXEL_RGB332STREAM_TO_RGB24STREAM() + */ +#define PIXEL_RGB24STREAM_TO_RGB332STREAM(name, width, height) \ + PIXEL_FORMAT_DEFINE(name, pixel_rgb24stream_to_rgb332stream, (width), (height), 3) +/** + * @brief Convert a stream of pixel data from RGB565 little-endian to RGB24 + * @copydetails PIXEL_RGB332STREAM_TO_RGB24STREAM() + */ +#define PIXEL_RGB565LESTREAM_TO_RGB24STREAM(name, width, height) \ + PIXEL_FORMAT_DEFINE(name, pixel_rgb565lestream_to_rgb24stream, (width), (height), 2) +/** + * @brief Convert a stream of pixel data from RGB24 to RGB565 little-endian + * @copydetails PIXEL_RGB332STREAM_TO_RGB24STREAM() + */ +#define PIXEL_RGB24STREAM_TO_RGB565LESTREAM(name, width, height) \ + PIXEL_FORMAT_DEFINE(name, pixel_rgb24stream_to_rgb565lestream, (width), (height), 3) +/** + * @brief Convert a stream of pixel data from RGB565 big-endian to RGB24 + * @copydetails PIXEL_RGB332STREAM_TO_RGB24STREAM() + */ +#define PIXEL_RGB565BESTREAM_TO_RGB24STREAM(name, width, height) \ + PIXEL_FORMAT_DEFINE(name, pixel_rgb565bestream_to_rgb24stream, (width), (height), 2) +/** + * @brief Convert a stream of pixel data from RGB24 to RGB565 big-endian + * @copydetails PIXEL_RGB332STREAM_TO_RGB24STREAM() + */ +#define PIXEL_RGB24STREAM_TO_RGB565BESTREAM(name, width, height) \ + PIXEL_FORMAT_DEFINE(name, pixel_rgb24stream_to_rgb565bestream, (width), (height), 3) +/** + * @brief Convert a stream of pixel data from YUYV to RGB24 (BT.709 coefficients) + * @copydetails PIXEL_RGB332STREAM_TO_RGB24STREAM() + */ +#define PIXEL_YUYVSTREAM_TO_RGB24STREAM_BT709(name, width, height) \ + PIXEL_FORMAT_DEFINE(name, pixel_yuyvstream_to_rgb24stream_bt709, (width), (height), 2) +/** + * @brief Convert a stream of pixel data from RGB24 to YUYV (BT.709 coefficients) + * @copydetails PIXEL_RGB332STREAM_TO_RGB24STREAM() + */ +#define PIXEL_RGB24STREAM_TO_YUYVSTREAM_BT709(name, width, height) \ + PIXEL_FORMAT_DEFINE(name, pixel_rgb24stream_to_yuyvstream_bt709, (width), (height), 3) +/** + * @brief Convert a stream of pixel data from YUV24 to RGB24 (BT.709 coefficients) + * @copydetails PIXEL_RGB332STREAM_TO_RGB24STREAM() + */ +#define PIXEL_YUV24STREAM_TO_RGB24STREAM_BT709(name, width, height) \ + PIXEL_FORMAT_DEFINE(name, 
pixel_yuv24stream_to_rgb24stream_bt709, (width), (height), 3) +/** + * @brief Convert a stream of pixel data from RGB24 to YUV24 (BT.709 coefficients) + * @copydetails PIXEL_RGB332STREAM_TO_RGB24STREAM() + */ +#define PIXEL_RGB24STREAM_TO_YUV24STREAM_BT709(name, width, height) \ + PIXEL_FORMAT_DEFINE(name, pixel_rgb24stream_to_yuv24stream_bt709, (width), (height), 3) +/** + * @brief Convert a stream of pixel data from YUYV to YUV24 + * @copydetails PIXEL_RGB332STREAM_TO_RGB24STREAM() + */ +#define PIXEL_YUYVSTREAM_TO_YUV24STREAM(name, width, height) \ + PIXEL_FORMAT_DEFINE(name, pixel_yuyvstream_to_yuv24stream, (width), (height), 2) +/** + * @brief Convert a stream of pixel data from YUV24 to YUYV + * @copydetails PIXEL_RGB332STREAM_TO_RGB24STREAM() + */ +#define PIXEL_YUV24STREAM_TO_YUYVSTREAM(name, width, height) \ + PIXEL_FORMAT_DEFINE(name, pixel_yuv24stream_to_yuyvstream, (width), (height), 3) + +/** @cond INTERNAL_HIDDEN */ +#define PIXEL_FORMAT_DEFINE(name, fn, width, height, bytes_per_pixel) \ + PIXEL_STREAM_DEFINE(name, (fn), (width), (height), \ + (width) * (bytes_per_pixel), 1 * (width) * (bytes_per_pixel)) +void pixel_rgb24stream_to_rgb332stream(struct pixel_stream *strm); +void pixel_rgb332stream_to_rgb24stream(struct pixel_stream *strm); +void pixel_rgb565lestream_to_rgb24stream(struct pixel_stream *strm); +void pixel_rgb24stream_to_rgb565lestream(struct pixel_stream *strm); +void pixel_rgb565bestream_to_rgb24stream(struct pixel_stream *strm); +void pixel_rgb24stream_to_rgb565bestream(struct pixel_stream *strm); +void pixel_yuyvstream_to_rgb24stream_bt709(struct pixel_stream *strm); +void pixel_rgb24stream_to_yuyvstream_bt709(struct pixel_stream *strm); +void pixel_yuv24stream_to_rgb24stream_bt709(struct pixel_stream *strm); +void pixel_rgb24stream_to_yuv24stream_bt709(struct pixel_stream *strm); +void pixel_yuyvstream_to_yuv24stream(struct pixel_stream *strm); +void pixel_yuv24stream_to_yuyvstream(struct pixel_stream *strm); +/** @endcond */ + +#endif /* ZEPHYR_INCLUDE_PIXEL_FORMATS_H_ */ diff --git a/include/zephyr/pixel/kernel.h b/include/zephyr/pixel/kernel.h new file mode 100644 index 000000000000..fa26e19fad8e --- /dev/null +++ b/include/zephyr/pixel/kernel.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2025 tinyVision.ai Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_INCLUDE_PIXEL_KERNEL_H_ +#define ZEPHYR_INCLUDE_PIXEL_KERNEL_H_ + +#include +#include + +#include + +/** + * @brief Define a 3x3 identity kernel operation for RGB24 data. + * + * @param name The symbol of the @ref pixel_stream that will be defined. + * @param width The total width of the input frame in number of pixels. + * @param height The total height of the input frame in number of pixels. + */ +#define PIXEL_IDENTITY_RGB24STREAM_3X3(name, width, height) \ + PIXEL_KERNEL_3X3_DEFINE(name, pixel_identity_rgb24stream_3x3, (width), (height), 3) +/** + * @brief Define a 5x5 identity kernel operation for RGB24 data. + * @copydetails PIXEL_RGGB8STREAM_TO_RGB24STREAM_3X3() + */ +#define PIXEL_IDENTITY_RGB24STREAM_5X5(name, width, height) \ + PIXEL_KERNEL_5X5_DEFINE(name, pixel_identity_rgb24stream_5x5, (width), (height), 3) +/** + * @brief Define a 3x3 sharpen kernel operation for RGB24 data. + * @copydetails PIXEL_RGGB8STREAM_TO_RGB24STREAM_3X3() + */ +#define PIXEL_SHARPEN_RGB24STREAM_3X3(name, width, height) \ + PIXEL_KERNEL_3X3_DEFINE(name, pixel_sharpen_rgb24stream_3x3, (width), (height), 3) +/** + * @brief Define a 3x3 edge detection kernel operation for RGB24 data. 
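+ *
+ * A sketch of declaring one such step (the step name and dimensions are illustrative):
+ * @code{.c}
+ * PIXEL_EDGEDETECT_RGB24STREAM_3X3(step_edges, 320, 240);
+ * @endcode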
+ * @copydetails PIXEL_RGGB8STREAM_TO_RGB24STREAM_3X3()
+ */
+#define PIXEL_EDGEDETECT_RGB24STREAM_3X3(name, width, height) \
+ PIXEL_KERNEL_3X3_DEFINE(name, pixel_edgedetect_rgb24stream_3x3, (width), (height), 3)
+/**
+ * @brief Define a 5x5 Gaussian blur kernel operation for RGB24 data.
+ * @copydetails PIXEL_RGGB8STREAM_TO_RGB24STREAM_3X3()
+ */
+#define PIXEL_GAUSSIANBLUR_RGB24STREAM_5X5(name, width, height) \
+ PIXEL_KERNEL_5X5_DEFINE(name, pixel_gaussianblur_rgb24stream_5x5, (width), (height), 3)
+/**
+ * @brief Define a 3x3 Gaussian blur kernel operation for RGB24 data.
+ * @copydetails PIXEL_RGGB8STREAM_TO_RGB24STREAM_3X3()
+ */
+#define PIXEL_GAUSSIANBLUR_RGB24STREAM_3X3(name, width, height) \
+ PIXEL_KERNEL_3X3_DEFINE(name, pixel_gaussianblur_rgb24stream_3x3, (width), (height), 3)
+/**
+ * @brief Define a 5x5 unsharp kernel operation for RGB24 data to sharpen it.
+ * @copydetails PIXEL_RGGB8STREAM_TO_RGB24STREAM_3X3()
+ */
+#define PIXEL_UNSHARP_RGB24STREAM_5X5(name, width, height) \
+ PIXEL_KERNEL_5X5_DEFINE(name, pixel_unsharp_rgb24stream_5x5, (width), (height), 3)
+/**
+ * @brief Define a 3x3 median kernel operation for RGB24 data.
+ * @copydetails PIXEL_RGGB8STREAM_TO_RGB24STREAM_3X3()
+ */
+#define PIXEL_MEDIAN_RGB24STREAM_3X3(name, width, height) \
+ PIXEL_KERNEL_3X3_DEFINE(name, pixel_median_rgb24stream_3x3, (width), (height), 3)
+/**
+ * @brief Define a 5x5 median kernel operation for RGB24 data.
+ * @copydetails PIXEL_RGGB8STREAM_TO_RGB24STREAM_3X3()
+ */
+#define PIXEL_MEDIAN_RGB24STREAM_5X5(name, width, height) \
+ PIXEL_KERNEL_5X5_DEFINE(name, pixel_median_rgb24stream_5x5, (width), (height), 3)
+
+/**
+ * @brief Apply a 3x3 identity kernel to an RGB24 input window and produce one RGB24 line.
+ *
+ * @param in Array of input line buffers to convert.
+ * @param out Pointer to the output line converted.
+ * @param width Width of the input and output lines in pixels.
+ */
+void pixel_identity_rgb24line_3x3(const uint8_t *in[3], uint8_t *out, uint16_t width);
+/**
+ * @brief Apply a 5x5 identity kernel to an RGB24 input window and produce one RGB24 line.
+ * @copydetails pixel_identity_rgb24line_3x3()
+ */
+void pixel_identity_rgb24line_5x5(const uint8_t *in[5], uint8_t *out, uint16_t width);
+/**
+ * @brief Apply a 3x3 sharpen kernel to an RGB24 input window and produce one RGB24 line.
+ * @copydetails pixel_identity_rgb24line_3x3()
+ */
+void pixel_sharpen_rgb24line_3x3(const uint8_t *in[3], uint8_t *out, uint16_t width);
+/**
+ * @brief Apply a 3x3 edge detection kernel to an RGB24 input window and produce one RGB24 line.
+ * @copydetails pixel_identity_rgb24line_3x3()
+ */
+void pixel_edgedetect_rgb24line_3x3(const uint8_t *in[3], uint8_t *out, uint16_t width);
+/**
+ * @brief Apply a 3x3 Gaussian blur kernel to an RGB24 input window and produce one RGB24 line.
+ * @copydetails pixel_identity_rgb24line_3x3()
+ */
+void pixel_gaussianblur_rgb24line_3x3(const uint8_t *in[3], uint8_t *out, uint16_t width);
+/**
+ * @brief Apply a 5x5 unsharp kernel to an RGB24 input window and produce one RGB24 line.
+ * @copydetails pixel_identity_rgb24line_3x3()
+ */
+void pixel_unsharp_rgb24line_5x5(const uint8_t *in[5], uint8_t *out, uint16_t width);
+/**
+ * @brief Apply a 3x3 median denoise kernel to an RGB24 input window and produce one RGB24 line.
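+ *
+ * A sketch of calling a 3x3 kernel line function directly, passing a window of 3 input lines
+ * (the 16-pixel width is illustrative):
+ * @code{.c}
+ * uint8_t l0[16 * 3], l1[16 * 3], l2[16 * 3], out[16 * 3];
+ * const uint8_t *window[3] = {l0, l1, l2};
+ *
+ * pixel_median_rgb24line_3x3(window, out, 16);
+ * @endcode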
+ * @copydetails pixel_identity_rgb24line_3x3()
+ */
+void pixel_median_rgb24line_3x3(const uint8_t *in[3], uint8_t *out, uint16_t width);
+/**
+ * @brief Apply a 5x5 median denoise kernel to an RGB24 input window and produce one RGB24 line.
+ * @copydetails pixel_identity_rgb24line_3x3()
+ */
+void pixel_median_rgb24line_5x5(const uint8_t *in[5], uint8_t *out, uint16_t width);
+
+/** @cond INTERNAL_HIDDEN */
+#define PIXEL_KERNEL_5X5_DEFINE(name, fn, width, height, bytes_per_pixel) \
+ PIXEL_STREAM_DEFINE((name), (fn), (width), (height), (width) * (bytes_per_pixel), \
+ 5 * (width) * (bytes_per_pixel))
+#define PIXEL_KERNEL_3X3_DEFINE(name, fn, width, height, bytes_per_pixel) \
+ PIXEL_STREAM_DEFINE((name), (fn), (width), (height), (width) * (bytes_per_pixel), \
+ 3 * (width) * (bytes_per_pixel))
+void pixel_identity_rgb24stream_3x3(struct pixel_stream *strm);
+void pixel_identity_rgb24stream_5x5(struct pixel_stream *strm);
+void pixel_sharpen_rgb24stream_3x3(struct pixel_stream *strm);
+void pixel_edgedetect_rgb24stream_3x3(struct pixel_stream *strm);
+void pixel_gaussianblur_rgb24stream_3x3(struct pixel_stream *strm);
+void pixel_gaussianblur_rgb24stream_5x5(struct pixel_stream *strm);
+void pixel_unsharp_rgb24stream_5x5(struct pixel_stream *strm);
+void pixel_median_rgb24stream_3x3(struct pixel_stream *strm);
+void pixel_median_rgb24stream_5x5(struct pixel_stream *strm);
+/** @endcond */
+
+#endif /* ZEPHYR_INCLUDE_PIXEL_KERNEL_H_ */
diff --git a/include/zephyr/pixel/print.h b/include/zephyr/pixel/print.h
new file mode 100644
index 000000000000..f7925b8bdbf9
--- /dev/null
+++ b/include/zephyr/pixel/print.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2025 tinyVision.ai Inc.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef ZEPHYR_INCLUDE_PIXEL_PRINT_H_
+#define ZEPHYR_INCLUDE_PIXEL_PRINT_H_
+
+#include
+#include
+
+#include
+
+/**
+ * @brief Show a frame in the RGB24 format using 256COLOR terminal escape codes
+ *
+ * The 256COLOR variants use the 8-bit 256COLOR palette: lower quality, but more compact and
+ * more widely supported. The TRUECOLOR variants use true 24-bit RGB24 colors, available on a
+ * wide range of terminals.
+ *
+ * @param buf Input buffer to display in the terminal.
+ * @param size Size of the input buffer in bytes.
+ * @param width Width of the input buffer in number of pixels.
+ * @param height Maximum number of rows to print.
+ */
+void pixel_print_rgb24frame_256color(const uint8_t *buf, size_t size, uint16_t width,
+ uint16_t height);
+/**
+ * @brief Show a frame in the RGB24 format using TRUECOLOR terminal escape codes
+ * @copydetails pixel_print_rgb24frame_256color()
+ */
+void pixel_print_rgb24frame_truecolor(const uint8_t *buf, size_t size, uint16_t width,
+ uint16_t height);
+/**
+ * @brief Show a frame in the RGB565 little-endian format using 256COLOR terminal escape codes
+ * @copydetails pixel_print_rgb24frame_256color()
+ */
+void pixel_print_rgb565leframe_256color(const uint8_t *buf, size_t size, uint16_t width,
+ uint16_t height);
+/**
+ * @brief Show a frame in the RGB565 little-endian format using TRUECOLOR terminal escape codes
+ * @copydetails pixel_print_rgb24frame_256color()
+ */
+void pixel_print_rgb565leframe_truecolor(const uint8_t *buf, size_t size, uint16_t width,
+ uint16_t height);
+/**
+ * @brief Show a frame in the RGB565 big-endian format using 256COLOR terminal escape codes
+ * @copydetails pixel_print_rgb24frame_256color()
+ */
+void pixel_print_rgb565beframe_256color(const uint8_t *buf, size_t size, uint16_t width,
+ uint16_t height);
+/**
+ * @brief Show a frame in the RGB565 big-endian format using TRUECOLOR terminal escape codes
+ * @copydetails pixel_print_rgb24frame_256color()
+ */
+void pixel_print_rgb565beframe_truecolor(const uint8_t *buf, size_t size, uint16_t width,
+ uint16_t height);
+/**
+ * @brief Show a frame in the RGB332 format using 256COLOR terminal escape codes
+ * @copydetails pixel_print_rgb24frame_256color()
+ */
+void pixel_print_rgb332frame_256color(const uint8_t *buf, size_t size, uint16_t width,
+ uint16_t height);
+/**
+ * @brief Show a frame in the RGB332 format using TRUECOLOR terminal escape codes
+ * @copydetails pixel_print_rgb24frame_256color()
+ */
+void pixel_print_rgb332frame_truecolor(const uint8_t *buf, size_t size, uint16_t width,
+ uint16_t height);
+/**
+ * @brief Show a frame in the YUYV (BT.709) format using 256COLOR terminal escape codes
+ * @copydetails pixel_print_rgb24frame_256color()
+ */
+void pixel_print_yuyvframe_bt709_256color(const uint8_t *buf, size_t size, uint16_t width,
+ uint16_t height);
+/**
+ * @brief Show a frame in the YUYV (BT.709) format using TRUECOLOR terminal escape codes
+ * @copydetails pixel_print_rgb24frame_256color()
+ */
+void pixel_print_yuyvframe_bt709_truecolor(const uint8_t *buf, size_t size, uint16_t width,
+ uint16_t height);
+/**
+ * @brief Show a frame in the YUV24 (BT.709) format using 256COLOR terminal escape codes
+ * @copydetails pixel_print_rgb24frame_256color()
+ */
+void pixel_print_yuv24frame_bt709_256color(const uint8_t *buf, size_t size, uint16_t width,
+ uint16_t height);
+/**
+ * @brief Show a frame in the YUV24 (BT.709) format using TRUECOLOR terminal escape codes
+ * @copydetails pixel_print_rgb24frame_256color()
+ */
+void pixel_print_yuv24frame_bt709_truecolor(const uint8_t *buf, size_t size, uint16_t width,
+ uint16_t height);
+
+/**
+ * @brief Hexdump a frame with pixels in the RAW8 format
+ *
+ * @param buf Input buffer to display in the terminal.
+ * @param size Size of the input buffer in bytes.
+ * @param width Width of the input buffer in number of pixels.
+ * @param height Maximum number of rows to print.
+ */
+void pixel_print_raw8frame_hex(const uint8_t *buf, size_t size, uint16_t width, uint16_t height);
+/**
+ * @brief Hexdump a frame with pixels in the RGB24 format
+ * @copydetails pixel_print_raw8frame_hex()
+ */
+void pixel_print_rgb24frame_hex(const uint8_t *buf, size_t size, uint16_t width, uint16_t height);
+/**
+ * @brief Hexdump a frame with pixels in the RGB565 format
+ * @copydetails pixel_print_raw8frame_hex()
+ */
+void pixel_print_rgb565frame_hex(const uint8_t *buf, size_t size, uint16_t width, uint16_t height);
+/**
+ * @brief Hexdump a frame with pixels in the YUYV format
+ * @copydetails pixel_print_raw8frame_hex()
+ */
+void pixel_print_yuyvframe_hex(const uint8_t *buf, size_t size, uint16_t width, uint16_t height);
+
+/**
+ * @brief Print RGB histograms to the terminal, for debugging and quick insights.
+ *
+ * @param rgb24hist Buffer storing 3 histograms one after the other, for the R, G, B channels.
+ * @param size Total number of buckets contained within @p rgb24hist, all channels included.
+ * @param height Desired height of the chart in pixels.
+ */
+void pixel_print_rgb24hist(const uint16_t *rgb24hist, size_t size, uint16_t height);
+
+/**
+ * @brief Print a Y (luma) histogram to the terminal, for debugging and quick insights.
+ *
+ * @param y8hist Buffer storing the histogram for the Y (luma) channel.
+ * @param size Total number of buckets contained within @p y8hist.
+ * @param height Desired height of the chart in pixels.
+ */
+void pixel_print_y8hist(const uint16_t *y8hist, size_t size, uint16_t height);
+
+/**
+ * @brief Set the shell instance to use when printing via the shell back-end.
+ *
+ * @see CONFIG_PIXEL_PRINT
+ *
+ * @param sh Shell instance set as a global variable.
+ */
+void pixel_print_set_shell(struct shell *sh);
+
+#endif /* ZEPHYR_INCLUDE_PIXEL_PRINT_H_ */
diff --git a/include/zephyr/pixel/resize.h b/include/zephyr/pixel/resize.h
new file mode 100644
index 000000000000..91fed336eb27
--- /dev/null
+++ b/include/zephyr/pixel/resize.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2025 tinyVision.ai Inc.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef ZEPHYR_INCLUDE_PIXEL_RESIZE_H_
+#define ZEPHYR_INCLUDE_PIXEL_RESIZE_H_
+
+#include
+
+#include
+
+/**
+ * @brief Resize an RGB24 line by subsampling the pixels horizontally.
+ *
+ * @param src_buf Input buffer to resize.
+ * @param src_width Number of pixels in the input buffer.
+ * @param dst_buf Output buffer in which the stretched/compressed line is stored.
+ * @param dst_width Number of pixels in the output buffer.
+ */
+void pixel_subsample_rgb24line(const uint8_t *src_buf, size_t src_width, uint8_t *dst_buf,
+ size_t dst_width);
+/**
+ * @brief Resize an RGB565BE/RGB565LE line by subsampling the pixels horizontally.
+ * @copydetails pixel_subsample_rgb24line()
+ */
+void pixel_subsample_rgb565line(const uint8_t *src_buf, size_t src_width, uint8_t *dst_buf,
+ size_t dst_width);
+
+/**
+ * @brief Resize an RGB24 frame by subsampling the pixels horizontally/vertically.
+ *
+ * @param src_buf Input buffer to resize.
+ * @param src_width Width of the input in number of pixels.
+ * @param src_height Height of the input in number of pixels.
+ * @param dst_buf Output buffer in which the stretched/compressed frame is stored.
+ * @param dst_width Width of the output in number of pixels.
+ * @param dst_height Height of the output in number of pixels.
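+ *
+ * For instance, a sketch downscaling a 64x48 RGB24 frame to 32x24 (dimensions illustrative):
+ * @code{.c}
+ * uint8_t src[64 * 48 * 3];
+ * uint8_t dst[32 * 24 * 3];
+ *
+ * pixel_subsample_rgb24frame(src, 64, 48, dst, 32, 24);
+ * @endcode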
+ */
+void pixel_subsample_rgb24frame(const uint8_t *src_buf, size_t src_width, size_t src_height,
+ uint8_t *dst_buf, size_t dst_width, size_t dst_height);
+/**
+ * @brief Resize an RGB565BE/RGB565LE frame by subsampling the pixels horizontally/vertically.
+ * @copydetails pixel_subsample_rgb24frame()
+ */
+void pixel_subsample_rgb565frame(const uint8_t *src_buf, size_t src_width, size_t src_height,
+ uint8_t *dst_buf, size_t dst_width, size_t dst_height);
+
+/**
+ * @brief Resize an RGB24 stream by subsampling the pixels vertically/horizontally.
+ *
+ * @note The "width" and "height" macro parameters are for the input, like any stream element.
+ * The output size is configured implicitly by connecting this block to another one of a
+ * different size.
+ *
+ * @param name The symbol of the @ref pixel_stream that will be defined.
+ * @param width The total width of the input frame in number of pixels.
+ * @param height The total height of the input frame in number of pixels.
+ */
+#define PIXEL_SUBSAMPLE_RGB24STREAM(name, width, height) \
+ PIXEL_RESIZE_DEFINE(name, pixel_subsample_rgb24stream, (width), (height), 3)
+/**
+ * @brief Resize an RGB565BE/RGB565LE stream by subsampling the pixels vertically/horizontally.
+ * @copydetails PIXEL_SUBSAMPLE_RGB24STREAM()
+ */
+#define PIXEL_SUBSAMPLE_RGB565STREAM(name, width, height) \
+ PIXEL_RESIZE_DEFINE(name, pixel_subsample_rgb565stream, (width), (height), 2)
+
+/** @cond INTERNAL_HIDDEN */
+#define PIXEL_RESIZE_DEFINE(name, fn, width, height, bytes_per_pixel) \
+ PIXEL_STREAM_DEFINE(name, (fn), (width), (height), \
+ (width) * (bytes_per_pixel), 1 * (width) * (bytes_per_pixel))
+void pixel_subsample_rgb24stream(struct pixel_stream *strm);
+void pixel_subsample_rgb565stream(struct pixel_stream *strm);
+/** @endcond */
+
+#endif /* ZEPHYR_INCLUDE_PIXEL_RESIZE_H_ */
diff --git a/include/zephyr/pixel/stats.h b/include/zephyr/pixel/stats.h
new file mode 100644
index 000000000000..6d6b0e854cfd
--- /dev/null
+++ b/include/zephyr/pixel/stats.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2025 tinyVision.ai Inc.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef ZEPHYR_INCLUDE_PIXEL_STATS_H
+#define ZEPHYR_INCLUDE_PIXEL_STATS_H
+
+#include
+
+/**
+ * @brief Collect red, green, blue channel averages of all pixels in an RGB24 frame.
+ *
+ * @param buf Buffer of pixels in RGB24 format (3 bytes per pixel) to collect the statistics from.
+ * @param size Size of this input buffer.
+ * @param rgb24avg The channel averages stored as an RGB24 pixel.
+ * @param nval The number of values to collect in order to perform the statistics.
+ */
+void pixel_rgb24frame_to_rgb24avg(const uint8_t *buf, size_t size, uint8_t rgb24avg[3],
+ uint16_t nval);
+
+/**
+ * @brief Collect red, green, blue channel averages of all pixels in an RGGB8 frame.
+ *
+ * @param buf Buffer of pixels in Bayer format (1 byte per pixel) to collect the statistics from.
+ * @param size Size of this input buffer.
+ * @param width Width of the lines in number of pixels.
+ * @param rgb24avg The channel averages stored as an RGB24 pixel.
+ * @param nval The number of values to collect in order to perform the statistics.
+ */
+void pixel_rggb8frame_to_rgb24avg(const uint8_t *buf, size_t size, uint16_t width,
+ uint8_t rgb24avg[3], uint16_t nval);
+/**
+ * @brief Collect red, green, blue channel averages of all pixels in a BGGR8 frame.
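+ *
+ * Usage sketch (the frame dimensions and sample count are illustrative):
+ * @code{.c}
+ * uint8_t frame[32 * 16];
+ * uint8_t avg[3];
+ *
+ * pixel_bggr8frame_to_rgb24avg(frame, sizeof(frame), 32, avg, 100);
+ * @endcode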
+ * @copydetails pixel_rggb8frame_to_rgb24avg()
+ */
+void pixel_bggr8frame_to_rgb24avg(const uint8_t *buf, size_t size, uint16_t width,
+ uint8_t rgb24avg[3], uint16_t nval);
+/**
+ * @brief Collect red, green, blue channel averages of all pixels in a GBRG8 frame.
+ * @copydetails pixel_rggb8frame_to_rgb24avg()
+ */
+void pixel_gbrg8frame_to_rgb24avg(const uint8_t *buf, size_t size, uint16_t width,
+ uint8_t rgb24avg[3], uint16_t nval);
+/**
+ * @brief Collect red, green, blue channel averages of all pixels in a GRBG8 frame.
+ * @copydetails pixel_rggb8frame_to_rgb24avg()
+ */
+void pixel_grbg8frame_to_rgb24avg(const uint8_t *buf, size_t size, uint16_t width,
+ uint8_t rgb24avg[3], uint16_t nval);
+
+/**
+ * @brief Collect a histogram for each of the red, green, blue channels of an RGB24 frame.
+ *
+ * @param buf Buffer of pixels in RGB24 format (3 bytes per pixel) to collect the statistics from.
+ * @param buf_size Size of this input buffer.
+ * @param rgb24hist Buffer storing 3 histograms one after the other, for the R, G, B channels.
+ * @param hist_size Total number of buckets in the histogram, all channels included.
+ * @param nval The number of values to collect in order to perform the statistics.
+ */
+void pixel_rgb24frame_to_rgb24hist(const uint8_t *buf, size_t buf_size, uint16_t *rgb24hist,
+ size_t hist_size, uint16_t nval);
+
+/**
+ * @brief Collect a histogram for each of the red, green, blue channels of an RGGB8 frame.
+ *
+ * @param buf Buffer of pixels to collect the statistics from.
+ * @param buf_size Size of this input buffer.
+ * @param width Width of the lines in number of pixels.
+ * @param rgb24hist Buffer storing 3 histograms one after the other, for the R, G, B channels.
+ * @param hist_size Total number of buckets in the histogram, all channels included.
+ * @param nval The number of values to collect in order to perform the statistics.
+ */
+void pixel_rggb8frame_to_rgb24hist(const uint8_t *buf, size_t buf_size, uint16_t width,
+ uint16_t *rgb24hist, size_t hist_size, uint16_t nval);
+/**
+ * @brief Collect a histogram for each of the red, green, blue channels of a GBRG8 frame.
+ * @copydetails pixel_rggb8frame_to_rgb24hist()
+ */
+void pixel_gbrg8frame_to_rgb24hist(const uint8_t *buf, size_t buf_size, uint16_t width,
+ uint16_t *rgb24hist, size_t hist_size, uint16_t nval);
+/**
+ * @brief Collect a histogram for each of the red, green, blue channels of a BGGR8 frame.
+ * @copydetails pixel_rggb8frame_to_rgb24hist()
+ */
+void pixel_bggr8frame_to_rgb24hist(const uint8_t *buf, size_t buf_size, uint16_t width,
+ uint16_t *rgb24hist, size_t hist_size, uint16_t nval);
+/**
+ * @brief Collect a histogram for each of the red, green, blue channels of a GRBG8 frame.
+ * @copydetails pixel_rggb8frame_to_rgb24hist()
+ */
+void pixel_grbg8frame_to_rgb24hist(const uint8_t *buf, size_t buf_size, uint16_t width,
+ uint16_t *rgb24hist, size_t hist_size, uint16_t nval);
+
+/**
+ * @brief Collect a histogram for the Y channel, obtained from the pixel values of the image.
+ *
+ * @param buf Buffer of pixels in RGB24 format (3 bytes per pixel) to collect the statistics from.
+ * @param buf_size Size of this input buffer.
+ * @param y8hist Buffer storing the histogram for the Y (luma) channel.
+ * @param hist_size Total number of buckets in the histogram, all channels included.
+ * @param nval The number of values to collect in order to perform the statistics.
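+ *
+ * Usage sketch, collecting a 64-bucket luma histogram from 500 sampled values (the frame
+ * dimensions are illustrative):
+ * @code{.c}
+ * uint8_t frame[64 * 32 * 3];
+ * uint16_t hist[64] = {0};
+ *
+ * pixel_rgb24frame_to_y8hist(frame, sizeof(frame), hist, 64, 500);
+ * @endcode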
+ */
+void pixel_rgb24frame_to_y8hist(const uint8_t *buf, size_t buf_size, uint16_t *y8hist,
+ size_t hist_size, uint16_t nval);
+
+/**
+ * @brief Collect a histogram for the Y channel, obtained from the values of an RGGB8 frame.
+ *
+ * @param buf Buffer of pixels in Bayer format (1 byte per pixel) to collect the statistics from.
+ * @param buf_size Size of this input buffer.
+ * @param width Width of the lines in number of pixels.
+ * @param y8hist Buffer storing the histogram for the Y (luma) channel.
+ * @param hist_size Total number of buckets in the histogram, all channels included.
+ * @param nval The number of values to collect in order to perform the statistics.
+ */
+void pixel_rggb8frame_to_y8hist(const uint8_t *buf, size_t buf_size, uint16_t width,
+ uint16_t *y8hist, size_t hist_size, uint16_t nval);
+/**
+ * @brief Collect a histogram for the Y channel, obtained from the values of a GBRG8 frame.
+ * @copydetails pixel_rggb8frame_to_y8hist()
+ */
+void pixel_gbrg8frame_to_y8hist(const uint8_t *buf, size_t buf_size, uint16_t width,
+ uint16_t *y8hist, size_t hist_size, uint16_t nval);
+/**
+ * @brief Collect a histogram for the Y channel, obtained from the values of a BGGR8 frame.
+ * @copydetails pixel_rggb8frame_to_y8hist()
+ */
+void pixel_bggr8frame_to_y8hist(const uint8_t *buf, size_t buf_size, uint16_t width,
+ uint16_t *y8hist, size_t hist_size, uint16_t nval);
+/**
+ * @brief Collect a histogram for the Y channel, obtained from the values of a GRBG8 frame.
+ * @copydetails pixel_rggb8frame_to_y8hist()
+ */
+void pixel_grbg8frame_to_y8hist(const uint8_t *buf, size_t buf_size, uint16_t width,
+ uint16_t *y8hist, size_t hist_size, uint16_t nval);
+
+#endif /* ZEPHYR_INCLUDE_PIXEL_STATS_H */
diff --git a/include/zephyr/pixel/stream.h b/include/zephyr/pixel/stream.h
new file mode 100644
index 000000000000..5690fa611f93
--- /dev/null
+++ b/include/zephyr/pixel/stream.h
@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 2025 tinyVision.ai Inc.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef ZEPHYR_INCLUDE_PIXEL_STREAM_H
+#define ZEPHYR_INCLUDE_PIXEL_STREAM_H
+
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+/**
+ * @brief One step of a line stream pipeline
+ *
+ * @c pixel_stream structs are chained together into a linked list.
+ * Each step of the linked list contains a ring buffer for the input data, and a pointer to a
+ * conversion function processing it.
+ * Along with extra metadata, this is used to process data as a stream of lines.
+ */
+struct pixel_stream {
+ /* Display name, useful for debugging the stream */
+ uint8_t *name;
+ /* Ring buffer that keeps track of the position in bytes */
+ struct ring_buf ring;
+ /* Number of bytes returned while asking for an input or output line */
+ size_t pitch;
+ /* Current position within the frame */
+ uint16_t line_offset;
+ /* Total number of pixels per line in the frame */
+ uint16_t width;
+ /* Total number of lines in the frame */
+ uint16_t height;
+ /* Connection to the next element of the stream */
+ struct pixel_stream *next;
+ /* Function that performs the I/O */
+ void (*run)(struct pixel_stream *strm);
+ /* Timestamp in CPU cycles of when this step started working */
+ uint32_t start_time;
+ /* Total time in CPU cycles spent working in this step of the stream */
+ uint32_t total_time;
+};
+
+/**
+ * @brief Load a buffer into a stream.
+ *
+ * The parameters such as line pitch or image height are to be configured inside each individual
+ * stream step before calling this function.
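+ *
+ * For a full conversion, the pixel_stream_to_<fmt>frame() helpers below also collect the final
+ * output; a sketch of a two-step pipeline (the step names and dimensions are illustrative):
+ * @code{.c}
+ * static PIXEL_RGB565LESTREAM_TO_RGB24STREAM(step_rgb24, 320, 240);
+ * static PIXEL_RGB24STREAM_TO_YUYVSTREAM_BT709(step_yuyv, 320, 240);
+ *
+ * void convert(const uint8_t *src, size_t src_size, uint8_t *dst, size_t dst_size)
+ * {
+ *         pixel_stream_to_yuyvframe(src, src_size, 320, dst, dst_size, 320,
+ *                                   pixel_stream(&step_rgb24, &step_yuyv, NULL));
+ * }
+ * @endcode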
+ *
+ * @param first Pipeline stream into which the buffer is loaded, one pitch worth of data at a time.
+ * @param buf Buffer of data to load into the stream.
+ * @param size Total number of bytes in this source buffer.
+ */
+void pixel_stream_load(struct pixel_stream *first, const uint8_t *buf, size_t size);
+
+/**
+ * @brief Load an input frame into a stream and store it into an 8-bits-per-pixel output frame.
+ *
+ * Convert an input buffer to an output buffer according to a list of steps passed as arguments.
+ *
+ * @param src_buf Source buffer to load into the stream.
+ * @param src_size Size of the source buffer in bytes.
+ * @param src_width Width of the source image in number of pixels.
+ * @param dst_buf Destination buffer receiving the converted frame.
+ * @param dst_size Size of the destination buffer in bytes.
+ * @param dst_width Width of the destination image in number of pixels.
+ * @param strm Linked list of stream steps: the pipeline to run.
+ */
+void pixel_stream_to_raw8frame(const uint8_t *src_buf, size_t src_size, uint16_t src_width,
+ uint8_t *dst_buf, size_t dst_size, uint16_t dst_width,
+ struct pixel_stream *strm);
+/**
+ * @brief Load an input frame into a stream and store it into a 16-bits-per-pixel output frame.
+ * @copydetails pixel_stream_to_raw8frame()
+ */
+void pixel_stream_to_raw16frame(const uint8_t *src_buf, size_t src_size, uint16_t src_width,
+ uint8_t *dst_buf, size_t dst_size, uint16_t dst_width,
+ struct pixel_stream *strm);
+/**
+ * @brief Load an input frame into a stream and store it into a 24-bits-per-pixel output frame.
+ * @copydetails pixel_stream_to_raw8frame()
+ */
+void pixel_stream_to_raw24frame(const uint8_t *src_buf, size_t src_size, uint16_t src_width,
+ uint8_t *dst_buf, size_t dst_size, uint16_t dst_width,
+ struct pixel_stream *strm);
+
+/** @copydoc pixel_stream_to_raw8frame() */
+#define pixel_stream_to_rgb332frame pixel_stream_to_raw8frame
+/** @copydoc pixel_stream_to_raw8frame() */
+#define pixel_stream_to_rggb8frame pixel_stream_to_raw8frame
+/** @copydoc pixel_stream_to_raw8frame() */
+#define pixel_stream_to_bggr8frame pixel_stream_to_raw8frame
+/** @copydoc pixel_stream_to_raw8frame() */
+#define pixel_stream_to_gbrg8frame pixel_stream_to_raw8frame
+/** @copydoc pixel_stream_to_raw8frame() */
+#define pixel_stream_to_grbg8frame pixel_stream_to_raw8frame
+/** @copydoc pixel_stream_to_raw16frame() */
+#define pixel_stream_to_rgb565beframe pixel_stream_to_raw16frame
+/** @copydoc pixel_stream_to_raw16frame() */
+#define pixel_stream_to_rgb565leframe pixel_stream_to_raw16frame
+/** @copydoc pixel_stream_to_raw16frame() */
+#define pixel_stream_to_yuyvframe pixel_stream_to_raw16frame
+/** @copydoc pixel_stream_to_raw24frame() */
+#define pixel_stream_to_rgb24frame pixel_stream_to_raw24frame
+/** @copydoc pixel_stream_to_raw24frame() */
+#define pixel_stream_to_yuv24frame pixel_stream_to_raw24frame
+
+/**
+ * @brief Get a pointer to an output line from the next step of the stream.
+ *
+ * The buffer obtained can then be used to store the output of the conversion.
+ *
+ * The lines will be considered as converted as soon as @ref pixel_stream_done() is called, which
+ * will feed the line into the next step of the stream.
+ *
+ * There is no need to pass @c strm->next as argument as @c pixel_stream_get_output_line() will
+ * take care of it internally.
+ *
+ * @param strm Stream from which the next output line buffer will be taken.
+ * @return Pointer to the requested line buffer, never NULL.
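+ *
+ * A sketch of a custom step's run function built with this API (the inversion operation is
+ * illustrative):
+ * @code{.c}
+ * void my_invert_rgb24(struct pixel_stream *strm)
+ * {
+ *         const uint8_t *in = pixel_stream_get_input_line(strm);
+ *         uint8_t *out = pixel_stream_get_output_line(strm);
+ *
+ *         for (size_t i = 0; i < (size_t)strm->width * 3; i++) {
+ *                 out[i] = 255 - in[i];
+ *         }
+ *         pixel_stream_done(strm);
+ * }
+ * @endcode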
+ */
+static inline uint8_t *pixel_stream_get_output_line(struct pixel_stream *strm)
+{
+ uint8_t *lines;
+ uint32_t size;
+
+ __ASSERT_NO_MSG(strm != NULL);
+ __ASSERT_NO_MSG(strm->next != NULL);
+
+ size = ring_buf_put_claim(&strm->next->ring, &lines, strm->next->pitch);
+ ring_buf_put_finish(&strm->next->ring, size);
+ __ASSERT(size == strm->next->pitch,
+ "%s asked for %zu output bytes, only have %u, total used %u, total free %u",
+ strm->name, strm->next->pitch, size, ring_buf_size_get(&strm->next->ring),
+ ring_buf_space_get(&strm->next->ring));
+
+ return lines;
+}
+
+/**
+ * @brief Get a pointer to a given number of input lines, and consume them from the stream.
+ *
+ * The lines are considered as processed, which will free them from the input ring buffer, and
+ * allow more data to flow in.
+ *
+ * @param strm Stream from which to get the input lines.
+ * @param nb Number of lines to get in one block.
+ * @return Pointer to the requested number of lines, never NULL.
+ */
+static inline const uint8_t *pixel_stream_get_input_lines(struct pixel_stream *strm, size_t nb)
+{
+ uint8_t *lines;
+ uint32_t size;
+
+ __ASSERT_NO_MSG(strm != NULL);
+
+ strm->line_offset += nb;
+ __ASSERT(strm->line_offset <= strm->height,
+ "Trying to read at position %u beyond the height of the frame %u",
+ strm->line_offset, strm->height);
+
+ size = ring_buf_get_claim(&strm->ring, &lines, strm->pitch * nb);
+ ring_buf_get_finish(&strm->ring, size);
+ __ASSERT(size == strm->pitch * nb,
+ "%s asked for %zu input bytes, obtained only %u, total used %u, total free %u",
+ strm->name, strm->pitch * nb, size, ring_buf_size_get(&strm->ring),
+ ring_buf_space_get(&strm->ring));
+
+ return lines;
+}
+
+/**
+ * @brief Shorthand for @ref pixel_stream_get_input_lines() to get a single input line.
+ *
+ * @param strm Stream from which to get the input line.
+ * @return Pointer to the requested line, never NULL.
+ */
+static inline const uint8_t *pixel_stream_get_input_line(struct pixel_stream *strm)
+{
+ return pixel_stream_get_input_lines(strm, 1);
+}
+
+/**
+ * @brief Request a pointer to the next line of data without affecting the input stream.
+ *
+ * This makes it possible to implement a lookahead operation when one or several lines of context
+ * are needed in addition to the line being converted.
+ *
+ * @param strm Stream from which to peek at the next input line.
+ * @return The pointer to the input data.
+ */
+static inline uint8_t *pixel_stream_peek_input_line(struct pixel_stream *strm)
+{
+ uint8_t *line;
+ uint32_t size;
+
+ __ASSERT_NO_MSG(strm != NULL);
+
+ size = ring_buf_get_claim(&strm->ring, &line, strm->pitch);
+ __ASSERT_NO_MSG(size == strm->pitch);
+
+ return line;
+}
+
+/**
+ * @brief Request a pointer to the entire input buffer content, consumed from the input stream.
+ *
+ * @param strm Stream from which to get the buffer.
+ * @return The pointer to the input data.
+ */
+static inline const uint8_t *pixel_stream_get_all_input(struct pixel_stream *strm)
+{
+ uint8_t *remaining;
+ uint32_t size;
+
+ __ASSERT_NO_MSG(strm != NULL);
+
+ strm->line_offset = strm->height;
+ __ASSERT_NO_MSG(strm->line_offset <= strm->height);
+
+ size = ring_buf_get_claim(&strm->ring, &remaining, ring_buf_capacity_get(&strm->ring));
+ ring_buf_get_finish(&strm->ring, size);
+ __ASSERT(size == ring_buf_capacity_get(&strm->ring),
+ "Could not dequeue the entire input buffer of %s, %u used, %u free", strm->name,
+ ring_buf_size_get(&strm->ring), ring_buf_space_get(&strm->ring));
+
+ return remaining;
+}
+
+/**
+ * @brief Mark the line obtained with @ref pixel_stream_get_output_line as converted.
+ *
+ * This will let the next step of the stream know that a new line was converted.
+ * This allows the pipeline to trigger the next step if there is enough data submitted to it.
+ *
+ * @param strm Stream for which to confirm the line conversion.
+ */
+static inline void pixel_stream_done(struct pixel_stream *strm)
+{
+ __ASSERT_NO_MSG(strm != NULL);
+
+ /* Ignore any "peek" operation done previously */
+ ring_buf_get_finish(&strm->ring, 0);
+ ring_buf_put_finish(&strm->ring, 0);
+
+ /* Flush the timestamp to the counter */
+ strm->total_time += strm->start_time == 0 ? 0 : k_cycle_get_32() - strm->start_time;
+
+ if (strm->next != NULL && strm->next->run && ring_buf_space_get(&strm->next->ring) == 0) {
+ /* Start the counter of the next stream */
+ strm->next->start_time = k_cycle_get_32();
+
+ /* Run the next stream */
+ strm->next->run(strm->next);
+
+ /* Resuming this stream, update the start time */
+ strm->start_time = k_cycle_get_32();
+ }
+
+ /* Ignore the processing done downstream, reset the time counter */
+ strm->start_time = k_cycle_get_32();
+}
+
+/**
+ * @brief Helper to turn a line-conversion function into a stream-conversion function
+ *
+ * The line conversion function is free to perform any processing on the input line.
+ * The @c w argument is the width of both the source and destination buffers.
+ *
+ * @param strm Input stream to process.
+ * @param fn Line conversion function to turn into a stream conversion function.
+ */
+static inline void pixel_line_to_stream(struct pixel_stream *strm,
+ void (*fn)(const uint8_t *src, uint8_t *dst, uint16_t w))
+{
+ fn(pixel_stream_get_input_line(strm), pixel_stream_get_output_line(strm), strm->width);
+ pixel_stream_done(strm);
+}
+
+/** @cond INTERNAL_HIDDEN */
+#define PIXEL_STREAM_DEFINE(_name, _run, _width, _height, _pitch, _bufsize) \
+ struct pixel_stream _name = { \
+ .name = "[" #_name " " #_run " " STRINGIFY(_width) "x" STRINGIFY(_height) "]", \
+ .ring = RING_BUF_INIT((uint8_t [_bufsize]) {0}, _bufsize), \
+ .pitch = (_pitch), \
+ .width = (_width), \
+ .height = (_height), \
+ .run = (_run), \
+ }
+/** @endcond */
+
+/**
+ * @brief Define a 24-bit-per-pixel stream conversion step out of a 1-line conversion function
+ *
+ * @param name The symbol of the @ref pixel_stream that will be defined.
+ * @param fn The line conversion function to repeat over a full frame.
+ * @param width The total width of the input frame in number of pixels.
+ * @param height The total height of the input frame in number of pixels.
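+ *
+ * Sketch (the conversion function and dimensions are illustrative):
+ * @code{.c}
+ * void my_rgb24line_op(const uint8_t *src, uint8_t *dst, uint16_t width);
+ *
+ * PIXEL_RAW24LINE_DEFINE(step_my_op, my_rgb24line_op, 320, 240);
+ * @endcode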
+ */
+#define PIXEL_RAW24LINE_DEFINE(name, fn, width, height) \
+ static void name##_stream(struct pixel_stream *strm) { pixel_line_to_stream(strm, fn); } \
+ PIXEL_STREAM_DEFINE(name, name##_stream, width, height, (width) * 3, (width) * 3)
+/**
+ * @brief Define a 16-bit-per-pixel stream conversion step out of a 1-line conversion function
+ * @copydetails PIXEL_RAW24LINE_DEFINE
+ */
+#define PIXEL_RAW16LINE_DEFINE(name, fn, width, height) \
+ static void name##_stream(struct pixel_stream *strm) { pixel_line_to_stream(strm, fn); } \
+ PIXEL_STREAM_DEFINE(name, name##_stream, width, height, (width) * 2, (width) * 2)
+/**
+ * @brief Define an 8-bit-per-pixel stream conversion step out of a 1-line conversion function
+ * @copydetails PIXEL_RAW24LINE_DEFINE
+ */
+#define PIXEL_RAW8LINE_DEFINE(name, fn, width, height) \
+ static void name##_stream(struct pixel_stream *strm) { pixel_line_to_stream(strm, fn); } \
+ PIXEL_STREAM_DEFINE(name, name##_stream, width, height, (width) * 1, (width) * 1)
+
+/** @copydoc PIXEL_RAW8LINE_DEFINE() */
+#define PIXEL_RGB332LINE_DEFINE PIXEL_RAW8LINE_DEFINE
+/** @copydoc PIXEL_RAW8LINE_DEFINE() */
+#define PIXEL_RGGB8LINE_DEFINE PIXEL_RAW8LINE_DEFINE
+/** @copydoc PIXEL_RAW8LINE_DEFINE() */
+#define PIXEL_BGGR8LINE_DEFINE PIXEL_RAW8LINE_DEFINE
+/** @copydoc PIXEL_RAW8LINE_DEFINE() */
+#define PIXEL_GBRG8LINE_DEFINE PIXEL_RAW8LINE_DEFINE
+/** @copydoc PIXEL_RAW8LINE_DEFINE() */
+#define PIXEL_GRBG8LINE_DEFINE PIXEL_RAW8LINE_DEFINE
+/** @copydoc PIXEL_RAW16LINE_DEFINE() */
+#define PIXEL_RGB565BELINE_DEFINE PIXEL_RAW16LINE_DEFINE
+/** @copydoc PIXEL_RAW16LINE_DEFINE() */
+#define PIXEL_RGB565LELINE_DEFINE PIXEL_RAW16LINE_DEFINE
+/** @copydoc PIXEL_RAW16LINE_DEFINE() */
+#define PIXEL_YUYVLINE_DEFINE PIXEL_RAW16LINE_DEFINE
+/** @copydoc PIXEL_RAW24LINE_DEFINE() */
+#define PIXEL_RGB24LINE_DEFINE PIXEL_RAW24LINE_DEFINE
+/** @copydoc PIXEL_RAW24LINE_DEFINE() */
+#define PIXEL_YUV24LINE_DEFINE PIXEL_RAW24LINE_DEFINE
+
+/**
+ * @brief Connect the steps of a stream together in a pipeline.
+ *
+ * @param step The first step of the stream, to which the others are appended.
+ * @param ... A NULL-terminated list of arguments, in the order in which they should be connected.
+ * @return The stream constructed.
+ */
+struct pixel_stream *pixel_stream(struct pixel_stream *step, ...);
+
+#endif /* ZEPHYR_INCLUDE_PIXEL_STREAM_H */
diff --git a/include/zephyr/usb/class/usbd_uvc.h b/include/zephyr/usb/class/usbd_uvc.h
new file mode 100644
index 000000000000..87f89967018a
--- /dev/null
+++ b/include/zephyr/usb/class/usbd_uvc.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2025 tinyVision.ai Inc.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/**
+ * @file
+ * @brief USB Video Class (UVC) device public header
+ *
+ * Header exposes the API for binding a video source to a UVC instance.
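+ *
+ * A typical sequence (sketch; the devicetree node label is illustrative) binds the video
+ * source to the UVC instance before the USB device is enabled:
+ * @code{.c}
+ * const struct device *uvc_dev = DEVICE_DT_GET(DT_NODELABEL(uvc));
+ * const struct device *video_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_camera));
+ *
+ * uvc_set_video_dev(uvc_dev, video_dev);
+ * @endcode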
+ */
+
+#ifndef ZEPHYR_INCLUDE_USB_CLASS_USBD_UVC_H
+#define ZEPHYR_INCLUDE_USB_CLASS_USBD_UVC_H
+
+#include <zephyr/device.h>
+
+/**
+ * @brief Set the video device that a UVC instance streams from.
+ *
+ * @param dev The UVC instance to configure.
+ * @param video_dev The video device supplying the frames exposed over USB.
+ */
+void uvc_set_video_dev(const struct device *const dev, const struct device *const video_dev);
+
+#endif /* ZEPHYR_INCLUDE_USB_CLASS_USBD_UVC_H */
diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt
index b945968c8fbe..d8de2732c0f5 100644
--- a/lib/CMakeLists.txt
+++ b/lib/CMakeLists.txt
@@ -16,6 +16,7 @@ add_subdirectory(heap)
 add_subdirectory(mem_blocks)
 add_subdirectory_ifdef(CONFIG_NET_BUF net_buf)
 add_subdirectory(os)
+add_subdirectory_ifdef(CONFIG_PIXEL pixel)
 add_subdirectory(utils)
 add_subdirectory_ifdef(CONFIG_SMF smf)
 add_subdirectory_ifdef(CONFIG_OPENAMP open-amp)
diff --git a/lib/Kconfig b/lib/Kconfig
index ae97399b1995..bf5ae2371ee6 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -19,6 +19,8 @@ source "lib/net_buf/Kconfig"
 source "lib/os/Kconfig"
+source "lib/pixel/Kconfig"
+
 source "lib/posix/Kconfig"
 source "lib/open-amp/Kconfig"
@@ -32,4 +34,5 @@ source "lib/runtime/Kconfig"
 source "lib/utils/Kconfig"
 source "lib/uuid/Kconfig"
+
 endmenu
diff --git a/lib/pixel/CMakeLists.txt b/lib/pixel/CMakeLists.txt
new file mode 100644
index 000000000000..20908783672c
--- /dev/null
+++ b/lib/pixel/CMakeLists.txt
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: Apache-2.0
+
+zephyr_library()
+
+# zephyr-keep-sorted-start
+zephyr_library_sources(bayer.c)
+zephyr_library_sources(formats.c)
+zephyr_library_sources(kernel.c)
+zephyr_library_sources(print.c)
+zephyr_library_sources(resize.c)
+zephyr_library_sources(stats.c)
+zephyr_library_sources(stream.c)
+# zephyr-keep-sorted-stop
diff --git a/lib/pixel/Kconfig b/lib/pixel/Kconfig
new file mode 100644
index 000000000000..ed2ea5333f43
--- /dev/null
+++ b/lib/pixel/Kconfig
@@ -0,0 +1,44 @@
+# Copyright (c) 2025 tinyVision.ai Inc.
+# SPDX-License-Identifier: Apache-2.0
+
+menuconfig PIXEL
+ bool "Pixel and Image Manipulation Library"
+ imply RING_BUFFER_LARGE
+
+if PIXEL
+
+choice PIXEL_PRINT
+ bool "Print function to use for sending characters out"
+ default PIXEL_PRINT_PRINTF
+ help
+ The default is printf(), as it is the most likely to actually get the output printed.
+
+config PIXEL_PRINT_PRINTF
+ bool "Image output to print is sent to printf()"
+ help
+ The data is handled directly by the libc, bypassing most configuration.
+
+config PIXEL_PRINT_PRINTK
+ bool "Image output to print is sent to printk()"
+ help
+ The printk() function will sometimes drop characters to avoid slowing down the rest of
+ the firmware, and can be configured to use the logging subsystem.
+
+config PIXEL_PRINT_SHELL
+ bool "Image output to print is sent to shell_print()"
+ help
+ The shell instance used is set by pixel_print_set_shell().
+
+config PIXEL_PRINT_NONE
+ bool "Image output is dropped and not sent anywhere"
+ help
+ This is useful for environments where UTF-8 or escape characters can be a problem,
+ or where a human is not watching, such as CI, or to quickly toggle this feature
+ on and off.
+
+endchoice
+
+module = PIXEL
+module-str = pixel
+source "subsys/logging/Kconfig.template.log_config"
+
+endif
diff --git a/lib/pixel/bayer.c b/lib/pixel/bayer.c
new file mode 100644
index 000000000000..0241b7751075
--- /dev/null
+++ b/lib/pixel/bayer.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 2025 tinyVision.ai
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include + +#include + +#define FOLD_L_3X3(l0, l1, l2) \ + { \ + {l0[1], l0[0], l0[1]}, \ + {l1[1], l1[0], l1[1]}, \ + {l2[1], l2[0], l2[1]}, \ + } + +#define FOLD_R_3X3(l0, l1, l2, n) \ + { \ + {l0[(n) - 2], l0[(n) - 1], l0[(n) - 2]}, \ + {l1[(n) - 2], l1[(n) - 1], l1[(n) - 2]}, \ + {l2[(n) - 2], l2[(n) - 1], l2[(n) - 2]}, \ + } + +static inline void pixel_rggb8_to_rgb24_3x3(const uint8_t rgr0[3], const uint8_t gbg1[3], + const uint8_t rgr2[3], uint8_t rgb24[3]) +{ + rgb24[0] = ((uint16_t)rgr0[0] + rgr0[2] + rgr2[0] + rgr2[2]) / 4; + rgb24[1] = ((uint16_t)rgr0[1] + gbg1[2] + gbg1[0] + rgr2[1]) / 4; + rgb24[2] = gbg1[1]; +} + +static inline void pixel_bggr8_to_rgb24_3x3(const uint8_t bgb0[3], const uint8_t grg1[3], + const uint8_t bgb2[3], uint8_t rgb24[3]) +{ + rgb24[0] = grg1[1]; + rgb24[1] = ((uint16_t)bgb0[1] + grg1[2] + grg1[0] + bgb2[1]) / 4; + rgb24[2] = ((uint16_t)bgb0[0] + bgb0[2] + bgb2[0] + bgb2[2]) / 4; +} + +static inline void pixel_grbg8_to_rgb24_3x3(const uint8_t grg0[3], const uint8_t bgb1[3], + const uint8_t grg2[3], uint8_t rgb24[3]) +{ + rgb24[0] = ((uint16_t)grg0[1] + grg2[1]) / 2; + rgb24[1] = bgb1[1]; + rgb24[2] = ((uint16_t)bgb1[0] + bgb1[2]) / 2; +} + +static inline void pixel_gbrg8_to_rgb24_3x3(const uint8_t gbg0[3], const uint8_t rgr1[3], + const uint8_t gbg2[3], uint8_t rgb24[3]) +{ + rgb24[0] = ((uint16_t)rgr1[0] + rgr1[2]) / 2; + rgb24[1] = rgr1[1]; + rgb24[2] = ((uint16_t)gbg0[1] + gbg2[1]) / 2; +} + +static inline void pixel_rggb8_to_rgb24_2x2(uint8_t r0, uint8_t g0, uint8_t g1, uint8_t b0, + uint8_t rgb24[3]) +{ + rgb24[0] = r0; + rgb24[1] = ((uint16_t)g0 + g1) / 2; + rgb24[2] = b0; +} + +static inline void pixel_gbrg8_to_rgb24_2x2(uint8_t g1, uint8_t b0, uint8_t r0, uint8_t g0, + uint8_t rgb24[3]) +{ + rgb24[0] = r0; + rgb24[1] = ((uint16_t)g0 + g1) / 2; + rgb24[2] = b0; +} + +static inline void pixel_bggr8_to_rgb24_2x2(uint8_t b0, uint8_t g0, uint8_t g1, uint8_t r0, + uint8_t rgb24[3]) +{ + rgb24[0] = r0; + rgb24[1] = ((uint16_t)g0 + g1) / 2; + rgb24[2] = b0; +} + +static inline void pixel_grbg8_to_rgb24_2x2(uint8_t g1, uint8_t r0, uint8_t b0, uint8_t g0, + uint8_t rgb24[3]) +{ + rgb24[0] = r0; + rgb24[1] = ((uint16_t)g0 + g1) / 2; + rgb24[2] = b0; +} + +__weak void pixel_rggb8line_to_rgb24line_3x3(const uint8_t *i0, const uint8_t *i1, + const uint8_t *i2, uint8_t *o0, uint16_t w) +{ + __ASSERT_NO_MSG(w >= 4 && w % 2 == 0); + uint8_t il[3][3] = FOLD_L_3X3(i0, i1, i2); + uint8_t ir[3][3] = FOLD_R_3X3(i0, i1, i2, w); + + pixel_grbg8_to_rgb24_3x3(il[0], il[1], il[2], &o0[0]); + for (size_t i = 0, o = 3; i + 4 <= w; i += 2, o += 6) { + pixel_rggb8_to_rgb24_3x3(&i0[i + 0], &i1[i + 0], &i2[i + 0], &o0[o + 0]); + pixel_grbg8_to_rgb24_3x3(&i0[i + 1], &i1[i + 1], &i2[i + 1], &o0[o + 3]); + } + pixel_rggb8_to_rgb24_3x3(ir[0], ir[1], ir[2], &o0[w * 3 - 3]); +} + +__weak void pixel_grbg8line_to_rgb24line_3x3(const uint8_t *i0, const uint8_t *i1, + const uint8_t *i2, uint8_t *o0, uint16_t w) +{ + __ASSERT_NO_MSG(w >= 4 && w % 2 == 0); + uint8_t il[3][4] = FOLD_L_3X3(i0, i1, i2); + uint8_t ir[3][4] = FOLD_R_3X3(i0, i1, i2, w); + + pixel_rggb8_to_rgb24_3x3(il[0], il[1], il[2], &o0[0]); + for (size_t i = 0, o = 3; i + 4 <= w; i += 2, o += 6) { + pixel_grbg8_to_rgb24_3x3(&i0[i + 0], &i1[i + 0], &i2[i + 0], &o0[o + 0]); + pixel_rggb8_to_rgb24_3x3(&i0[i + 1], &i1[i + 1], &i2[i + 1], &o0[o + 3]); + } + pixel_grbg8_to_rgb24_3x3(ir[0], ir[1], ir[2], &o0[w * 3 - 3]); +} + +__weak void pixel_bggr8line_to_rgb24line_3x3(const 
uint8_t *i0, const uint8_t *i1, + const uint8_t *i2, uint8_t *o0, uint16_t w) +{ + __ASSERT_NO_MSG(w >= 4 && w % 2 == 0); + uint8_t il[3][4] = FOLD_L_3X3(i0, i1, i2); + uint8_t ir[3][4] = FOLD_R_3X3(i0, i1, i2, w); + + pixel_gbrg8_to_rgb24_3x3(il[0], il[1], il[2], &o0[0]); + for (size_t i = 0, o = 3; i + 4 <= w; i += 2, o += 6) { + pixel_bggr8_to_rgb24_3x3(&i0[i + 0], &i1[i + 0], &i2[i + 0], &o0[o + 0]); + pixel_gbrg8_to_rgb24_3x3(&i0[i + 1], &i1[i + 1], &i2[i + 1], &o0[o + 3]); + } + pixel_bggr8_to_rgb24_3x3(ir[0], ir[1], ir[2], &o0[w * 3 - 3]); +} + +__weak void pixel_gbrg8line_to_rgb24line_3x3(const uint8_t *i0, const uint8_t *i1, + const uint8_t *i2, uint8_t *o0, uint16_t w) +{ + __ASSERT_NO_MSG(w >= 4 && w % 2 == 0); + uint8_t il[3][4] = FOLD_L_3X3(i0, i1, i2); + uint8_t ir[3][4] = FOLD_R_3X3(i0, i1, i2, w); + + pixel_bggr8_to_rgb24_3x3(il[0], il[1], il[2], &o0[0]); + for (size_t i = 0, o = 3; i + 4 <= w; i += 2, o += 6) { + pixel_gbrg8_to_rgb24_3x3(&i0[i + 0], &i1[i + 0], &i2[i + 0], &o0[o + 0]); + pixel_bggr8_to_rgb24_3x3(&i0[i + 1], &i1[i + 1], &i2[i + 1], &o0[o + 3]); + } + pixel_gbrg8_to_rgb24_3x3(ir[0], ir[1], ir[2], &o0[w * 3 - 3]); +} + +__weak void pixel_rggb8line_to_rgb24line_2x2(const uint8_t *i0, const uint8_t *i1, uint8_t *o0, + uint16_t w) +{ + __ASSERT_NO_MSG(w >= 2 && w % 2 == 0); + + for (size_t i = 0, o = 0; i + 3 <= w; i += 2, o += 6) { + pixel_rggb8_to_rgb24_2x2(i0[i + 0], i0[i + 1], i1[i + 0], i1[i + 1], &o0[o + 0]); + pixel_grbg8_to_rgb24_2x2(i0[i + 1], i0[i + 2], i1[i + 1], i1[i + 2], &o0[o + 3]); + } + pixel_rggb8_to_rgb24_2x2(i0[w - 1], i0[w - 2], i1[w - 1], i1[w - 2], &o0[w * 3 - 6]); + pixel_grbg8_to_rgb24_2x2(i0[w - 2], i0[w - 1], i1[w - 2], i1[w - 1], &o0[w * 3 - 3]); +} + +__weak void pixel_gbrg8line_to_rgb24line_2x2(const uint8_t *i0, const uint8_t *i1, uint8_t *o0, + uint16_t w) +{ + __ASSERT_NO_MSG(w >= 2 && w % 2 == 0); + + for (size_t i = 0, o = 0; i + 3 <= w; i += 2, o += 6) { + pixel_gbrg8_to_rgb24_2x2(i0[i + 0], i0[i + 1], i1[i + 0], i1[i + 1], &o0[o + 0]); + pixel_bggr8_to_rgb24_2x2(i0[i + 1], i0[i + 2], i1[i + 1], i1[i + 2], &o0[o + 3]); + } + pixel_gbrg8_to_rgb24_2x2(i0[w - 1], i0[w - 2], i1[w - 1], i1[w - 2], &o0[w * 3 - 6]); + pixel_bggr8_to_rgb24_2x2(i0[w - 2], i0[w - 1], i1[w - 2], i1[w - 1], &o0[w * 3 - 3]); +} + +__weak void pixel_bggr8line_to_rgb24line_2x2(const uint8_t *i0, const uint8_t *i1, uint8_t *o0, + uint16_t w) +{ + __ASSERT_NO_MSG(w >= 2 && w % 2 == 0); + + for (size_t i = 0, o = 0; i + 3 <= w; i += 2, o += 6) { + pixel_bggr8_to_rgb24_2x2(i0[i + 0], i0[i + 1], i1[i + 0], i1[i + 1], &o0[o + 0]); + pixel_gbrg8_to_rgb24_2x2(i0[i + 1], i0[i + 2], i1[i + 1], i1[i + 2], &o0[o + 3]); + } + pixel_bggr8_to_rgb24_2x2(i0[w - 1], i0[w - 2], i1[w - 1], i1[w - 2], &o0[w * 3 - 6]); + pixel_gbrg8_to_rgb24_2x2(i0[w - 2], i0[w - 1], i1[w - 2], i1[w - 1], &o0[w * 3 - 3]); +} + +__weak void pixel_grbg8line_to_rgb24line_2x2(const uint8_t *i0, const uint8_t *i1, uint8_t *o0, + uint16_t w) +{ + __ASSERT_NO_MSG(w >= 2 && w % 2 == 0); + + for (size_t i = 0, o = 0; i + 3 <= w; i += 2, o += 6) { + pixel_grbg8_to_rgb24_2x2(i0[i + 0], i0[i + 1], i1[i + 0], i1[i + 1], &o0[o + 0]); + pixel_rggb8_to_rgb24_2x2(i0[i + 1], i0[i + 2], i1[i + 1], i1[i + 2], &o0[o + 3]); + } + pixel_grbg8_to_rgb24_2x2(i0[w - 1], i0[w - 2], i1[w - 1], i1[w - 2], &o0[w * 3 - 6]); + pixel_rggb8_to_rgb24_2x2(i0[w - 2], i0[w - 1], i1[w - 2], i1[w - 1], &o0[w * 3 - 3]); +} + +typedef void fn_3x3_t(const uint8_t *i0, const uint8_t *i1, const uint8_t *i2, uint8_t *o0, + uint16_t width); + 
+typedef void fn_2x2_t(const uint8_t *i0, const uint8_t *i1, uint8_t *o0, uint16_t width);
+
+static inline void pixel_bayerstream_to_rgb24stream_3x3(struct pixel_stream *strm, fn_3x3_t *fn0,
+ fn_3x3_t *fn1)
+{
+ uint16_t prev_line_offset = strm->line_offset;
+ const uint8_t *i0 = pixel_stream_get_input_line(strm);
+ const uint8_t *i1 = pixel_stream_peek_input_line(strm);
+ const uint8_t *i2 = pixel_stream_peek_input_line(strm);
+
+ if (prev_line_offset == 0) {
+ fn1(i1, i0, i1, pixel_stream_get_output_line(strm), strm->width);
+ pixel_stream_done(strm);
+ }
+
+ if (prev_line_offset % 2 == 0) {
+ fn0(i0, i1, i2, pixel_stream_get_output_line(strm), strm->width);
+ pixel_stream_done(strm);
+ } else {
+ fn1(i0, i1, i2, pixel_stream_get_output_line(strm), strm->width);
+ pixel_stream_done(strm);
+ }
+
+ if (strm->line_offset + 2 == strm->height) {
+ fn0(i1, i2, i1, pixel_stream_get_output_line(strm), strm->width);
+ pixel_stream_done(strm);
+
+ /* Skip the two lines of lookahead context, now that the conversion is complete */
+ pixel_stream_get_input_line(strm);
+ pixel_stream_get_input_line(strm);
+ }
+}
+
+static inline void pixel_bayerstream_to_rgb24stream_2x2(struct pixel_stream *strm, fn_2x2_t *fn0,
+ fn_2x2_t *fn1)
+{
+ uint16_t prev_line_offset = strm->line_offset;
+ const uint8_t *i0 = pixel_stream_get_input_line(strm);
+ const uint8_t *i1 = pixel_stream_peek_input_line(strm);
+
+ if (prev_line_offset % 2 == 0) {
+ fn0(i0, i1, pixel_stream_get_output_line(strm), strm->width);
+ pixel_stream_done(strm);
+ } else {
+ fn1(i0, i1, pixel_stream_get_output_line(strm), strm->width);
+ pixel_stream_done(strm);
+ }
+
+ if (strm->line_offset + 1 == strm->height) {
+ fn0(i1, i0, pixel_stream_get_output_line(strm), strm->width);
+ pixel_stream_done(strm);
+
+ /* Skip the single line of lookahead context, now that the conversion is complete */
+ pixel_stream_get_input_line(strm);
+ }
+}
+
+void pixel_rggb8stream_to_rgb24stream_3x3(struct pixel_stream *strm)
+{
+ pixel_bayerstream_to_rgb24stream_3x3(strm, &pixel_rggb8line_to_rgb24line_3x3,
+ &pixel_gbrg8line_to_rgb24line_3x3);
+}
+
+void pixel_gbrg8stream_to_rgb24stream_3x3(struct pixel_stream *strm)
+{
+ pixel_bayerstream_to_rgb24stream_3x3(strm, &pixel_gbrg8line_to_rgb24line_3x3,
+ &pixel_rggb8line_to_rgb24line_3x3);
+}
+
+void pixel_bggr8stream_to_rgb24stream_3x3(struct pixel_stream *strm)
+{
+ pixel_bayerstream_to_rgb24stream_3x3(strm, &pixel_bggr8line_to_rgb24line_3x3,
+ &pixel_grbg8line_to_rgb24line_3x3);
+}
+
+void pixel_grbg8stream_to_rgb24stream_3x3(struct pixel_stream *strm)
+{
+ pixel_bayerstream_to_rgb24stream_3x3(strm, &pixel_grbg8line_to_rgb24line_3x3,
+ &pixel_bggr8line_to_rgb24line_3x3);
+}
+
+void pixel_rggb8stream_to_rgb24stream_2x2(struct pixel_stream *strm)
+{
+ pixel_bayerstream_to_rgb24stream_2x2(strm, &pixel_rggb8line_to_rgb24line_2x2,
+ &pixel_gbrg8line_to_rgb24line_2x2);
+}
+
+void pixel_gbrg8stream_to_rgb24stream_2x2(struct pixel_stream *strm)
+{
+ pixel_bayerstream_to_rgb24stream_2x2(strm, &pixel_gbrg8line_to_rgb24line_2x2,
+ &pixel_rggb8line_to_rgb24line_2x2);
+}
+
+void pixel_bggr8stream_to_rgb24stream_2x2(struct pixel_stream *strm)
+{
+ pixel_bayerstream_to_rgb24stream_2x2(strm, &pixel_bggr8line_to_rgb24line_2x2,
+ &pixel_grbg8line_to_rgb24line_2x2);
+}
+
+void pixel_grbg8stream_to_rgb24stream_2x2(struct pixel_stream *strm)
+{
+ pixel_bayerstream_to_rgb24stream_2x2(strm, &pixel_grbg8line_to_rgb24line_2x2,
+ &pixel_bggr8line_to_rgb24line_2x2);
+}
diff --git a/lib/pixel/formats.c b/lib/pixel/formats.c
new file mode 100644
index 000000000000..795e93e4e431 --- /dev/null +++ b/lib/pixel/formats.c @@ -0,0 +1,257 @@ +/* + * Copyright (c) 2025 tinyVision.ai Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +LOG_MODULE_REGISTER(pixel_formats, CONFIG_PIXEL_LOG_LEVEL); + +/* RGB332 <-> RGB24 */ + +__weak void pixel_rgb24line_to_rgb332line(const uint8_t *rgb24, uint8_t *rgb332, uint16_t width) +{ + for (size_t i = 0, o = 0, w = 0; w < width; w++, i += 3, o += 1) { + rgb332[o] = 0; + rgb332[o] |= (uint16_t)rgb24[i + 0] >> 5 << (0 + 3 + 2); + rgb332[o] |= (uint16_t)rgb24[i + 1] >> 5 << (0 + 0 + 2); + rgb332[o] |= (uint16_t)rgb24[i + 2] >> 6 << (0 + 0 + 0); + } +} + +void pixel_rgb24stream_to_rgb332stream(struct pixel_stream *strm) +{ + pixel_line_to_stream(strm, pixel_rgb24line_to_rgb332line); +} + +__weak void pixel_rgb332line_to_rgb24line(const uint8_t *rgb332, uint8_t *rgb24, uint16_t width) +{ + for (size_t i = 0, o = 0, w = 0; w < width; w++, i += 1, o += 3) { + rgb24[o + 0] = rgb332[i] >> (0 + 3 + 2) << 5; + rgb24[o + 1] = rgb332[i] >> (0 + 0 + 2) << 5; + rgb24[o + 2] = rgb332[i] >> (0 + 0 + 0) << 6; + } +} + +void pixel_rgb332stream_to_rgb24stream(struct pixel_stream *strm) +{ + pixel_line_to_stream(strm, pixel_rgb332line_to_rgb24line); +} + +/* RGB565 <-> RGB24 */ + +static inline uint16_t pixel_rgb24_to_rgb565(const uint8_t rgb24[3]) +{ + uint16_t rgb565 = 0; + + rgb565 |= ((uint16_t)rgb24[0] >> 3 << (0 + 6 + 5)); + rgb565 |= ((uint16_t)rgb24[1] >> 2 << (0 + 0 + 5)); + rgb565 |= ((uint16_t)rgb24[2] >> 3 << (0 + 0 + 0)); + return rgb565; +} + +static inline void pixel_rgb565_to_rgb24(uint16_t rgb565, uint8_t rgb24[3]) +{ + rgb24[0] = rgb565 >> (0 + 6 + 5) << 3; + rgb24[1] = rgb565 >> (0 + 0 + 5) << 2; + rgb24[2] = rgb565 >> (0 + 0 + 0) << 3; +} + +__weak void pixel_rgb24line_to_rgb565beline(const uint8_t *rgb24, uint8_t *rgb565be, uint16_t width) +{ + for (size_t i = 0, o = 0, w = 0; w < width; w++, i += 3, o += 2) { + *(uint16_t *)&rgb565be[o] = sys_cpu_to_be16(pixel_rgb24_to_rgb565(&rgb24[i])); + } +} + +void pixel_rgb24stream_to_rgb565bestream(struct pixel_stream *strm) +{ + pixel_line_to_stream(strm, pixel_rgb24line_to_rgb565beline); +} + +__weak void pixel_rgb24line_to_rgb565leline(const uint8_t *rgb24, uint8_t *rgb565le, uint16_t width) +{ + for (size_t i = 0, o = 0, w = 0; w < width; w++, i += 3, o += 2) { + *(uint16_t *)&rgb565le[o] = sys_cpu_to_le16(pixel_rgb24_to_rgb565(&rgb24[i])); + } +} + +void pixel_rgb24stream_to_rgb565lestream(struct pixel_stream *strm) +{ + pixel_line_to_stream(strm, pixel_rgb24line_to_rgb565leline); +} + +__weak void pixel_rgb565beline_to_rgb24line(const uint8_t *rgb565be, uint8_t *rgb24, uint16_t width) +{ + for (size_t i = 0, o = 0, w = 0; w < width; w++, i += 2, o += 3) { + pixel_rgb565_to_rgb24(sys_be16_to_cpu(*(uint16_t *)&rgb565be[i]), &rgb24[o]); + } +} + +void pixel_rgb565bestream_to_rgb24stream(struct pixel_stream *strm) +{ + pixel_line_to_stream(strm, pixel_rgb565beline_to_rgb24line); +} + +__weak void pixel_rgb565leline_to_rgb24line(const uint8_t *rgb565le, uint8_t *rgb24, uint16_t width) +{ + for (size_t i = 0, o = 0, w = 0; w < width; w++, i += 2, o += 3) { + pixel_rgb565_to_rgb24(sys_le16_to_cpu(*(uint16_t *)&rgb565le[i]), &rgb24[o]); + } +} + +void pixel_rgb565lestream_to_rgb24stream(struct pixel_stream *strm) +{ + pixel_line_to_stream(strm, pixel_rgb565leline_to_rgb24line); +} + +/* YUV support */ + +#define Q21(val) ((int32_t)((val) * (1 << 21))) + +static inline uint8_t pixel_rgb24_to_y8_bt709(const uint8_t rgb24[3]) +{ + int16_t r 
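+/*
+ * Q21() turns a floating-point coefficient into a Q21 fixed-point integer at compile time so
+ * that the conversions stay in integer arithmetic. For illustration: Q21(0.5) is 1 << 20, and
+ * (Q21(0.5) * 200) >> 21 recovers 100. For a white RGB24 input (255, 255, 255), the BT.709 luma
+ * coefficients give (0.1826 + 0.6142 + 0.0620) * 255 + 16, approximately 235, the video-range
+ * white level.
+ */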
= rgb24[0], g = rgb24[1], b = rgb24[2]; + + return CLAMP(((Q21(+0.1826) * r + Q21(+0.6142) * g + Q21(+0.0620) * b) >> 21) + 16, + 0x00, 0xff); +} + +uint8_t pixel_rgb24_get_luma_bt709(const uint8_t rgb24[3]) +{ + return pixel_rgb24_to_y8_bt709(rgb24); +} + +static inline uint8_t pixel_rgb24_to_u8_bt709(const uint8_t rgb24[3]) +{ + int16_t r = rgb24[0], g = rgb24[1], b = rgb24[2]; + + return CLAMP(((Q21(-0.1006) * r + Q21(-0.3386) * g + Q21(+0.4392) * b) >> 21) + 128, + 0x00, 0xff); +} + +static inline uint8_t pixel_rgb24_to_v8_bt709(const uint8_t rgb24[3]) +{ + int16_t r = rgb24[0], g = rgb24[1], b = rgb24[2]; + + return CLAMP(((Q21(+0.4392) * r + Q21(-0.3989) * g + Q21(-0.0403) * b) >> 21) + 128, + 0x00, 0xff); +} + +static inline void pixel_yuv24_to_rgb24_bt709(const uint8_t y, uint8_t u, uint8_t v, + uint8_t rgb24[3]) +{ + int32_t yy = (int32_t)y - 16, uu = (int32_t)u - 128, vv = (int32_t)v - 128; + + /* Y range [16:235], U/V range [16:240], RGB range[0:255] (full range) */ + rgb24[0] = CLAMP((Q21(+1.1644) * yy + Q21(+0.0000) * uu + Q21(+1.7928) * vv) >> 21, + 0x00, 0xff); + rgb24[1] = CLAMP((Q21(+1.1644) * yy + Q21(-0.2133) * uu + Q21(-0.5330) * vv) >> 21, + 0x00, 0xff); + rgb24[2] = CLAMP((Q21(+1.1644) * yy + Q21(+2.1124) * uu + Q21(+0.0000) * vv) >> 21, + 0x00, 0xff); +} + +#undef Q21 + +/* YUV24 <-> RGB24 */ + +__weak void pixel_yuv24line_to_rgb24line_bt709(const uint8_t *yuv24, uint8_t *rgb24, uint16_t width) +{ + for (size_t i = 0, o = 0, w = 0; w < width; w++, i += 3, o += 3) { + pixel_yuv24_to_rgb24_bt709(yuv24[i + 0], yuv24[i + 1], yuv24[i + 2], &rgb24[o]); + } +} + +void pixel_yuv24stream_to_rgb24stream_bt709(struct pixel_stream *strm) +{ + pixel_line_to_stream(strm, pixel_yuv24line_to_rgb24line_bt709); +} + +void pixel_rgb24line_to_yuv24line_bt709(const uint8_t *rgb24, uint8_t *yuv24, uint16_t width) +{ + for (size_t i = 0, o = 0, w = 0; w < width; w++, i += 3, o += 3) { + yuv24[o + 0] = pixel_rgb24_to_y8_bt709(&rgb24[i]); + yuv24[o + 1] = pixel_rgb24_to_u8_bt709(&rgb24[i]); + yuv24[o + 2] = pixel_rgb24_to_v8_bt709(&rgb24[i]); + } +} + +void pixel_rgb24stream_to_yuv24stream_bt709(struct pixel_stream *strm) +{ + pixel_line_to_stream(strm, pixel_rgb24line_to_yuv24line_bt709); +} + +/* YUYV <-> YUV24 */ + +__weak void pixel_yuv24line_to_yuyvline(const uint8_t *yuv24, uint8_t *yuyv, uint16_t width) +{ + for (size_t i = 0, o = 0, w = 0; w + 2 <= width; w += 2, i += 6, o += 4) { + /* Pixel 0 */ + yuyv[o + 0] = yuv24[i + 0]; + yuyv[o + 1] = yuv24[i + 1]; + /* Pixel 1 */ + yuyv[o + 2] = yuv24[i + 3]; + yuyv[o + 3] = yuv24[i + 5]; + } +} + +void pixel_yuv24stream_to_yuyvstream(struct pixel_stream *strm) +{ + pixel_line_to_stream(strm, pixel_yuv24line_to_yuyvline); +} + +__weak void pixel_yuyvline_to_yuv24line(const uint8_t *yuyv, uint8_t *yuv24, uint16_t width) +{ + for (size_t i = 0, o = 0, w = 0; w + 2 <= width; w += 2, i += 4, o += 6) { + /* Pixel 0 */ + yuv24[o + 0] = yuyv[i + 0]; + yuv24[o + 1] = yuyv[i + 1]; + yuv24[o + 2] = yuyv[i + 3]; + /* Pixel 1 */ + yuv24[o + 3] = yuyv[i + 2]; + yuv24[o + 4] = yuyv[i + 1]; + yuv24[o + 5] = yuyv[i + 3]; + } +} + +void pixel_yuyvstream_to_yuv24stream(struct pixel_stream *strm) +{ + pixel_line_to_stream(strm, pixel_yuyvline_to_yuv24line); +} + +/* YUYV <-> RGB24 */ + +__weak void pixel_rgb24line_to_yuyvline_bt709(const uint8_t *rgb24, uint8_t *yuyv, uint16_t width) +{ + for (size_t i = 0, o = 0, w = 0; w + 2 <= width; w += 2, i += 6, o += 4) { + /* Pixel 0 */ + yuyv[o + 0] = pixel_rgb24_to_y8_bt709(&rgb24[i + 0]); + yuyv[o + 1] = 
pixel_rgb24_to_u8_bt709(&rgb24[i + 0]); + /* Pixel 1 */ + yuyv[o + 2] = pixel_rgb24_to_y8_bt709(&rgb24[i + 3]); + yuyv[o + 3] = pixel_rgb24_to_v8_bt709(&rgb24[i + 3]); + } +} + +void pixel_rgb24stream_to_yuyvstream_bt709(struct pixel_stream *strm) +{ + pixel_line_to_stream(strm, pixel_rgb24line_to_yuyvline_bt709); +} + +__weak void pixel_yuyvline_to_rgb24line_bt709(const uint8_t *yuyv, uint8_t *rgb24, uint16_t width) +{ + for (size_t i = 0, o = 0, w = 0; w + 2 <= width; w += 2, i += 4, o += 6) { + /* Pixel 0 */ + pixel_yuv24_to_rgb24_bt709(yuyv[i + 0], yuyv[i + 1], yuyv[i + 3], &rgb24[o + 0]); + /* Pixel 1 */ + pixel_yuv24_to_rgb24_bt709(yuyv[i + 2], yuyv[i + 1], yuyv[i + 3], &rgb24[o + 3]); + } +} + +void pixel_yuyvstream_to_rgb24stream_bt709(struct pixel_stream *strm) +{ + pixel_line_to_stream(strm, pixel_yuyvline_to_rgb24line_bt709); +} diff --git a/lib/pixel/kernel.c b/lib/pixel/kernel.c new file mode 100644 index 000000000000..0fe2ca7e415f --- /dev/null +++ b/lib/pixel/kernel.c @@ -0,0 +1,426 @@ +/* + * Copyright (c) 2025 tinyVision.ai Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include + +#include +#include + +LOG_MODULE_REGISTER(pixel_kernel, CONFIG_PIXEL_LOG_LEVEL); + +/* Function that processes a 3x3 or 5x5 pixel block described by line buffers and column indexes */ +typedef void kernel_3x3_t(const uint8_t *in[3], int i0, int i1, int i2, + uint8_t *out, int o0, uint16_t base, const int16_t *kernel); +typedef void kernel_5x5_t(const uint8_t *in[5], int i0, int i1, int i2, int i3, int i4, + uint8_t *out, int o0, uint16_t base, const int16_t *kernel); + +/* Function that repeats a 3x3 or 5x5 block operation on each channel of a pixel format */ +typedef void pixfmt_3x3_t(const uint8_t *in[3], int i0, int i1, int i2, + uint8_t *out, int o0, uint16_t base, kernel_3x3_t *kernel_fn, + const int16_t *kernel); +typedef void pixfmt_5x5_t(const uint8_t *in[5], int i0, int i1, int i2, int i3, int i4, + uint8_t *out, int o0, uint16_t base, kernel_5x5_t *kernel_fn, + const int16_t *kernel); + +/* Function that repeats a 3x3 or 5x5 kernel operation over an entire line */ +typedef void line_3x3_t(const uint8_t *in[3], uint8_t *out, uint16_t width); +typedef void line_5x5_t(const uint8_t *in[5], uint8_t *out, uint16_t width); + +/* + * Convolution kernels: multiply a grid of coefficients with the input data and sum them to produce + * one output value.
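+ *
+ * The kernel arrays further below store the coefficients row by row, followed by one extra
+ * element: the right-shift that scales the result back down. For example, the 3x3 Gaussian blur
+ * coefficients sum to 16, so its final element is 4, and shifting by 4 divides by 16 to keep the
+ * output in the 0-255 range.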
+ */ + +static void pixel_convolve_3x3(const uint8_t *in[3], int i0, int i1, int i2, + uint8_t *out, int o0, uint16_t base, const int16_t *kernel) +{ + int32_t result = 0; + int k = 0; + + /* Apply the coefficients on 3 rows */ + for (int h = 0; h < 3; h++) { + /* Apply the coefficients on 3 columns */ + result += in[h][base + i0] * kernel[k++]; /* line h column 0 */ + result += in[h][base + i1] * kernel[k++]; /* line h column 1 */ + result += in[h][base + i2] * kernel[k++]; /* line h column 2 */ + } + + /* Store the scaled-down output */ + out[base + o0] = result >> kernel[k]; +} + +static void pixel_convolve_5x5(const uint8_t *in[5], int i0, int i1, int i2, int i3, int i4, + uint8_t *out, int o0, uint16_t base, const int16_t *kernel) +{ + int32_t result = 0; + int k = 0; + + /* Apply the coefficients on 5 rows */ + for (int h = 0; h < 5; h++) { + /* Apply the coefficients on 5 columns */ + result += in[h][base + i0] * kernel[k++]; /* line h column 0 */ + result += in[h][base + i1] * kernel[k++]; /* line h column 1 */ + result += in[h][base + i2] * kernel[k++]; /* line h column 2 */ + result += in[h][base + i3] * kernel[k++]; /* line h column 3 */ + result += in[h][base + i4] * kernel[k++]; /* line h column 4 */ + } + + /* Store the scaled-down output */ + out[base + o0] = result >> kernel[k]; +} + +/* + * Median kernels: find the median value of the input block and send it as output. The effect is to + * denoise the input image while preserving the sharpness of large color regions. + */ + +static inline uint8_t pixel_median(const uint8_t **in, int *idx, uint8_t size) +{ + uint8_t pivot_bot = 0x00; + uint8_t pivot_top = 0xff; + uint8_t num_higher; + int16_t median; + + /* Binary-search of the appropriate median value, 8 steps for 8-bit depth */ + for (int i = 0; i < 8; i++) { + num_higher = 0; + median = (pivot_top + pivot_bot) / 2; + + for (uint16_t h = 0; h < size; h++) { + for (uint16_t w = 0; w < size; w++) { + num_higher += in[h][idx[w]] > median; /* line h column w */ + } + } + + if (num_higher > size * size / 2) { + pivot_bot = median; + } else if (num_higher < size * size / 2) { + pivot_top = median; + } + } + + /* Output the median value */ + return (pivot_top + pivot_bot) / 2; +} + +static void pixel_median_3x3(const uint8_t *in[3], int i0, int i1, int i2, + uint8_t *out, int o0, uint16_t base, const int16_t *unused) +{ + int idx[] = {base + i0, base + i1, base + i2}; + + out[base + o0] = pixel_median(in, idx, 3); +} + +static void pixel_median_5x5(const uint8_t *in[5], int i0, int i1, int i2, int i3, int i4, + uint8_t *out, int o0, uint16_t base, const int16_t *unused) +{ + int idx[] = {base + i0, base + i1, base + i2, base + i3, base + i4}; + + out[base + o0] = pixel_median(in, idx, 5); +} + +/* + * Convert pixel offsets into byte offsets, and repeat a kernel function for every channel of a + * pixel format for a single position.
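+ *
+ * For RGB24, one pixel is 3 bytes, hence the offsets below are multiplied by 3, and the same
+ * scalar kernel runs three times per position, with base + 0, base + 1 and base + 2 selecting
+ * the R, G and B bytes of each pixel.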
+ */ + +static void pixel_kernel_rgb24_3x3(const uint8_t *in[3], int i0, int i1, int i2, + uint8_t *out, int o0, uint16_t base, kernel_3x3_t *kernel_fn, + const int16_t *kernel) +{ + i0 *= 3, i1 *= 3, i2 *= 3, o0 *= 3, base *= 3; + kernel_fn(in, i0, i1, i2, out, o0, base + 0, kernel); /* R */ + kernel_fn(in, i0, i1, i2, out, o0, base + 1, kernel); /* G */ + kernel_fn(in, i0, i1, i2, out, o0, base + 2, kernel); /* B */ +} + +static void pixel_kernel_rgb24_5x5(const uint8_t *in[5], int i0, int i1, int i2, int i3, int i4, + uint8_t *out, int o0, uint16_t base, kernel_5x5_t *kernel_fn, + const int16_t *kernel) +{ + i0 *= 3, i1 *= 3, i2 *= 3, i3 *= 3, i4 *= 3, o0 *= 3, base *= 3; + kernel_fn(in, i0, i1, i2, i3, i4, out, o0, base + 0, kernel); /* R */ + kernel_fn(in, i0, i1, i2, i3, i4, out, o0, base + 1, kernel); /* G */ + kernel_fn(in, i0, i1, i2, i3, i4, out, o0, base + 2, kernel); /* B */ +} + +/* + * Portable/default C implementation of line processing functions. They are inlined into + * line-conversion functions at the bottom of this file, declared as __weak. + */ + +static inline void pixel_kernel_line_3x3(const uint8_t *in[3], uint8_t *out, uint16_t width, + pixfmt_3x3_t *pixfmt_fn, kernel_3x3_t *kernel_fn, + const int16_t *kernel) +{ + uint16_t w = 0; + + /* Edge case on the first column */ + pixfmt_fn(in, 0, 0, 1, out, 0, w + 0, kernel_fn, kernel); + + /* Process the entire line except the first and last columns (edge cases) */ + for (w = 0; w + 3 <= width; w++) { + pixfmt_fn(in, 0, 1, 2, out, 1, w, kernel_fn, kernel); + } + + /* Edge case on the last column */ + pixfmt_fn(in, 0, 1, 1, out, 1, w, kernel_fn, kernel); +} + +static inline void pixel_kernel_line_5x5(const uint8_t *in[5], uint8_t *out, uint16_t width, + pixfmt_5x5_t *pixfmt_fn, kernel_5x5_t *kernel_fn, + const int16_t *kernel) +{ + uint16_t w = 0; + + /* Edge case on the first two columns, repeat the left column to fill the blank */ + pixfmt_fn(in, 0, 0, 0, 1, 2, out, 0, w, kernel_fn, kernel); + pixfmt_fn(in, 0, 0, 1, 2, 3, out, 1, w, kernel_fn, kernel); + + /* Process the entire line except the first two and last two columns (edge cases) */ + for (w = 0; w + 5 <= width; w++) { + pixfmt_fn(in, 0, 1, 2, 3, 4, out, 2, w, kernel_fn, kernel); + } + + /* Edge case on the last two columns, repeat the right column to fill the blank */ + pixfmt_fn(in, 0, 1, 2, 3, 3, out, 2, w, kernel_fn, kernel); + pixfmt_fn(in, 1, 2, 3, 3, 3, out, 3, w, kernel_fn, kernel); +} + +/* + * Call a line-processing function on every line, handling the edge cases on the first and last + * lines by repeating the lines at the edge to fill the gaps.
+ */ + +static inline void pixel_kernel_stream_3x3(struct pixel_stream *strm, line_3x3_t *line_fn) +{ + uint16_t prev_line_offset = strm->line_offset; + const uint8_t *in[] = { + pixel_stream_get_input_line(strm), + pixel_stream_peek_input_line(strm), + pixel_stream_peek_input_line(strm), + }; + + __ASSERT_NO_MSG(strm->width >= 3); + __ASSERT_NO_MSG(strm->height >= 3); + + /* Allow overflowing before the top by repeating the first line */ + if (prev_line_offset == 0) { + const uint8_t *top[] = {in[0], in[0], in[1]}; + + line_fn(top, pixel_stream_get_output_line(strm), strm->width); + pixel_stream_done(strm); + } + + /* Process one more line */ + line_fn(in, pixel_stream_get_output_line(strm), strm->width); + pixel_stream_done(strm); + + /* Allow overflowing after the bottom by repeating the last line */ + if (prev_line_offset + 3 >= strm->height) { + const uint8_t *bot[] = {in[1], in[2], in[2]}; + + line_fn(bot, pixel_stream_get_output_line(strm), strm->width); + pixel_stream_done(strm); + + /* Flush the remaining lines that were used for lookahead context */ + pixel_stream_get_input_line(strm); + pixel_stream_get_input_line(strm); + } +} + +static inline void pixel_kernel_stream_5x5(struct pixel_stream *strm, line_5x5_t *line_fn) +{ + uint16_t prev_line_offset = strm->line_offset; + const uint8_t *in[] = { + pixel_stream_get_input_line(strm), + pixel_stream_peek_input_line(strm), + pixel_stream_peek_input_line(strm), + pixel_stream_peek_input_line(strm), + pixel_stream_peek_input_line(strm), + }; + + __ASSERT_NO_MSG(strm->width >= 5); + __ASSERT_NO_MSG(strm->height >= 5); + + /* Allow overflowing before the top by repeating the first line */ + if (prev_line_offset == 0) { + const uint8_t *top[] = {in[0], in[0], in[0], in[1], in[2], in[3]}; + + line_fn(&top[0], pixel_stream_get_output_line(strm), strm->width); + pixel_stream_done(strm); + + line_fn(&top[1], pixel_stream_get_output_line(strm), strm->width); + pixel_stream_done(strm); + } + + /* Process one more line */ + line_fn(in, pixel_stream_get_output_line(strm), strm->width); + pixel_stream_done(strm); + + /* Allow overflowing after the bottom by repeating the last line */ + if (prev_line_offset + 5 >= strm->height) { + const uint8_t *bot[] = {in[1], in[2], in[3], in[4], in[4], in[4]}; + + line_fn(&bot[0], pixel_stream_get_output_line(strm), strm->width); + pixel_stream_done(strm); + + line_fn(&bot[1], pixel_stream_get_output_line(strm), strm->width); + pixel_stream_done(strm); + + /* Flush the remaining lines that were used for lookahead context */ + pixel_stream_get_input_line(strm); + pixel_stream_get_input_line(strm); + pixel_stream_get_input_line(strm); + pixel_stream_get_input_line(strm); + } +} + +/* + * Declaration of convolution kernels, with the line-processing functions declared as __weak to + * allow them to be replaced with optimized versions + */ + +static const int16_t pixel_identity_3x3[] = { + 0, 0, 0, + 0, 1, 0, + 0, 0, 0, 0 +}; + +__weak void pixel_identity_rgb24line_3x3(const uint8_t *in[3], uint8_t *out, uint16_t width) +{ + pixel_kernel_line_3x3(in, out, width, pixel_kernel_rgb24_3x3, pixel_convolve_3x3, + pixel_identity_3x3); +} +void pixel_identity_rgb24stream_3x3(struct pixel_stream *strm) +{ + pixel_kernel_stream_3x3(strm, pixel_identity_rgb24line_3x3); +} + +static const int16_t pixel_identity_5x5[] = { + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0 +}; + +__weak void pixel_identity_rgb24line_5x5(const uint8_t *in[5], uint8_t *out, uint16_t width) +{ + 
pixel_kernel_line_5x5(in, out, width, pixel_kernel_rgb24_5x5, pixel_convolve_5x5, + pixel_identity_5x5); +} +void pixel_identity_rgb24stream_5x5(struct pixel_stream *strm) +{ + pixel_kernel_stream_5x5(strm, pixel_identity_rgb24line_5x5); +} + +static const int16_t pixel_edgedetect_3x3[] = { + -1, -1, -1, + -1, 8, -1, + -1, -1, -1, 0 +}; + +__weak void pixel_edgedetect_rgb24line_3x3(const uint8_t *in[3], uint8_t *out, uint16_t width) +{ + pixel_kernel_line_3x3(in, out, width, pixel_kernel_rgb24_3x3, pixel_convolve_3x3, + pixel_edgedetect_3x3); +} +void pixel_edgedetect_rgb24stream_3x3(struct pixel_stream *strm) +{ + pixel_kernel_stream_3x3(strm, pixel_edgedetect_rgb24line_3x3); +} + +static const int16_t pixel_gaussianblur_3x3[] = { + 1, 2, 1, + 2, 4, 2, + 1, 2, 1, 4 +}; + +__weak void pixel_gaussianblur_rgb24line_3x3(const uint8_t *in[3], uint8_t *out, uint16_t width) +{ + pixel_kernel_line_3x3(in, out, width, pixel_kernel_rgb24_3x3, pixel_convolve_3x3, + pixel_gaussianblur_3x3); +} +void pixel_gaussianblur_rgb24stream_3x3(struct pixel_stream *strm) +{ + pixel_kernel_stream_3x3(strm, pixel_gaussianblur_rgb24line_3x3); +} + +static const int16_t pixel_gaussianblur_5x5[] = { + 1, 4, 6, 4, 1, + 4, 16, 24, 16, 4, + 6, 24, 36, 24, 6, + 4, 16, 24, 16, 4, + 1, 4, 6, 4, 1, 8 +}; + +__weak void pixel_gaussianblur_rgb24line_5x5(const uint8_t *in[5], uint8_t *out, uint16_t width) +{ + pixel_kernel_line_5x5(in, out, width, pixel_kernel_rgb24_5x5, pixel_convolve_5x5, + pixel_gaussianblur_5x5); +} +void pixel_gaussianblur_rgb24stream_5x5(struct pixel_stream *strm) +{ + pixel_kernel_stream_5x5(strm, pixel_gaussianblur_rgb24line_5x5); +} + +static const int16_t pixel_sharpen_3x3[] = { + 0, -1, 0, + -1, 5, -1, + 0, -1, 0, 0 +}; + +__weak void pixel_sharpen_rgb24line_3x3(const uint8_t *in[3], uint8_t *out, uint16_t width) +{ + pixel_kernel_line_3x3(in, out, width, pixel_kernel_rgb24_3x3, pixel_convolve_3x3, + pixel_sharpen_3x3); +} +void pixel_sharpen_rgb24stream_3x3(struct pixel_stream *strm) +{ + pixel_kernel_stream_3x3(strm, pixel_sharpen_rgb24line_3x3); +} + +static const int16_t pixel_unsharp_5x5[] = { + -1, -4, -6, -4, -1, + -4, -16, -24, -16, -4, + -6, -24, 476, -24, -6, + -4, -16, -24, -16, -4, + -1, -4, -6, -4, -1, 8 +}; + +__weak void pixel_unsharp_rgb24line_5x5(const uint8_t *in[5], uint8_t *out, uint16_t width) +{ + pixel_kernel_line_5x5(in, out, width, pixel_kernel_rgb24_5x5, pixel_convolve_5x5, + pixel_unsharp_5x5); +} +void pixel_unsharp_rgb24stream_5x5(struct pixel_stream *strm) +{ + pixel_kernel_stream_5x5(strm, pixel_unsharp_rgb24line_5x5); +} + +/* + * Declaration of median kernels, with the line-processing functions declared as __weak to + * allow them to be replaced with optimized versions + */ + +__weak void pixel_median_rgb24line_5x5(const uint8_t *in[5], uint8_t *out, uint16_t width) +{ + pixel_kernel_line_5x5(in, out, width, pixel_kernel_rgb24_5x5, pixel_median_5x5, NULL); +} +void pixel_median_rgb24stream_5x5(struct pixel_stream *strm) +{ + pixel_kernel_stream_5x5(strm, pixel_median_rgb24line_5x5); +} + +__weak void pixel_median_rgb24line_3x3(const uint8_t *in[3], uint8_t *out, uint16_t width) +{ + pixel_kernel_line_3x3(in, out, width, pixel_kernel_rgb24_3x3, pixel_median_3x3, NULL); +} +void pixel_median_rgb24stream_3x3(struct pixel_stream *strm) +{ + pixel_kernel_stream_3x3(strm, pixel_median_rgb24line_3x3); +} diff --git a/lib/pixel/print.c b/lib/pixel/print.c new file mode 100644 index 000000000000..ca60135f04cb --- /dev/null +++ b/lib/pixel/print.c @@ -0,0 +1,370 @@ +/* + * 
Copyright (c) 2025 tinyVision.ai Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include + +#include +#include +#include +#include + +#ifdef CONFIG_PIXEL_PRINT_NONE +#define PIXEL_PRINT(...) +#endif + +#ifdef CONFIG_PIXEL_PRINT_PRINTF +#define PIXEL_PRINT(...) printf(__VA_ARGS__) +#endif + +#ifdef CONFIG_PIXEL_PRINT_PRINTK +#define PIXEL_PRINT(...) printk(__VA_ARGS__) +#endif + +#ifdef CONFIG_PIXEL_PRINT_SHELL +#define PIXEL_PRINT(...) shell_print(pixel_print_shell, __VA_ARGS__) +#endif + +static struct shell *pixel_print_shell; + +void pixel_print_set_shell(struct shell *sh) +{ + pixel_print_shell = sh; +} + +static inline uint8_t pixel_rgb24_to_256color(const uint8_t rgb24[3]) +{ + return 16 + rgb24[0] * 6 / 256 * 36 + rgb24[1] * 6 / 256 * 6 + rgb24[2] * 6 / 256 * 1; +} + +static inline uint8_t pixel_gray8_to_256color(uint8_t gray8) +{ + return 232 + gray8 * 24 / 256; +} + +static void pixel_print_truecolor(const uint8_t rgb24row0[3], const uint8_t rgb24row1[3]) +{ + PIXEL_PRINT("\e[48;2;%u;%u;%um\e[38;2;%u;%u;%um▄", + rgb24row0[0], rgb24row0[1], rgb24row0[2], + rgb24row1[0], rgb24row1[1], rgb24row1[2]); +} + +static void pixel_print_256color(const uint8_t rgb24row0[3], const uint8_t rgb24row1[3]) +{ + PIXEL_PRINT("\e[48;5;%um\e[38;5;%um▄", + pixel_rgb24_to_256color(rgb24row0), + pixel_rgb24_to_256color(rgb24row1)); +} + +static void pixel_print_256gray(uint8_t gray8row0, uint8_t gray8row1) +{ + PIXEL_PRINT("\e[48;5;%um\e[38;5;%um▄", + pixel_gray8_to_256color(gray8row0), + pixel_gray8_to_256color(gray8row1)); +} + +typedef void fn_print_t(const uint8_t rgb24row0[3], const uint8_t rgb24row1[3]); + +typedef void fn_conv_t(const uint8_t *src, uint8_t *dst, uint16_t w); + +static inline void pixel_print(const uint8_t *src, size_t size, uint16_t width, uint16_t height, + fn_print_t *fn_print, fn_conv_t *fn_conv, int bytespp, int npix) +{ + size_t pitch = width * bytespp; + + for (size_t i = 0, h = 0; h + 2 <= height; h += 2) { + for (size_t w = 0; w + npix <= width; w += npix, i += bytespp * npix) { + uint8_t rgb24a[3 * 2], rgb24b[3 * 2]; + + __ASSERT_NO_MSG(npix <= 2); + + fn_conv(&src[i + pitch * 0], rgb24a, npix); + fn_conv(&src[i + pitch * 1], rgb24b, npix); + + if (i + pitch > size) { + PIXEL_PRINT("\e[m *** early end of buffer at %zu bytes ***\n", + size); + return; + } + + for (int n = 0; n < npix; n++) { + fn_print(&rgb24a[n * 3], &rgb24b[n * 3]); + } + } + PIXEL_PRINT("\e[m|\n"); + + /* Skip the second h being printed at the same time */ + i += pitch; + } +} + +static void pixel_rgb24line_to_rgb24line(const uint8_t *rgb24i, uint8_t *rgb24o, uint16_t width) +{ + memcpy(rgb24o, rgb24i, width * 3); +} + +void pixel_print_rgb24frame_256color(const uint8_t *rgb24, size_t size, uint16_t width, + uint16_t height) +{ + pixel_print(rgb24, size, width, height, pixel_print_256color, + pixel_rgb24line_to_rgb24line, 3, 1); +} + +void pixel_print_rgb24frame_truecolor(const uint8_t *rgb24, size_t size, uint16_t width, + uint16_t height) +{ + pixel_print(rgb24, size, width, height, pixel_print_truecolor, + pixel_rgb24line_to_rgb24line, 3, 1); +} + +void pixel_print_rgb565leframe_256color(const uint8_t *rgb565, size_t size, uint16_t width, + uint16_t height) +{ + pixel_print(rgb565, size, width, height, pixel_print_256color, + pixel_rgb565leline_to_rgb24line, 2, 1); +} + +void pixel_print_rgb565leframe_truecolor(const uint8_t *rgb565, size_t size, uint16_t width, + uint16_t height) +{ + pixel_print(rgb565, size, width, height, pixel_print_truecolor, + 
pixel_rgb565leline_to_rgb24line, 2, 1); +} + +void pixel_print_rgb565beframe_256color(const uint8_t *rgb565, size_t size, uint16_t width, + uint16_t height) +{ + pixel_print(rgb565, size, width, height, pixel_print_256color, + pixel_rgb565beline_to_rgb24line, 2, 1); +} + +void pixel_print_rgb565beframe_truecolor(const uint8_t *rgb565, size_t size, uint16_t width, + uint16_t height) +{ + pixel_print(rgb565, size, width, height, pixel_print_truecolor, + pixel_rgb565beline_to_rgb24line, 2, 1); +} + +void pixel_print_rgb332frame_256color(const uint8_t *rgb332, size_t size, uint16_t width, + uint16_t height) +{ + pixel_print(rgb332, size, width, height, pixel_print_256color, + pixel_rgb332line_to_rgb24line, 1, 1); +} + +void pixel_print_rgb332frame_truecolor(const uint8_t *rgb332, size_t size, uint16_t width, + uint16_t height) +{ + pixel_print(rgb332, size, width, height, pixel_print_truecolor, + pixel_rgb332line_to_rgb24line, 1, 1); +} + +void pixel_print_yuyvframe_bt709_256color(const uint8_t *yuyv, size_t size, uint16_t width, + uint16_t height) +{ + pixel_print(yuyv, size, width, height, pixel_print_256color, + pixel_yuyvline_to_rgb24line_bt709, 2, 2); +} + +void pixel_print_yuyvframe_bt709_truecolor(const uint8_t *yuyv, size_t size, uint16_t width, + uint16_t height) +{ + pixel_print(yuyv, size, width, height, pixel_print_truecolor, + pixel_yuyvline_to_rgb24line_bt709, 2, 2); +} + +void pixel_print_yuv24frame_bt709_256color(const uint8_t *yuv24, size_t size, uint16_t width, + uint16_t height) +{ + pixel_print(yuv24, size, width, height, pixel_print_256color, + pixel_yuv24line_to_rgb24line_bt709, 3, 1); +} + +void pixel_print_yuv24frame_bt709_truecolor(const uint8_t *yuv24, size_t size, uint16_t width, + uint16_t height) +{ + pixel_print(yuv24, size, width, height, pixel_print_truecolor, + pixel_yuv24line_to_rgb24line_bt709, 3, 1); +} + +void pixel_print_raw8frame_hex(const uint8_t *raw8, size_t size, uint16_t width, uint16_t height) +{ + for (uint16_t h = 0; h < height; h++) { + for (uint16_t w = 0; w < width; w++) { + size_t i = h * width * 1 + w * 1; + + if (i >= size) { + PIXEL_PRINT("\e[m *** early end of buffer at %zu bytes ***\n", + size); + return; + } + + PIXEL_PRINT(" %02x", raw8[i]); + } + PIXEL_PRINT(" row%u\n", h); + } +} + +void pixel_print_rgb24frame_hex(const uint8_t *rgb24, size_t size, uint16_t width, uint16_t height) +{ + PIXEL_PRINT(" "); + for (uint16_t w = 0; w < width; w++) { + PIXEL_PRINT("col%-7u", w); + } + PIXEL_PRINT("\n"); + + for (uint16_t w = 0; w < width; w++) { + PIXEL_PRINT(" R G B "); + } + PIXEL_PRINT("\n"); + + for (uint16_t h = 0; h < height; h++) { + for (uint16_t w = 0; w < width; w++) { + size_t i = h * width * 3 + w * 3; + + if (i + 2 >= size) { + PIXEL_PRINT("\e[m *** early end of buffer at %zu bytes ***\n", + size); + return; + } + + PIXEL_PRINT(" %02x %02x %02x ", rgb24[i + 0], rgb24[i + 1], rgb24[i + 2]); + } + PIXEL_PRINT(" row%u\n", h); + } +} + +void pixel_print_rgb565frame_hex(const uint8_t *rgb565, size_t size, uint16_t width, + uint16_t height) +{ + PIXEL_PRINT(" "); + for (uint16_t w = 0; w < width; w++) { + PIXEL_PRINT("col%-4u", w); + } + PIXEL_PRINT("\n"); + + for (uint16_t w = 0; w < width; w++) { + PIXEL_PRINT(" RGB565"); + } + PIXEL_PRINT("\n"); + + for (uint16_t h = 0; h < height; h++) { + for (uint16_t w = 0; w < width; w++) { + size_t i = h * width * 2 + w * 2; + + if (i + 1 >= size) { + PIXEL_PRINT("\e[m *** early end of buffer at %zu bytes ***\n", + size); + return; + } + + PIXEL_PRINT(" %02x %02x ", rgb565[i + 0], rgb565[i + 
1]); + } + PIXEL_PRINT(" row%u\n", h); + } +} + +void pixel_print_yuyvframe_hex(const uint8_t *yuyv, size_t size, uint16_t width, uint16_t height) +{ + PIXEL_PRINT(" "); + for (uint16_t w = 0; w < width; w++) { + PIXEL_PRINT("col%-3u", w); + if ((w + 1) % 2 == 0) { + PIXEL_PRINT(" "); + } + } + PIXEL_PRINT("\n"); + + for (uint16_t w = 0; w < width; w++) { + PIXEL_PRINT(" %c%u", "YUYV"[w % 2 * 2 + 0], w % 2); + PIXEL_PRINT(" %c%u", "YUYV"[w % 2 * 2 + 1], w % 2); + if ((w + 1) % 2 == 0) { + PIXEL_PRINT(" "); + } + } + PIXEL_PRINT("\n"); + + for (uint16_t h = 0; h < height; h++) { + for (uint16_t w = 0; w < width; w++) { + size_t i = h * width * 2 + w * 2; + + if (i + 1 >= size) { + PIXEL_PRINT("\e[m *** early end of buffer at %zu bytes ***\n", + size); + return; + } + + PIXEL_PRINT(" %02x %02x", yuyv[i], yuyv[i + 1]); + if ((w + 1) % 2 == 0) { + PIXEL_PRINT(" "); + } + } + PIXEL_PRINT(" row%u\n", h); + } +} + +static void pixel_print_hist_scale(size_t size) +{ + for (uint16_t i = 0; i < size; i++) { + pixel_print_256gray(0, i * 256 / size); + } + PIXEL_PRINT("\e[m\n"); +} + +void pixel_print_rgb24hist(const uint16_t *rgb24hist, size_t size, uint16_t height) +{ + const uint16_t *r8hist = &rgb24hist[size / 3 * 0]; + const uint16_t *g8hist = &rgb24hist[size / 3 * 1]; + const uint16_t *b8hist = &rgb24hist[size / 3 * 2]; + uint32_t max = 1; + + __ASSERT(size % 3 == 0, "Each of R, G, B channel should have the same size."); + + for (size_t i = 0; i < size; i++) { + max = rgb24hist[i] > max ? rgb24hist[i] : max; + } + + for (uint16_t h = height; h > 1; h--) { + for (size_t i = 0; i < size / 3; i++) { + uint8_t rgb24row0[3]; + uint8_t rgb24row1[3]; + + rgb24row0[0] = (r8hist[i] * height / max > h - 0) ? 0xff : 0x00; + rgb24row0[1] = (g8hist[i] * height / max > h - 0) ? 0xff : 0x00; + rgb24row0[2] = (b8hist[i] * height / max > h - 0) ? 0xff : 0x00; + rgb24row1[0] = (r8hist[i] * height / max > h - 1) ? 0xff : 0x00; + rgb24row1[1] = (g8hist[i] * height / max > h - 1) ? 0xff : 0x00; + rgb24row1[2] = (b8hist[i] * height / max > h - 1) ? 0xff : 0x00; + + pixel_print_256color(rgb24row0, rgb24row1); + } + PIXEL_PRINT("\e[m| - %u\n", h * max / height); + } + + pixel_print_hist_scale(size / 3); +} + +void pixel_print_y8hist(const uint16_t *y8hist, size_t size, uint16_t height) +{ + uint32_t max = 1; + + for (size_t i = 0; i < size; i++) { + max = y8hist[i] > max ? y8hist[i] : max; + } + + for (uint16_t h = height; h > 1; h--) { + for (size_t i = 0; i < size; i++) { + uint8_t gray8row0 = (y8hist[i] * height / max > h - 0) ? 0xff : 0x00; + uint8_t gray8row1 = (y8hist[i] * height / max > h - 1) ? 0xff : 0x00; + + pixel_print_256gray(gray8row0, gray8row1); + } + PIXEL_PRINT("\e[m| - %u\n", h * max / height); + } + + pixel_print_hist_scale(size); +} diff --git a/lib/pixel/resize.c b/lib/pixel/resize.c new file mode 100644 index 000000000000..956c2dabf35b --- /dev/null +++ b/lib/pixel/resize.c @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2025 tinyVision.ai Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include + +#include +#include +#include + +LOG_MODULE_REGISTER(pixel_resize, CONFIG_PIXEL_LOG_LEVEL); + +static inline void pixel_subsample_line(const uint8_t *src_buf, size_t src_width, + uint8_t *dst_buf, size_t dst_width, + uint8_t bits_per_pixel) +{ + for (size_t dst_w = 0; dst_w < dst_width; dst_w++) { + size_t src_w = dst_w * src_width / dst_width; + size_t src_i = src_w * bits_per_pixel / BITS_PER_BYTE; + size_t dst_i = dst_w * bits_per_pixel / BITS_PER_BYTE; + + memmove(&dst_buf[dst_i], &src_buf[src_i], bits_per_pixel / BITS_PER_BYTE); + } +} + +static inline void pixel_subsample_frame(const uint8_t *src_buf, size_t src_width, + size_t src_height, uint8_t *dst_buf, size_t dst_width, + size_t dst_height, uint8_t bits_per_pixel) +{ + for (size_t dst_h = 0; dst_h < dst_height; dst_h++) { + size_t src_h = dst_h * src_height / dst_height; + size_t src_i = src_h * src_width * bits_per_pixel / BITS_PER_BYTE; + size_t dst_i = dst_h * dst_width * bits_per_pixel / BITS_PER_BYTE; + + pixel_subsample_line(&src_buf[src_i], src_width, &dst_buf[dst_i], dst_width, + bits_per_pixel); + } +} + +__weak void pixel_subsample_rgb24frame(const uint8_t *src_buf, size_t src_width, size_t src_height, + uint8_t *dst_buf, size_t dst_width, size_t dst_height) +{ + pixel_subsample_frame(src_buf, src_width, src_height, dst_buf, dst_width, dst_height, 24); +} + +__weak void pixel_subsample_rgb565frame(const uint8_t *src_buf, size_t src_width, size_t src_height, + uint8_t *dst_buf, size_t dst_width, size_t dst_height) +{ + pixel_subsample_frame(src_buf, src_width, src_height, dst_buf, dst_width, dst_height, 16); +} + +static inline void pixel_subsample_stream(struct pixel_stream *strm, uint8_t bits_per_pixel) +{ + uint16_t prev_offset = (strm->line_offset + 1) * strm->next->height / strm->height; + const uint8_t *line_in = pixel_stream_get_input_line(strm); + uint16_t next_offset = (strm->line_offset + 1) * strm->next->height / strm->height; + + for (uint16_t i = 0; prev_offset + i < next_offset; i++) { + pixel_subsample_line(line_in, strm->width, pixel_stream_get_output_line(strm), + strm->next->width, bits_per_pixel); + pixel_stream_done(strm); + } +} + +__weak void pixel_subsample_rgb24stream(struct pixel_stream *strm) +{ + pixel_subsample_stream(strm, 24); +} + +__weak void pixel_subsample_rgb565stream(struct pixel_stream *strm) +{ + pixel_subsample_stream(strm, 16); +} diff --git a/lib/pixel/stats.c b/lib/pixel/stats.c new file mode 100644 index 000000000000..3acbcd432239 --- /dev/null +++ b/lib/pixel/stats.c @@ -0,0 +1,274 @@ +/* + * Copyright (c) 2025 tinyVision.ai Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include + +#include +#include + +#define PIXEL_IDX_R 0 +#define PIXEL_IDX_G 1 +#define PIXEL_IDX_B 2 + +static const uint8_t pixel_idx_rggb8[4] = {PIXEL_IDX_R, PIXEL_IDX_G, PIXEL_IDX_G, PIXEL_IDX_B}; +static const uint8_t pixel_idx_bggr8[4] = {PIXEL_IDX_B, PIXEL_IDX_G, PIXEL_IDX_G, PIXEL_IDX_R}; +static const uint8_t pixel_idx_gbrg8[4] = {PIXEL_IDX_G, PIXEL_IDX_B, PIXEL_IDX_R, PIXEL_IDX_G}; +static const uint8_t pixel_idx_grbg8[4] = {PIXEL_IDX_G, PIXEL_IDX_R, PIXEL_IDX_B, PIXEL_IDX_G}; + +/* Extract random samples from the buffer */ + +static inline uint32_t pixel_rand(void) +{ + static uint32_t lcg_state; + + /* Linear Congruential Generators (LCG) are low-quality but very fast, considered enough + * here, as even a fixed offset would have been enough. The modulo phase is skipped as + * there is already a "% size" applied downstream in the code. + * + * The constants are from https://en.wikipedia.org/wiki/Linear_congruential_generator + */ + lcg_state = lcg_state * 1103515245 + 12345; + return lcg_state; +} + +static inline void pixel_sample_rgb24(const uint8_t *buf, size_t size, uint8_t rgb24[3]) +{ + uint32_t pos = pixel_rand() % size; + + /* Align on 24-bit pixel boundary */ + pos -= pos % 3; + + rgb24[0] = buf[pos + 0]; + rgb24[1] = buf[pos + 1]; + rgb24[2] = buf[pos + 2]; +} + +static inline void pixel_sample_bayer(const uint8_t *buf, size_t size, uint16_t width, + uint8_t rgb24[3], const uint8_t *idx) +{ + uint32_t pos = pixel_rand() % size; + + /* Make sure to be on even row and column position */ + pos -= pos % 2; + pos -= pos / width % 2 * width; + + rgb24[idx[0]] = buf[pos + 0]; + rgb24[idx[1]] = buf[pos + 1]; + rgb24[idx[2]] = buf[pos + width + 0]; + rgb24[idx[3]] = buf[pos + width + 1]; +} + +static inline void pixel_sums_to_rgb24avg(uint32_t sums[3], uint8_t rgb24avg[3], uint16_t nval) +{ + rgb24avg[0] = sums[0] / nval; + rgb24avg[1] = sums[1] / nval; + rgb24avg[2] = sums[2] / nval; +} + +static inline void pixel_sums_add_rgb24(uint32_t sums[3], uint8_t rgb24[3]) +{ + sums[0] += rgb24[0]; + sums[1] += rgb24[1]; + sums[2] += rgb24[2]; +} + +/* Channel average statistics */ + +void pixel_rgb24frame_to_rgb24avg(const uint8_t *buf, size_t size, uint8_t rgb24avg[3], + uint16_t nval) +{ + uint32_t sums[3] = {0, 0, 0}; + uint8_t rgb24[3]; + + for (uint16_t n = 0; n < nval; n++) { + pixel_sample_rgb24(buf, size, rgb24); + pixel_sums_add_rgb24(sums, rgb24); + } + pixel_sums_to_rgb24avg(sums, rgb24avg, nval); +} + +static inline void pixel_bayerframe_to_rgb24avg(const uint8_t *buf, size_t size, uint16_t width, + uint8_t rgb24avg[3], uint16_t nval, + const uint8_t *idx) +{ + uint32_t sums[3] = {0, 0, 0}; + uint8_t rgb24[3]; + + for (uint16_t n = 0; n < nval; n++) { + pixel_sample_bayer(buf, size, width, rgb24, idx); + pixel_sums_add_rgb24(sums, rgb24); + } + pixel_sums_to_rgb24avg(sums, rgb24avg, nval); +} + +void pixel_rggb8frame_to_rgb24avg(const uint8_t *buf, size_t size, uint16_t width, + uint8_t rgb24avg[3], uint16_t nval) +{ + pixel_bayerframe_to_rgb24avg(buf, size, width, rgb24avg, nval, pixel_idx_rggb8); +} + +void pixel_bggr8frame_to_rgb24avg(const uint8_t *buf, size_t size, uint16_t width, + uint8_t rgb24avg[3], uint16_t nval) +{ + pixel_bayerframe_to_rgb24avg(buf, size, width, rgb24avg, nval, pixel_idx_bggr8); +} + +void pixel_gbrg8frame_to_rgb24avg(const uint8_t *buf, size_t size, uint16_t width, + uint8_t rgb24avg[3], uint16_t nval) +{ + pixel_bayerframe_to_rgb24avg(buf, size, width, rgb24avg, nval,
pixel_idx_gbrg8); +} + +void pixel_grbg8frame_to_rgb24avg(const uint8_t *buf, size_t size, uint16_t width, + uint8_t rgb24avg[3], uint16_t nval) +{ + pixel_bayerframe_to_rgb24avg(buf, size, width, rgb24avg, nval, pixel_idx_grbg8); +} + +/* RGB24 histogram statistics */ + +static inline void pixel_rgb24hist_add_rgb24(uint16_t *rgb24hist, uint8_t rgb24[3], + uint8_t bit_depth) +{ + uint16_t *r8hist = &rgb24hist[0 * (1 << bit_depth)], r8 = rgb24[0]; + uint16_t *g8hist = &rgb24hist[1 * (1 << bit_depth)], g8 = rgb24[1]; + uint16_t *b8hist = &rgb24hist[2 * (1 << bit_depth)], b8 = rgb24[2]; + + r8hist[r8 >> (BITS_PER_BYTE - bit_depth)]++; + g8hist[g8 >> (BITS_PER_BYTE - bit_depth)]++; + b8hist[b8 >> (BITS_PER_BYTE - bit_depth)]++; +} + +void pixel_rgb24frame_to_rgb24hist(const uint8_t *buf, size_t buf_size, uint16_t *rgb24hist, + size_t hist_size, uint16_t nval) +{ + uint8_t bit_depth = LOG2(hist_size / 3); + uint8_t rgb24[3]; + + __ASSERT(hist_size % 3 == 0, "Each of R, G, B channel should have the same size."); + __ASSERT(1 << bit_depth == hist_size / 3, "Each channel size should be a power of two."); + + memset(rgb24hist, 0x00, hist_size * sizeof(*rgb24hist)); + + for (uint16_t n = 0; n < nval; n++) { + pixel_sample_rgb24(buf, buf_size, rgb24); + pixel_rgb24hist_add_rgb24(rgb24hist, rgb24, bit_depth); + } +} + +static inline void pixel_bayerframe_to_rgb24hist(const uint8_t *buf, size_t buf_size, + uint16_t width, uint16_t *rgb24hist, + size_t hist_size, uint16_t nval, + const uint8_t *idx) +{ + uint8_t bit_depth = LOG2(hist_size / 3); + uint8_t rgb24[3]; + + __ASSERT(hist_size % 3 == 0, "Each of R, G, B channel should have the same size."); + __ASSERT(1 << bit_depth == hist_size / 3, "Each channel size should be a power of two."); + + memset(rgb24hist, 0x00, hist_size * sizeof(*rgb24hist)); + + for (uint16_t n = 0; n < nval; n++) { + pixel_sample_bayer(buf, buf_size, width, rgb24, idx); + pixel_rgb24hist_add_rgb24(rgb24hist, rgb24, bit_depth); + } +} + +void pixel_rggb8frame_to_rgb24hist(const uint8_t *buf, size_t buf_size, uint16_t width, + uint16_t *rgb24hist, size_t hist_size, uint16_t nval) +{ + pixel_bayerframe_to_rgb24hist(buf, buf_size, width, rgb24hist, hist_size, nval, + pixel_idx_rggb8); +} + +void pixel_gbrg8frame_to_rgb24hist(const uint8_t *buf, size_t buf_size, uint16_t width, + uint16_t *rgb24hist, size_t hist_size, uint16_t nval) +{ + pixel_bayerframe_to_rgb24hist(buf, buf_size, width, rgb24hist, hist_size, nval, + pixel_idx_gbrg8); +} + +void pixel_bggr8frame_to_rgb24hist(const uint8_t *buf, size_t buf_size, uint16_t width, + uint16_t *rgb24hist, size_t hist_size, uint16_t nval) +{ + pixel_bayerframe_to_rgb24hist(buf, buf_size, width, rgb24hist, hist_size, nval, + pixel_idx_bggr8); +} + +void pixel_grbg8frame_to_rgb24hist(const uint8_t *buf, size_t buf_size, uint16_t width, + uint16_t *rgb24hist, size_t hist_size, uint16_t nval) +{ + pixel_bayerframe_to_rgb24hist(buf, buf_size, width, rgb24hist, hist_size, nval, + pixel_idx_grbg8); +} + +/* Y8 histogram statistics + * Use BT.709 (sRGB) as an arbitrary choice, instead of BT.601 like libcamera + */ + +static inline void pixel_y8hist_add_y8(uint16_t *y8hist, uint8_t y8, uint8_t bit_depth) +{ + y8hist[y8 >> (BITS_PER_BYTE - bit_depth)]++; +} + +void pixel_rgb24frame_to_y8hist(const uint8_t *buf, size_t buf_size, uint16_t *y8hist, + size_t hist_size, uint16_t nval) +{ + uint8_t bit_depth = LOG2(hist_size); + uint8_t rgb24[3]; + + __ASSERT(1 << bit_depth == hist_size, "Histogram channel size should be a power of two."); + + 
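+	/*
+	 * For illustration: with a hist_size of 64 buckets, bit_depth is LOG2(64) = 6 and each
+	 * 8-bit luma sample is shifted right by 8 - 6 = 2 bits, so a luma of 200 lands in bucket
+	 * 200 >> 2 = 50. The RGB histograms above use the same bucketing per channel.
+	 */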
memset(y8hist, 0x00, hist_size * sizeof(*y8hist)); + + for (uint16_t n = 0; n < nval; n++) { + pixel_sample_rgb24(buf, buf_size, rgb24); + pixel_y8hist_add_y8(y8hist, pixel_rgb24_get_luma_bt709(rgb24), bit_depth); + } +} + +static inline void pixel_bayerframe_to_y8hist(const uint8_t *buf, size_t buf_size, uint16_t width, + uint16_t *y8hist, size_t hist_size, uint16_t nval, + const uint8_t *idx) +{ + uint8_t bit_depth = LOG2(hist_size); + uint8_t rgb24[3]; + + __ASSERT(1 << bit_depth == hist_size, "Histogram channel size should be a power of two."); + + memset(y8hist, 0x00, hist_size * sizeof(*y8hist)); + + for (uint16_t n = 0; n < nval; n++) { + pixel_sample_bayer(buf, buf_size, width, rgb24, idx); + pixel_y8hist_add_y8(y8hist, pixel_rgb24_get_luma_bt709(rgb24), bit_depth); + } +} + +void pixel_rggb8frame_to_y8hist(const uint8_t *buf, size_t buf_size, uint16_t width, + uint16_t *y8hist, size_t hist_size, uint16_t nval) +{ + pixel_bayerframe_to_y8hist(buf, buf_size, width, y8hist, hist_size, nval, pixel_idx_rggb8); +} + +void pixel_gbrg8frame_to_y8hist(const uint8_t *buf, size_t buf_size, uint16_t width, + uint16_t *y8hist, size_t hist_size, uint16_t nval) +{ + pixel_bayerframe_to_y8hist(buf, buf_size, width, y8hist, hist_size, nval, pixel_idx_gbrg8); +} + +void pixel_bggr8frame_to_y8hist(const uint8_t *buf, size_t buf_size, uint16_t width, + uint16_t *y8hist, size_t hist_size, uint16_t nval) +{ + pixel_bayerframe_to_y8hist(buf, buf_size, width, y8hist, hist_size, nval, pixel_idx_bggr8); +} + +void pixel_grbg8frame_to_y8hist(const uint8_t *buf, size_t buf_size, uint16_t width, + uint16_t *y8hist, size_t hist_size, uint16_t nval) +{ + pixel_bayerframe_to_y8hist(buf, buf_size, width, y8hist, hist_size, nval, pixel_idx_grbg8); +} diff --git a/lib/pixel/stream.c b/lib/pixel/stream.c new file mode 100644 index 000000000000..ca734037954f --- /dev/null +++ b/lib/pixel/stream.c @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2025 tinyVision.ai Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +#include +#include +#include + +LOG_MODULE_REGISTER(pixel_stream, CONFIG_PIXEL_LOG_LEVEL); + +struct pixel_stream *pixel_stream(struct pixel_stream *strm, ...) 
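+/*
+ * Chains the given steps into a linked list; the argument list must be NULL-terminated, since
+ * the loop below stops at the first NULL. A hypothetical call, with step names invented for the
+ * example:
+ *
+ *	pixel_stream(&step_debayer, &step_rgb24_to_yuyv, NULL);
+ */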
+{ + va_list ap; + + va_start(ap, strm); + for (struct pixel_stream *step = strm; step != NULL;) { + step = step->next = va_arg(ap, void *); + } + va_end(ap); + + return strm; +} + +static void assert_complete(struct pixel_stream *strm, bool begin) +{ + for (; strm != NULL; strm = strm->next) { + if (strm->run == NULL) { + continue; + } + + __ASSERT(ring_buf_size_get(&strm->ring) == 0, + "Core %s did not empty its input buffer, %u bytes out of %u left", + strm->name, ring_buf_size_get(&strm->ring), strm->ring.size); + + if (begin && strm->line_offset == 0) { + continue; + } + + __ASSERT(strm->line_offset == strm->height, + "Core %s only processed %u lines out of %u", + strm->name, strm->line_offset, strm->height); + } +} + +void pixel_stream_load(struct pixel_stream *strm, const uint8_t *buf, size_t size) +{ + struct pixel_stream root = {.next = strm}; + struct pixel_stream *step; + + if (strm == NULL) { + LOG_INF("Pipeline empty, skipping execution"); + return; + } + + __ASSERT_NO_MSG(buf != NULL); + assert_complete(strm, true); + + LOG_DBG("Loading %zu bytes into this pipeline:", size); + + for (step = strm; step != NULL; step = step->next) { + LOG_DBG("- %s", step->name); + step->line_offset = 0; + step->total_time = 0; + } + + for (size_t i = 0; i + strm->pitch <= size; i += strm->pitch) { + LOG_DBG("bytes %zu-%zu/%zu into %s", i, i + strm->pitch, size, strm->name); + ring_buf_put(&strm->ring, &buf[i], strm->pitch); + pixel_stream_done(&root); + } + + LOG_DBG("Processed a full buffer of %zu bytes", size); + + for (step = root.next; step != NULL; step = step->next) { + LOG_INF(" %4u us on %s", k_cyc_to_us_ceil32(step->total_time), step->name); + } + + assert_complete(strm, false); +} + +static inline void pixel_stream_to_frame(const uint8_t *src_buf, size_t src_size, + uint16_t src_width, uint8_t *dst_buf, size_t dst_size, + uint16_t dst_width, int bytes_per_pixel, + struct pixel_stream *strm) +{ + struct pixel_stream step_export = { + .ring = RING_BUF_INIT(dst_buf, dst_size), + .pitch = dst_width * bytes_per_pixel, + .width = dst_width, + .height = dst_size / bytes_per_pixel / dst_width, + .name = "[export]", + }; + struct pixel_stream root = { + .width = src_width, + .next = strm, + }; + struct pixel_stream *step = &root; + + __ASSERT(dst_size % step_export.pitch == 0, + "The output buffer has %zu bytes but lines are %zu bytes each, %zu bytes left over", + dst_size, step_export.pitch, dst_size % step_export.pitch); + + __ASSERT(src_width == root.next->width, + "The width does not match between the arguments (%u) and the first step %s (%u)", + src_width, root.next->name, root.next->width); + + /* Add an export step at the end */ + while (step->next != NULL) { + step = step->next; + } + step->next = &step_export; + + /* Load the input buffer into the pipeline and flush the output */ + pixel_stream_load(root.next, src_buf, src_size); + pixel_stream_get_all_input(&step_export); +} + +void pixel_stream_to_raw8frame(const uint8_t *src_buf, size_t src_size, uint16_t src_width, + uint8_t *dst_buf, size_t dst_size, uint16_t dst_width, + struct pixel_stream *strm) +{ + pixel_stream_to_frame(src_buf, src_size, src_width, dst_buf, dst_size, dst_width, 1, strm); +} + +void pixel_stream_to_raw16frame(const uint8_t *src_buf, size_t src_size, uint16_t src_width, + uint8_t *dst_buf, size_t dst_size, uint16_t dst_width, + struct pixel_stream *strm) +{ + pixel_stream_to_frame(src_buf, src_size, src_width, dst_buf, dst_size, dst_width, 2, strm); +} + +void pixel_stream_to_raw24frame(const uint8_t *src_buf,
size_t src_size, uint16_t src_width, + uint8_t *dst_buf, size_t dst_size, uint16_t dst_width, + struct pixel_stream *strm) +{ + pixel_stream_to_frame(src_buf, src_size, src_width, dst_buf, dst_size, dst_width, 3, strm); +} diff --git a/samples/drivers/video/capture/README.rst b/samples/drivers/video/capture/README.rst index 2a8fefc0ff0e..15bd0ac6de73 100644 --- a/samples/drivers/video/capture/README.rst +++ b/samples/drivers/video/capture/README.rst @@ -72,7 +72,14 @@ commands: For testing purpose without the need of any real video capture and/or display hardwares, a video software pattern generator is supported by the above build commands without -specifying the shields. +specifying the shields, and using :ref:`snippet-video-sw-generator`: + +.. zephyr-app-commands:: + :zephyr-app: samples/drivers/video/capture + :board: native_sim + :snippets: video-sw-generator + :goals: build + :compact: Sample Output ============= diff --git a/samples/drivers/video/capture/prj.conf b/samples/drivers/video/capture/prj.conf index b96f5f78ed2b..f8b221d5a776 100644 --- a/samples/drivers/video/capture/prj.conf +++ b/samples/drivers/video/capture/prj.conf @@ -1,5 +1,4 @@ CONFIG_VIDEO=y -CONFIG_VIDEO_SW_GENERATOR=y CONFIG_SHELL=y CONFIG_DEVICE_SHELL=y CONFIG_PRINTK=y diff --git a/samples/drivers/video/capture/sample.yaml b/samples/drivers/video/capture/sample.yaml index 4cee752b3f38..fa8609f69ade 100644 --- a/samples/drivers/video/capture/sample.yaml +++ b/samples/drivers/video/capture/sample.yaml @@ -7,7 +7,7 @@ tests: - shield - samples extra_args: - - platform:mimxrt1064_evk:SHIELD="dvp_fpc24_mt9m114;rk043fn66hs_ctg" + - platform:mimxrt1064_evk/mimxrt1064:SHIELD="dvp_fpc24_mt9m114;rk043fn66hs_ctg" - platform:mimxrt1170_evk/mimxrt1176/cm7:SHIELD="nxp_btb44_ov5640;rk055hdmipi4ma0" - platform:mimxrt1170_evk@B/mimxrt1176/cm7:SHIELD="nxp_btb44_ov5640;rk055hdmipi4ma0" extra_configs: @@ -25,12 +25,12 @@ tests: - "Pattern OK" platform_allow: - arduino_nicla_vision/stm32h747xx/m7 - - mimxrt1064_evk + - mimxrt1064_evk/mimxrt1064 - mimxrt1170_evk/mimxrt1176/cm7 - mimxrt1170_evk@B/mimxrt1176/cm7 - mm_swiftio - esp32s3_eye/esp32s3/procpu depends_on: video integration_platforms: - - mimxrt1064_evk + - mimxrt1064_evk/mimxrt1064 - mimxrt1170_evk/mimxrt1176/cm7 diff --git a/samples/drivers/video/capture/src/main.c b/samples/drivers/video/capture/src/main.c index 501ebd8794ab..3a79f107104a 100644 --- a/samples/drivers/video/capture/src/main.c +++ b/samples/drivers/video/capture/src/main.c @@ -13,18 +13,15 @@ #include #include -LOG_MODULE_REGISTER(main); #ifdef CONFIG_TEST #include "check_test_pattern.h" -#define LOG_LEVEL LOG_LEVEL_DBG +LOG_MODULE_REGISTER(main, LOG_LEVEL_DBG); #else -#define LOG_LEVEL CONFIG_LOG_DEFAULT_LEVEL +LOG_MODULE_REGISTER(main, CONFIG_LOG_DEFAULT_LEVEL); #endif -#define VIDEO_DEV_SW "VIDEO_SW_GENERATOR" - #if DT_HAS_CHOSEN(zephyr_display) static inline int display_setup(const struct device *const display_dev, const uint32_t pixfmt) { @@ -83,6 +80,7 @@ static inline void video_display_frame(const struct device *const display_dev, int main(void) { + const struct device *video_dev; struct video_buffer *buffers[CONFIG_VIDEO_BUFFER_POOL_NUM_MAX], *vbuf; struct video_format fmt; struct video_caps caps; @@ -93,21 +91,11 @@ int main(void) int i = 0; int err; -#if DT_HAS_CHOSEN(zephyr_camera) - const struct device *const video_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_camera)); - + video_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_camera)); if (!device_is_ready(video_dev)) { LOG_ERR("%s: video device is not ready", 
video_dev->name); return 0; } -#else - const struct device *const video_dev = device_get_binding(VIDEO_DEV_SW); - + if (video_dev == NULL) { - LOG_ERR("%s: video device not found or failed to initialized", VIDEO_DEV_SW); - return 0; - } -#endif LOG_INF("Video device: %s", video_dev->name); diff --git a/samples/drivers/video/capture_to_lvgl/README.rst b/samples/drivers/video/capture_to_lvgl/README.rst index f3fa5e992ad3..d6bd5b36e78f 100644 --- a/samples/drivers/video/capture_to_lvgl/README.rst +++ b/samples/drivers/video/capture_to_lvgl/README.rst @@ -37,6 +37,16 @@ For :zephyr:board:`mini_stm32h743`, build this sample application with the follo :gen-args: -DCONFIG_BOOT_DELAY=2000 :compact: +For testing purposes, without the need for any real video capture hardware, +a video software pattern generator can be used via :ref:`snippet-video-sw-generator`: + +.. zephyr-app-commands:: + :zephyr-app: samples/drivers/video/capture_to_lvgl + :board: native_sim + :snippets: video-sw-generator + :goals: build + :compact: + Sample Output ============= diff --git a/samples/drivers/video/capture_to_lvgl/prj.conf b/samples/drivers/video/capture_to_lvgl/prj.conf index 4a64bbaad53d..340f0b5a75af 100644 --- a/samples/drivers/video/capture_to_lvgl/prj.conf +++ b/samples/drivers/video/capture_to_lvgl/prj.conf @@ -1,5 +1,4 @@ CONFIG_VIDEO=y -CONFIG_VIDEO_SW_GENERATOR=y CONFIG_PRINTK=y CONFIG_LOG=y diff --git a/samples/drivers/video/capture_to_lvgl/src/main.c b/samples/drivers/video/capture_to_lvgl/src/main.c index 201e0a8bed90..b27295f0a6f0 100644 --- a/samples/drivers/video/capture_to_lvgl/src/main.c +++ b/samples/drivers/video/capture_to_lvgl/src/main.c @@ -9,21 +9,18 @@ #include #include #include -#include - -#define LOG_LEVEL CONFIG_LOG_DEFAULT_LEVEL #include -LOG_MODULE_REGISTER(main); +#include -#define VIDEO_DEV_SW "VIDEO_SW_GENERATOR" +LOG_MODULE_REGISTER(main, CONFIG_LOG_DEFAULT_LEVEL); int main(void) { struct video_buffer *buffers[2], *vbuf; const struct device *display_dev; + const struct device *video_dev; struct video_format fmt; struct video_caps caps; - const struct device *video_dev; size_t bsize; int i = 0; @@ -33,19 +30,11 @@ int main(void) return 0; } -#if DT_HAS_CHOSEN(zephyr_camera) video_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_camera)); if (!device_is_ready(video_dev)) { LOG_ERR("%s device is not ready", video_dev->name); return 0; } -#else - video_dev = device_get_binding(VIDEO_DEV_SW); - if (video_dev == NULL) { - LOG_ERR("%s device not found", VIDEO_DEV_SW); - return 0; - } -#endif LOG_INF("- Device name: %s", video_dev->name); diff --git a/samples/drivers/video/sw_pipeline/CMakeLists.txt b/samples/drivers/video/sw_pipeline/CMakeLists.txt new file mode 100644 index 000000000000..c21878f07dc3 --- /dev/null +++ b/samples/drivers/video/sw_pipeline/CMakeLists.txt @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: Apache-2.0 + +cmake_minimum_required(VERSION 3.20.0) +find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE}) +project(video_sw_pipeline) + +include(${ZEPHYR_BASE}/samples/subsys/usb/common/common.cmake) +target_sources(app PRIVATE src/main.c src/pipeline.c) diff --git a/samples/drivers/video/sw_pipeline/Kconfig b/samples/drivers/video/sw_pipeline/Kconfig new file mode 100644 index 000000000000..d1b0c2bec39c --- /dev/null +++ b/samples/drivers/video/sw_pipeline/Kconfig @@ -0,0 +1,9 @@ +# Copyright The Zephyr Project Contributors +# SPDX-License-Identifier: Apache-2.0 + +# Source common USB sample options used to initialize new experimental USB +# device stack.
The scope of these options is limited to USB samples in project +# tree, you cannot use them in your own application. +source "samples/subsys/usb/common/Kconfig.sample_usbd" + +source "Kconfig.zephyr" diff --git a/samples/drivers/video/sw_pipeline/README.rst b/samples/drivers/video/sw_pipeline/README.rst new file mode 100644 index 000000000000..4df4066836b1 --- /dev/null +++ b/samples/drivers/video/sw_pipeline/README.rst @@ -0,0 +1,38 @@ +.. zephyr:code-sample:: video_sw_pipeline + :name: Video software pipeline sample + :relevant-api: video_interface + + Capture video frames from the default camera stream, then send them over USB. + +Overview +******** + +This sample demonstrates how to use the ``zephyr,video-sw-pipeline`` device to process +the video stream from a camera and forward the result elsewhere, in this case over USB. + +The devkit acts as a webcam, from which the transformed video stream can be inspected. + +Requirements +************ + +This sample uses the new USB device stack and requires a USB device +controller ported to the :ref:`udc_api`. + +If a camera is not present in the system, :ref:`snippet-video-sw-generator` +can be used to see the result of the conversion on a test pattern. + +Building and Running +******************** + +If a board is equipped with a supported video sensor and the ``zephyr,camera`` +node is chosen for the board, it is used as the video source. +The sample can be built as follows: + +.. zephyr-app-commands:: + :zephyr-app: samples/drivers/video/sw_pipeline/ + :board: frdm_mcxn947/mcxn947/cpu0 + :snippets: video-sw-generator + :goals: build flash + :compact: + +See the :zephyr:code-sample:`uvc` sample for examples of how to access the webcam stream. diff --git a/samples/drivers/video/sw_pipeline/app.overlay b/samples/drivers/video/sw_pipeline/app.overlay new file mode 100644 index 000000000000..6605b4bb1996 --- /dev/null +++ b/samples/drivers/video/sw_pipeline/app.overlay @@ -0,0 +1,15 @@ +/* + * Copyright The Zephyr Project Contributors + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/ { + uvc: uvc { + compatible = "zephyr,uvc-device"; + }; + + video_sw_pipeline: video_sw_pipeline { + compatible = "zephyr,video-sw-pipeline"; + }; +}; diff --git a/samples/drivers/video/sw_pipeline/boards/arduino_nicla_vision_stm32h747xx_m7.conf b/samples/drivers/video/sw_pipeline/boards/arduino_nicla_vision_stm32h747xx_m7.conf new file mode 100644 index 000000000000..7fcfeea35e66 --- /dev/null +++ b/samples/drivers/video/sw_pipeline/boards/arduino_nicla_vision_stm32h747xx_m7.conf @@ -0,0 +1,2 @@ +# Enough for two 320x240 YUYV frames +CONFIG_VIDEO_BUFFER_POOL_SZ_MAX=163840 diff --git a/samples/drivers/video/sw_pipeline/boards/frdm_mcxn947_mcxn947_cpu0.overlay b/samples/drivers/video/sw_pipeline/boards/frdm_mcxn947_mcxn947_cpu0.overlay new file mode 100644 index 000000000000..cfca9ad99bd1 --- /dev/null +++ b/samples/drivers/video/sw_pipeline/boards/frdm_mcxn947_mcxn947_cpu0.overlay @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2025 tinyVision.ai Inc.
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../app.overlay" + +&ov7670 { + status = "disabled"; +}; + +&smartdma { + status = "disabled"; +}; + +&video_sdma { + status = "disabled"; +}; diff --git a/samples/drivers/video/sw_pipeline/prj.conf b/samples/drivers/video/sw_pipeline/prj.conf new file mode 100644 index 000000000000..46e4011fead9 --- /dev/null +++ b/samples/drivers/video/sw_pipeline/prj.conf @@ -0,0 +1,16 @@ +CONFIG_LOG=y +CONFIG_PIXEL=y +CONFIG_POLL=y +CONFIG_SAMPLE_USBD_PID=0x0011 +CONFIG_SAMPLE_USBD_PRODUCT="Video Software Pipeline Example" +CONFIG_UDC_DRIVER_LOG_LEVEL_WRN=y +CONFIG_USBD_LOG_LEVEL_WRN=y +CONFIG_USBD_VIDEO_CLASS=y +CONFIG_USBD_VIDEO_LOG_LEVEL_WRN=y +CONFIG_USB_DEVICE_STACK_NEXT=y +CONFIG_VIDEO=y +CONFIG_VIDEO_BUFFER_POOL_NUM_MAX=4 +CONFIG_VIDEO_BUFFER_POOL_SZ_MAX=24576 +CONFIG_VIDEO_LOG_LEVEL_WRN=y +CONFIG_VIDEO_SW_GENERATOR=y +CONFIG_VIDEO_SW_PIPELINE=y diff --git a/samples/drivers/video/sw_pipeline/sample.yaml b/samples/drivers/video/sw_pipeline/sample.yaml new file mode 100644 index 000000000000..4078936b3966 --- /dev/null +++ b/samples/drivers/video/sw_pipeline/sample.yaml @@ -0,0 +1,14 @@ +sample: + name: Video software pipeline sample +tests: + sample.drivers.video.sw_pipeline: + depends_on: usbd + tags: usb video pixel + integration_platforms: + - rpi_pico + extra_args: + - SNIPPET=video-sw-generator + sample.drivers.video.sw_pipeline.camera: + depends_on: usbd + tags: usb video pixel + filter: dt_chosen_enabled("zephyr,camera") diff --git a/samples/drivers/video/sw_pipeline/src/main.c b/samples/drivers/video/sw_pipeline/src/main.c new file mode 100644 index 000000000000..d4a2de2c05fc --- /dev/null +++ b/samples/drivers/video/sw_pipeline/src/main.c @@ -0,0 +1,210 @@ +/* + * Copyright (c) 2025 tinyVision.ai Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +LOG_MODULE_REGISTER(sw_pipeline_example, LOG_LEVEL_INF); + +int main(void) +{ + const struct device *camera_dev; + const struct device *pipe_dev; + const struct device *uvc_dev; + struct usbd_context *sample_usbd; + struct video_buffer *vbuf; + struct video_format fmt = {0}; + struct k_poll_signal sig = {0}; + struct k_poll_event evt[1] = {0}; + k_timeout_t timeout = K_FOREVER; + int ret; + + camera_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_camera)); + if (camera_dev == NULL || !device_is_ready(camera_dev)) { + LOG_ERR("%s is not ready or failed to initialize", camera_dev->name); + return -ENODEV; + } + + pipe_dev = DEVICE_DT_GET(DT_NODELABEL(video_sw_pipeline)); + if (pipe_dev == NULL || !device_is_ready(pipe_dev)) { + LOG_ERR("%s is not ready or failed to initialize", pipe_dev->name); + return -ENODEV; + } + + uvc_dev = DEVICE_DT_GET(DT_NODELABEL(uvc)); + if (uvc_dev == NULL || !device_is_ready(uvc_dev)) { + LOG_ERR("%s is not ready or failed to initialize", uvc_dev->name); + return -ENODEV; + } + + video_sw_pipeline_set_source(pipe_dev, camera_dev); + uvc_set_video_dev(uvc_dev, pipe_dev); + + sample_usbd = sample_usbd_init_device(NULL); + if (sample_usbd == NULL) { + return -ENODEV; + } + + ret = usbd_enable(sample_usbd); + if (ret != 0) { + return ret; + } + + LOG_INF("Waiting for the host to select the video format"); + + /* Get the video format once it is selected by the host */ + while (true) { + ret = video_get_format(uvc_dev, VIDEO_EP_IN, &fmt); + if (ret == 0) { + break; + } + if (ret != -EAGAIN) { + LOG_ERR("Failed to get the video format"); + return ret; + } + + k_sleep(K_MSEC(10)); + } + + LOG_INF("The host selected format %ux%u '%s'", + fmt.width, fmt.height, VIDEO_FOURCC_TO_STR(fmt.pixelformat)); + + LOG_INF("Preparing %u buffers of %u bytes", + CONFIG_VIDEO_BUFFER_POOL_NUM_MAX, fmt.pitch * fmt.height); + + /* Half of the buffers between the camera device and the pipeline device */ + for (int i = 0; i < CONFIG_VIDEO_BUFFER_POOL_NUM_MAX / 2; i++) { + vbuf = video_buffer_alloc(fmt.pitch * fmt.height, K_NO_WAIT); + if (vbuf == NULL) { + LOG_ERR("Failed to allocate the video buffer"); + return -ENOMEM; + } + + ret = video_enqueue(camera_dev, VIDEO_EP_OUT, vbuf); + if (ret != 0) { + LOG_ERR("Failed to enqueue video buffer"); + return ret; + } + } + + /* Half of the buffers between the pipeline device and the sink device */ + for (int i = 0; i < CONFIG_VIDEO_BUFFER_POOL_NUM_MAX / 2; i++) { + vbuf = video_buffer_alloc(fmt.pitch * fmt.height, K_NO_WAIT); + if (vbuf == NULL) { + LOG_ERR("Failed to allocate the video buffer"); + return -ENOMEM; + } + + ret = video_enqueue(pipe_dev, VIDEO_EP_OUT, vbuf); + if (ret != 0) { + LOG_ERR("Failed to enqueue video buffer"); + return ret; + } + } + + LOG_DBG("Preparing signaling for %s input/output", camera_dev->name); + + k_poll_signal_init(&sig); + k_poll_event_init(&evt[0], K_POLL_TYPE_SIGNAL, K_POLL_MODE_NOTIFY_ONLY, &sig); + + ret = video_set_signal(camera_dev, VIDEO_EP_OUT, &sig); + if (ret != 0) { + LOG_WRN("Failed to setup the signal on %s output endpoint", camera_dev->name); + timeout = K_MSEC(1); + } + + ret = video_set_signal(pipe_dev, VIDEO_EP_OUT, &sig); + if (ret != 0) { + LOG_WRN("Failed to setup the signal on %s output endpoint", pipe_dev->name); + timeout = K_MSEC(1); + } + + ret = video_set_signal(uvc_dev, VIDEO_EP_IN, &sig); + if (ret != 0) { + LOG_WRN("Failed to setup the signal on %s input endpoint", uvc_dev->name); + timeout = K_MSEC(1); + } + + LOG_INF("Starting the video transfer"); + + ret = video_stream_start(camera_dev); + if (ret != 0) { + LOG_ERR("Failed to start %s", camera_dev->name); + return ret; + } + + ret = video_stream_start(pipe_dev); + if (ret != 0) { + LOG_ERR("Failed to start %s", pipe_dev->name); + return ret; + } + + while (true) { + ret = k_poll(evt, ARRAY_SIZE(evt), timeout); + if (ret != 0 && ret != -EAGAIN) { + LOG_ERR("Poll exited with status %d", ret); + return ret; + } + + if (video_dequeue(camera_dev, VIDEO_EP_OUT, &vbuf, K_NO_WAIT) == 0) { + LOG_DBG("Dequeued %p from %s, enqueueing to %s", + (void *)vbuf, camera_dev->name, pipe_dev->name); + + ret = video_enqueue(pipe_dev, VIDEO_EP_IN, vbuf); + if (ret != 0) { + LOG_ERR("Failed to enqueue video buffer to %s", pipe_dev->name); + return ret; + } + } + + if (video_dequeue(pipe_dev, VIDEO_EP_IN, &vbuf, K_NO_WAIT) == 0) { + LOG_DBG("Dequeued %p from %s, enqueueing to %s", + (void *)vbuf, pipe_dev->name, camera_dev->name); + + ret = video_enqueue(camera_dev, VIDEO_EP_OUT, vbuf); + if (ret != 0) { + LOG_ERR("Failed to enqueue video buffer to %s", camera_dev->name); + return ret; + } + } + + if (video_dequeue(pipe_dev, VIDEO_EP_OUT, &vbuf, K_NO_WAIT) == 0) { + LOG_DBG("Dequeued %p from %s, enqueueing to %s", + (void *)vbuf, pipe_dev->name, uvc_dev->name); + + ret = video_enqueue(uvc_dev, VIDEO_EP_IN, vbuf); + if (ret != 0) { + LOG_ERR("Failed to enqueue video buffer to %s", uvc_dev->name); + return ret; + } + } + + if (video_dequeue(uvc_dev, VIDEO_EP_IN, &vbuf, K_NO_WAIT) == 0) { + LOG_DBG("Dequeued %p from %s, enqueueing to %s", + (void *)vbuf, uvc_dev->name, pipe_dev->name); + + ret = video_enqueue(pipe_dev, VIDEO_EP_OUT, vbuf); + if (ret != 0) { + LOG_ERR("Failed to enqueue video buffer to %s", pipe_dev->name); + return ret; + } + } + + k_poll_signal_reset(&sig); + } + + CODE_UNREACHABLE; + return 0; +} diff --git a/samples/drivers/video/sw_pipeline/src/pipeline.c b/samples/drivers/video/sw_pipeline/src/pipeline.c new file mode 100644 index 000000000000..3bea7fafbdc4 --- /dev/null +++ b/samples/drivers/video/sw_pipeline/src/pipeline.c @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2025 tinyVision.ai Inc.
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +LOG_MODULE_REGISTER(sw_pipeline_definition, LOG_LEVEL_INF); + +#define WIDTH_IN 64 +#define HEIGHT_IN 64 + +#define WIDTH_OUT 128 +#define HEIGHT_OUT 32 + +/* Color tuning applied to one line of data */ +void app_tune_rgb24line(const uint8_t *rgb24in, uint8_t *rgb24out, uint16_t width) +{ + for (size_t w = 0; w < width; w++) { + rgb24out[w * 3 + 0] = CLAMP((-64 + rgb24in[w * 3 + 0]) * 2, 0x00, 0xff); + rgb24out[w * 3 + 1] = CLAMP((-64 + rgb24in[w * 3 + 1]) * 1, 0x00, 0xff); + rgb24out[w * 3 + 2] = CLAMP((-64 + rgb24in[w * 3 + 2]) * 2, 0x00, 0xff); + } +} + +/* Declare the pipeline elements and their intermediate buffers */ +PIXEL_RGB565LESTREAM_TO_RGB24STREAM(step_rgb565_to_rgb24, WIDTH_IN, HEIGHT_IN); +PIXEL_SUBSAMPLE_RGB24STREAM(step_resize_rgb24, WIDTH_IN, HEIGHT_IN); +PIXEL_RGB24LINE_DEFINE(step_tune_rgb24, app_tune_rgb24line, WIDTH_OUT, HEIGHT_OUT); +PIXEL_RGB24STREAM_TO_YUYVSTREAM_BT709(step_rgb24_to_yuyv, WIDTH_OUT, HEIGHT_OUT); + +int app_init_pipeline(void) +{ + const struct device *dev = DEVICE_DT_GET(DT_NODELABEL(video_sw_pipeline)); + struct pixel_stream *strm; + + /* Build the pipeline */ + strm = pixel_stream(&step_rgb565_to_rgb24, &step_resize_rgb24, &step_tune_rgb24, + &step_rgb24_to_yuyv, NULL); + + /* Load it into the driver */ + video_sw_pipeline_set_pipeline(dev, strm, VIDEO_PIX_FMT_RGB565, WIDTH_IN, HEIGHT_IN, + VIDEO_PIX_FMT_YUYV, WIDTH_OUT, HEIGHT_OUT); + + return 0; +} + +/* Initialize before UVC so it can generate the USB descriptors from the formats above */ +SYS_INIT(app_init_pipeline, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE); diff --git a/samples/drivers/video/sw_pipeline/video-emul.overlay b/samples/drivers/video/sw_pipeline/video-emul.overlay new file mode 100644 index 000000000000..6cadcace61c6 --- /dev/null +++ b/samples/drivers/video/sw_pipeline/video-emul.overlay @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2025 tinyVision.ai Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/ { + chosen { + zephyr,camera = &rx0; + }; + + imager0: emul_imager_0 { + compatible = "zephyr,video-emul-imager"; + + port { + imager0_ep_out: endpoint { + remote-endpoint-label = "rx0_ep_in"; + }; + }; + }; + + rx0: video_emul_rx_0 { + compatible = "zephyr,video-emul-rx"; + + port { + #address-cells = <1>; + #size-cells = <0>; + + rx0_ep_in: endpoint@0 { + reg = <0x0>; + remote-endpoint-label = "imager0_ep_out"; + }; + }; + }; +}; diff --git a/samples/drivers/video/tcpserversink/README.rst b/samples/drivers/video/tcpserversink/README.rst index 8245edea4c45..be25ff1cf213 100644 --- a/samples/drivers/video/tcpserversink/README.rst +++ b/samples/drivers/video/tcpserversink/README.rst @@ -39,6 +39,16 @@ If a mt9m114 camera shield is missing, video software generator will be used ins :goals: build :compact: +For testing purposes without the need for any real video capture hardware, +a video software pattern generator can be used via :ref:`snippet-video-sw-generator`: + +..
zephyr-app-commands:: + :zephyr-app: samples/drivers/video/tcpserversink + :board: native_sim + :snippets: video-sw-generator + :goals: build + :compact: + Sample Output ============= diff --git a/samples/drivers/video/tcpserversink/prj.conf b/samples/drivers/video/tcpserversink/prj.conf index 824c424b2fa8..a49aa3de90ff 100644 --- a/samples/drivers/video/tcpserversink/prj.conf +++ b/samples/drivers/video/tcpserversink/prj.conf @@ -40,4 +40,3 @@ CONFIG_NET_CONFIG_SETTINGS=y CONFIG_NET_CONFIG_MY_IPV4_ADDR="192.0.2.1" CONFIG_VIDEO=y -CONFIG_VIDEO_SW_GENERATOR=y diff --git a/samples/drivers/video/tcpserversink/sample.yaml b/samples/drivers/video/tcpserversink/sample.yaml index 717923a6fa19..9e9123b48c07 100644 --- a/samples/drivers/video/tcpserversink/sample.yaml +++ b/samples/drivers/video/tcpserversink/sample.yaml @@ -8,11 +8,11 @@ tests: - net - socket - shield - platform_allow: mimxrt1064_evk + platform_allow: mimxrt1064_evk/mimxrt1064 depends_on: - video - netif integration_platforms: - - mimxrt1064_evk + - mimxrt1064_evk/mimxrt1064 extra_args: - - platform:mimxrt1064_evk:SHIELD=dvp_fpc24_mt9m114 + - platform:mimxrt1064_evk/mimxrt1064:SHIELD=dvp_fpc24_mt9m114 diff --git a/samples/drivers/video/tcpserversink/src/main.c b/samples/drivers/video/tcpserversink/src/main.c index da3f9e4a8319..4a7a560abeae 100644 --- a/samples/drivers/video/tcpserversink/src/main.c +++ b/samples/drivers/video/tcpserversink/src/main.c @@ -6,16 +6,12 @@ #include #include - #include - #include - -#define LOG_LEVEL CONFIG_LOG_DEFAULT_LEVEL #include -LOG_MODULE_REGISTER(main); -#define VIDEO_DEV_SW "VIDEO_SW_GENERATOR" +LOG_MODULE_REGISTER(main, CONFIG_LOG_DEFAULT_LEVEL); + #define MY_PORT 5000 #define MAX_CLIENT_QUEUE 1 @@ -42,21 +38,14 @@ int main(void) int i, ret, sock, client; struct video_format fmt; struct video_caps caps; -#if DT_HAS_CHOSEN(zephyr_camera) - const struct device *const video = DEVICE_DT_GET(DT_CHOSEN(zephyr_camera)); + const struct device *video_dev; - if (!device_is_ready(video)) { - LOG_ERR("%s: video device not ready.", video->name); + video_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_camera)); + if (!device_is_ready(video_dev)) { + LOG_ERR("%s: video device not ready.", video_dev->name); return 0; } -#else - const struct device *const video = device_get_binding(VIDEO_DEV_SW); - if (video == NULL) { - LOG_ERR("%s: video device not found or failed to initialized.", VIDEO_DEV_SW); - return 0; - } -#endif /* Prepare Network */ (void)memset(&addr, 0, sizeof(addr)); addr.sin_family = AF_INET; @@ -83,13 +72,13 @@ int main(void) } /* Get capabilities */ - if (video_get_caps(video, VIDEO_EP_OUT, &caps)) { + if (video_get_caps(video_dev, VIDEO_EP_OUT, &caps)) { LOG_ERR("Unable to retrieve video capabilities"); return 0; } /* Get default/native format */ - if (video_get_format(video, VIDEO_EP_OUT, &fmt)) { + if (video_get_format(video_dev, VIDEO_EP_OUT, &fmt)) { LOG_ERR("Unable to retrieve video format"); return 0; } @@ -125,11 +114,11 @@ int main(void) /* Enqueue Buffers */ for (i = 0; i < ARRAY_SIZE(buffers); i++) { - video_enqueue(video, VIDEO_EP_OUT, buffers[i]); + video_enqueue(video_dev, VIDEO_EP_OUT, buffers[i]); } /* Start video capture */ - if (video_stream_start(video)) { + if (video_stream_start(video_dev)) { LOG_ERR("Unable to start video"); return 0; } @@ -139,7 +128,7 @@ int main(void) /* Capture loop */ i = 0; do { - ret = video_dequeue(video, VIDEO_EP_OUT, &vbuf, K_FOREVER); + ret = video_dequeue(video_dev, VIDEO_EP_OUT, &vbuf, K_FOREVER); if (ret) { LOG_ERR("Unable to dequeue video buf"); return 0;
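As a quick host-side check of the stream served by this sample, a GStreamer pipeline along the following lines can be used. This is only a sketch: the address and port match ``CONFIG_NET_CONFIG_MY_IPV4_ADDR`` and ``MY_PORT`` above, while the ``rawvideoparse`` format, width and height are placeholder values that must be replaced with the format the sample prints at startup:

.. code-block:: console

   gst-launch-1.0 tcpclientsrc host=192.0.2.1 port=5000 \
       ! rawvideoparse format=rgb16 width=480 height=272 \
       ! videoconvert ! autovideosink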
@@ -155,18 +144,18 @@ int main(void) close(client); } - (void)video_enqueue(video, VIDEO_EP_OUT, vbuf); + (void)video_enqueue(video_dev, VIDEO_EP_OUT, vbuf); } while (!ret); /* stop capture */ - if (video_stream_stop(video)) { + if (video_stream_stop(video_dev)) { LOG_ERR("Unable to stop video"); return 0; } /* Flush remaining buffers */ do { - ret = video_dequeue(video, VIDEO_EP_OUT, &vbuf, K_NO_WAIT); + ret = video_dequeue(video_dev, VIDEO_EP_OUT, &vbuf, K_NO_WAIT); } while (!ret); } while (1); diff --git a/samples/lib/lib.rst b/samples/lib/lib.rst new file mode 100644 index 000000000000..89fd6e8db9cb --- /dev/null +++ b/samples/lib/lib.rst @@ -0,0 +1,6 @@ +.. zephyr:code-sample-category:: lib + :name: Libraries + :show-listing: + :live-search: + + These samples demonstrate how to use the libraries present in Zephyr. diff --git a/samples/lib/pixel/pixel.rst b/samples/lib/pixel/pixel.rst new file mode 100644 index 000000000000..a202015abf0b --- /dev/null +++ b/samples/lib/pixel/pixel.rst @@ -0,0 +1,15 @@ +.. zephyr:code-sample-category:: lib_pixel + :name: Pixel Library + :show-listing: + :live-search: + + These samples demonstrate how to use the Pixel processing library of Zephyr. + +These samples can be used as a starting point for test benches that print an input image, +perform some custom processing, and print the color image back along with the logs directly +on the terminal. + +This helps debugging when other methods are not available. + +The ``truecolor`` printing functions give accurate 24-bit RGB colors but are slower than the +``256color`` variants. diff --git a/samples/lib/pixel/print/CMakeLists.txt b/samples/lib/pixel/print/CMakeLists.txt new file mode 100644 index 000000000000..1a296153270d --- /dev/null +++ b/samples/lib/pixel/print/CMakeLists.txt @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: Apache-2.0 + +cmake_minimum_required(VERSION 3.20.0) + +find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE}) +project(pixel) + +target_sources(app PRIVATE src/main.c) diff --git a/samples/lib/pixel/print/README.rst b/samples/lib/pixel/print/README.rst new file mode 100644 index 000000000000..4056ca0d33f2 --- /dev/null +++ b/samples/lib/pixel/print/README.rst @@ -0,0 +1,57 @@ +.. zephyr:code-sample:: lib_pixel_print + :name: Pixel Printing Library + + Print images on the console. + +Overview +******** + +A sample showcasing how to make use of the pixel library to visualize an image or histogram data +by printing it out on the console using `ANSI escape codes`_. + +This way, debug logs can be interleaved with small preview images for debugging purposes. + +.. _ANSI escape codes: https://en.wikipedia.org/wiki/ANSI_escape_code + +Building and Running +******************** + +This application can be built and executed on the native simulator as follows: + +.. zephyr-app-commands:: + :zephyr-app: samples/lib/pixel/print + :host-os: unix + :board: native_sim + :goals: run + :compact: + +To build for another board, change "native_sim" above to that board's name. + +Sample Output +============= + +.. code-block:: console + + *** Booting Zephyr OS build v4.1.0-2611-gfaa7b74cfda7 *** + [00:00:00.000,000] app: Printing the gradient #0070c5 -> #7929d2 + [00:00:00.000,000] app: hexdump: + col0 col1 col2 col3 col4 [...] col14 col15 + R G B R G B R G B R G B R G B [...] R G B R G B + 00 70 c5 00 6f c5 00 6f c5 00 6f c5 00 6f c5 [...] 00 6f c5 03 6d c5 row0 + 03 6d c5 04 6d c5 04 6d c5 04 6d c5 04 6d c5 [...] 04 6d c5 07 6b c5 row1 + 07 6b c5 07 6b c5 08 6b c5 08 6b c5 08 6b c5 [...]
07 6b c5 0b 69 c6 row2 + 0b 69 c6 0b 69 c6 0b 69 c6 0c 68 c6 0c 68 c6 [...] 0b 69 c6 0e 67 c6 row3 + 0f 67 c6 0f 66 c6 0f 66 c6 0f 66 c6 10 66 c6 [...] 0f 66 c6 12 65 c7 row4 + 12 64 c7 13 64 c7 13 64 c7 13 64 c7 13 64 c7 [...] 13 64 c7 16 62 c7 row5 + 16 62 c7 16 62 c7 17 62 c7 17 62 c7 17 62 c7 [...] 16 62 c7 1a 60 c7 row6 + [...] + [00:00:00.000,000] app: truecolor: + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄| + ▄▄▄ shows-up ▄▄▄| + ▄▄▄ as color ▄▄▄| + ▄▄▄ on the ▄▄▄| + ▄▄▄ terminal ▄▄▄| + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄| + [...] + +.. image:: preview.png diff --git a/samples/lib/pixel/print/preview.png b/samples/lib/pixel/print/preview.png new file mode 100644 index 000000000000..6ac9b1913794 Binary files /dev/null and b/samples/lib/pixel/print/preview.png differ diff --git a/samples/lib/pixel/print/prj.conf b/samples/lib/pixel/print/prj.conf new file mode 100644 index 000000000000..ee16fb9972b2 --- /dev/null +++ b/samples/lib/pixel/print/prj.conf @@ -0,0 +1,6 @@ +CONFIG_PIXEL=y +CONFIG_ASSERT=y +CONFIG_LOG=y + +# Required to make sure the test harnesses catch the log output +CONFIG_LOG_MODE_IMMEDIATE=y diff --git a/samples/lib/pixel/print/sample.yaml b/samples/lib/pixel/print/sample.yaml new file mode 100644 index 000000000000..f559a1cd313b --- /dev/null +++ b/samples/lib/pixel/print/sample.yaml @@ -0,0 +1,21 @@ +sample: + description: Pixel Print sample, print images in the terminal for debug purposes + name: pixel print +common: + min_ram: 32 + tags: pixel + integration_platforms: + - native_sim + harness: console + harness_config: + type: one_line + regex: + - "truecolor:" + - "256color:" + - "hexdump:" + - "histogram" +tests: + sample.pixel.print: + tags: pixel + extra_configs: + - CONFIG_PIXEL_PRINT_NONE=y diff --git a/samples/lib/pixel/print/src/main.c b/samples/lib/pixel/print/src/main.c new file mode 100644 index 000000000000..1e183964f762 --- /dev/null +++ b/samples/lib/pixel/print/src/main.c @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2025 tinyVision.ai Inc.
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include + +#include +#include +#include + +LOG_MODULE_REGISTER(app, LOG_LEVEL_INF); + +static uint8_t rgb24frame[16 * 32 * 3]; + +void print_image(void) +{ + const uint8_t beg[] = {0x00, 0x70, 0xc5}; + const uint8_t end[] = {0x79, 0x29, 0xd2}; + + /* Generate an image with a gradient of the two colors above */ + for (size_t i = 0, size = sizeof(rgb24frame); i + 3 <= size; i += 3) { + rgb24frame[i + 0] = (beg[0] * (size - i) + end[0] * i) / size; + rgb24frame[i + 1] = (beg[1] * (size - i) + end[1] * i) / size; + rgb24frame[i + 2] = (beg[2] * (size - i) + end[2] * i) / size; + } + + LOG_INF("Printing the gradient #%02x%02x%02x -> #%02x%02x%02x", + beg[0], beg[1], beg[2], end[0], end[1], end[2]); + + LOG_INF("hexdump:"); + pixel_print_rgb24frame_hex(rgb24frame, sizeof(rgb24frame), 16, 32); + + LOG_INF("truecolor:"); + pixel_print_rgb24frame_truecolor(rgb24frame, sizeof(rgb24frame), 16, 32); + + LOG_INF("256color:"); + pixel_print_rgb24frame_256color(rgb24frame, sizeof(rgb24frame), 16, 32); +} + +void print_histogram(void) +{ + static const uint16_t rgb24hist[] = { + 9, 4, 7, 1, 0, 5, 1, 0, 0, 2, 2, 3, 0, 1, 3, 0, + 7, 6, 5, 1, 1, 4, 2, 0, 1, 2, 3, 4, 1, 1, 2, 2, + 8, 4, 7, 4, 2, 3, 1, 2, 2, 2, 2, 2, 0, 0, 1, 1, + }; + + static const uint16_t y8hist[] = { + 8, 5, 6, 2, 1, 4, 1, 1, 1, 2, 3, 3, 1, 1, 2, 1, + }; + + LOG_INF("Printing a histogram of %zu RGB buckets", ARRAY_SIZE(rgb24hist)); + pixel_print_rgb24hist(rgb24hist, ARRAY_SIZE(rgb24hist), 8); + + LOG_INF("Printing a histogram of %zu Y (luma) buckets", ARRAY_SIZE(y8hist)); + pixel_print_y8hist(y8hist, ARRAY_SIZE(y8hist), 8); +} + +int main(void) +{ + print_image(); + print_histogram(); + + return 0; +} diff --git a/samples/lib/pixel/resize/CMakeLists.txt b/samples/lib/pixel/resize/CMakeLists.txt new file mode 100644 index 000000000000..1a296153270d --- /dev/null +++ b/samples/lib/pixel/resize/CMakeLists.txt @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: Apache-2.0 + +cmake_minimum_required(VERSION 3.20.0) + +find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE}) +project(pixel) + +target_sources(app PRIVATE src/main.c) diff --git a/samples/lib/pixel/resize/README.rst b/samples/lib/pixel/resize/README.rst new file mode 100644 index 000000000000..deb549b40c94 --- /dev/null +++ b/samples/lib/pixel/resize/README.rst @@ -0,0 +1,62 @@ +.. zephyr:code-sample:: lib_pixel_resize + :name: Pixel Resizing Library + + Resize an image using subsampling. + +Overview +******** + +A sample showcasing how to make use of the pixel library to resize an input image to a smaller or +bigger output image, using the subsampling method. This helps generate a smaller preview of an +input image. + +The input and output are printed as preview images on the console. + +Building and Running +******************** + +This application can be built and executed on the native simulator as follows: + +.. zephyr-app-commands:: + :zephyr-app: samples/lib/pixel/resize + :host-os: unix + :board: native_sim + :goals: run + :compact: + +To build for another board, change "native_sim" above to that board's name. + +Sample Output +============= + +..
code-block:: console + + *** Booting Zephyr OS build v4.1.0-2611-gfaa7b74cfda7 *** + [00:00:00.000,000] app: input image, 32x16, 1536 bytes: + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄| + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄| + ▄▄▄▄▄▄▄▄▄▄▄ shows-up ▄▄▄▄▄▄▄▄▄▄▄| + ▄▄▄▄▄▄▄▄▄▄▄ as color ▄▄▄▄▄▄▄▄▄▄▄| + ▄▄▄▄▄▄▄▄▄▄▄ on the ▄▄▄▄▄▄▄▄▄▄▄| + ▄▄▄▄▄▄▄▄▄▄▄ terminal ▄▄▄▄▄▄▄▄▄▄▄| + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄| + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄| + [00:00:00.000,000] app: output image, bigger, 120x20, 7200 bytes: + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄[...]▄▄| + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄[...]▄▄| + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄[...]▄▄| + ▄▄▄▄▄▄▄▄▄▄▄ shows-up ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄[...]▄▄| + ▄▄▄▄▄▄▄▄▄▄▄ as color ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄[...]▄▄| + ▄▄▄▄▄▄▄▄▄▄▄ on the ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄[...]▄▄| + ▄▄▄▄▄▄▄▄▄▄▄ terminal ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄[...]▄▄| + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄[...]▄▄| + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄[...]▄▄| + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄[...]▄▄| + [00:00:00.000,000] app: output image, smaller, 10x10, 300 bytes: + ▄▄▄▄▄▄▄▄▄▄| + ▄▄▄▄▄▄▄▄▄▄| + ▄▄▄▄▄▄▄▄▄▄| + ▄▄▄▄▄▄▄▄▄▄| + ▄▄▄▄▄▄▄▄▄▄| + +.. image:: preview.png diff --git a/samples/lib/pixel/resize/preview.png b/samples/lib/pixel/resize/preview.png new file mode 100644 index 000000000000..6ac9b1913794 Binary files /dev/null and b/samples/lib/pixel/resize/preview.png differ diff --git a/samples/lib/pixel/resize/prj.conf b/samples/lib/pixel/resize/prj.conf new file mode 100644 index 000000000000..ee16fb9972b2 --- /dev/null +++ b/samples/lib/pixel/resize/prj.conf @@ -0,0 +1,6 @@ +CONFIG_PIXEL=y +CONFIG_ASSERT=y +CONFIG_LOG=y + +# Required to make sure the test harnesses catch the log output +CONFIG_LOG_MODE_IMMEDIATE=y diff --git a/samples/lib/pixel/resize/sample.yaml b/samples/lib/pixel/resize/sample.yaml new file mode 100644 index 000000000000..344526446dbc --- /dev/null +++ b/samples/lib/pixel/resize/sample.yaml @@ -0,0 +1,19 @@ +sample: + description: Pixel Resize sample, down-scale/up-scale an image + name: pixel resize +common: + min_ram: 32 + tags: pixel + integration_platforms: + - native_sim + harness: console + harness_config: + type: one_line + regex: + - "output image, bigger," + - "output image, smaller," +tests: + sample.pixel.resize: + tags: pixel + extra_configs: + - CONFIG_PIXEL_PRINT_NONE=y diff --git a/samples/lib/pixel/resize/src/main.c b/samples/lib/pixel/resize/src/main.c new file mode 100644 index 000000000000..ce3cbac342da --- /dev/null +++ b/samples/lib/pixel/resize/src/main.c @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2025 tinyVision.ai Inc.
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include + +#include +#include +#include +#include + +LOG_MODULE_REGISTER(app, LOG_LEVEL_INF); + +static void gradient(uint8_t *rgb24buf, size_t size, const uint8_t beg[3], const uint8_t end[3]) +{ + for (int i = 0; i + 3 <= size; i += 3) { + rgb24buf[i + 0] = (beg[0] * (size - i) + end[0] * i) / size; + rgb24buf[i + 1] = (beg[1] * (size - i) + end[1] * i) / size; + rgb24buf[i + 2] = (beg[2] * (size - i) + end[2] * i) / size; + } +} + +static uint8_t rgb24frame0[32 * 16 * 3]; +static uint8_t rgb24frame1[120 * 20 * 3]; +static uint8_t rgb24frame2[10 * 10 * 3]; + +int main(void) +{ + const uint8_t beg[] = {0x00, 0x70, 0xc5}; + const uint8_t end[] = {0x79, 0x29, 0xd2}; + + LOG_INF("input image, 32x16, %zu bytes:", sizeof(rgb24frame0)); + gradient(rgb24frame0, sizeof(rgb24frame0), beg, end); + pixel_print_rgb24frame_truecolor(rgb24frame0, sizeof(rgb24frame0), 32, 16); + + LOG_INF("output image, bigger, 120x20, %zu bytes:", sizeof(rgb24frame1)); + pixel_subsample_rgb24frame(rgb24frame0, 32, 16, rgb24frame1, 120, 20); + pixel_print_rgb24frame_truecolor(rgb24frame1, sizeof(rgb24frame1), 120, 20); + + LOG_INF("output image, smaller, 10x10, %zu bytes:", sizeof(rgb24frame2)); + pixel_subsample_rgb24frame(rgb24frame0, 32, 16, rgb24frame2, 10, 10); + pixel_print_rgb24frame_truecolor(rgb24frame2, sizeof(rgb24frame2), 10, 10); + + return 0; +} diff --git a/samples/lib/pixel/stats/CMakeLists.txt b/samples/lib/pixel/stats/CMakeLists.txt new file mode 100644 index 000000000000..1a296153270d --- /dev/null +++ b/samples/lib/pixel/stats/CMakeLists.txt @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: Apache-2.0 + +cmake_minimum_required(VERSION 3.20.0) + +find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE}) +project(pixel) + +target_sources(app PRIVATE src/main.c) diff --git a/samples/lib/pixel/stats/README.rst b/samples/lib/pixel/stats/README.rst new file mode 100644 index 000000000000..c34c04c88ff4 --- /dev/null +++ b/samples/lib/pixel/stats/README.rst @@ -0,0 +1,57 @@ +.. zephyr:code-sample:: lib_pixel_stats + :name: Pixel Statistics Library + + Collect statistics of an image. + +Overview +******** + +A sample showcasing how to make use of the pixel library to collect statistics of an input image +buffer and display both the image and the statistics on the console. + +Building and Running +******************** + +This application can be built and executed on the native simulator as follows: + +.. zephyr-app-commands:: + :zephyr-app: samples/lib/pixel/stats + :host-os: unix + :board: native_sim + :goals: run + :compact: + +To build for another board, change "native_sim" above to that board's name. + +Sample Output +============= + +..
code-block:: console + + *** Booting Zephyr OS build v4.1.0-2611-gfaa7b74cfda7 *** + [00:00:00.000,000] app: Input image preview: + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄| + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄| + [00:00:00.000,000] app: RGB histogram of the image + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄| - 60 + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄| - 56 + ▄▄▄▄▄▄▄▄▄▄ shows-up ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄| - 52 + ▄▄▄▄▄▄▄▄▄▄ as color ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄| - 48 + ▄▄▄▄▄▄▄▄▄▄ on the ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄| - 45 + ▄▄▄▄▄▄▄▄▄▄ terminal ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄| - 41 + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄| - 37 + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄| - 33 + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄| - 30 + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄| - 26 + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄| - 22 + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄| - 18 + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄| - 15 + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄| - 11 + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄| - 7 + ▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄ + [00:00:00.000,000] app: RGB channel averages of the image + [00:00:00.000,000] app: - R: 0x47/0xff + [00:00:00.000,000] app: - G: 0x88/0xff + [00:00:00.000,000] app: - B: 0xec/0xff + +.. image:: preview.png diff --git a/samples/lib/pixel/stats/preview.png b/samples/lib/pixel/stats/preview.png new file mode 100644 index 000000000000..7419606ea70f Binary files /dev/null and b/samples/lib/pixel/stats/preview.png differ diff --git a/samples/lib/pixel/stats/prj.conf b/samples/lib/pixel/stats/prj.conf new file mode 100644 index 000000000000..ee16fb9972b2 --- /dev/null +++ b/samples/lib/pixel/stats/prj.conf @@ -0,0 +1,6 @@ +CONFIG_PIXEL=y +CONFIG_ASSERT=y +CONFIG_LOG=y + +# Required to make sure the test harnesses catch the log output +CONFIG_LOG_MODE_IMMEDIATE=y diff --git a/samples/lib/pixel/stats/sample.yaml b/samples/lib/pixel/stats/sample.yaml new file mode 100644 index 000000000000..978492482273 --- /dev/null +++ b/samples/lib/pixel/stats/sample.yaml @@ -0,0 +1,19 @@ +sample: + description: Pixel Stats sample, collect statistics of an input buffer + name: pixel stats +common: + min_ram: 32 + tags: pixel + integration_platforms: + - native_sim + harness: console + harness_config: + type: one_line + regex: + - "RGB histogram of the image" + - "RGB channel averages of the image" +tests: + sample.pixel.stats: + tags: pixel + extra_configs: + - CONFIG_PIXEL_PRINT_NONE=y diff --git a/samples/lib/pixel/stats/src/main.c b/samples/lib/pixel/stats/src/main.c new file mode 100644 index 000000000000..2c9ab8aa105e --- /dev/null +++ b/samples/lib/pixel/stats/src/main.c @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2025 tinyVision.ai Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include + +#include +#include +#include +#include + +LOG_MODULE_REGISTER(app, LOG_LEVEL_INF); + +#define NVAL 100 + +static const uint8_t image_rgb24[20 * 4 * 3] = { + 0x47, 0x84, 0xee, 0x46, 0x84, 0xee, 0x47, 0x84, 0xee, 0x46, 0x83, 0xee, 0x47, 0x84, 0xee, + 0x78, 0xaa, 0xec, 0x74, 0xb2, 0xe0, 0x67, 0xaa, 0xdd, 0x78, 0xb2, 0xef, 0x39, 0x8c, 0xf1, + 0x3a, 0x8c, 0xf2, 0x39, 0x8b, 0xf1, 0x3a, 0x8b, 0xf1, 0x3a, 0x8b, 0xf1, 0x3a, 0x8b, 0xf1, + 0x3a, 0x8b, 0xf1, 0x3b, 0x8b, 0xf1, 0x3b, 0x8b, 0xf1, 0x3b, 0x8b, 0xf1, 0x3b, 0x8a, 0xf1, + 0x47, 0x82, 0xed, 0x47, 0x82, 0xed, 0x47, 0x82, 0xee, 0x47, 0x82, 0xed, 0x47, 0x82, 0xed, + 0x47, 0x82, 0xed, 0x5d, 0x93, 0xed, 0x5f, 0x9d, 0xeb, 0x3a, 0x8a, 0xf1, 0x3a, 0x8a, 0xf0, + 0x3a, 0x8a, 0xf1, 0x3a, 0x8a, 0xf0, 0x3b, 0x8a, 0xf0, 0x3b, 0x8a, 0xf0, 0x3b, 0x8a, 0xf0, + 0x3b, 0x89, 0xf0, 0x3b, 0x8a, 0xf0, 0x3b, 0x89, 0xf0, 0x3c, 0x89, 0xf0, 0x3c, 0x89, 0xf0, + 0x49, 0x82, 0xee, 0x49, 0x82, 0xed, 0x49, 0x82, 0xee, 0x48, 0x82, 0xed, 0x49, 0x82, 0xee, + 0x49, 0x82, 0xed, 0x73, 0x92, 0xe9, 0x50, 0x65, 0xd4, 0x4c, 0x93, 0xf2, 0x3c, 0x8a, 0xf1, + 0x3c, 0x8a, 0xf1, 0x3c, 0x8a, 0xf0, 0x3c, 0x8a, 0xf1, 0x3c, 0x89, 0xf0, 0x3d, 0x8a, 0xf1, + 0x3d, 0x89, 0xf0, 0x3d, 0x89, 0xf0, 0x3d, 0x89, 0xf0, 0x3e, 0x89, 0xf0, 0x3d, 0x89, 0xf0, + 0x4a, 0x81, 0xed, 0x49, 0x81, 0xed, 0x4a, 0x81, 0xed, 0x49, 0x81, 0xed, 0x49, 0x81, 0xed, + 0x71, 0x8c, 0xe5, 0x3e, 0x4c, 0xcc, 0x3d, 0x4c, 0xcb, 0x65, 0x85, 0xe1, 0x3d, 0x89, 0xf0, + 0x3d, 0x89, 0xf0, 0x3d, 0x88, 0xf0, 0x3d, 0x89, 0xf0, 0x3d, 0x88, 0xf0, 0x3e, 0x88, 0xf0, + 0x3e, 0x88, 0xf0, 0x3e, 0x88, 0xf0, 0x3e, 0x88, 0xf0, 0x3f, 0x88, 0xf0, 0x3e, 0x88, 0xef, +}; + +static uint16_t rgb24hist[3 * 64]; +static uint8_t rgb24avg[3]; + +int main(void) +{ + LOG_INF("Input image preview:"); + pixel_print_rgb24frame_truecolor(image_rgb24, sizeof(image_rgb24), 20, 4); + + LOG_INF("RGB histogram of the image"); + pixel_rgb24frame_to_rgb24hist(image_rgb24, sizeof(image_rgb24), + rgb24hist, ARRAY_SIZE(rgb24hist), NVAL); + pixel_print_rgb24hist(rgb24hist, ARRAY_SIZE(rgb24hist), 16); + + LOG_INF("RGB channel averages of the image"); + pixel_rgb24frame_to_rgb24avg(image_rgb24, sizeof(image_rgb24), rgb24avg, NVAL); + LOG_INF("- R: 0x%02x/0xff", rgb24avg[0]); + LOG_INF("- G: 0x%02x/0xff", rgb24avg[1]); + LOG_INF("- B: 0x%02x/0xff", rgb24avg[2]); + + return 0; +} diff --git a/samples/subsys/usb/common/sample_usbd_init.c b/samples/subsys/usb/common/sample_usbd_init.c index 69642372a585..ca5281301756 100644 --- a/samples/subsys/usb/common/sample_usbd_init.c +++ b/samples/subsys/usb/common/sample_usbd_init.c @@ -81,7 +81,8 @@ static void sample_fix_code_triple(struct usbd_context *uds_ctx, IS_ENABLED(CONFIG_USBD_CDC_ECM_CLASS) || IS_ENABLED(CONFIG_USBD_CDC_NCM_CLASS) || IS_ENABLED(CONFIG_USBD_MIDI2_CLASS) || - IS_ENABLED(CONFIG_USBD_AUDIO2_CLASS)) { + IS_ENABLED(CONFIG_USBD_AUDIO2_CLASS) || + IS_ENABLED(CONFIG_USBD_VIDEO_CLASS)) { /* * Class with multiple interfaces have an Interface * Association Descriptor available, use an appropriate triple diff --git a/samples/subsys/usb/uvc/CMakeLists.txt b/samples/subsys/usb/uvc/CMakeLists.txt new file mode 100644 index 000000000000..62b0a45e66a4 --- /dev/null +++ b/samples/subsys/usb/uvc/CMakeLists.txt @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: Apache-2.0 + +cmake_minimum_required(VERSION 3.20.0) +find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE}) +project(usb_video) + +include(${ZEPHYR_BASE}/samples/subsys/usb/common/common.cmake) +target_sources(app PRIVATE 
src/main.c) diff --git a/samples/subsys/usb/uvc/Kconfig b/samples/subsys/usb/uvc/Kconfig new file mode 100644 index 000000000000..d1b0c2bec39c --- /dev/null +++ b/samples/subsys/usb/uvc/Kconfig @@ -0,0 +1,9 @@ +# Copyright The Zephyr Project Contributors +# SPDX-License-Identifier: Apache-2.0 + +# Source common USB sample options used to initialize new experimental USB +# device stack. The scope of these options is limited to USB samples in project +# tree, you cannot use them in your own application. +source "samples/subsys/usb/common/Kconfig.sample_usbd" + +source "Kconfig.zephyr" diff --git a/samples/subsys/usb/uvc/README.rst b/samples/subsys/usb/uvc/README.rst new file mode 100644 index 000000000000..b7900f6ede76 --- /dev/null +++ b/samples/subsys/usb/uvc/README.rst @@ -0,0 +1,186 @@ +.. zephyr:code-sample:: uvc + :name: USB Video sample + :relevant-api: usbd_api video_interface + + Send video frames over USB. + +Overview +******** + +This sample demonstrates how to use a USB Video Class instance to +send video data over USB. + +Upon connection, a video device will show up on the host, +usable like a regular webcam device. + +Any software on the host can then access the video stream as a local video source. + +Requirements +************ + +This sample uses the new USB device stack and requires the USB device +controller ported to the :ref:`udc_api`. + +Building and Running +******************** + +If a board is equipped with a supported image sensor and the ``zephyr,camera`` +node is chosen for the board, it will be used as the video source. +The sample can be built as follows: + +.. zephyr-app-commands:: + :zephyr-app: samples/subsys/usb/uvc + :board: arduino_nicla_vision/stm32h747xx/m7 + :goals: build flash + :compact: + +If not, it is possible to test with a test pattern generator by using the +``video-sw-generator`` snippet with any board: + +.. zephyr-app-commands:: + :zephyr-app: samples/subsys/usb/uvc + :board: frdm_mcxn947/mcxn947/cpu0 + :snippets: video-sw-generator + :goals: build flash + :compact: + +The device is expected to be detected as a webcam device: + +.. tabs:: + + .. group-tab:: Ubuntu + + The ``dmesg`` logs are expected to mention a ``generic UVC device``. + + The ``lsusb`` output is expected to show an entry for a Zephyr device. + + Refer to the `Ideas on board FAQ `_ + for how to get more debug information. + + .. group-tab:: MacOS + + The ``dmesg`` logs are expected to mention a video device. + + The ``ioreg -p IOUSB`` command lists the USB devices including cameras. + + The ``system_profiler SPCameraDataType`` command lists video input devices. + + .. group-tab:: Windows + + The Device Manager or USBView utilities allow listing the USB devices. + + The 3rd-party USB Tree View utility allows reviewing and debugging the descriptors. + + In addition, the `USB3CV `_ tool + from USB-IF can check that the device is compliant with the UVC standard. + +Playing the Stream +================== + +The device is recognized by the system as a native webcam and can be used by any video application. + +For instance with VLC: +``Media`` > ``Open Capture Device`` > ``Capture Device`` > ``Video device name``. + +Or with GStreamer and FFmpeg: + +.. tabs:: + + .. group-tab:: Ubuntu + + Assuming ``/dev/video0`` is your Zephyr device. + + .. code-block:: console + + ffplay -i /dev/video0 + + .. code-block:: console + + gst-launch-1.0 v4l2src device=/dev/video0 ! videoconvert ! autovideosink + + .. group-tab:: MacOS + + Assuming ``0:0`` is your Zephyr device. + + ..
code-block:: console + + ffplay -f avfoundation -i 0:0 + + .. code-block:: console + + gst-launch-1.0 avfvideosrc device-index=0 ! autovideosink + + .. group-tab:: Windows + + Assuming ``UVC sample`` is your Zephyr device. + + .. code-block:: console + + ffplay.exe -f dshow -i video="UVC sample" + + .. code-block:: console + + gst-launch-1.0.exe ksvideosrc device-name="UVC sample" ! videoconvert ! autovideosink + +The video device can also be used by web and video call applications. + +Android and iPad (but not yet iOS) are also expected to work via dedicated applications. + +Accessing the Video Controls +============================ + +On the host system, the controls are available as video source +controls through various applications, like any webcam. + +.. tabs:: + + .. group-tab:: Ubuntu + + Assuming ``/dev/video0`` is your Zephyr device. + + .. code-block:: console + + $ v4l2-ctl --device /dev/video0 --list-ctrls + + Camera Controls + + auto_exposure 0x009a0901 (menu) : min=0 max=3 default=1 value=1 (Manual Mode) + exposure_dynamic_framerate 0x009a0903 (bool) : default=0 value=0 + exposure_time_absolute 0x009a0902 (int) : min=10 max=2047 step=1 default=384 value=384 flags=inactive + + $ v4l2-ctl --device /dev/video0 --set-ctrl auto_exposure=1 + $ v4l2-ctl --device /dev/video0 --set-ctrl exposure_time_absolute=1500 + + .. group-tab:: Windows + + The `VLC `_ and `Pot Player `_ + clients allow further access to the video controls. + + .. group-tab:: MacOS + + The `VLC `_ client and the system Webcam Settings panel + allow adjustment of the supported video controls. + +Software Processing +=================== + +Software processing tools can also use the video interface directly. + +Here is an example with OpenCV (``pip install opencv-python``): + +.. code-block:: python + + import cv2 + + # Number of the /dev/video# interface + devnum = 2 + + cv2.namedWindow("preview") + vc = cv2.VideoCapture(devnum) + + while (val := vc.read())[0]: + cv2.waitKey(20) + cv2.imshow("preview", val[1]) + + cv2.destroyWindow("preview") + vc.release() diff --git a/samples/subsys/usb/uvc/app.overlay b/samples/subsys/usb/uvc/app.overlay new file mode 100644 index 000000000000..8f7cc121413c --- /dev/null +++ b/samples/subsys/usb/uvc/app.overlay @@ -0,0 +1,11 @@ +/* + * Copyright (c) 2025 tinyVision.ai Inc.
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +/ { + uvc: uvc { + compatible = "zephyr,uvc-device"; + }; +}; diff --git a/samples/subsys/usb/uvc/boards/arduino_nicla_vision_stm32h747xx_m7.conf b/samples/subsys/usb/uvc/boards/arduino_nicla_vision_stm32h747xx_m7.conf new file mode 100644 index 000000000000..7fcfeea35e66 --- /dev/null +++ b/samples/subsys/usb/uvc/boards/arduino_nicla_vision_stm32h747xx_m7.conf @@ -0,0 +1,2 @@ +# Enough for two 320x240 YUYV frames +CONFIG_VIDEO_BUFFER_POOL_SZ_MAX=163840 diff --git a/samples/subsys/usb/uvc/boards/frdm_mcxn947_mcxn947_cpu0.conf b/samples/subsys/usb/uvc/boards/frdm_mcxn947_mcxn947_cpu0.conf new file mode 100644 index 000000000000..2302ba1e88b9 --- /dev/null +++ b/samples/subsys/usb/uvc/boards/frdm_mcxn947_mcxn947_cpu0.conf @@ -0,0 +1,2 @@ +CONFIG_VIDEO_BUFFER_POOL_SZ_MAX=40000 +CONFIG_VIDEO_BUFFER_POOL_NUM_MAX=2 diff --git a/samples/subsys/usb/uvc/prj.conf b/samples/subsys/usb/uvc/prj.conf new file mode 100644 index 000000000000..a4aa7d775654 --- /dev/null +++ b/samples/subsys/usb/uvc/prj.conf @@ -0,0 +1,14 @@ +CONFIG_LOG=y +CONFIG_ASSERT=y +CONFIG_POLL=y +CONFIG_VIDEO=y +CONFIG_VIDEO_LOG_LEVEL_WRN=y +CONFIG_VIDEO_BUFFER_POOL_NUM_MAX=2 +CONFIG_VIDEO_BUFFER_POOL_SZ_MAX=24576 +CONFIG_USB_DEVICE_STACK_NEXT=y +CONFIG_USBD_LOG_LEVEL_WRN=y +CONFIG_USBD_VIDEO_CLASS=y +CONFIG_USBD_VIDEO_LOG_LEVEL_WRN=y +CONFIG_UDC_DRIVER_LOG_LEVEL_WRN=y +CONFIG_SAMPLE_USBD_PID=0x0011 +CONFIG_SAMPLE_USBD_PRODUCT="UVC sample" diff --git a/samples/subsys/usb/uvc/sample.yaml b/samples/subsys/usb/uvc/sample.yaml new file mode 100644 index 000000000000..9513fa859e63 --- /dev/null +++ b/samples/subsys/usb/uvc/sample.yaml @@ -0,0 +1,24 @@ +sample: + name: USB Video sample +tests: + sample.subsys.usb.uvc: + depends_on: + - usbd + tags: usb video + extra_args: + - SNIPPET=video-sw-generator + integration_platforms: + - nrf52840dk/nrf52840 + - nrf54h20dk/nrf54h20/cpuapp + - frdm_k64f + - stm32f723e_disco + - nucleo_f413zh + - mimxrt685_evk/mimxrt685s/cm33 + - mimxrt1060_evk/mimxrt1062/qspi + sample.subsys.usb.uvc.camera: + depends_on: + - usbd + tags: usb video + filter: dt_chosen_enabled("zephyr,camera") + integration_platforms: + - arduino_nicla_vision/stm32h747xx/m7 diff --git a/samples/subsys/usb/uvc/src/main.c b/samples/subsys/usb/uvc/src/main.c new file mode 100644 index 000000000000..280479e9dddf --- /dev/null +++ b/samples/subsys/usb/uvc/src/main.c @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2025 tinyVision.ai Inc.
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +#include + +#include +#include +#include +#include +#include + +LOG_MODULE_REGISTER(uvc_sample, LOG_LEVEL_INF); + +const struct device *const uvc_dev = DEVICE_DT_GET(DT_NODELABEL(uvc)); +const struct device *const video_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_camera)); + +int main(void) +{ + struct usbd_context *sample_usbd; + struct video_buffer *vbuf; + struct video_format fmt = {0}; + struct video_caps caps; + struct k_poll_signal sig; + struct k_poll_event evt[1]; + k_timeout_t timeout = K_FOREVER; + size_t bsize; + int ret; + + if (!device_is_ready(video_dev)) { + LOG_ERR("video source %s failed to initialize", video_dev->name); + return -ENODEV; + } + + if (video_get_caps(video_dev, VIDEO_EP_OUT, &caps)) { + LOG_ERR("Unable to retrieve video capabilities"); + return 0; + } + + /* Must be done before initializing USB */ + uvc_set_video_dev(uvc_dev, video_dev); + + sample_usbd = sample_usbd_init_device(NULL); + if (sample_usbd == NULL) { + return -ENODEV; + } + + ret = usbd_enable(sample_usbd); + if (ret != 0) { + return ret; + } + + LOG_INF("Waiting for the host to select the video format"); + + /* Get the video format once it is selected by the host */ + while (true) { + ret = video_get_format(uvc_dev, VIDEO_EP_IN, &fmt); + if (ret == 0) { + break; + } + if (ret != -EAGAIN) { + LOG_ERR("Failed to get the video format"); + return ret; + } + + k_sleep(K_MSEC(10)); + } + + LOG_INF("The host selected format '%s' %ux%u, preparing %u buffers of %u bytes", + VIDEO_FOURCC_TO_STR(fmt.pixelformat), fmt.width, fmt.height, + CONFIG_VIDEO_BUFFER_POOL_NUM_MAX, fmt.pitch * fmt.height); + + /* Size to allocate for each buffer */ + if (caps.min_line_count == LINE_COUNT_HEIGHT) { + bsize = fmt.pitch * fmt.height; + } else { + bsize = fmt.pitch * caps.min_line_count; + } + + for (int i = 0; i < CONFIG_VIDEO_BUFFER_POOL_NUM_MAX; i++) { + vbuf = video_buffer_alloc(bsize, K_NO_WAIT); + if (vbuf == NULL) { + LOG_ERR("Could not allocate the video buffer"); + return -ENOMEM; + } + + ret = video_enqueue(video_dev, VIDEO_EP_OUT, vbuf); + if (ret != 0) { + LOG_ERR("Could not enqueue video buffer"); + return ret; + } + } + + LOG_DBG("Preparing signaling for %s input/output", video_dev->name); + + k_poll_signal_init(&sig); + k_poll_event_init(&evt[0], K_POLL_TYPE_SIGNAL, K_POLL_MODE_NOTIFY_ONLY, &sig); + + ret = video_set_signal(video_dev, VIDEO_EP_OUT, &sig); + if (ret != 0) { + LOG_WRN("Failed to setup the signal on %s output endpoint", video_dev->name); + timeout = K_MSEC(1); + } + + ret = video_set_signal(uvc_dev, VIDEO_EP_IN, &sig); + if (ret != 0) { + LOG_ERR("Failed to setup the signal on %s input endpoint", uvc_dev->name); + return ret; + } + + LOG_INF("Starting the video transfer"); + + ret = video_stream_start(video_dev); + if (ret != 0) { + LOG_ERR("Failed to start %s", video_dev->name); + return ret; + } + + while (true) { + ret = k_poll(evt, ARRAY_SIZE(evt), timeout); + if (ret != 0 && ret != -EAGAIN) { + LOG_ERR("Poll exited with status %d", ret); + return ret; + } + + if (video_dequeue(video_dev, VIDEO_EP_OUT, &vbuf, K_NO_WAIT) == 0) { + LOG_DBG("Dequeued %p from %s, enqueueing to %s", + (void *)vbuf, video_dev->name, uvc_dev->name); + + ret = video_enqueue(uvc_dev, VIDEO_EP_IN, vbuf); + if (ret != 0) { + LOG_ERR("Could not enqueue video buffer to %s", uvc_dev->name); + return ret; + } + } + + if (video_dequeue(uvc_dev, VIDEO_EP_IN, &vbuf, K_NO_WAIT) == 0) { + LOG_DBG("Dequeued %p from %s, enqueueing to %s", + (void *)vbuf,
uvc_dev->name, video_dev->name); + + ret = video_enqueue(video_dev, VIDEO_EP_OUT, vbuf); + if (ret != 0) { + LOG_ERR("Could not enqueue video buffer to %s", video_dev->name); + return ret; + } + } + + k_poll_signal_reset(&sig); + } + + CODE_UNREACHABLE; + return 0; +} diff --git a/snippets/video-sw-generator/README.rst b/snippets/video-sw-generator/README.rst new file mode 100644 index 000000000000..5a4198bc42c9 --- /dev/null +++ b/snippets/video-sw-generator/README.rst @@ -0,0 +1,20 @@ +.. _snippet-video-sw-generator: + +Video Software Generator Snippet (video-sw-generator) +##################################################### + +.. code-block:: console + + west build -S video-sw-generator [...] + +Overview +******** + +This snippet instantiates a fake video source generating a test pattern continuously +for test purposes. It is selected as the ``zephyr,camera`` :ref:`devicetree` chosen node. + +Requirements +************ + +No hardware support is required besides sufficient memory for the video resolution +declared by :kconfig:option:`CONFIG_VIDEO_BUFFER_POOL_SZ_MAX`. diff --git a/snippets/video-sw-generator/snippet.yml b/snippets/video-sw-generator/snippet.yml new file mode 100644 index 000000000000..3d494e24c4f1 --- /dev/null +++ b/snippets/video-sw-generator/snippet.yml @@ -0,0 +1,3 @@ +name: video-sw-generator +append: + EXTRA_DTC_OVERLAY_FILE: video-sw-generator.overlay diff --git a/snippets/video-sw-generator/video-sw-generator.overlay b/snippets/video-sw-generator/video-sw-generator.overlay new file mode 100644 index 000000000000..29ff28f93e03 --- /dev/null +++ b/snippets/video-sw-generator/video-sw-generator.overlay @@ -0,0 +1,14 @@ +/* + * Copyright The Zephyr Project Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +/ { + chosen { + zephyr,camera = &video_sw_generator; + }; + + video_sw_generator: video-sw-generator { + compatible = "zephyr,video-sw-generator"; + }; +}; diff --git a/subsys/usb/device_next/CMakeLists.txt b/subsys/usb/device_next/CMakeLists.txt index e0120dc89a82..717cdb9da37b 100644 --- a/subsys/usb/device_next/CMakeLists.txt +++ b/subsys/usb/device_next/CMakeLists.txt @@ -78,6 +78,11 @@ zephyr_library_sources_ifdef( class/usbd_midi2.c ) +zephyr_library_sources_ifdef( + CONFIG_USBD_VIDEO_CLASS + class/usbd_uvc.c +) + zephyr_library_sources_ifdef( CONFIG_USBD_HID_SUPPORT class/usbd_hid.c diff --git a/subsys/usb/device_next/class/Kconfig b/subsys/usb/device_next/class/Kconfig index fc188653565b..d3d9a946488e 100644 --- a/subsys/usb/device_next/class/Kconfig +++ b/subsys/usb/device_next/class/Kconfig @@ -12,3 +12,4 @@ rsource "Kconfig.uac2" rsource "Kconfig.hid" rsource "Kconfig.midi2" rsource "Kconfig.dfu" +rsource "Kconfig.uvc" diff --git a/subsys/usb/device_next/class/Kconfig.uvc b/subsys/usb/device_next/class/Kconfig.uvc new file mode 100644 index 000000000000..82051cb58517 --- /dev/null +++ b/subsys/usb/device_next/class/Kconfig.uvc @@ -0,0 +1,48 @@ +# Copyright (c) 2025 tinyVision.ai Inc. +# +# SPDX-License-Identifier: Apache-2.0 + +config USBD_VIDEO_CLASS + bool "USB Video Class implementation [EXPERIMENTAL]" + depends on DT_HAS_ZEPHYR_UVC_DEVICE_ENABLED + select EXPERIMENTAL + help + USB Device Video Class (UVC) implementation. + +if USBD_VIDEO_CLASS + +config USBD_VIDEO_HEADER_SIZE + int "USB Video payload header size" + range 2 255 + default 8 + help + Sets the size of the video payload header to allow custom data to be + added after the mandatory fields.
+ The default value is arbitrary, chosen to fit most situations while preserving + 64-bit alignment of the payload data for ease of debugging. + +config USBD_VIDEO_MAX_FORMATS + int "Max number of format descriptors" + range 1 254 + default 32 + help + The table of format descriptors is generated at runtime. This option reserves the + storage at build time to allow enough descriptors to be generated. The default value + aims at a compromise between having enough descriptors for most devices and not + using too much memory. + +config USBD_VIDEO_MAX_FRMIVAL + int "Max number of frame intervals per frame descriptor" + range 1 255 + default 8 + help + Max number of frame intervals listed in a frame descriptor. The + default value is selected arbitrarily to fit most situations without + requiring too much RAM. + +module = USBD_VIDEO +module-str = usbd uvc +default-count = 1 +source "subsys/logging/Kconfig.template.log_config" + +endif # USBD_VIDEO_CLASS diff --git a/subsys/usb/device_next/class/usbd_uvc.c b/subsys/usb/device_next/class/usbd_uvc.c new file mode 100644 index 000000000000..4361973b80fa --- /dev/null +++ b/subsys/usb/device_next/class/usbd_uvc.c @@ -0,0 +1,2244 @@ +/* + * Copyright (c) 2025 tinyVision.ai Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#define DT_DRV_COMPAT zephyr_uvc_device + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "usbd_uvc.h" +#include "../../../drivers/video/video_ctrls.h" + +LOG_MODULE_REGISTER(usbd_uvc, CONFIG_USBD_VIDEO_LOG_LEVEL); + +#define UVC_VBUF_DONE 1 +#define UVC_MAX_FS_DESC (CONFIG_USBD_VIDEO_MAX_FORMATS + 13) +#define UVC_MAX_HS_DESC (CONFIG_USBD_VIDEO_MAX_FORMATS + 13) +#define UVC_IDX_VC_UNIT 3 + +enum uvc_op { + UVC_OP_GET_ERRNO, + UVC_OP_VC_CTRL, + UVC_OP_VS_PROBE, + UVC_OP_VS_COMMIT, + UVC_OP_RETURN_ERROR, + UVC_OP_INVALID, +}; + +enum uvc_class_status { + UVC_STATE_INITIALIZED, + UVC_STATE_ENABLED, + UVC_STATE_STREAM_READY, + UVC_STATE_STREAM_RESTART, + UVC_STATE_PAUSED, +}; + +enum uvc_unit_id { + UVC_UNIT_ID_CT = 1, + UVC_UNIT_ID_SU, + UVC_UNIT_ID_PU, + UVC_UNIT_ID_XU, + UVC_UNIT_ID_OT, +}; + +enum uvc_control_type { + UVC_CONTROL_SIGNED, + UVC_CONTROL_UNSIGNED, +}; + +union uvc_fmt_desc { + struct usb_desc_header hdr; + struct uvc_format_descriptor fmt; + struct uvc_format_uncomp_descriptor fmt_uncomp; + struct uvc_format_mjpeg_descriptor fmt_mjpeg; + struct uvc_frame_descriptor frm; + struct uvc_frame_continuous_descriptor frm_cont; + struct uvc_frame_discrete_descriptor frm_disc; +}; + +struct uvc_desc { + struct usb_association_descriptor iad; + struct usb_if_descriptor if0; + struct uvc_control_header_descriptor if0_hdr; + struct uvc_camera_terminal_descriptor if0_ct; + struct uvc_selector_unit_descriptor if0_su; + struct uvc_processing_unit_descriptor if0_pu; + struct uvc_extension_unit_descriptor if0_xu; + struct uvc_output_terminal_descriptor if0_ot; + struct usb_if_descriptor if1; + struct uvc_stream_header_descriptor if1_hdr; + union uvc_fmt_desc if1_fmts[CONFIG_USBD_VIDEO_MAX_FORMATS]; + struct uvc_color_descriptor if1_color; + struct usb_ep_descriptor if1_ep_fs; + struct usb_ep_descriptor if1_ep_hs; +}; + +struct uvc_data { + /* Let the different parts of the code know of the current state */ + atomic_t state; + /* Index where newly generated descriptors are appended */ + size_t fs_desc_idx; + size_t hs_desc_idx; + size_t fmt_desc_idx; + /* UVC error from latest request */ + uint8_t err; + /*
Input buffers to which enqueued video buffers land */ + struct k_fifo fifo_in; + /* Output buffers from which dequeued buffers are picked */ + struct k_fifo fifo_out; + /* Default video probe stored at boot time and sent back to the host when requested */ + struct uvc_probe default_probe; + /* Video payload header content sent before every frame, updated between every frame */ + struct uvc_payload_header payload_header; + /* Format currently selected by the host */ + uint8_t format_id; + /* Frame currently selected by the host */ + uint8_t frame_id; + /* Video device that is connected to this UVC stream */ + const struct device *video_dev; + /* Video format cached locally for efficiency */ + struct video_format video_fmt; + /* Current frame interval selected by the host */ + struct video_frmival video_frmival; + /* Signal to alert video devices of buffer-related events */ + struct k_poll_signal *video_sig; + /* Byte offset within the currently transmitted video buffer */ + size_t vbuf_offset; + /* Makes sure flushing the stream only happens in one context at a time */ + struct k_mutex mutex; + /* Zero Length packet used to reset a stream when restarted */ + struct net_buf zlp; +}; + +struct uvc_config { + /* Storage for the various descriptors available */ + struct uvc_desc *desc; + /* Class context used by the USB device stack */ + struct usbd_class_data *c_data; + /* Array of pointers to descriptors sent to the USB device stack and the host */ + struct usb_desc_header **fs_desc; + struct usb_desc_header **hs_desc; +}; + +/* Specialized version of UDC net_buf metadata with extra fields */ +struct uvc_buf_info { + /* Regular UDC buf info so that it can be passed to USBD directly */ + struct udc_buf_info udc; + /* Extra field at the end */ + struct video_buffer *vbuf; +} __packed; + +/* Mapping between UVC controls and Video controls */ +struct uvc_control_map { + /* Size to write out */ + uint8_t size; + /* Bit position in the UVC control */ + uint8_t bit; + /* UVC selector identifying this control */ + uint8_t selector; + /* Video CID to use for this control */ + uint32_t cid; + /* Whether the UVC value is signed, always false for bitmaps and boolean */ + enum uvc_control_type type; +}; + +struct uvc_guid_quirk { + /* A Video API format identifier, for which the UVC format GUID is not standard. */ + uint32_t fourcc; + /* GUIDs are 16-bytes long, with the first four bytes being the Four Character Code of the + * format and the rest constant, except for some exceptions listed in this table.
+ */ + uint8_t guid[16]; +}; + +UDC_BUF_POOL_VAR_DEFINE(uvc_buf_pool, DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) * 16, + 512 * DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) * 16, + sizeof(struct uvc_buf_info), NULL); + +static void uvc_flush_queue(const struct device *dev); + +/* UVC public API */ + +void uvc_set_video_dev(const struct device *const dev, const struct device *const video_dev) +{ + struct uvc_data *data = dev->data; + + data->video_dev = video_dev; +} + +/* UVC helper functions */ + +static const struct uvc_guid_quirk uvc_guid_quirks[] = { + {VIDEO_PIX_FMT_YUYV, UVC_FORMAT_GUID("YUY2")}, +}; + +static void uvc_fourcc_to_guid(uint8_t guid[16], uint32_t fourcc) +{ + for (int i = 0; i < ARRAY_SIZE(uvc_guid_quirks); i++) { + if (uvc_guid_quirks[i].fourcc == fourcc) { + memcpy(guid, uvc_guid_quirks[i].guid, 16); + return; + } + } + fourcc = sys_cpu_to_le32(fourcc); + memcpy(guid, UVC_FORMAT_GUID("...."), 16); + memcpy(guid, &fourcc, 4); +} + +static uint32_t uvc_guid_to_fourcc(uint8_t guid[16]) +{ + uint32_t fourcc; + + for (int i = 0; i < ARRAY_SIZE(uvc_guid_quirks); i++) { + if (memcmp(guid, uvc_guid_quirks[i].guid, 16) == 0) { + return uvc_guid_quirks[i].fourcc; + } + } + + memcpy(&fourcc, guid, 4); + + return sys_le32_to_cpu(fourcc); +} + +/* UVC control handling */ + +static const struct uvc_control_map uvc_control_map_ct[] = { + { + .size = 1, + .bit = 1, + .selector = UVC_CT_AE_MODE_CONTROL, + .cid = VIDEO_CID_EXPOSURE_AUTO, + .type = UVC_CONTROL_UNSIGNED, + }, + { + .size = 1, + .bit = 2, + .selector = UVC_CT_AE_PRIORITY_CONTROL, + .cid = VIDEO_CID_EXPOSURE_AUTO_PRIORITY, + .type = UVC_CONTROL_UNSIGNED, + }, + { + .size = 4, + .bit = 3, + .selector = UVC_CT_EXPOSURE_TIME_ABS_CONTROL, + .cid = VIDEO_CID_EXPOSURE_ABSOLUTE, + .type = UVC_CONTROL_UNSIGNED, + }, + { + .size = 4, + .bit = 3, + .selector = UVC_CT_EXPOSURE_TIME_REL_CONTROL, + .cid = VIDEO_CID_EXPOSURE, + .type = UVC_CONTROL_SIGNED, + }, + { + .size = 2, + .bit = 9, + .selector = UVC_CT_ZOOM_ABS_CONTROL, + .cid = VIDEO_CID_ZOOM_ABSOLUTE, + .type = UVC_CONTROL_UNSIGNED, + }, + { + .size = 2, + .bit = 9, + .selector = UVC_CT_ZOOM_REL_CONTROL, + .cid = VIDEO_CID_ZOOM_RELATIVE, + .type = UVC_CONTROL_SIGNED, + }, + { + .size = 2, + .bit = 5, + .selector = UVC_CT_FOCUS_ABS_CONTROL, + .cid = VIDEO_CID_FOCUS_ABSOLUTE, + .type = UVC_CONTROL_UNSIGNED, + }, + {0}, +}; + +static const struct uvc_control_map uvc_control_map_pu[] = { + { + .size = 2, + .bit = 0, + .selector = UVC_PU_BRIGHTNESS_CONTROL, + .cid = VIDEO_CID_BRIGHTNESS, + .type = UVC_CONTROL_SIGNED, + }, + { + .size = 1, + .bit = 1, + .selector = UVC_PU_CONTRAST_CONTROL, + .cid = VIDEO_CID_CONTRAST, + .type = UVC_CONTROL_UNSIGNED, + }, + { + .size = 2, + .bit = 9, + .selector = UVC_PU_GAIN_CONTROL, + .cid = VIDEO_CID_GAIN, + .type = UVC_CONTROL_UNSIGNED, + }, + { + .size = 2, + .bit = 3, + .selector = UVC_PU_SATURATION_CONTROL, + .cid = VIDEO_CID_SATURATION, + .type = UVC_CONTROL_UNSIGNED, + }, + { + .size = 2, + .bit = 6, + .selector = UVC_PU_WHITE_BALANCE_TEMP_CONTROL, + .cid = VIDEO_CID_WHITE_BALANCE_TEMPERATURE, + .type = UVC_CONTROL_UNSIGNED, + }, + {0}, +}; + +static const struct uvc_control_map uvc_control_map_su[] = { + { + .size = 1, + .bit = 0, + .selector = UVC_SU_INPUT_SELECT_CONTROL, + .cid = VIDEO_CID_TEST_PATTERN, + .type = UVC_CONTROL_UNSIGNED, + }, + {0}, +}; + +static const struct uvc_control_map uvc_control_map_xu[] = { + { + .size = 4, + .bit = 0, + .selector = UVC_XU_BASE_CONTROL + 0, + .cid = VIDEO_CID_PRIVATE_BASE + 0, + .type = UVC_CONTROL_UNSIGNED, + 
}, + { + .size = 4, + .bit = 1, + .selector = UVC_XU_BASE_CONTROL + 1, + .cid = VIDEO_CID_PRIVATE_BASE + 1, + .type = UVC_CONTROL_UNSIGNED, + }, + { + .size = 4, + .bit = 2, + .selector = UVC_XU_BASE_CONTROL + 2, + .cid = VIDEO_CID_PRIVATE_BASE + 2, + .type = UVC_CONTROL_UNSIGNED, + }, + { + .size = 4, + .bit = 3, + .selector = UVC_XU_BASE_CONTROL + 3, + .cid = VIDEO_CID_PRIVATE_BASE + 3, + .type = UVC_CONTROL_UNSIGNED, + }, + {0}, +}; + +/* Get the format and frame descriptors selected for the given VideoStreaming interface. */ +static void uvc_get_vs_fmtfrm_desc(const struct device *dev, + struct uvc_format_descriptor **format_desc, + struct uvc_frame_discrete_descriptor **frame_desc) +{ + const struct uvc_config *cfg = dev->config; + struct uvc_data *data = dev->data; + int i; + + *format_desc = NULL; + for (i = 0; i < ARRAY_SIZE(cfg->desc->if1_fmts); i++) { + struct uvc_format_descriptor *desc = &cfg->desc->if1_fmts[i].fmt; + + LOG_DBG("Walking through format %u, subtype %u, index %u, ptr %p", + i, desc->bDescriptorSubtype, desc->bFormatIndex, desc); + + if ((desc->bDescriptorSubtype == UVC_VS_FORMAT_UNCOMPRESSED || + desc->bDescriptorSubtype == UVC_VS_FORMAT_MJPEG) && + desc->bFormatIndex == data->format_id) { + *format_desc = desc; + break; + } + } + + *frame_desc = NULL; + for (i++; i < ARRAY_SIZE(cfg->desc->if1_fmts); i++) { + struct uvc_frame_discrete_descriptor *desc = &cfg->desc->if1_fmts[i].frm_disc; + + LOG_DBG("Walking through frame %u, subtype %u, index %u, ptr %p", + i, desc->bDescriptorSubtype, desc->bFrameIndex, desc); + + if (desc->bDescriptorSubtype != UVC_VS_FRAME_UNCOMPRESSED && + desc->bDescriptorSubtype != UVC_VS_FRAME_MJPEG) { + break; + } + + if (desc->bFrameIndex == data->frame_id) { + *frame_desc = desc; + break; + } + } +} + +static int uvc_add_int(struct net_buf *buf, uint16_t size, int64_t int64, bool is_sig) +{ + if (buf->size < size) { + LOG_WRN("wLength %u too small for control size %u, casting the value to fit", + buf->size, size); + size = buf->size; + } + + switch (size) { + case 1: + net_buf_add_u8(buf, is_sig ? (int8_t)int64 : (uint8_t)int64); + return 0; + case 2: + net_buf_add_le16(buf, is_sig ? (int16_t)int64 : (uint16_t)int64); + return 0; + case 4: + net_buf_add_le32(buf, is_sig ? (int32_t)int64 : (uint32_t)int64); + return 0; + case 8: + net_buf_add_le64(buf, is_sig ? (int64_t)int64 : (uint64_t)int64); + return 0; + default: + LOG_WRN("Invalid size %u", size); + return -ENOTSUP; + } +} + +static int uvc_get_int(const struct net_buf *buf, uint16_t size, int64_t *int64, bool is_sig) +{ + uint8_t data[sizeof(uint64_t)] = {0}; + + if (buf->len != size) { + LOG_ERR("invalid wLength %u for SET request, expected %u", buf->len, size); + return -EINVAL; + } + + /* Do not cast buf->data directly in case it is not aligned */ + memcpy(data, buf->data, MIN(sizeof(data), size)); + + switch (size) { + case 1: + *int64 = is_sig ? *(int8_t *)data : *(uint8_t *)data; + return 0; + case 2: + *int64 = is_sig ? (int16_t)sys_le16_to_cpu(*(uint16_t *)data) + : (uint16_t)sys_le16_to_cpu(*(uint16_t *)data); + return 0; + case 4: + *int64 = is_sig ? (int32_t)sys_le32_to_cpu(*(uint32_t *)data) + : (uint32_t)sys_le32_to_cpu(*(uint32_t *)data); + return 0; + case 8: + *int64 = is_sig ? 
(int64_t)sys_le64_to_cpu(*(uint64_t *)data) + : (uint64_t)sys_le64_to_cpu(*(uint64_t *)data); + return 0; + default: + LOG_ERR("Unsupported size, cannot convert"); + return -EINVAL; + } +} + +static int uvc_map_vc_int(bool is_get, int64_t *int64, const int *map, size_t map_sz) +{ + if (*int64 < 0) { + return -ERANGE; + } + + if (is_get) { + for (int i = 0; i < map_sz; i++) { + if ((1 << i) & *int64) { + *int64 = map[i]; + return 0; + } + } + } else { + for (int i = 0; i < map_sz; i++) { + if (map[i] == *int64) { + *int64 = 1 << i; + return 0; + } + } + } + + LOG_ERR("Value %lli out of range [0, %zu] for this control type", *int64, map_sz); + return -ERANGE; +} + +static int uvc_convert_vc_int(bool is_get, int64_t *int64, uint32_t cid) +{ + static const int ct_ae_mode[] = { + [0] = VIDEO_EXPOSURE_MANUAL, + [1] = VIDEO_EXPOSURE_AUTO, + [2] = VIDEO_EXPOSURE_SHUTTER_PRIORITY, + [3] = VIDEO_EXPOSURE_APERTURE_PRIORITY, + }; + + switch (cid) { + case VIDEO_CID_EXPOSURE_AUTO: + return uvc_map_vc_int(is_get, int64, ct_ae_mode, ARRAY_SIZE(ct_ae_mode)); + default: + return 0; + } +} + +static int uvc_add_vc_int(struct net_buf *buf, const struct uvc_control_map *map, int64_t int64) +{ + int ret; + + ret = uvc_convert_vc_int(true, &int64, map->cid); + if (ret != 0) { + return ret; + } + + return uvc_add_int(buf, map->size, int64, map->type == UVC_CONTROL_SIGNED); +} + +static int uvc_get_vc_int(const struct net_buf *buf, const struct uvc_control_map *map, + int64_t *int64) +{ + bool is_sig = map->type == UVC_CONTROL_SIGNED; + int ret; + + ret = uvc_get_int(buf, map->size, int64, is_sig); + if (ret != 0) { + return ret; + } + + LOG_DBG("Received %s %u-bit value %lld, converting and applying", + is_sig ? "signed" : "unsigned", map->size * 8, *int64); + + return uvc_convert_vc_int(false, int64, map->cid); +} + +static uint8_t uvc_get_bulk_in(const struct device *dev) +{ + const struct uvc_config *cfg = dev->config; + + switch (usbd_bus_speed(usbd_class_get_ctx(cfg->c_data))) { + case USBD_SPEED_FS: + return cfg->desc->if1_ep_fs.bEndpointAddress; + case USBD_SPEED_HS: + return cfg->desc->if1_ep_hs.bEndpointAddress; + default: + CODE_UNREACHABLE; + } +} + +static size_t uvc_get_bulk_mps(struct usbd_class_data *const c_data) +{ + struct usbd_context *uds_ctx = usbd_class_get_ctx(c_data); + + switch (usbd_bus_speed(uds_ctx)) { + case USBD_SPEED_FS: + return 64; + case USBD_SPEED_HS: + return 512; + default: + CODE_UNREACHABLE; + } +} + +static int uvc_get_vs_probe_format_index(const struct device *dev, struct uvc_probe *probe, + uint8_t request) +{ + const struct uvc_config *cfg = dev->config; + struct uvc_data *data = dev->data; + uint8_t max = 0; + + for (int i = 0; i < ARRAY_SIZE(cfg->desc->if1_fmts); i++) { + struct uvc_format_descriptor *desc = &cfg->desc->if1_fmts[i].fmt; + + max += desc->bDescriptorSubtype == UVC_VS_FORMAT_UNCOMPRESSED || + desc->bDescriptorSubtype == UVC_VS_FORMAT_MJPEG; + } + + switch (request) { + case UVC_GET_RES: + case UVC_GET_MIN: + probe->bFormatIndex = 1; + break; + case UVC_GET_MAX: + probe->bFormatIndex = max; + break; + case UVC_GET_CUR: + probe->bFormatIndex = data->format_id; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int uvc_get_vs_probe_frame_index(const struct device *dev, struct uvc_probe *probe, + uint8_t request) +{ + const struct uvc_config *cfg = dev->config; + struct uvc_data *data = dev->data; + uint8_t max = 0; + int i; + + /* Search the current format */ + for (i = 0; i < ARRAY_SIZE(cfg->desc->if1_fmts); i++) { + struct 
uvc_format_descriptor *desc = &cfg->desc->if1_fmts[i].fmt; + + if ((desc->bDescriptorSubtype == UVC_VS_FORMAT_UNCOMPRESSED || + desc->bDescriptorSubtype == UVC_VS_FORMAT_MJPEG) && + desc->bFormatIndex == data->format_id) { + break; + } + } + + /* Seek until the next format */ + for (i++; i < ARRAY_SIZE(cfg->desc->if1_fmts); i++) { + struct uvc_frame_discrete_descriptor *desc = &cfg->desc->if1_fmts[i].frm_disc; + + if (desc->bDescriptorSubtype != UVC_VS_FRAME_UNCOMPRESSED && + desc->bDescriptorSubtype != UVC_VS_FRAME_MJPEG) { + break; + } + max++; + } + + switch (request) { + case UVC_GET_RES: + case UVC_GET_MIN: + probe->bFrameIndex = 1; + break; + case UVC_GET_MAX: + probe->bFrameIndex = max; + break; + case UVC_GET_CUR: + probe->bFrameIndex = data->frame_id; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int uvc_get_vs_probe_frame_interval(const struct device *dev, struct uvc_probe *probe, + uint8_t request) +{ + struct uvc_data *data = dev->data; + struct uvc_format_descriptor *format_desc; + struct uvc_frame_discrete_descriptor *frame_desc; + int max; + + uvc_get_vs_fmtfrm_desc(dev, &format_desc, &frame_desc); + if (format_desc == NULL || frame_desc == NULL) { + LOG_DBG("Selected format ID or frame ID not found"); + return -EINVAL; + } + + switch (request) { + case UVC_GET_MIN: + probe->dwFrameInterval = sys_cpu_to_le32(frame_desc->dwFrameInterval[0]); + break; + case UVC_GET_MAX: + max = frame_desc->bFrameIntervalType - 1; + probe->dwFrameInterval = sys_cpu_to_le32(frame_desc->dwFrameInterval[max]); + break; + case UVC_GET_RES: + probe->dwFrameInterval = sys_cpu_to_le32(1); + break; + case UVC_GET_CUR: + probe->dwFrameInterval = sys_cpu_to_le32(data->video_frmival.numerator); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int uvc_get_vs_probe_max_size(const struct device *dev, struct uvc_probe *probe, + uint8_t request) +{ + struct uvc_data *data = dev->data; + struct video_format *fmt = &data->video_fmt; + uint32_t max_frame_size = MAX(fmt->pitch, fmt->width) * fmt->height; + uint32_t max_payload_size = max_frame_size + CONFIG_USBD_VIDEO_HEADER_SIZE; + + switch (request) { + case UVC_GET_MIN: + case UVC_GET_MAX: + case UVC_GET_CUR: + probe->dwMaxPayloadTransferSize = sys_cpu_to_le32(max_payload_size); + probe->dwMaxVideoFrameSize = sys_cpu_to_le32(max_frame_size); + break; + case UVC_GET_RES: + probe->dwMaxPayloadTransferSize = sys_cpu_to_le32(1); + probe->dwMaxVideoFrameSize = sys_cpu_to_le32(1); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int uvc_get_vs_format_from_desc(const struct device *dev, struct video_format *fmt) +{ + struct uvc_data *data = dev->data; + struct uvc_format_descriptor *format_desc = NULL; + struct uvc_frame_discrete_descriptor *frame_desc; + + /* Update the format based on the probe message from the host */ + uvc_get_vs_fmtfrm_desc(dev, &format_desc, &frame_desc); + if (format_desc == NULL || frame_desc == NULL) { + LOG_ERR("Invalid format ID (%u) and/or frame ID (%u)", + data->format_id, data->frame_id); + return -EINVAL; + } + + /* Translate between UVC pixel formats and Video pixel formats */ + if (format_desc->bDescriptorSubtype == UVC_VS_FORMAT_MJPEG) { + fmt->pixelformat = VIDEO_PIX_FMT_JPEG; + + LOG_DBG("Found descriptor for format %u, frame %u, MJPEG", + format_desc->bFormatIndex, frame_desc->bFrameIndex); + } else { + struct uvc_format_uncomp_descriptor *format_uncomp_desc = (void *)format_desc; + + fmt->pixelformat = uvc_guid_to_fourcc(format_uncomp_desc->guidFormat); + + 
LOG_DBG("Found descriptor for format %u, frame %u, GUID '%.4s', pixfmt %04x", + format_uncomp_desc->bFormatIndex, frame_desc->bFrameIndex, + format_uncomp_desc->guidFormat, fmt->pixelformat); + } + + /* Fill the format according to what the host selected */ + fmt->width = frame_desc->wWidth; + fmt->height = frame_desc->wHeight; + fmt->pitch = fmt->width * video_bits_per_pixel(fmt->pixelformat) / BITS_PER_BYTE; + + return 0; +} + +static int uvc_get_vs_probe_struct(const struct device *dev, struct uvc_probe *probe, + uint8_t request) +{ + struct uvc_data *data = dev->data; + struct video_format *fmt = &data->video_fmt; + int ret; + + ret = uvc_get_vs_probe_format_index(dev, probe, request); + if (ret != 0) { + return ret; + } + + ret = uvc_get_vs_probe_frame_index(dev, probe, request); + if (ret != 0) { + return ret; + } + + ret = uvc_get_vs_format_from_desc(dev, fmt); + if (ret != 0) { + return ret; + } + + ret = uvc_get_vs_probe_frame_interval(dev, probe, request); + if (ret != 0) { + return ret; + } + + ret = uvc_get_vs_probe_max_size(dev, probe, request); + if (ret != 0) { + return ret; + } + + probe->dwClockFrequency = sys_cpu_to_le32(1); + probe->bmFramingInfo = UVC_BMFRAMING_INFO_FID | UVC_BMFRAMING_INFO_EOF; + probe->bPreferedVersion = 1; + probe->bMinVersion = 1; + probe->bMaxVersion = 1; + probe->bUsage = 0; + probe->bBitDepthLuma = 0; + probe->bmSettings = 0; + probe->bMaxNumberOfRefFramesPlus1 = 1; + probe->bmRateControlModes = 0; + probe->bmLayoutPerStream = 0; + probe->wKeyFrameRate = sys_cpu_to_le16(0); + probe->wPFrameRate = sys_cpu_to_le16(0); + probe->wCompQuality = sys_cpu_to_le16(0); + probe->wCompWindowSize = sys_cpu_to_le16(0); + probe->wDelay = sys_cpu_to_le16(1); + + return 0; +} + +static int uvc_get_vs_probe(const struct device *dev, struct net_buf *buf, uint8_t request) +{ + struct uvc_data *data = dev->data; + size_t size = MIN(sizeof(struct uvc_probe), buf->size); + struct uvc_probe *probe; + + switch (request) { + case UVC_GET_INFO: + return uvc_add_int(buf, 1, UVC_INFO_SUPPORTS_GET | UVC_INFO_SUPPORTS_SET, false); + case UVC_GET_LEN: + return uvc_add_int(buf, 2, sizeof(struct uvc_probe), false); + case UVC_GET_DEF: + net_buf_add_mem(buf, &data->default_probe, size); + return 0; + case UVC_GET_MIN: + case UVC_GET_RES: + case UVC_GET_MAX: + case UVC_GET_CUR: + net_buf_add(buf, size); + probe = (void *)buf->data; + break; + default: + return -EINVAL; + } + + return uvc_get_vs_probe_struct(dev, probe, request); +} + +static int uvc_set_vs_probe(const struct device *dev, const struct net_buf *buf) +{ + struct uvc_data *data = dev->data; + struct uvc_probe *probe; + struct uvc_probe max = {0}; + int ret; + + if (buf->len != sizeof(*probe)) { + LOG_ERR("Expected probe message of %u bytes got %u", sizeof(*probe), buf->len); + return -EINVAL; + } + + probe = (struct uvc_probe *)buf->data; + + ret = uvc_get_vs_probe_struct(dev, &max, UVC_GET_MAX); + if (ret != 0) { + return ret; + } + + if (probe->bFrameIndex > max.bFrameIndex) { + LOG_WRN("The bFrameIndex %u requested is beyond the max %u", + probe->bFrameIndex, max.bFrameIndex); + return -ERANGE; + } + + if (probe->bFormatIndex > max.bFormatIndex) { + LOG_WRN("The bFormatIndex %u requested is beyond the max %u", + probe->bFormatIndex, max.bFormatIndex); + return -ERANGE; + } + + if (probe->dwFrameInterval != 0) { + data->video_frmival.numerator = sys_le32_to_cpu(probe->dwFrameInterval); + data->video_frmival.denominator = NSEC_PER_SEC / 100; + } + if (probe->bFrameIndex != 0) { + data->frame_id = probe->bFrameIndex; + 
+
+static int uvc_set_vs_probe(const struct device *dev, const struct net_buf *buf)
+{
+	struct uvc_data *data = dev->data;
+	struct uvc_probe *probe;
+	struct uvc_probe max = {0};
+	int ret;
+
+	if (buf->len != sizeof(*probe)) {
+		LOG_ERR("Expected a probe message of %zu bytes, got %u", sizeof(*probe), buf->len);
+		return -EINVAL;
+	}
+
+	probe = (struct uvc_probe *)buf->data;
+
+	ret = uvc_get_vs_probe_struct(dev, &max, UVC_GET_MAX);
+	if (ret != 0) {
+		return ret;
+	}
+
+	if (probe->bFrameIndex > max.bFrameIndex) {
+		LOG_WRN("The bFrameIndex %u requested is beyond the max %u",
+			probe->bFrameIndex, max.bFrameIndex);
+		return -ERANGE;
+	}
+
+	if (probe->bFormatIndex > max.bFormatIndex) {
+		LOG_WRN("The bFormatIndex %u requested is beyond the max %u",
+			probe->bFormatIndex, max.bFormatIndex);
+		return -ERANGE;
+	}
+
+	if (probe->dwFrameInterval != 0) {
+		data->video_frmival.numerator = sys_le32_to_cpu(probe->dwFrameInterval);
+		data->video_frmival.denominator = NSEC_PER_SEC / 100;
+	}
+	if (probe->bFrameIndex != 0) {
+		data->frame_id = probe->bFrameIndex;
+	}
+	if (probe->bFormatIndex != 0) {
+		data->format_id = probe->bFormatIndex;
+	}
+
+	return 0;
+}
+
+static int uvc_get_vs_commit(const struct device *dev, struct net_buf *buf, uint8_t request)
+{
+	if (request != UVC_GET_CUR) {
+		LOG_WRN("commit: invalid bRequest %u", request);
+		return -EINVAL;
+	}
+
+	return uvc_get_vs_probe(dev, buf, UVC_GET_CUR);
+}
+
+static int uvc_set_vs_commit(const struct device *dev, const struct net_buf *buf)
+{
+	struct uvc_data *data = dev->data;
+	struct video_format *fmt = &data->video_fmt;
+	struct video_frmival frmival;
+	int ret;
+
+	__ASSERT_NO_MSG(data->video_dev != NULL);
+
+	ret = uvc_set_vs_probe(dev, buf);
+	if (ret != 0) {
+		return ret;
+	}
+
+	/* Take the copy only now, after the probe above updated the frame interval */
+	frmival = data->video_frmival;
+
+	LOG_INF("Ready to transfer, setting source format to '%s' %ux%u",
+		VIDEO_FOURCC_TO_STR(fmt->pixelformat), fmt->width, fmt->height);
+
+	ret = video_set_format(data->video_dev, VIDEO_EP_OUT, fmt);
+	if (ret != 0) {
+		LOG_ERR("Could not set the format of %s", data->video_dev->name);
+		return ret;
+	}
+
+	LOG_DBG("Setting frame interval of %s to %u/%u",
+		data->video_dev->name,
+		data->video_frmival.numerator, data->video_frmival.denominator);
+
+	ret = video_set_frmival(data->video_dev, VIDEO_EP_OUT, &frmival);
+	if (ret != 0) {
+		LOG_WRN("Could not set the framerate of %s", data->video_dev->name);
+	}
+
+	LOG_DBG("UVC device ready, %s can now be started", data->video_dev->name);
+
+	if (atomic_test_bit(&data->state, UVC_STATE_STREAM_READY)) {
+		atomic_set_bit(&data->state, UVC_STATE_STREAM_RESTART);
+	}
+
+	atomic_set_bit(&data->state, UVC_STATE_STREAM_READY);
+	uvc_flush_queue(dev);
+
+	return 0;
+}
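+
+/*
+ * Illustrative walk-through (not normative): a host typically negotiates the
+ * stream with the requests handled above, e.g. for the first format and
+ * frame descriptors at 30 FPS:
+ *
+ *	SET_CUR(PROBE)  <- bFormatIndex=1, bFrameIndex=1, dwFrameInterval=333333
+ *	GET_CUR(PROBE)  -> the device replies with the values it accepted
+ *	SET_CUR(COMMIT) <- the host locks in the negotiated parameters
+ *
+ * On COMMIT, uvc_set_vs_commit() forwards the result to the source video
+ * device through video_set_format() and video_set_frmival(), then flags the
+ * stream as ready so that queued video buffers start being transferred.
+ */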
+
+static int uvc_get_vc_ctrl(const struct device *dev, struct net_buf *buf,
+			   const struct uvc_control_map *map, uint8_t request)
+{
+	struct uvc_data *data = dev->data;
+	const struct device *video_dev = data->video_dev;
+	struct video_ctrl_query cq = {.id = map->cid};
+	struct video_control ctrl = {.id = map->cid};
+	bool is64;
+	int ret;
+
+	__ASSERT_NO_MSG(video_dev != NULL);
+
+	ret = video_query_ctrl(video_dev, &cq);
+	if (ret != 0) {
+		LOG_ERR("Failed to query the video device for control 0x%x", cq.id);
+		return ret;
+	}
+
+	LOG_INF("Responding to GET control '%s', size %u", cq.name, map->size);
+
+	if (cq.type != VIDEO_CTRL_TYPE_BOOLEAN && cq.type != VIDEO_CTRL_TYPE_MENU &&
+	    cq.type != VIDEO_CTRL_TYPE_INTEGER && cq.type != VIDEO_CTRL_TYPE_INTEGER64) {
+		LOG_ERR("Unsupported control type %u", cq.type);
+		return -ENOTSUP;
+	}
+
+	is64 = (cq.type == VIDEO_CTRL_TYPE_INTEGER64);
+
+	switch (request) {
+	case UVC_GET_MIN:
+		return uvc_add_vc_int(buf, map, is64 ? cq.range.min64 : cq.range.min);
+	case UVC_GET_MAX:
+		return uvc_add_vc_int(buf, map, is64 ? cq.range.max64 : cq.range.max);
+	case UVC_GET_RES:
+		return uvc_add_vc_int(buf, map, is64 ? cq.range.step64 : cq.range.step);
+	case UVC_GET_DEF:
+		return uvc_add_vc_int(buf, map, is64 ? cq.range.def64 : cq.range.def);
+	case UVC_GET_CUR:
+		ret = video_get_ctrl(video_dev, &ctrl);
+		if (ret != 0) {
+			LOG_INF("Failed to query %s", video_dev->name);
+			return ret;
+		}
+
+		return uvc_add_vc_int(buf, map, is64 ? ctrl.val64 : ctrl.val);
+	case UVC_GET_INFO:
+		return uvc_add_int(buf, 1, UVC_INFO_SUPPORTS_GET | UVC_INFO_SUPPORTS_SET, false);
+	case UVC_GET_LEN:
+		return uvc_add_int(buf, buf->size, map->size, false);
+	default:
+		LOG_WRN("Unsupported request type %u", request);
+		return -ENOTSUP;
+	}
+}
+
+static int uvc_set_vc_ctrl(const struct device *dev, const struct net_buf *buf,
+			   const struct uvc_control_map *map)
+{
+	struct uvc_data *data = dev->data;
+	const struct device *video_dev = data->video_dev;
+	struct video_ctrl_query cq = {.id = map->cid};
+	struct video_control ctrl = {.id = map->cid};
+	int64_t int64;
+	int ret;
+
+	__ASSERT_NO_MSG(video_dev != NULL);
+
+	ret = video_query_ctrl(video_dev, &cq);
+	if (ret != 0) {
+		LOG_ERR("Failed to query the video device for control 0x%x", cq.id);
+		return ret;
+	}
+
+	if (cq.type != VIDEO_CTRL_TYPE_BOOLEAN && cq.type != VIDEO_CTRL_TYPE_MENU &&
+	    cq.type != VIDEO_CTRL_TYPE_INTEGER && cq.type != VIDEO_CTRL_TYPE_INTEGER64) {
+		LOG_ERR("Unsupported control type %u", cq.type);
+		return -ENOTSUP;
+	}
+
+	ret = uvc_get_vc_int(buf, map, &int64);
+	if (ret != 0) {
+		return ret;
+	}
+
+	if (cq.type == VIDEO_CTRL_TYPE_INTEGER64) {
+		ctrl.val64 = int64;
+	} else {
+		ctrl.val = int64;
+	}
+
+	LOG_DBG("Setting CID 0x%08x to %lld", map->cid, int64);
+
+	ret = video_set_ctrl(video_dev, &ctrl);
+	if (ret != 0) {
+		LOG_ERR("Failed to configure target video device");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int uvc_get_errno(const struct device *dev, struct net_buf *buf, uint8_t request)
+{
+	struct uvc_data *data = dev->data;
+
+	switch (request) {
+	case UVC_GET_INFO:
+		return uvc_add_int(buf, 1, UVC_INFO_SUPPORTS_GET, false);
+	case UVC_GET_CUR:
+		return uvc_add_int(buf, 1, data->err, false);
+	default:
+		LOG_WRN("Unsupported request type %u", request);
+		return -ENOTSUP;
+	}
+}
+
+static void uvc_set_errno(const struct device *dev, int ret)
+{
+	struct uvc_data *data = dev->data;
+
+	switch (ret) {
+	case 0:
+		data->err = 0;
+		break;
+	case EBUSY:
+	case EAGAIN:
+	case EINPROGRESS:
+	case EALREADY:
+		data->err = UVC_ERR_NOT_READY;
+		break;
+	case EOVERFLOW:
+	case ERANGE:
+	case E2BIG:
+		data->err = UVC_ERR_OUT_OF_RANGE;
+		break;
+	case EDOM:
+	case EINVAL:
+		data->err = UVC_ERR_INVALID_VALUE_WITHIN_RANGE;
+		break;
+	case ENODEV:
+	case ENOTSUP:
+	case ENOSYS:
+		data->err = UVC_ERR_INVALID_REQUEST;
+		break;
+	default:
+		data->err = UVC_ERR_UNKNOWN;
+		break;
+	}
+}
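+
+/*
+ * Illustrative sketch (not normative): when the application's video device
+ * rejects a value, uvc_set_errno() above latches a UVC error code that the
+ * host can read back through the Request Error Code Control, e.g.:
+ *
+ *	SET_CUR(PU brightness) <- out-of-range value, video driver returns -ERANGE
+ *	GET_CUR(VC_REQUEST_ERROR_CODE_CONTROL) -> UVC_ERR_OUT_OF_RANGE (0x04)
+ */
+static int uvc_get_control_op(const struct device *dev, const struct usb_setup_packet *setup,
+			      const struct uvc_control_map **map)
+{
+	const struct uvc_config *cfg = dev->config;
+	struct uvc_data *data = dev->data;
+	uint8_t ifnum = (setup->wIndex >> 0) & 0xff;
+	uint8_t unit_id = (setup->wIndex >> 8) & 0xff;
+	uint8_t selector = setup->wValue >> 8;
+	uint8_t subtype = 0;
+	const struct uvc_control_map *list = NULL;
+
+	/* VideoStreaming operation */
+
+	if (ifnum == cfg->desc->if1.bInterfaceNumber) {
+		switch (selector) {
+		case UVC_VS_PROBE_CONTROL:
+			LOG_INF("Host sent a VideoStreaming PROBE control");
+			return UVC_OP_VS_PROBE;
+		case UVC_VS_COMMIT_CONTROL:
+			LOG_INF("Host sent a VideoStreaming COMMIT control");
+			return UVC_OP_VS_COMMIT;
+		default:
+			LOG_ERR("Invalid probe/commit operation for bInterfaceNumber %u", ifnum);
+			return UVC_OP_INVALID;
+		}
+	}
+
+	/* VideoControl operation */
+
+	if (ifnum != cfg->desc->if0.bInterfaceNumber) {
+		LOG_WRN("Interface %u not found", ifnum);
+		data->err = UVC_ERR_INVALID_UNIT;
+		return UVC_OP_RETURN_ERROR;
+	}
+
+	if (unit_id == 0) {
+		return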
UVC_OP_GET_ERRNO; + } + + for (int i = UVC_IDX_VC_UNIT;; i++) { + struct uvc_unit_descriptor *desc = (void *)cfg->fs_desc[i]; + + if (desc->bDescriptorType != USB_DESC_CS_INTERFACE || + (desc->bDescriptorSubtype != UVC_VC_INPUT_TERMINAL && + desc->bDescriptorSubtype != UVC_VC_ENCODING_UNIT && + desc->bDescriptorSubtype != UVC_VC_SELECTOR_UNIT && + desc->bDescriptorSubtype != UVC_VC_EXTENSION_UNIT && + desc->bDescriptorSubtype != UVC_VC_PROCESSING_UNIT)) { + break; + } + + if (unit_id == desc->bUnitID) { + subtype = desc->bDescriptorSubtype; + break; + } + } + + if (subtype == 0) { + goto err; + } + + switch (subtype) { + case UVC_VC_INPUT_TERMINAL: + list = uvc_control_map_ct; + break; + case UVC_VC_SELECTOR_UNIT: + list = uvc_control_map_su; + break; + case UVC_VC_PROCESSING_UNIT: + list = uvc_control_map_pu; + break; + case UVC_VC_EXTENSION_UNIT: + list = uvc_control_map_xu; + break; + default: + CODE_UNREACHABLE; + } + + *map = NULL; + for (int i = 0; list[i].size != 0; i++) { + if (list[i].selector == selector) { + *map = &list[i]; + break; + } + } + if (*map == NULL) { + goto err; + } + + return UVC_OP_VC_CTRL; +err: + LOG_WRN("No control matches selector %u and bUnitID %u", selector, unit_id); + data->err = UVC_ERR_INVALID_CONTROL; + return UVC_OP_RETURN_ERROR; +} + +static int uvc_control_to_host(struct usbd_class_data *const c_data, + const struct usb_setup_packet *const setup, + struct net_buf *const buf) +{ + const struct device *dev = usbd_class_get_private(c_data); + const struct uvc_control_map *map = NULL; + uint8_t request = setup->bRequest; + + LOG_INF("Host sent a %s request, wValue 0x%04x, wIndex 0x%04x, wLength %u", + request == UVC_GET_CUR ? "GET_CUR" : request == UVC_GET_MIN ? "GET_MIN" : + request == UVC_GET_MAX ? "GET_MAX" : request == UVC_GET_RES ? "GET_RES" : + request == UVC_GET_LEN ? "GET_LEN" : request == UVC_GET_DEF ? "GET_DEF" : + request == UVC_GET_INFO ? 
"GET_INFO" : "bad", + setup->wValue, setup->wIndex, setup->wLength); + + if (setup->wLength > buf->size) { + LOG_ERR("wLength %u larger than %u bytes", setup->wLength, buf->size); + errno = ENOMEM; + goto end; + } + + buf->size = setup->wLength; + + switch (uvc_get_control_op(dev, setup, &map)) { + case UVC_OP_VS_PROBE: + errno = -uvc_get_vs_probe(dev, buf, setup->bRequest); + break; + case UVC_OP_VS_COMMIT: + errno = -uvc_get_vs_commit(dev, buf, setup->bRequest); + break; + case UVC_OP_VC_CTRL: + errno = -uvc_get_vc_ctrl(dev, buf, map, setup->bRequest); + break; + case UVC_OP_GET_ERRNO: + errno = -uvc_get_errno(dev, buf, setup->bRequest); + break; + case UVC_OP_RETURN_ERROR: + errno = EINVAL; + return 0; + default: + LOG_WRN("Unhandled operation, stalling control command"); + errno = EINVAL; + } +end: + uvc_set_errno(dev, errno); + + return 0; +} + +static int uvc_control_to_dev(struct usbd_class_data *const c_data, + const struct usb_setup_packet *const setup, + const struct net_buf *const buf) +{ + const struct device *dev = usbd_class_get_private(c_data); + const struct uvc_control_map *map = NULL; + + if (setup->bRequest != UVC_SET_CUR) { + LOG_WRN("Host issued a control write message but the bRequest is not SET_CUR"); + errno = ENOMEM; + goto end; + } + + LOG_INF("Host sent a SET_CUR request, wValue 0x%04x, wIndex 0x%04x, wLength %u", + setup->wValue, setup->wIndex, setup->wLength); + + switch (uvc_get_control_op(dev, setup, &map)) { + case UVC_OP_VS_PROBE: + errno = -uvc_set_vs_probe(dev, buf); + break; + case UVC_OP_VS_COMMIT: + errno = -uvc_set_vs_commit(dev, buf); + break; + case UVC_OP_VC_CTRL: + errno = -uvc_set_vc_ctrl(dev, buf, map); + break; + case UVC_OP_RETURN_ERROR: + errno = EINVAL; + return 0; + default: + LOG_WRN("Unhandled operation, stalling control command"); + errno = EINVAL; + } +end: + uvc_set_errno(dev, errno); + + return 0; +} + +/* UVC descriptor handling */ + +static void *uvc_get_desc(struct usbd_class_data *const c_data, const enum usbd_speed speed) +{ + const struct device *dev = usbd_class_get_private(c_data); + const struct uvc_config *cfg = dev->config; + + cfg->desc->iad.bFirstInterface = cfg->desc->if0.bInterfaceNumber; + cfg->desc->if0_hdr.baInterfaceNr[0] = cfg->desc->if1.bInterfaceNumber; + + switch (speed) { + case USBD_SPEED_FS: + cfg->desc->if1_hdr.bEndpointAddress = cfg->desc->if1_ep_fs.bEndpointAddress; + return (void *)cfg->fs_desc; + case USBD_SPEED_HS: + cfg->desc->if1_hdr.bEndpointAddress = cfg->desc->if1_ep_hs.bEndpointAddress; + return (void *)cfg->hs_desc; + default: + CODE_UNREACHABLE; + } +} + +static int uvc_add_desc(const struct device *dev, void *desc, bool add_to_fs, bool add_to_hs) +{ + const struct uvc_config *cfg = dev->config; + struct uvc_data *data = dev->data; + static const struct usb_desc_header nil_desc; + + if (add_to_fs) { + if (data->fs_desc_idx + 1 >= UVC_MAX_FS_DESC) { + goto err; + } + cfg->fs_desc[data->fs_desc_idx] = (struct usb_desc_header *)desc; + data->fs_desc_idx++; + cfg->fs_desc[data->fs_desc_idx] = (struct usb_desc_header *)&nil_desc; + } + + if (add_to_hs) { + if (data->hs_desc_idx + 1 >= UVC_MAX_HS_DESC) { + goto err; + } + cfg->hs_desc[data->hs_desc_idx] = (struct usb_desc_header *)desc; + data->hs_desc_idx++; + cfg->hs_desc[data->hs_desc_idx] = (struct usb_desc_header *)&nil_desc; + } + + return 0; +err: + LOG_ERR("Out of descriptor pointers, raise CONFIG_USBD_VIDEO_MAX_FORMATS above %u", + CONFIG_USBD_VIDEO_MAX_FORMATS); + return -ENOMEM; +} + +static union uvc_fmt_desc *uvc_new_fmt_desc(const struct 
+static union uvc_fmt_desc *uvc_new_fmt_desc(const struct device *dev)
+{
+	const struct uvc_config *cfg = dev->config;
+	struct uvc_data *data = dev->data;
+	void *desc;
+	int ret;
+
+	BUILD_ASSERT(CONFIG_USBD_VIDEO_MAX_FORMATS == ARRAY_SIZE(cfg->desc->if1_fmts));
+
+	if (data->fmt_desc_idx >= CONFIG_USBD_VIDEO_MAX_FORMATS) {
+		LOG_ERR("Out of descriptor pointers, raise CONFIG_USBD_VIDEO_MAX_FORMATS above %u",
+			CONFIG_USBD_VIDEO_MAX_FORMATS);
+		return NULL;
+	}
+
+	desc = &cfg->desc->if1_fmts[data->fmt_desc_idx];
+	data->fmt_desc_idx++;
+
+	LOG_DBG("Allocated format/frame descriptor %u (%p)", data->fmt_desc_idx, desc);
+
+	ret = uvc_add_desc(dev, desc, true, true);
+	if (ret != 0) {
+		return NULL;
+	}
+
+	return desc;
+}
+
+static int uvc_add_vs_format_desc(const struct device *dev,
+				  struct uvc_format_descriptor **format_desc,
+				  const struct video_format_cap *cap)
+{
+	const struct uvc_config *cfg = dev->config;
+
+	__ASSERT_NO_MSG(format_desc != NULL);
+
+	if (cap->pixelformat == VIDEO_PIX_FMT_JPEG) {
+		struct uvc_format_mjpeg_descriptor *desc;
+
+		LOG_INF("Adding format descriptor #%u for MJPEG",
+			cfg->desc->if1_hdr.bNumFormats + 1);
+
+		desc = &uvc_new_fmt_desc(dev)->fmt_mjpeg;
+		if (desc == NULL) {
+			return -ENOMEM;
+		}
+		desc->bDescriptorType = USB_DESC_CS_INTERFACE;
+		desc->bFormatIndex = cfg->desc->if1_hdr.bNumFormats + 1;
+		desc->bLength = sizeof(*desc);
+		desc->bDescriptorSubtype = UVC_VS_FORMAT_MJPEG;
+		desc->bDefaultFrameIndex = 1;
+		cfg->desc->if1_hdr.bNumFormats++;
+		cfg->desc->if1_hdr.wTotalLength += desc->bLength;
+		*format_desc = (struct uvc_format_descriptor *)desc;
+	} else {
+		struct uvc_format_uncomp_descriptor *desc;
+
+		LOG_INF("Adding format descriptor #%u for '%s'",
+			cfg->desc->if1_hdr.bNumFormats + 1, VIDEO_FOURCC_TO_STR(cap->pixelformat));
+
+		desc = &uvc_new_fmt_desc(dev)->fmt_uncomp;
+		if (desc == NULL) {
+			return -ENOMEM;
+		}
+		desc->bDescriptorType = USB_DESC_CS_INTERFACE;
+		desc->bFormatIndex = cfg->desc->if1_hdr.bNumFormats + 1;
+		desc->bLength = sizeof(*desc);
+		desc->bDescriptorSubtype = UVC_VS_FORMAT_UNCOMPRESSED;
+		uvc_fourcc_to_guid(desc->guidFormat, cap->pixelformat);
+		desc->bBitsPerPixel = video_bits_per_pixel(cap->pixelformat);
+		desc->bDefaultFrameIndex = 1;
+		cfg->desc->if1_hdr.bNumFormats++;
+		cfg->desc->if1_hdr.wTotalLength += desc->bLength;
+		*format_desc = (struct uvc_format_descriptor *)desc;
+	}
+
+	__ASSERT_NO_MSG(*format_desc != NULL);
+
+	return 0;
+}
+
+static int uvc_compare_frmival_desc(const void *a, const void *b)
+{
+	uint32_t ia, ib;
+
+	/* Copy in case a and b are not 32-bit aligned */
+	memcpy(&ia, a, sizeof(uint32_t));
+	memcpy(&ib, b, sizeof(uint32_t));
+
+	/* UVC expects ascending order: shortest interval (fastest rate) first */
+	return ia - ib;
+}
+
+static int uvc_add_vs_frame_interval(struct uvc_frame_discrete_descriptor *desc,
+				     const struct video_frmival *frmival,
+				     struct video_format *fmt)
+{
+	int i = desc->bFrameIntervalType;
+	uint32_t bitrate;
+
+	if (i >= CONFIG_USBD_VIDEO_MAX_FRMIVAL) {
+		LOG_WRN("Out of frame interval fields");
+		return -ENOSPC;
+	}
+
+	desc->dwFrameInterval[i] = sys_cpu_to_le32(video_frmival_nsec(frmival) / 100);
+	desc->bFrameIntervalType++;
+	desc->bLength += sizeof(uint32_t);
+
+	desc->dwMinBitRate = sys_le32_to_cpu(desc->dwMinBitRate);
+	desc->dwMaxBitRate = sys_le32_to_cpu(desc->dwMaxBitRate);
+	bitrate = MAX(fmt->pitch, fmt->width) * fmt->height *
+		  frmival->denominator / frmival->numerator;
+	desc->dwMinBitRate = MIN(desc->dwMinBitRate, bitrate);
+	desc->dwMaxBitRate = MAX(desc->dwMaxBitRate, bitrate);
+
+	if (desc->dwMinBitRate > desc->dwMaxBitRate) {
+		LOG_WRN("The minimum bitrate is above the maximum bitrate");
+	}
+
+	if (desc->dwMaxBitRate == 0) {
+		LOG_WRN("The maximum bitrate is zero");
+	}
+
+	desc->dwMinBitRate = sys_cpu_to_le32(desc->dwMinBitRate);
+	desc->dwMaxBitRate = sys_cpu_to_le32(desc->dwMaxBitRate);
+
+	return 0;
+}
+
+static int uvc_add_vs_frame_desc(const struct device *dev,
+				 struct uvc_format_descriptor *format_desc,
+				 const struct video_format_cap *cap, bool min)
+{
+	const struct uvc_config *cfg = dev->config;
+	struct uvc_data *data = dev->data;
+	struct uvc_frame_discrete_descriptor *desc;
+	uint16_t w = min ? cap->width_min : cap->width_max;
+	uint16_t h = min ? cap->height_min : cap->height_max;
+	uint16_t p = MAX(video_bits_per_pixel(cap->pixelformat), 8) * w / BITS_PER_BYTE;
+	struct video_format fmt = {.pixelformat = cap->pixelformat,
+				   .width = w, .height = h, .pitch = p};
+	struct video_frmival_enum fie = {.format = &fmt};
+	uint32_t max_size = MAX(p, w) * h;
+
+	__ASSERT_NO_MSG(data->video_dev != NULL);
+
+	LOG_INF("Adding frame descriptor #%u for %ux%u",
+		format_desc->bNumFrameDescriptors + 1, w, h);
+
+	desc = &uvc_new_fmt_desc(dev)->frm_disc;
+	if (desc == NULL) {
+		return -ENOMEM;
+	}
+
+	desc->bLength = sizeof(*desc) - CONFIG_USBD_VIDEO_MAX_FRMIVAL * sizeof(uint32_t);
+	desc->bDescriptorType = USB_DESC_CS_INTERFACE;
+	desc->bFrameIndex = format_desc->bNumFrameDescriptors + 1;
+	desc->wWidth = sys_cpu_to_le16(w);
+	desc->wHeight = sys_cpu_to_le16(h);
+	desc->dwMaxVideoFrameBufferSize = sys_cpu_to_le32(max_size);
+	desc->bDescriptorSubtype = (format_desc->bDescriptorSubtype == UVC_VS_FORMAT_UNCOMPRESSED)
+		? UVC_VS_FRAME_UNCOMPRESSED : UVC_VS_FRAME_MJPEG;
+	desc->dwMinBitRate = UINT32_MAX;
+	desc->dwMaxBitRate = 0;
+
+	/* Add the adwFrameInterval fields at the end of this descriptor */
+	while (video_enum_frmival(data->video_dev, VIDEO_EP_OUT, &fie) == 0) {
+		switch (fie.type) {
+		case VIDEO_FRMIVAL_TYPE_DISCRETE:
+			LOG_DBG("Adding discrete frame interval %u", fie.index);
+			uvc_add_vs_frame_interval(desc, &fie.discrete, &fmt);
+			break;
+		case VIDEO_FRMIVAL_TYPE_STEPWISE:
+			LOG_DBG("Adding stepwise frame interval %u", fie.index);
+			uvc_add_vs_frame_interval(desc, &fie.stepwise.min, &fmt);
+			uvc_add_vs_frame_interval(desc, &fie.stepwise.max, &fmt);
+			break;
+		}
+		fie.index++;
+	}
+
+	/* If no frame interval is supported, default to a 15 FPS to 60 FPS continuous range */
+	if (desc->bFrameIntervalType == 0) {
+		struct uvc_frame_continuous_descriptor *desc_cont = (void *)desc;
+
+		desc_cont->bLength = sizeof(*desc_cont);
+		desc_cont->dwMinFrameInterval = sys_cpu_to_le32(NSEC_PER_SEC / 100 / 60);
+		desc_cont->dwMaxFrameInterval = sys_cpu_to_le32(NSEC_PER_SEC / 100 / 15);
+		desc_cont->dwFrameIntervalStep = sys_cpu_to_le32(10000 / 15);
+		desc_cont->dwMinBitRate = 0;
+		desc_cont->dwMaxBitRate = UINT32_MAX;
+	}
+
+	/* UVC requires the frame intervals to be sorted, but Zephyr does not guarantee any order */
+	qsort(desc->dwFrameInterval, desc->bFrameIntervalType,
+	      sizeof(*desc->dwFrameInterval), uvc_compare_frmival_desc);
+
+	__ASSERT_NO_MSG(format_desc != NULL);
+
+	desc->dwDefaultFrameInterval = desc->dwFrameInterval[0];
+	format_desc->bNumFrameDescriptors++;
+	cfg->desc->if1_hdr.wTotalLength += desc->bLength;
+
+	return 0;
+}
video_dev->name, cq.id, ok ? "yes" : "no"); + + mask |= ok << list[i].bit; + } + + return mask; +} + +static int uvc_init(struct usbd_class_data *const c_data) +{ + const struct device *dev = usbd_class_get_private(c_data); + const struct uvc_config *cfg = dev->config; + struct uvc_data *data = dev->data; + struct uvc_format_descriptor *format_desc = NULL; + struct video_caps caps; + uint32_t prev_pixfmt = 0; + uint32_t mask = 0; + int ret; + + __ASSERT_NO_MSG(data->video_dev != NULL); + + if (atomic_test_bit(&data->state, UVC_STATE_INITIALIZED)) { + LOG_DBG("UVC instance '%s' is already initialized", dev->name); + return 0; + } + + + /* Generating VideoControl descriptors (interface 0) */ + + mask = uvc_get_mask(data->video_dev, uvc_control_map_ct); + cfg->desc->if0_ct.bmControls[0] = mask >> 0; + cfg->desc->if0_ct.bmControls[1] = mask >> 8; + cfg->desc->if0_ct.bmControls[2] = mask >> 16; + + mask = uvc_get_mask(data->video_dev, uvc_control_map_pu); + cfg->desc->if0_pu.bmControls[0] = mask >> 0; + cfg->desc->if0_pu.bmControls[1] = mask >> 8; + cfg->desc->if0_pu.bmControls[2] = mask >> 16; + + mask = uvc_get_mask(data->video_dev, uvc_control_map_xu); + cfg->desc->if0_xu.bmControls[0] = mask >> 0; + cfg->desc->if0_xu.bmControls[1] = mask >> 8; + cfg->desc->if0_xu.bmControls[2] = mask >> 16; + cfg->desc->if0_xu.bmControls[3] = mask >> 24; + + /* Generating VideoStreaming descriptors (interface 1) */ + + ret = video_get_caps(data->video_dev, VIDEO_EP_OUT, &caps); + if (ret != 0) { + LOG_ERR("Could not load %s video format list", data->video_dev->name); + return ret; + } + + cfg->desc->if1_hdr.wTotalLength = sys_le16_to_cpu(cfg->desc->if1_hdr.wTotalLength); + + for (int i = 0; caps.format_caps[i].pixelformat != 0; i++) { + const struct video_format_cap *cap = &caps.format_caps[i]; + + if (prev_pixfmt != cap->pixelformat) { + if (prev_pixfmt != 0) { + cfg->desc->if1_hdr.wTotalLength += cfg->desc->if1_color.bLength; + uvc_add_desc(dev, &cfg->desc->if1_color, true, true); + } + + ret = uvc_add_vs_format_desc(dev, &format_desc, cap); + if (ret != 0) { + return ret; + } + } + + ret = uvc_add_vs_frame_desc(dev, format_desc, cap, true); + if (ret != 0) { + return ret; + } + + if (cap->width_min != cap->width_max || cap->height_min != cap->height_max) { + ret = uvc_add_vs_frame_desc(dev, format_desc, cap, false); + if (ret != 0) { + return ret; + } + } + + prev_pixfmt = cap->pixelformat; + } + + cfg->desc->if1_hdr.wTotalLength += cfg->desc->if1_color.bLength; + uvc_add_desc(dev, &cfg->desc->if1_color, true, true); + uvc_add_desc(dev, &cfg->desc->if1_ep_fs, true, false); + uvc_add_desc(dev, &cfg->desc->if1_ep_hs, false, true); + + cfg->desc->if1_hdr.wTotalLength = sys_cpu_to_le16(cfg->desc->if1_hdr.wTotalLength); + + /* Generating the default probe message now that descriptors are complete */ + + ret = uvc_get_vs_probe_struct(dev, &data->default_probe, UVC_GET_CUR); + if (ret != 0) { + LOG_ERR("init: failed to query the default probe"); + return ret; + } + + atomic_set_bit(&data->state, UVC_STATE_INITIALIZED); + + return 0; +} + +/* UVC data handling */ + +static int uvc_request(struct usbd_class_data *const c_data, struct net_buf *buf, int err) +{ + const struct device *dev = usbd_class_get_private(c_data); + struct uvc_buf_info bi = *(struct uvc_buf_info *)udc_get_buf_info(buf); + struct uvc_data *data = dev->data; + + net_buf_unref(buf); + + if (bi.udc.ep == uvc_get_bulk_in(dev)) { + LOG_DBG("Request completed for USB buffer %p, video buffer %p", buf, bi.vbuf); + if (bi.vbuf != NULL) { + 
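/* Hand the completed video buffer back to the application via the outgoing FIFO */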
+			k_fifo_put(&data->fifo_out, bi.vbuf);
+
+			if (IS_ENABLED(CONFIG_POLL) && data->video_sig != NULL) {
+				LOG_DBG("Raising VIDEO_BUF_DONE signal");
+				k_poll_signal_raise(data->video_sig, VIDEO_BUF_DONE);
+			}
+		}
+
+		/* There is now one more net_buf buffer available */
+		uvc_flush_queue(dev);
+	} else {
+		LOG_WRN("Request on unknown endpoint 0x%02x", bi.udc.ep);
+	}
+
+	return 0;
+}
+
+/*
+ * Handling the start of USB transfers marked by 'v' below:
+ *  v                                           v
+ * [h+data:][data:::][data:::]...[data:::]...[h+data:][data:::][data:::]...[data:::]...
+ * [vbuf:::::::::::::::::::::::::::::::::]...[vbuf:::::::::::::::::::::::::::::::::]...
+ * [frame::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::]...
+ */
+static struct net_buf *uvc_initiate_transfer(const struct device *dev, struct video_buffer *vbuf,
+					     size_t *next_line_offset, size_t *next_vbuf_offset)
+{
+	const struct uvc_config *cfg = dev->config;
+	struct uvc_data *data = dev->data;
+	struct video_format *fmt = &data->video_fmt;
+	size_t mps = uvc_get_bulk_mps(cfg->c_data);
+	struct net_buf *buf;
+
+	buf = net_buf_alloc_len(&uvc_buf_pool, mps, K_NO_WAIT);
+	if (buf == NULL) {
+		LOG_DBG("Cannot allocate first USB buffer for now");
+		return NULL;
+	}
+
+	if (fmt->pitch > 0) {
+		*next_line_offset += vbuf->bytesused / fmt->pitch;
+	}
+
+	LOG_INF("Start of transfer, bytes used %u, sending lines %u to %zu out of %u",
+		vbuf->bytesused, vbuf->line_offset, *next_line_offset, fmt->height);
+
+	/* Copy the header into the buffer (first 2 fields only) */
+	net_buf_add_mem(buf, &data->payload_header, 2);
+	net_buf_add(buf, CONFIG_USBD_VIDEO_HEADER_SIZE - buf->len);
+
+	/* Copy the payload into the buffer */
+	*next_vbuf_offset = MIN(vbuf->bytesused, net_buf_tailroom(buf));
+	net_buf_add_mem(buf, vbuf->buffer, *next_vbuf_offset);
+
+	/* If this new USB transfer will complete this frame */
+	if (fmt->pitch == 0 || *next_line_offset >= fmt->height) {
+		LOG_DBG("Last USB transfer for this buffer");
+
+		/* Flag that this current transfer is the last */
+		((struct uvc_payload_header *)buf->data)->bmHeaderInfo |=
+			UVC_BMHEADERINFO_END_OF_FRAME;
+
+		/* Toggle the Frame ID of the next vbuf */
+		data->payload_header.bmHeaderInfo ^= UVC_BMHEADERINFO_FRAMEID;
+
+		*next_line_offset = 0;
+	}
+
+	return buf;
+}
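+
+/*
+ * Worked example (illustrative, assuming the application queues one full
+ * frame per video buffer): when streaming 640x480 YUYV (pitch 1280,
+ * 614400 bytes per frame) over high-speed bulk (512-byte packets), each
+ * buffer becomes one USB transfer: a first packet carrying the payload
+ * header (CONFIG_USBD_VIDEO_HEADER_SIZE bytes) plus the start of the
+ * payload, followed by zero-copy continuation packets until all 614400
+ * bytes are sent. The End-of-Frame bit marks the last transfer of a frame
+ * and the Frame ID bit toggles afterwards, which is how the host delimits
+ * frames on a bulk pipe.
+ */
+
+/*
+ * Handling the start of USB transfers marked by 'v' below:
+ *  v        v        v           v           v        v
+ * [h+data:][data:::][data:::]...[data:::]...[h+data:][data:::][data:::]...[data:::]...
+ * [vbuf:::::::::::::::::::::::::::::::::]...[vbuf:::::::::::::::::::::::::::::::::]...
+ * [frame::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::]...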
+ */ +static struct net_buf *uvc_continue_transfer(const struct device *dev, struct video_buffer *vbuf, + size_t *next_line_offset, size_t *next_vbuf_offset) +{ + const struct uvc_config *cfg = dev->config; + struct uvc_data *data = dev->data; + size_t mps = uvc_get_bulk_mps(cfg->c_data); + struct net_buf *buf; + + /* Directly pass the vbuf content with zero-copy */ + buf = net_buf_alloc_with_data(&uvc_buf_pool, vbuf->buffer + data->vbuf_offset, + MIN(vbuf->bytesused - data->vbuf_offset, mps), K_NO_WAIT); + if (buf == NULL) { + LOG_DBG("Cannot allocate continuation USB buffer for now"); + return NULL; + } + + *next_vbuf_offset = data->vbuf_offset + buf->len; + + return buf; +} + +static int uvc_reset_transfer(const struct device *dev) +{ + const struct uvc_config *cfg = dev->config; + struct uvc_data *data = dev->data; + struct uvc_buf_info *bi; + struct net_buf *buf; + int ret; + + LOG_DBG("Stream restarted, terminating the transfer after %u bytes", data->vbuf_offset); + + buf = net_buf_alloc_with_data(&uvc_buf_pool, "", 1, K_NO_WAIT); + if (buf == NULL) { + LOG_DBG("Cannot allocate ZLP USB buffer for now"); + return -ENOMEM; + } + + bi = (struct uvc_buf_info *)udc_get_buf_info(buf); + bi->udc.ep = uvc_get_bulk_in(dev); + bi->vbuf = NULL; + bi->udc.zlp = true; + data->vbuf_offset = 0; + + ret = usbd_ep_enqueue(cfg->c_data, buf); + if (ret != 0) { + net_buf_unref(buf); + return ret; + } + + atomic_clear_bit(&data->state, UVC_STATE_STREAM_RESTART); + + return 0; +} + +/* + * The queue of video frame fragments (vbuf) is processed, each fragment (data) + * is prepended by the UVC header (h). The result is cut into USB packets (pkt) + * submitted to the USB. One vbuf per USB transfer + * + * [h+data:][data:::][data:::]...[data:::]...[h+data:][data:::][data:::]...[data:::]... + * [vbuf:::::::::::::::::::::::::::::::::]...[vbuf:::::::::::::::::::::::::::::::::]... + * [frame::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::]... + * + * @retval 0 if vbuf was partially transferred. + * @retval 1 if vbuf was fully transferred and can be released. + * @return Negative error code on failure. + */ +static int uvc_flush_vbuf(const struct device *dev, struct video_buffer *vbuf) +{ + const struct uvc_config *cfg = dev->config; + struct uvc_data *data = dev->data; + size_t next_vbuf_offset = data->vbuf_offset; + size_t next_line_offset = vbuf->line_offset; + struct net_buf *buf; + struct uvc_buf_info *bi; + int ret; + + if (atomic_test_bit(&data->state, UVC_STATE_STREAM_RESTART)) { + return uvc_reset_transfer(dev); + } + + buf = (data->vbuf_offset == 0) + ? 
uvc_initiate_transfer(dev, vbuf, &next_line_offset, &next_vbuf_offset) + : uvc_continue_transfer(dev, vbuf, &next_line_offset, &next_vbuf_offset); + if (buf == NULL) { + return -ENOMEM; + } + + bi = (struct uvc_buf_info *)udc_get_buf_info(buf); + bi->udc.ep = uvc_get_bulk_in(dev); + + LOG_DBG("Video buffer %p, offset %u/%u, size %u", + vbuf, data->vbuf_offset, vbuf->bytesused, buf->len); + + /* End-of-Transfer condition */ + if (next_vbuf_offset >= vbuf->bytesused) { + bi->vbuf = vbuf; + bi->udc.zlp = (buf->len == uvc_get_bulk_mps(cfg->c_data)); + } + + ret = usbd_ep_enqueue(cfg->c_data, buf); + if (ret != 0) { + net_buf_unref(buf); + return ret; + } + + /* End-of-Transfer condition */ + if (next_vbuf_offset >= vbuf->bytesused) { + data->vbuf_offset = 0; + return UVC_VBUF_DONE; + } + + data->vbuf_offset = next_vbuf_offset; + vbuf->line_offset = next_line_offset; + return 0; +} + +static void uvc_flush_queue(const struct device *dev) +{ + struct uvc_data *data = dev->data; + struct video_buffer *vbuf; + int ret; + + __ASSERT_NO_MSG(atomic_test_bit(&data->state, UVC_STATE_INITIALIZED)); + __ASSERT_NO_MSG(!k_is_in_isr()); + + if (!atomic_test_bit(&data->state, UVC_STATE_ENABLED) || + !atomic_test_bit(&data->state, UVC_STATE_STREAM_READY)) { + LOG_DBG("UVC not ready yet"); + return; + } + + /* Lock the access to the FIFO to make sure to only process one buffer at a time. + * K_FOREVER is not expected to take long, as uvc_flush_vbuf() never blocks. + */ + LOG_DBG("Locking the UVC stream"); + k_mutex_lock(&data->mutex, K_FOREVER); + + while ((vbuf = k_fifo_peek_head(&data->fifo_in)) != NULL) { + /* Pausing the UVC driver will accumulate buffers in the input queue */ + if (atomic_test_bit(&data->state, UVC_STATE_PAUSED)) { + break; + } + + ret = uvc_flush_vbuf(dev, vbuf); + if (ret == UVC_VBUF_DONE) { + LOG_DBG("Video buffer %p transferred, removing from the queue", vbuf); + k_fifo_get(&data->fifo_in, K_NO_WAIT); + } else if (ret != 0) { + LOG_DBG("Could not transfer video buffer %p for now", vbuf); + break; + } + } + + /* Now the other contexts calling this function can access the fifo safely. 
*/ + LOG_DBG("Unlocking the UVC stream"); + k_mutex_unlock(&data->mutex); +} + +static void uvc_enable(struct usbd_class_data *const c_data) +{ + const struct device *dev = usbd_class_get_private(c_data); + struct uvc_data *data = dev->data; + + __ASSERT_NO_MSG(atomic_test_bit(&data->state, UVC_STATE_INITIALIZED)); + + atomic_set_bit(&data->state, UVC_STATE_ENABLED); + + /* Catch-up with buffers that might have been delayed */ + uvc_flush_queue(dev); +} + +static void uvc_disable(struct usbd_class_data *const c_data) +{ + const struct device *dev = usbd_class_get_private(c_data); + struct uvc_data *data = dev->data; + + __ASSERT_NO_MSG(atomic_test_bit(&data->state, UVC_STATE_INITIALIZED)); + + atomic_clear_bit(&data->state, UVC_STATE_ENABLED); +} + +static void uvc_update(struct usbd_class_data *const c_data, uint8_t iface, uint8_t alternate) +{ + LOG_DBG("Select alternate %u for interface %u", alternate, iface); +} + +static const struct usbd_class_api uvc_class_api = { + .enable = uvc_enable, + .disable = uvc_disable, + .request = uvc_request, + .update = uvc_update, + .control_to_host = uvc_control_to_host, + .control_to_dev = uvc_control_to_dev, + .init = uvc_init, + .get_desc = uvc_get_desc, +}; + +/* UVC video API */ + +static int uvc_enqueue(const struct device *dev, enum video_endpoint_id ep, + struct video_buffer *vbuf) +{ + struct uvc_data *data = dev->data; + + k_fifo_put(&data->fifo_in, vbuf); + uvc_flush_queue(dev); + + return 0; +} + +static int uvc_dequeue(const struct device *dev, enum video_endpoint_id ep, + struct video_buffer **vbuf, k_timeout_t timeout) +{ + struct uvc_data *data = dev->data; + + *vbuf = k_fifo_get(&data->fifo_out, timeout); + if (*vbuf == NULL) { + return -EAGAIN; + } + + return 0; +} + +static int uvc_get_format(const struct device *dev, enum video_endpoint_id ep, + struct video_format *fmt) +{ + struct uvc_data *data = dev->data; + + __ASSERT_NO_MSG(data->video_dev != NULL); + + if (!atomic_test_bit(&data->state, UVC_STATE_ENABLED) || + !atomic_test_bit(&data->state, UVC_STATE_STREAM_READY)) { + return -EAGAIN; + } + + LOG_DBG("Querying the format from %s", data->video_dev->name); + return video_get_format(data->video_dev, VIDEO_EP_OUT, fmt); +} + +static int uvc_set_stream(const struct device *dev, bool enable) +{ + struct uvc_data *data = dev->data; + + if (enable) { + atomic_clear_bit(&data->state, UVC_STATE_PAUSED); + uvc_flush_queue(dev); + } else { + atomic_set_bit(&data->state, UVC_STATE_PAUSED); + } + + return 0; +} + +static int uvc_get_caps(const struct device *dev, enum video_endpoint_id ep, + struct video_caps *caps) +{ + struct uvc_data *data = dev->data; + + __ASSERT_NO_MSG(data->video_dev != NULL); + + return video_get_caps(data->video_dev, VIDEO_EP_OUT, caps); +} + +#ifdef CONFIG_POLL +static int uvc_set_signal(const struct device *dev, enum video_endpoint_id ep, + struct k_poll_signal *sig) +{ + struct uvc_data *data = dev->data; + + data->video_sig = sig; + + return 0; +} +#endif + +static DEVICE_API(video, uvc_video_api) = { + .get_format = uvc_get_format, + .set_stream = uvc_set_stream, + .get_caps = uvc_get_caps, + .enqueue = uvc_enqueue, + .dequeue = uvc_dequeue, +#if CONFIG_POLL + .set_signal = uvc_set_signal, +#endif +}; + +static int uvc_preinit(const struct device *dev) +{ + struct uvc_data *data = dev->data; + + __ASSERT_NO_MSG(dev->config != NULL); + + data->payload_header.bHeaderLength = CONFIG_USBD_VIDEO_HEADER_SIZE; + data->format_id = 1; + data->frame_id = 1; + + k_fifo_init(&data->fifo_in); + 
k_fifo_init(&data->fifo_out); + k_mutex_init(&data->mutex); + + return 0; +} + +#define UVC_DEFINE_DESCRIPTOR(n) \ +static struct uvc_desc uvc_desc_##n = { \ + .iad = { \ + .bLength = sizeof(struct usb_association_descriptor), \ + .bDescriptorType = USB_DESC_INTERFACE_ASSOC, \ + .bFirstInterface = 0, \ + .bInterfaceCount = 2, \ + .bFunctionClass = USB_BCC_VIDEO, \ + .bFunctionSubClass = UVC_SC_VIDEO_INTERFACE_COLLECTION, \ + .bFunctionProtocol = 0, \ + .iFunction = 0, \ + }, \ + \ + .if0 = { \ + .bLength = sizeof(struct usb_if_descriptor), \ + .bDescriptorType = USB_DESC_INTERFACE, \ + .bInterfaceNumber = 0, \ + .bAlternateSetting = 0, \ + .bNumEndpoints = 0, \ + .bInterfaceClass = USB_BCC_VIDEO, \ + .bInterfaceSubClass = UVC_SC_VIDEOCONTROL, \ + .bInterfaceProtocol = 0, \ + .iInterface = 0, \ + }, \ + \ + .if0_hdr = { \ + .bLength = sizeof(struct uvc_control_header_descriptor), \ + .bDescriptorType = USB_DESC_CS_INTERFACE, \ + .bDescriptorSubtype = UVC_VC_HEADER, \ + .bcdUVC = sys_cpu_to_le16(0x0150), \ + .wTotalLength = sys_cpu_to_le16( \ + sizeof(struct uvc_control_header_descriptor) + \ + sizeof(struct uvc_camera_terminal_descriptor) + \ + sizeof(struct uvc_selector_unit_descriptor) + \ + sizeof(struct uvc_processing_unit_descriptor) + \ + sizeof(struct uvc_extension_unit_descriptor) + \ + sizeof(struct uvc_output_terminal_descriptor)), \ + .dwClockFrequency = sys_cpu_to_le32(30000000), \ + .bInCollection = 1, \ + .baInterfaceNr = {0}, \ + }, \ + \ + .if0_ct = { \ + .bLength = sizeof(struct uvc_camera_terminal_descriptor), \ + .bDescriptorType = USB_DESC_CS_INTERFACE, \ + .bDescriptorSubtype = UVC_VC_INPUT_TERMINAL, \ + .bTerminalID = UVC_UNIT_ID_CT, \ + .wTerminalType = sys_cpu_to_le16(UVC_ITT_CAMERA), \ + .bAssocTerminal = 0, \ + .iTerminal = 0, \ + .wObjectiveFocalLengthMin = sys_cpu_to_le16(0), \ + .wObjectiveFocalLengthMax = sys_cpu_to_le16(0), \ + .wOcularFocalLength = sys_cpu_to_le16(0), \ + .bControlSize = 3, \ + .bmControls = {0}, \ + }, \ + \ + .if0_su = { \ + .bLength = sizeof(struct uvc_selector_unit_descriptor), \ + .bDescriptorType = USB_DESC_CS_INTERFACE, \ + .bDescriptorSubtype = UVC_VC_SELECTOR_UNIT, \ + .bUnitID = UVC_UNIT_ID_SU, \ + .bNrInPins = 1, \ + .baSourceID = {UVC_UNIT_ID_CT}, \ + .iSelector = 0, \ + }, \ + \ + .if0_pu = { \ + .bLength = sizeof(struct uvc_processing_unit_descriptor), \ + .bDescriptorType = USB_DESC_CS_INTERFACE, \ + .bDescriptorSubtype = UVC_VC_PROCESSING_UNIT, \ + .bUnitID = UVC_UNIT_ID_PU, \ + .bSourceID = UVC_UNIT_ID_SU, \ + .wMaxMultiplier = sys_cpu_to_le16(0), \ + .bControlSize = 3, \ + .bmControls = {0}, \ + .iProcessing = 0, \ + .bmVideoStandards = 0, \ + }, \ + \ + .if0_xu = { \ + .bLength = sizeof(struct uvc_extension_unit_descriptor), \ + .bDescriptorType = USB_DESC_CS_INTERFACE, \ + .bDescriptorSubtype = UVC_VC_EXTENSION_UNIT, \ + .bUnitID = UVC_UNIT_ID_XU, \ + .guidExtensionCode = {0}, \ + .bNumControls = 0, \ + .bNrInPins = 1, \ + .baSourceID = {UVC_UNIT_ID_PU}, \ + .bControlSize = 4, \ + .bmControls = {0}, \ + .iExtension = 0, \ + }, \ + \ + .if0_ot = { \ + .bLength = sizeof(struct uvc_output_terminal_descriptor), \ + .bDescriptorType = USB_DESC_CS_INTERFACE, \ + .bDescriptorSubtype = UVC_VC_OUTPUT_TERMINAL, \ + .bTerminalID = UVC_UNIT_ID_OT, \ + .wTerminalType = sys_cpu_to_le16(UVC_TT_STREAMING), \ + .bAssocTerminal = UVC_UNIT_ID_CT, \ + .bSourceID = UVC_UNIT_ID_XU, \ + .iTerminal = 0, \ + }, \ + \ + .if1 = { \ + .bLength = sizeof(struct usb_if_descriptor), \ + .bDescriptorType = USB_DESC_INTERFACE, \ + .bInterfaceNumber = 0, \ 
+ .bAlternateSetting = 0, \ + .bNumEndpoints = 1, \ + .bInterfaceClass = USB_BCC_VIDEO, \ + .bInterfaceSubClass = UVC_SC_VIDEOSTREAMING, \ + .bInterfaceProtocol = 0, \ + .iInterface = 0, \ + }, \ + \ + .if1_hdr = { \ + .bLength = sizeof(struct uvc_stream_header_descriptor), \ + .bDescriptorType = USB_DESC_CS_INTERFACE, \ + .bDescriptorSubtype = UVC_VS_INPUT_HEADER, \ + .bNumFormats = 0, \ + .wTotalLength = sys_cpu_to_le16( \ + sizeof(struct uvc_stream_header_descriptor)), \ + .bEndpointAddress = 0x81, \ + .bmInfo = 0, \ + .bTerminalLink = UVC_UNIT_ID_OT, \ + .bStillCaptureMethod = 0, \ + .bTriggerSupport = 0, \ + .bTriggerUsage = 0, \ + .bControlSize = 0, \ + }, \ + \ + .if1_color = { \ + .bLength = sizeof(struct uvc_color_descriptor), \ + .bDescriptorType = USB_DESC_CS_INTERFACE, \ + .bDescriptorSubtype = UVC_VS_COLORFORMAT, \ + .bColorPrimaries = UVC_COLOR_BT709, \ + .bTransferCharacteristics = UVC_COLOR_BT709, \ + .bMatrixCoefficients = UVC_COLOR_BT601, \ + }, \ + \ + .if1_ep_fs = { \ + .bLength = sizeof(struct usb_ep_descriptor), \ + .bDescriptorType = USB_DESC_ENDPOINT, \ + .bEndpointAddress = 0x81, \ + .bmAttributes = USB_EP_TYPE_BULK, \ + .wMaxPacketSize = sys_cpu_to_le16(64), \ + .bInterval = 0, \ + }, \ + \ + .if1_ep_hs = { \ + .bLength = sizeof(struct usb_ep_descriptor), \ + .bDescriptorType = USB_DESC_ENDPOINT, \ + .bEndpointAddress = 0x81, \ + .bmAttributes = USB_EP_TYPE_BULK, \ + .wMaxPacketSize = sys_cpu_to_le16(512), \ + .bInterval = 0, \ + }, \ +}; \ + \ +struct usb_desc_header *uvc_fs_desc_##n[UVC_MAX_FS_DESC] = { \ + (struct usb_desc_header *) &uvc_desc_##n.iad, \ + (struct usb_desc_header *) &uvc_desc_##n.if0, \ + (struct usb_desc_header *) &uvc_desc_##n.if0_hdr, \ + (struct usb_desc_header *) &uvc_desc_##n.if0_ct, \ + (struct usb_desc_header *) &uvc_desc_##n.if0_su, \ + (struct usb_desc_header *) &uvc_desc_##n.if0_pu, \ + (struct usb_desc_header *) &uvc_desc_##n.if0_xu, \ + (struct usb_desc_header *) &uvc_desc_##n.if0_ot, \ + (struct usb_desc_header *) &uvc_desc_##n.if1, \ + (struct usb_desc_header *) &uvc_desc_##n.if1_hdr, \ + /* More pointers are generated here at runtime */ \ + (struct usb_desc_header *) &uvc_desc_##n.if1_ep_fs, \ + (struct usb_desc_header *) NULL, \ +}; \ + \ +struct usb_desc_header *uvc_hs_desc_##n[UVC_MAX_HS_DESC] = { \ + (struct usb_desc_header *) &uvc_desc_##n.iad, \ + (struct usb_desc_header *) &uvc_desc_##n.if0, \ + (struct usb_desc_header *) &uvc_desc_##n.if0_hdr, \ + (struct usb_desc_header *) &uvc_desc_##n.if0_ct, \ + (struct usb_desc_header *) &uvc_desc_##n.if0_su, \ + (struct usb_desc_header *) &uvc_desc_##n.if0_pu, \ + (struct usb_desc_header *) &uvc_desc_##n.if0_xu, \ + (struct usb_desc_header *) &uvc_desc_##n.if0_ot, \ + (struct usb_desc_header *) &uvc_desc_##n.if1, \ + (struct usb_desc_header *) &uvc_desc_##n.if1_hdr, \ + /* More pointers are generated here at runtime */ \ + (struct usb_desc_header *) &uvc_desc_##n.if1_ep_hs, \ + (struct usb_desc_header *) NULL, \ +}; + +#define USBD_VIDEO_DT_DEVICE_DEFINE(n) \ + UVC_DEFINE_DESCRIPTOR(n) \ + \ + USBD_DEFINE_CLASS(uvc_c_data_##n, &uvc_class_api, \ + (void *)DEVICE_DT_INST_GET(n), NULL); \ + \ + const struct uvc_config uvc_cfg_##n = { \ + .c_data = &uvc_c_data_##n, \ + .desc = &uvc_desc_##n, \ + .fs_desc = uvc_fs_desc_##n, \ + .hs_desc = uvc_hs_desc_##n, \ + }; \ + \ + struct uvc_data uvc_data_##n = { \ + .fs_desc_idx = 10, \ + .hs_desc_idx = 10, \ + }; \ + \ + DEVICE_DT_INST_DEFINE(n, uvc_preinit, NULL, &uvc_data_##n, &uvc_cfg_##n,\ + POST_KERNEL, CONFIG_VIDEO_INIT_PRIORITY, 
&uvc_video_api);
+
+DT_INST_FOREACH_STATUS_OKAY(USBD_VIDEO_DT_DEVICE_DEFINE)
diff --git a/subsys/usb/device_next/class/usbd_uvc.h b/subsys/usb/device_next/class/usbd_uvc.h
new file mode 100644
index 000000000000..3364f83258d1
--- /dev/null
+++ b/subsys/usb/device_next/class/usbd_uvc.h
@@ -0,0 +1,484 @@
+/*
+ * Copyright (c) 2025 tinyVision.ai Inc.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/**
+ * @file
+ * @brief USB Video Class private header
+ *
+ * Header based on the following documentation:
+ * - USB Device Class Definition for Video Devices (Revision 1.5)
+ *
+ * Additional documentation considered a part of UVC 1.5:
+ * - USB Device Class Definition for Video Devices: Uncompressed Payload (Revision 1.5)
+ * - USB Device Class Definition for Video Devices: Motion-JPEG Payload (Revision 1.5)
+ */
+
+#ifndef ZEPHYR_INCLUDE_USBD_CLASS_UVC_H_
+#define ZEPHYR_INCLUDE_USBD_CLASS_UVC_H_
+
+#include <zephyr/sys/util.h> /* BIT() */
+
+/* Video Class-Specific Request Codes */
+#define UVC_SET_CUR					0x01
+#define UVC_GET_CUR					0x81
+#define UVC_GET_MIN					0x82
+#define UVC_GET_MAX					0x83
+#define UVC_GET_RES					0x84
+#define UVC_GET_LEN					0x85
+#define UVC_GET_INFO					0x86
+#define UVC_GET_DEF					0x87
+
+/* Flags announcing which controls are supported */
+#define UVC_INFO_SUPPORTS_GET				BIT(0)
+#define UVC_INFO_SUPPORTS_SET				BIT(1)
+
+/* Request Error Code Control */
+#define UVC_ERR_NOT_READY				0x01
+#define UVC_ERR_WRONG_STATE				0x02
+#define UVC_ERR_OUT_OF_RANGE				0x04
+#define UVC_ERR_INVALID_UNIT				0x05
+#define UVC_ERR_INVALID_CONTROL				0x06
+#define UVC_ERR_INVALID_REQUEST				0x07
+#define UVC_ERR_INVALID_VALUE_WITHIN_RANGE		0x08
+#define UVC_ERR_UNKNOWN					0xff
+
+/* Video and Still Image Payload Headers */
+#define UVC_BMHEADERINFO_FRAMEID			BIT(0)
+#define UVC_BMHEADERINFO_END_OF_FRAME			BIT(1)
+#define UVC_BMHEADERINFO_HAS_PRESENTATIONTIME		BIT(2)
+#define UVC_BMHEADERINFO_HAS_SOURCECLOCK		BIT(3)
+#define UVC_BMHEADERINFO_PAYLOAD_SPECIFIC_BIT		BIT(4)
+#define UVC_BMHEADERINFO_STILL_IMAGE			BIT(5)
+#define UVC_BMHEADERINFO_ERROR				BIT(6)
+#define UVC_BMHEADERINFO_END_OF_HEADER			BIT(7)
+
+/* Video Interface Subclass Codes */
+#define UVC_SC_VIDEOCONTROL				0x01
+#define UVC_SC_VIDEOSTREAMING				0x02
+#define UVC_SC_VIDEO_INTERFACE_COLLECTION		0x03
+
+/* Video Class-Specific Video Control Interface Descriptor Subtypes */
+#define UVC_VC_DESCRIPTOR_UNDEFINED			0x00
+#define UVC_VC_HEADER					0x01
+#define UVC_VC_INPUT_TERMINAL				0x02
+#define UVC_VC_OUTPUT_TERMINAL				0x03
+#define UVC_VC_SELECTOR_UNIT				0x04
+#define UVC_VC_PROCESSING_UNIT				0x05
+#define UVC_VC_EXTENSION_UNIT				0x06
+#define UVC_VC_ENCODING_UNIT				0x07
+
+/* Video Class-Specific Video Stream Interface Descriptor Subtypes */
+#define UVC_VS_UNDEFINED				0x00
+#define UVC_VS_INPUT_HEADER				0x01
+#define UVC_VS_OUTPUT_HEADER				0x02
+#define UVC_VS_STILL_IMAGE_FRAME			0x03
+#define UVC_VS_FORMAT_UNCOMPRESSED			0x04
+#define UVC_VS_FRAME_UNCOMPRESSED			0x05
+#define UVC_VS_FORMAT_MJPEG				0x06
+#define UVC_VS_FRAME_MJPEG				0x07
+#define UVC_VS_FORMAT_MPEG2TS				0x0A
+#define UVC_VS_FORMAT_DV				0x0C
+#define UVC_VS_COLORFORMAT				0x0D
+#define UVC_VS_FORMAT_FRAME_BASED			0x10
+#define UVC_VS_FRAME_FRAME_BASED			0x11
+#define UVC_VS_FORMAT_STREAM_BASED			0x12
+#define UVC_VS_FORMAT_H264				0x13
+#define UVC_VS_FRAME_H264				0x14
+#define UVC_VS_FORMAT_H264_SIMULCAST			0x15
+#define UVC_VS_FORMAT_VP8				0x16
+#define UVC_VS_FRAME_VP8				0x17
+#define UVC_VS_FORMAT_VP8_SIMULCAST			0x18
+
+/* Video Class-Specific Endpoint Descriptor Subtypes */
+#define UVC_EP_UNDEFINED				0x00
+#define UVC_EP_GENERAL					0x01
+#define UVC_EP_ENDPOINT					0x02
+#define UVC_EP_INTERRUPT				0x03
+
+/* USB Terminal 
+/* USB Terminal Types */
+#define UVC_TT_VENDOR_SPECIFIC 0x0100
+#define UVC_TT_STREAMING 0x0101
+
+/* Input Terminal Types */
+#define UVC_ITT_VENDOR_SPECIFIC 0x0200
+#define UVC_ITT_CAMERA 0x0201
+#define UVC_ITT_MEDIA_TRANSPORT_INPUT 0x0202
+
+/* Output Terminal Types */
+#define UVC_OTT_VENDOR_SPECIFIC 0x0300
+#define UVC_OTT_DISPLAY 0x0301
+#define UVC_OTT_MEDIA_TRANSPORT_OUTPUT 0x0302
+
+/* External Terminal Types */
+#define UVC_EXT_EXTERNAL_VENDOR_SPECIFIC 0x0400
+#define UVC_EXT_COMPOSITE_CONNECTOR 0x0401
+#define UVC_EXT_SVIDEO_CONNECTOR 0x0402
+#define UVC_EXT_COMPONENT_CONNECTOR 0x0403
+
+/* VideoStreaming Interface Controls */
+#define UVC_VS_PROBE_CONTROL 0x01
+#define UVC_VS_COMMIT_CONTROL 0x02
+#define UVC_VS_STILL_PROBE_CONTROL 0x03
+#define UVC_VS_STILL_COMMIT_CONTROL 0x04
+#define UVC_VS_STILL_IMAGE_TRIGGER_CONTROL 0x05
+#define UVC_VS_STREAM_ERROR_CODE_CONTROL 0x06
+#define UVC_VS_GENERATE_KEY_FRAME_CONTROL 0x07
+#define UVC_VS_UPDATE_FRAME_SEGMENT_CONTROL 0x08
+#define UVC_VS_SYNCH_DELAY_CONTROL 0x09
+
+/* VideoControl Interface Controls */
+#define UVC_VC_CONTROL_UNDEFINED 0x00
+#define UVC_VC_VIDEO_POWER_MODE_CONTROL 0x01
+#define UVC_VC_REQUEST_ERROR_CODE_CONTROL 0x02
+
+/* Selector Unit Controls */
+#define UVC_SU_INPUT_SELECT_CONTROL 0x01
+
+/* Camera Terminal Controls */
+#define UVC_CT_SCANNING_MODE_CONTROL 0x01
+#define UVC_CT_AE_MODE_CONTROL 0x02
+#define UVC_CT_AE_PRIORITY_CONTROL 0x03
+#define UVC_CT_EXPOSURE_TIME_ABS_CONTROL 0x04
+#define UVC_CT_EXPOSURE_TIME_REL_CONTROL 0x05
+#define UVC_CT_FOCUS_ABS_CONTROL 0x06
+#define UVC_CT_FOCUS_REL_CONTROL 0x07
+#define UVC_CT_FOCUS_AUTO_CONTROL 0x08
+#define UVC_CT_IRIS_ABS_CONTROL 0x09
+#define UVC_CT_IRIS_REL_CONTROL 0x0A
+#define UVC_CT_ZOOM_ABS_CONTROL 0x0B
+#define UVC_CT_ZOOM_REL_CONTROL 0x0C
+#define UVC_CT_PANTILT_ABS_CONTROL 0x0D
+#define UVC_CT_PANTILT_REL_CONTROL 0x0E
+#define UVC_CT_ROLL_ABS_CONTROL 0x0F
+#define UVC_CT_ROLL_REL_CONTROL 0x10
+#define UVC_CT_PRIVACY_CONTROL 0x11
+#define UVC_CT_FOCUS_SIMPLE_CONTROL 0x12
+#define UVC_CT_WINDOW_CONTROL 0x13
+#define UVC_CT_REGION_OF_INTEREST_CONTROL 0x14
+
+/* Processing Unit Controls */
+#define UVC_PU_BACKLIGHT_COMPENSATION_CONTROL 0x01
+#define UVC_PU_BRIGHTNESS_CONTROL 0x02
+#define UVC_PU_CONTRAST_CONTROL 0x03
+#define UVC_PU_GAIN_CONTROL 0x04
+#define UVC_PU_POWER_LINE_FREQUENCY_CONTROL 0x05
+#define UVC_PU_HUE_CONTROL 0x06
+#define UVC_PU_SATURATION_CONTROL 0x07
+#define UVC_PU_SHARPNESS_CONTROL 0x08
+#define UVC_PU_GAMMA_CONTROL 0x09
+#define UVC_PU_WHITE_BALANCE_TEMP_CONTROL 0x0A
+#define UVC_PU_WHITE_BALANCE_TEMP_AUTO_CONTROL 0x0B
+#define UVC_PU_WHITE_BALANCE_COMPONENT_CONTROL 0x0C
+#define UVC_PU_WHITE_BALANCE_COMPONENT_AUTO_CONTROL 0x0D
+#define UVC_PU_DIGITAL_MULTIPLIER_CONTROL 0x0E
+#define UVC_PU_DIGITAL_MULTIPLIER_LIMIT_CONTROL 0x0F
+#define UVC_PU_HUE_AUTO_CONTROL 0x10
+#define UVC_PU_ANALOG_VIDEO_STANDARD_CONTROL 0x11
+#define UVC_PU_ANALOG_LOCK_STATUS_CONTROL 0x12
+#define UVC_PU_CONTRAST_AUTO_CONTROL 0x13
+
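+/*
+ * Example (illustrative only): per UVC 1.5, a control request addresses one
+ * of the selectors above through the wValue and wIndex fields of the setup
+ * packet, e.g. for a SET_CUR of the brightness control of a Processing Unit:
+ *
+ *   wValue = UVC_PU_BRIGHTNESS_CONTROL << 8;
+ *   wIndex = (entity_id << 8) | interface_number;
+ *
+ * where entity_id and interface_number are hypothetical variables naming the
+ * Processing Unit ID and the VideoControl interface number.
+ */
+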
+/* Encoding Unit Controls */
+#define UVC_EU_SELECT_LAYER_CONTROL 0x01
+#define UVC_EU_PROFILE_TOOLSET_CONTROL 0x02
+#define UVC_EU_VIDEO_RESOLUTION_CONTROL 0x03
+#define UVC_EU_MIN_FRAME_INTERVAL_CONTROL 0x04
+#define UVC_EU_SLICE_MODE_CONTROL 0x05
+#define UVC_EU_RATE_CONTROL_MODE_CONTROL 0x06
+#define UVC_EU_AVERAGE_BITRATE_CONTROL 0x07
+#define UVC_EU_CPB_SIZE_CONTROL 0x08
+#define UVC_EU_PEAK_BIT_RATE_CONTROL 0x09
+#define UVC_EU_QUANTIZATION_PARAMS_CONTROL 0x0A
+#define UVC_EU_SYNC_REF_FRAME_CONTROL 0x0B
+#define UVC_EU_LTR_BUFFER_CONTROL 0x0C
+#define UVC_EU_LTR_PICTURE_CONTROL 0x0D
+#define UVC_EU_LTR_VALIDATION_CONTROL 0x0E
+#define UVC_EU_LEVEL_IDC_LIMIT_CONTROL 0x0F
+#define UVC_EU_SEI_PAYLOADTYPE_CONTROL 0x10
+#define UVC_EU_QP_RANGE_CONTROL 0x11
+#define UVC_EU_PRIORITY_CONTROL 0x12
+#define UVC_EU_START_OR_STOP_LAYER_CONTROL 0x13
+#define UVC_EU_ERROR_RESILIENCY_CONTROL 0x14
+
+/* Extension Unit Controls */
+#define UVC_XU_BASE_CONTROL 0x00
+
+/* Base GUID string present at the end of most GUID formats, preceded by the FourCC code */
+#define UVC_FORMAT_GUID(fourcc) fourcc "\x00\x00\x10\x00\x80\x00\x00\xaa\x00\x38\x9b\x71"
+
+struct uvc_if_descriptor {
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bDescriptorSubtype;
+} __packed;
+
+struct uvc_control_header_descriptor {
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bDescriptorSubtype;
+	uint16_t bcdUVC;
+	uint16_t wTotalLength;
+	uint32_t dwClockFrequency;
+	uint8_t bInCollection;
+	uint8_t baInterfaceNr[1];
+} __packed;
+
+struct uvc_unit_descriptor {
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bDescriptorSubtype;
+	uint8_t bUnitID;
+};
+
+struct uvc_output_terminal_descriptor {
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bDescriptorSubtype;
+	uint8_t bTerminalID;
+	uint16_t wTerminalType;
+	uint8_t bAssocTerminal;
+	uint8_t bSourceID;
+	uint8_t iTerminal;
+} __packed;
+
+struct uvc_camera_terminal_descriptor {
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bDescriptorSubtype;
+	uint8_t bTerminalID;
+	uint16_t wTerminalType;
+	uint8_t bAssocTerminal;
+	uint8_t iTerminal;
+	uint16_t wObjectiveFocalLengthMin;
+	uint16_t wObjectiveFocalLengthMax;
+	uint16_t wOcularFocalLength;
+	uint8_t bControlSize;
+	uint8_t bmControls[3];
+} __packed;
+
+struct uvc_selector_unit_descriptor {
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bDescriptorSubtype;
+	uint8_t bUnitID;
+	uint8_t bNrInPins;
+	uint8_t baSourceID[1];
+	uint8_t iSelector;
+} __packed;
+
+struct uvc_processing_unit_descriptor {
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bDescriptorSubtype;
+	uint8_t bUnitID;
+	uint8_t bSourceID;
+	uint16_t wMaxMultiplier;
+	uint8_t bControlSize;
+	uint8_t bmControls[3];
+	uint8_t iProcessing;
+	uint8_t bmVideoStandards;
+} __packed;
+
+struct uvc_encoding_unit_descriptor {
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bDescriptorSubtype;
+	uint8_t bUnitID;
+	uint8_t bSourceID;
+	uint8_t iEncoding;
+	uint8_t bControlSize;
+	uint8_t bmControls[3];
+	uint8_t bmControlsRuntime[3];
+} __packed;
+
+struct uvc_extension_unit_descriptor {
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bDescriptorSubtype;
+	uint8_t bUnitID;
+	uint8_t guidExtensionCode[16];
+	uint8_t bNumControls;
+	uint8_t bNrInPins;
+	uint8_t baSourceID[1];
+	uint8_t bControlSize;
+	uint8_t bmControls[4];
+	uint8_t iExtension;
+} __packed;
+
+struct uvc_stream_header_descriptor {
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bDescriptorSubtype;
+	uint8_t bNumFormats;
+	uint16_t wTotalLength;
+	uint8_t bEndpointAddress;
+	uint8_t bmInfo;
+	uint8_t bTerminalLink;
+	uint8_t bStillCaptureMethod;
+	uint8_t bTriggerSupport;
+	uint8_t bTriggerUsage;
+	uint8_t bControlSize;
+} __packed;
+
+struct uvc_frame_still_image_descriptor {
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bDescriptorSubtype;
+	uint8_t bEndpointAddress;
+	uint8_t bNumImageSizePatterns;
+	struct {
+		uint16_t wWidth;
+		uint16_t wHeight;
+	} n[1] __packed;
+	uint8_t bNumCompressionPattern;
+	uint8_t bCompression[1];
+} __packed;
+
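+/*
+ * Example (illustrative only): the 16-byte GUID of the uncompressed YUY2
+ * payload format can be spelled with the UVC_FORMAT_GUID() helper above,
+ * the FourCC code providing the first four bytes:
+ *
+ *   static const uint8_t guid_yuy2[16] = UVC_FORMAT_GUID("YUY2");
+ */
+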
+struct uvc_format_descriptor {
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bDescriptorSubtype;
+	uint8_t bFormatIndex;
+	uint8_t bNumFrameDescriptors;
+	/* Other fields depending on bDescriptorSubtype value */
+} __packed;
+
+struct uvc_format_uncomp_descriptor {
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bDescriptorSubtype;
+	uint8_t bFormatIndex;
+	uint8_t bNumFrameDescriptors;
+	uint8_t guidFormat[16];
+	uint8_t bBitsPerPixel;
+	uint8_t bDefaultFrameIndex;
+	uint8_t bAspectRatioX;
+	uint8_t bAspectRatioY;
+	uint8_t bmInterlaceFlags;
+	uint8_t bCopyProtect;
+} __packed;
+
+struct uvc_format_mjpeg_descriptor {
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bDescriptorSubtype;
+	uint8_t bFormatIndex;
+	uint8_t bNumFrameDescriptors;
+	uint8_t bmFlags;
+#define UVC_MJPEG_FLAGS_FIXEDSIZESAMPLES (1 << 0)
+	uint8_t bDefaultFrameIndex;
+	uint8_t bAspectRatioX;
+	uint8_t bAspectRatioY;
+	uint8_t bmInterlaceFlags;
+	uint8_t bCopyProtect;
+} __packed;
+
+struct uvc_frame_descriptor {
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bDescriptorSubtype;
+	uint8_t bFrameIndex;
+	uint8_t bmCapabilities;
+	uint16_t wWidth;
+	uint16_t wHeight;
+	uint32_t dwMinBitRate;
+	uint32_t dwMaxBitRate;
+	uint32_t dwMaxVideoFrameBufferSize;
+	uint32_t dwDefaultFrameInterval;
+	uint8_t bFrameIntervalType;
+	/* Other fields depending on bFrameIntervalType value */
+} __packed;
+
+struct uvc_frame_continuous_descriptor {
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bDescriptorSubtype;
+	uint8_t bFrameIndex;
+	uint8_t bmCapabilities;
+	uint16_t wWidth;
+	uint16_t wHeight;
+	uint32_t dwMinBitRate;
+	uint32_t dwMaxBitRate;
+	uint32_t dwMaxVideoFrameBufferSize;
+	uint32_t dwDefaultFrameInterval;
+	uint8_t bFrameIntervalType;
+	uint32_t dwMinFrameInterval;
+	uint32_t dwMaxFrameInterval;
+	uint32_t dwFrameIntervalStep;
+} __packed;
+
+struct uvc_frame_discrete_descriptor {
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bDescriptorSubtype;
+	uint8_t bFrameIndex;
+	uint8_t bmCapabilities;
+	uint16_t wWidth;
+	uint16_t wHeight;
+	uint32_t dwMinBitRate;
+	uint32_t dwMaxBitRate;
+	uint32_t dwMaxVideoFrameBufferSize;
+	uint32_t dwDefaultFrameInterval;
+	uint8_t bFrameIntervalType;
+	uint32_t dwFrameInterval[CONFIG_USBD_VIDEO_MAX_FRMIVAL];
+} __packed;
+
+struct uvc_color_descriptor {
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bDescriptorSubtype;
+	uint8_t bColorPrimaries;
+	uint8_t bTransferCharacteristics;
+	uint8_t bMatrixCoefficients;
+#define UVC_COLOR_BT709 1
+#define UVC_COLOR_BT470M 2
+#define UVC_COLOR_BT470BG 3
+#define UVC_COLOR_BT601 4
+#define UVC_COLOR_SMPTE170M 4
+#define UVC_COLOR_SMPTE240M 5
+#define UVC_COLOR_LINEAR 6
+#define UVC_COLOR_SRGB 7
+} __packed;
+
+struct uvc_probe {
+	uint16_t bmHint;
+	uint8_t bFormatIndex;
+	uint8_t bFrameIndex;
+	uint32_t dwFrameInterval;
+	uint16_t wKeyFrameRate;
+	uint16_t wPFrameRate;
+	uint16_t wCompQuality;
+	uint16_t wCompWindowSize;
+	uint16_t wDelay;
+	uint32_t dwMaxVideoFrameSize;
+	uint32_t dwMaxPayloadTransferSize;
+	uint32_t dwClockFrequency;
+	uint8_t bmFramingInfo;
+#define UVC_BMFRAMING_INFO_FID BIT(0)
+#define UVC_BMFRAMING_INFO_EOF BIT(1)
+#define UVC_BMFRAMING_INFO_EOS BIT(2)
+	uint8_t bPreferedVersion;
+	uint8_t bMinVersion;
+	uint8_t bMaxVersion;
+	uint8_t bUsage;
+	uint8_t bBitDepthLuma;
+	uint8_t bmSettings;
+	uint8_t bMaxNumberOfRefFramesPlus1;
+	uint16_t bmRateControlModes;
+	uint64_t bmLayoutPerStream;
+} __packed;
+
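+/*
+ * Example (illustrative only): the dwFrameInterval fields above are
+ * expressed in 100 ns units, as defined by the UVC specification, so a
+ * 30 frames-per-second stream advertises
+ * dwFrameInterval = 10000000 / 30 (about 333333).
+ */
+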
+/* This is the particular variant of this struct used by the Zephyr
+ * implementation. Other organizations of the fields are allowed by the
+ * standard.
+ */
+struct uvc_payload_header {
+	uint8_t bHeaderLength;
+	uint8_t bmHeaderInfo;
+	uint32_t dwPresentationTime; /* optional */
+	uint32_t scrSourceClockSTC; /* optional */
+	uint16_t scrSourceClockSOF; /* optional */
+} __packed;
+
+#endif /* ZEPHYR_INCLUDE_USBD_CLASS_UVC_H_ */