diff --git a/drivers/serial/Kconfig.nrfx b/drivers/serial/Kconfig.nrfx
index f91980c058b5..cb74a1e41430 100644
--- a/drivers/serial/Kconfig.nrfx
+++ b/drivers/serial/Kconfig.nrfx
@@ -78,6 +78,32 @@ config UART_ASYNC_TX_CACHE_SIZE
 	  in RAM, because EasyDMA in UARTE peripherals can only transfer data
 	  from RAM.
 
+config UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER
+	bool "Use TIMER to count RX bytes"
+	depends on UART_ASYNC_API
+	depends on UART_NRFX_UARTE_LEGACY_SHIM
+	depends on !ARCH_POSIX # Mode not supported on BSIM target
+	select NRFX_GPPI
+
+config UART_NRFX_UARTE_BOUNCE_BUF_LEN
+	int "RX bounce buffer size"
+	depends on UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER
+	default 256
+	range 64 1024
+	help
+	  Size of the buffer used when the bounce buffer workaround is applied.
+
+config UART_NRFX_UARTE_BOUNCE_BUF_SWAP_LATENCY
+	int "RX bounce buffer swap latency (in microseconds)"
+	depends on UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER
+	default 300
+	help
+	  Option decides how long before the current bounce buffer is filled the
+	  driver attempts to swap buffers. The latency must be long enough to
+	  ensure that the memory following the buffer is not overwritten. A higher
+	  value results in more frequent buffer swaps, which impacts performance.
+	  The setting should take potential interrupt handling latency into account.
+
 config UART_NRFX_UARTE_DIRECT_ISR
 	bool "Use direct ISR"
 
diff --git a/drivers/serial/Kconfig.nrfx_uart_instance b/drivers/serial/Kconfig.nrfx_uart_instance
index fa93a8144828..e7e8f09312b2 100644
--- a/drivers/serial/Kconfig.nrfx_uart_instance
+++ b/drivers/serial/Kconfig.nrfx_uart_instance
@@ -18,6 +18,13 @@ config UART_$(nrfx_uart_num)_ASYNC
 	help
 	  This option enables UART Asynchronous API support on port $(nrfx_uart_num).
 
+config UART_$(nrfx_uart_num)_COUNT_BYTES_WITH_TIMER
+	bool
+	depends on $(dt_nodelabel_has_prop,uart$(nrfx_uart_num),timer)
+	depends on HAS_HW_NRF_UARTE$(nrfx_uart_num)
+	default y
+	imply UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER
+
 config UART_$(nrfx_uart_num)_ENHANCED_POLL_OUT
 	bool "Efficient poll out on port $(nrfx_uart_num)"
 	depends on !$(dt_nodelabel_bool_prop,uart$(nrfx_uart_num),endtx-stoptx-supported)
diff --git a/drivers/serial/uart_nrfx_uarte.c b/drivers/serial/uart_nrfx_uarte.c
index 46c5a8b83d2e..bfccf4ce5845 100644
--- a/drivers/serial/uart_nrfx_uarte.c
+++ b/drivers/serial/uart_nrfx_uarte.c
@@ -167,6 +167,16 @@ BUILD_ASSERT(IS_ENABLED(CONFIG_CLOCK_CONTROL));
 /* Size of hardware fifo in RX path. */
 #define UARTE_HW_RX_FIFO_SIZE 5
 
+/* TIMER CC channels used for counting bytes with the TIMER. */
+/* Channel used for capturing the current counter value. */
+#define UARTE_TIMER_CAPTURE_CH 0
+/* Channel used to get a compare event when the number of received bytes reaches the user buffer size. */
+#define UARTE_TIMER_USR_CNT_CH 1
+/* Channel used to get a compare event when the bounce buffer needs to be switched. */
+#define UARTE_TIMER_BUF_SWITCH_CH 2
+/* Magic byte that is used to fill the buffer. */
+#define UARTE_MAGIC_BYTE 0xAA
+
 #ifdef UARTE_ANY_ASYNC
 
 struct uarte_async_tx {
@@ -180,6 +190,30 @@ struct uarte_async_tx {
 	bool pending;
 };
 
+/* Structure with data for the Count Bytes With Timer (cbwt) receiver mode.
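+ * In this mode the UARTE receives continuously into two alternating bounce
+ * buffers (EasyDMA in single-byte LIST mode) while a TIMER, driven from the
+ * RXDRDY event over (D)PPI, counts the received bytes; data is then copied
+ * out of the bounce buffers into the user-provided buffer.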
+ */
+struct uarte_async_rx_cbwt {
+	uint8_t *curr_bounce_buf;
+	uint8_t *anomaly_byte_addr;
+	uint32_t usr_rd_off;
+	uint32_t usr_wr_off;
+	uint32_t bounce_off;
+	uint32_t bounce_limit;
+	uint32_t last_cnt;
+	uint32_t cc_usr;
+	uint32_t cc_swap;
+#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
+	size_t bounce_buf_swap_len;
+#endif
+#ifdef UARTE_ANY_CACHE
+	uint8_t *anomaly_byte_dst;
+	uint8_t anomaly_byte;
+#endif
+	uint8_t bounce_idx;
+	uint8_t ppi_ch;
+	bool in_irq;
+	bool discard_fifo;
+};
+
 struct uarte_async_rx {
 	struct k_timer timer;
 #ifdef CONFIG_HAS_NORDIC_DMM
@@ -191,8 +225,9 @@ struct uarte_async_rx {
 	size_t offset;
 	uint8_t *next_buf;
 	size_t next_buf_len;
-#ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX
-#if !defined(UARTE_HAS_FRAME_TIMEOUT)
+#if defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) || \
+	defined(CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER)
+#if !defined(UARTE_HAS_FRAME_TIMEOUT) || defined(CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER)
 	uint32_t idle_cnt;
 #endif
 	k_timeout_t timeout;
@@ -265,6 +300,10 @@ struct uarte_nrfx_data {
 #define UARTE_FLAG_POLL_OUT BIT(3)
 /* Flag indicating that a workaround for not working frame timeout is active. */
 #define UARTE_FLAG_FTIMEOUT_WATCH BIT(4)
+/* Flag indicating that the UART_RX_BUF_REQUEST event needs to be generated from the interrupt context. */
+#define UARTE_FLAG_RX_BUF_REQ BIT(5)
+/* Flag indicating that the CC value in the TIMER was set too late. */
+#define UARTE_FLAG_LATE_CC BIT(6)
 
 /* If enabled then ENDTX is PPI'ed to TXSTOP */
 #define UARTE_CFG_FLAG_PPI_ENDTX BIT(0)
@@ -283,6 +322,12 @@ struct uarte_nrfx_data {
 /* Indicates that workaround for spurious RXTO during restart shall be applied. */
 #define UARTE_CFG_FLAG_SPURIOUS_RXTO BIT(3)
 
+/* Indicates that the UARTE/TIMER interrupt priority differs from the system clock's (GRTC/RTC). */
+#define UARTE_CFG_FLAG_VAR_IRQ BIT(4)
+
+/* Indicates that the instance needs special handling of the BAUDRATE register. */
+#define UARTE_CFG_FLAG_VOLATILE_BAUDRATE BIT(5)
+
 /* Formula for getting the baudrate settings is following:
  * 2^12 * (2^20 / (f_PCLK / desired_baudrate)) where f_PCLK is a frequency that
  * drives the UARTE.
@@ -319,6 +364,14 @@ struct uarte_nrfx_data {
 	(baudrate) == 921600  ? NRF_UARTE_BAUDRATE_921600 : \
 	(baudrate) == 1000000 ? NRF_UARTE_BAUDRATE_1000000 : 0)
 
+#define UARTE_MIN_BUF_SWAP_LEN 10
+
+/* Convert the configured swap latency (us) to a byte count: one byte takes 10
+ * bit-times, so bytes = latency_us * baudrate / 10^7 (e.g. 300 us at 1 Mbaud
+ * gives 30 bytes).
+ */
+#define UARTE_US_TO_BYTES(baudrate) \
+	DIV_ROUND_UP((CONFIG_UART_NRFX_UARTE_BOUNCE_BUF_SWAP_LATENCY * baudrate), 10000000)
+
+#define UARTE_BUF_SWAP_LEN(bounce_buf_len, baudrate) \
+	((bounce_buf_len) - MAX(UARTE_MIN_BUF_SWAP_LEN, UARTE_US_TO_BYTES(baudrate)))
+
 #define LOW_POWER_ENABLED(_config) \
 	(IS_ENABLED(UARTE_ANY_LOW_POWER) && \
 	 !IS_ENABLED(CONFIG_PM_DEVICE) && \
@@ -365,6 +418,15 @@ struct uarte_nrfx_config {
 #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */
 
 #ifdef UARTE_ANY_ASYNC
+#ifdef CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER
+	NRF_TIMER_Type *timer_regs;
+	IRQn_Type timer_irqn;
+	IRQn_Type uarte_irqn;
+	uint8_t *bounce_buf[2];
+	size_t bounce_buf_len;
+	size_t bounce_buf_swap_len;
+	struct uarte_async_rx_cbwt *cbwt_data;
+#endif
 	nrfx_timer_t timer;
 	uint8_t *tx_cache;
 	uint8_t *rx_flush_buf;
@@ -380,6 +442,12 @@ struct uarte_nrfx_config {
 	 (IS_ENABLED(UARTE_ANY_HW_ASYNC) ? \
 	  (config->flags & UARTE_CFG_FLAG_HW_BYTE_COUNTING) : false)
 
+/* Determine if the instance uses the approach of counting bytes with a TIMER (cbwt).
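+ * Only instances with a timer phandle in devicetree get cbwt_data allocated,
+ * so a non-NULL pointer selects the cbwt code paths at runtime.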
*/ +#define IS_CBWT(dev) \ + COND_CODE_1(CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER, \ + ((((const struct uarte_nrfx_config *)dev->config)->cbwt_data != NULL)), \ + (false)) + static inline NRF_UARTE_Type *get_uarte_instance(const struct device *dev) { const struct uarte_nrfx_config *config = dev->config; @@ -550,10 +618,23 @@ static int baudrate_set(const struct device *dev, uint32_t baudrate) return -EINVAL; } +#ifdef CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER + if (IS_CBWT(dev)) { + struct uarte_async_rx_cbwt *cbwt_data = config->cbwt_data; + + cbwt_data->bounce_buf_swap_len = UARTE_BUF_SWAP_LEN(config->bounce_buf_len, + baudrate); + } +#endif + #ifdef UARTE_BAUDRATE_RETENTION_WORKAROUND - struct uarte_nrfx_data *data = dev->data; + if (config->flags & UARTE_CFG_FLAG_VOLATILE_BAUDRATE) { + struct uarte_nrfx_data *data = dev->data; - data->nrf_baudrate = nrf_baudrate; + data->nrf_baudrate = nrf_baudrate; + } else { + nrf_uarte_baudrate_set(get_uarte_instance(dev), nrf_baudrate); + } #else nrf_uarte_baudrate_set(get_uarte_instance(dev), nrf_baudrate); #endif @@ -728,133 +809,885 @@ static void uarte_periph_enable(const struct device *dev) nrf_gpd_retain_pins_set(config->pcfg, false); #endif #if UARTE_BAUDRATE_RETENTION_WORKAROUND - nrf_uarte_baudrate_set(uarte, - COND_CODE_1(CONFIG_UART_USE_RUNTIME_CONFIGURE, - (data->nrf_baudrate), (config->nrf_baudrate))); + if (config->flags & UARTE_CFG_FLAG_VOLATILE_BAUDRATE) { + nrf_uarte_baudrate_set(uarte, + COND_CODE_1(CONFIG_UART_USE_RUNTIME_CONFIGURE, + (data->nrf_baudrate), (config->nrf_baudrate))); + } #endif -#ifdef UARTE_ANY_ASYNC - if (data->async) { - if (HW_RX_COUNTING_ENABLED(config)) { - const nrfx_timer_t *timer = &config->timer; +#ifdef UARTE_ANY_ASYNC + if (data->async) { + if (HW_RX_COUNTING_ENABLED(config)) { + const nrfx_timer_t *timer = &config->timer; + + nrfx_timer_enable(timer); + + for (int i = 0; i < data->async->rx.flush_cnt; i++) { + nrfx_timer_increment(timer); + } + } + return; + } +#endif + + if (IS_ENABLED(UARTE_ANY_NONE_ASYNC) && !config->disable_rx) { + nrf_uarte_rx_buffer_set(uarte, config->poll_in_byte, 1); + nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); + nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX); +#if defined(UARTE_INTERRUPT_DRIVEN) && defined(CONFIG_PM_DEVICE) + if (data->int_driven && data->int_driven->rx_irq_enabled) { + nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDRX_MASK); + } +#endif + } +} + +static void uarte_enable_locked(const struct device *dev, uint32_t act_mask) +{ + struct uarte_nrfx_data *data = dev->data; + bool already_active = (data->flags & UARTE_FLAG_LOW_POWER) != 0; + + data->flags |= act_mask; + if (already_active) { + /* Second direction already enabled so UARTE is enabled. */ + return; + } + + uarte_periph_enable(dev); +} + +/* At this point we should have irq locked and any previous transfer completed. + * Transfer can be started, no need to wait for completion. 
+ */ +static void tx_start(const struct device *dev, const uint8_t *buf, size_t len) +{ + const struct uarte_nrfx_config *config = dev->config; + NRF_UARTE_Type *uarte = get_uarte_instance(dev); + +#if defined(CONFIG_PM_DEVICE) && !defined(CONFIG_PM_DEVICE_RUNTIME) + enum pm_device_state state; + + (void)pm_device_state_get(dev, &state); + if (state != PM_DEVICE_STATE_ACTIVE) { + return; + } +#endif + + if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) { + sys_cache_data_flush_range((void *)buf, len); + } + + nrf_uarte_tx_buffer_set(uarte, buf, len); + if (!IS_ENABLED(UARTE_HAS_ENDTX_STOPTX_SHORT)) { + nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX); + } + nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_TXSTOPPED); + + if (LOW_POWER_ENABLED(config)) { + uarte_enable_locked(dev, UARTE_FLAG_LOW_POWER_TX); + } + nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTTX); +} + +#if defined(UARTE_ANY_ASYNC) +static void rx_timeout(struct k_timer *timer); +static void tx_timeout(struct k_timer *timer); + +static void user_callback(const struct device *dev, struct uart_event *evt) +{ + struct uarte_nrfx_data *data = dev->data; + + if (data->async->user_callback) { + data->async->user_callback(dev, evt, data->async->user_data); + } +} + +static void rx_buf_release(const struct device *dev, uint8_t *buf) +{ + struct uart_event evt = { + .type = UART_RX_BUF_RELEASED, + .data.rx_buf.buf = buf, + }; + + user_callback(dev, &evt); +} + +static void rx_disable_finalize(const struct device *dev) +{ + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; + struct uart_event evt = { + .type = UART_RX_DISABLED, + }; + + async_rx->enabled = false; + + if (LOW_POWER_ENABLED(cfg)) { + uint32_t key = irq_lock(); + + uarte_disable_locked(dev, UARTE_FLAG_LOW_POWER_RX); + irq_unlock(key); + } + + user_callback(dev, (struct uart_event *)&evt); + + /* runtime PM is put after the callback. In case uart is re-enabled from that + * callback we avoid suspending/resuming the device. 
+ */ + if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) { + pm_device_runtime_put_async(dev, K_NO_WAIT); + } +} + +static int rx_disable(const struct device *dev, bool api) +{ + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; + NRF_UARTE_Type *uarte = get_uarte_instance(dev); + int key; + + k_timer_stop(&async_rx->timer); + + key = irq_lock(); + +#ifdef CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + + if (cbwt_data) { + nrf_timer_event_clear(cfg->timer_regs, + nrf_timer_compare_event_get(UARTE_TIMER_BUF_SWITCH_CH)); + nrf_timer_event_clear(cfg->timer_regs, + nrf_timer_compare_event_get(UARTE_TIMER_USR_CNT_CH)); + nrf_timer_int_disable(cfg->timer_regs, + nrf_timer_compare_int_get(UARTE_TIMER_BUF_SWITCH_CH) | + nrf_timer_compare_int_get(UARTE_TIMER_USR_CNT_CH)); + nrf_uarte_shorts_disable(cfg->uarte_regs, NRF_UARTE_SHORT_ENDRX_STARTRX); + } +#endif + + if (async_rx->next_buf != NULL) { + nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX); + nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED); + } + + async_rx->enabled = false; + if (api) { + async_rx->discard_fifo = true; + } + + nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX); + irq_unlock(key); + + return 0; +} + +static int uarte_nrfx_rx_disable(const struct device *dev) +{ + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; + + if (async_rx->buf == NULL) { + return -EFAULT; + } + + return rx_disable(dev, true); +} + +#if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) +static void timer_handler(nrf_timer_event_t event_type, void *p_context) { } + +static int uarte_nrfx_rx_counting_init(const struct device *dev) +{ + struct uarte_nrfx_data *data = dev->data; + const struct uarte_nrfx_config *cfg = dev->config; + NRF_UARTE_Type *uarte = get_uarte_instance(dev); + int ret; + + if (HW_RX_COUNTING_ENABLED(cfg)) { + nrfx_timer_config_t tmr_config = NRFX_TIMER_DEFAULT_CONFIG( + NRF_TIMER_BASE_FREQUENCY_GET(cfg->timer.p_reg)); + uint32_t evt_addr = nrf_uarte_event_address_get(uarte, NRF_UARTE_EVENT_RXDRDY); + uint32_t tsk_addr = nrfx_timer_task_address_get(&cfg->timer, NRF_TIMER_TASK_COUNT); + + tmr_config.mode = NRF_TIMER_MODE_COUNTER; + tmr_config.bit_width = NRF_TIMER_BIT_WIDTH_32; + ret = nrfx_timer_init(&cfg->timer, + &tmr_config, + timer_handler); + if (ret != NRFX_SUCCESS) { + LOG_ERR("Timer already initialized"); + return -EINVAL; + } + + nrfx_timer_clear(&cfg->timer); + + ret = nrfx_gppi_channel_alloc(&data->async->rx.cnt.ppi); + if (ret != NRFX_SUCCESS) { + LOG_ERR("Failed to allocate PPI Channel"); + nrfx_timer_uninit(&cfg->timer); + return -EINVAL; + } + + nrfx_gppi_channel_endpoints_setup(data->async->rx.cnt.ppi, evt_addr, tsk_addr); + nrfx_gppi_channels_enable(BIT(data->async->rx.cnt.ppi)); + } else { + nrf_uarte_int_enable(uarte, NRF_UARTE_INT_RXDRDY_MASK); + } + + return 0; +} +#endif /* !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) */ + +#ifdef CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER + +static uint32_t get_byte_cnt(NRF_TIMER_Type *timer) +{ + nrf_timer_task_trigger(timer, nrf_timer_capture_task_get(UARTE_TIMER_CAPTURE_CH)); + + nrf_barrier_w(); + + return nrf_timer_cc_get(timer, UARTE_TIMER_CAPTURE_CH); +} + +static void rx_buf_req(const struct device *dev) +{ + struct uart_event evt = { + .type = UART_RX_BUF_REQUEST, + }; + + user_callback(dev, &evt); +} + +static bool notify_rx_rdy(const struct device *dev) 
+{ + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + size_t len = cbwt_data->usr_wr_off - cbwt_data->usr_rd_off; + + if (len == 0) { + return async_rx->buf != NULL; + } + + struct uart_event evt = { + .type = UART_RX_RDY, + .data.rx.buf = async_rx->buf, + .data.rx.len = len, + .data.rx.offset = cbwt_data->usr_rd_off + }; + user_callback(dev, &evt); + cbwt_data->usr_rd_off += len; + + if (cbwt_data->usr_rd_off == async_rx->buf_len) { + rx_buf_release(dev, async_rx->buf); + async_rx->buf = async_rx->next_buf; + async_rx->buf_len = async_rx->next_buf_len; + async_rx->next_buf_len = 0; + async_rx->next_buf = 0; + cbwt_data->usr_rd_off = 0; + cbwt_data->usr_wr_off = 0; + + if (async_rx->buf_len == 0) { + return false; + } + + /* Set past value to ensure that event will not expire after clearing but + * before setting the new value. + */ + nrf_timer_cc_set(cfg->timer_regs, UARTE_TIMER_USR_CNT_CH, cbwt_data->cc_usr - 10); + nrf_timer_event_clear(cfg->timer_regs, + nrf_timer_compare_event_get(UARTE_TIMER_USR_CNT_CH)); + cbwt_data->cc_usr += async_rx->buf_len; + nrf_timer_cc_set(cfg->timer_regs, UARTE_TIMER_USR_CNT_CH, cbwt_data->cc_usr); + + /* Check if CC is already in the past. In that case trigger CC handling.*/ + if (cbwt_data->cc_usr <= get_byte_cnt(cfg->timer_regs)) { + atomic_or(&data->flags, UARTE_FLAG_LATE_CC); + NRFX_IRQ_PENDING_SET(cfg->timer_irqn); + } else { + atomic_and(&data->flags, ~UARTE_FLAG_LATE_CC); + } + } + + return true; +} + +static void anomaly_byte_handle(const struct device *dev) +{ + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + uint8_t curr_byte, anomaly_byte; + uint32_t diff; + + if (cbwt_data->anomaly_byte_addr == NULL) { + return; + } + + diff = cfg->uarte_regs->DMA.RX.PTR - (uint32_t)cbwt_data->curr_bounce_buf; + /* Anomaly can be checked only if more than 1 byte is received to the current buffer. */ + if (diff < 2) { + return; + } + + if (IS_ENABLED(UARTE_ANY_CACHE) && (cfg->flags & UARTE_CFG_FLAG_CACHEABLE)) { + sys_cache_data_invd_range(cbwt_data->curr_bounce_buf, 1); + sys_cache_data_invd_range(cbwt_data->anomaly_byte_addr, 1); + } + + curr_byte = cbwt_data->curr_bounce_buf[0]; + anomaly_byte = *cbwt_data->anomaly_byte_addr; + if ((curr_byte == UARTE_MAGIC_BYTE) && (anomaly_byte != UARTE_MAGIC_BYTE)) { +#ifdef UARTE_ANY_CACHE + if (cfg->flags & UARTE_CFG_FLAG_CACHEABLE) { + /* We cannot write directly to curr_bounce_buf as it is written by + * DMA and with cache operations data may be overwritten. Copying + * need to be postponed to the moment when user buffer is filled. 
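+				 * The stored byte is applied later in fill_usr_buf(), when the
+				 * start of this bounce buffer is copied into the user buffer.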
+ */ + cbwt_data->anomaly_byte = anomaly_byte; + cbwt_data->anomaly_byte_dst = &cbwt_data->curr_bounce_buf[0]; + } else { + cbwt_data->curr_bounce_buf[0] = anomaly_byte; + } +#else + cbwt_data->curr_bounce_buf[0] = anomaly_byte; +#endif + } + + cbwt_data->anomaly_byte_addr = NULL; +} + +static uint32_t fill_usr_buf(const struct device *dev, uint32_t len) +{ + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + + uint8_t *buf = cfg->bounce_buf[cbwt_data->bounce_idx]; + uint32_t usr_rem = async_rx->buf_len - cbwt_data->usr_wr_off; + uint32_t bounce_rem = cbwt_data->bounce_limit - cbwt_data->bounce_off; + uint32_t cpy_len = MIN(bounce_rem, MIN(usr_rem, len)); + + __ASSERT(cpy_len + cbwt_data->bounce_off <= cfg->bounce_buf_len, + "Exceeding the buffer cpy_len:%d off:%d limit:%d", + cpy_len, cbwt_data->bounce_off, cbwt_data->bounce_limit); + + if (IS_ENABLED(UARTE_ANY_CACHE) && (cfg->flags & UARTE_CFG_FLAG_CACHEABLE)) { + sys_cache_data_invd_range(&buf[cbwt_data->bounce_off], cpy_len); + } + + memcpy(&async_rx->buf[cbwt_data->usr_wr_off], &buf[cbwt_data->bounce_off], cpy_len); +#ifdef UARTE_ANY_CACHE + if ((buf == cbwt_data->anomaly_byte_dst) && (cbwt_data->bounce_off == 0)) { + async_rx->buf[cbwt_data->usr_wr_off] = cbwt_data->anomaly_byte; + cbwt_data->anomaly_byte_dst = NULL; + } +#endif + cbwt_data->bounce_off += cpy_len; + cbwt_data->usr_wr_off += cpy_len; + cbwt_data->last_cnt += cpy_len; + if (cbwt_data->bounce_off == cbwt_data->bounce_limit) { + /* Bounce buffer drained */ + cbwt_data->bounce_idx = cbwt_data->bounce_idx == 0 ? 1 : 0; + cbwt_data->bounce_off = 0; + cbwt_data->bounce_limit = cfg->bounce_buf_len; + } + + return cpy_len; +} + +static bool update_usr_buf(const struct device *dev, uint32_t len, bool notify_any, bool buf_req) +{ + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + + anomaly_byte_handle(dev); + + do { + uint32_t cpy_len = len ? fill_usr_buf(dev, len) : 0; + bool usr_buf_full = cbwt_data->usr_wr_off == async_rx->buf_len; + + len -= cpy_len; + if (((len == 0) && notify_any) || usr_buf_full) { + if (!notify_rx_rdy(dev)) { + return false; + } + + if (usr_buf_full && buf_req) { + rx_buf_req(dev); + } + } + } while (len > 0); + + return true; +} + +static void prepare_bounce_buf(const struct device *dev, uint8_t *buf, + size_t swap_len, size_t len) +{ + const struct uarte_nrfx_config *cfg = dev->config; + + buf[0] = UARTE_MAGIC_BYTE; + for (size_t i = swap_len; i < len; i++) { + buf[i] = UARTE_MAGIC_BYTE; + } + + if (IS_ENABLED(UARTE_ANY_CACHE) && (cfg->flags & UARTE_CFG_FLAG_CACHEABLE)) { + sys_cache_data_flush_range(buf, 1); + sys_cache_data_flush_range(&buf[swap_len], len); + } +} + +/* This function is responsible for swapping the bounce buffer and it is the most + * tricky part of the solution. Receiver is continuously working and we want to + * change DMA pointer on the fly. DMA is also incrementing that pointer so there are + * moments in the reception when updating the pointer will result in different behavior. + * + * There are two main cases that need to be handled: + * 1. PTR is updated and there was no byte boundary (in the middle of a byte or there is + * no byte on the line). It is a safe spot. + * + * The most common and simplest case. 
PTR is updated, but since
+ * DMA has already started reception of the previous byte, the next byte will
+ * be stored at the previous PTR and the bytes following it will be stored in
+ * the new bounce buffer.
+ *
+ * 2. Updating the pointer collided with a byte boundary.
+ *
+ * The RXDRDY and RXSTARTED events are used to detect whether a collision
+ * occurred. There are a few scenarios that may happen and the driver must
+ * detect which one occurred. Detection is done by reading back the PTR
+ * register. The following cases are considered:
+ *
+ * - PTR did not change. It means that it was written after the byte boundary,
+ *   which is the same case as if PTR was updated in the safe spot.
+ *
+ * - PTR is incremented by 1. There is an anomaly and it is unclear where the
+ *   next byte will be copied. The PTR state indicates that it should be copied
+ *   to the beginning of the new bounce buffer, but it might be copied to the
+ *   previous bounce buffer. Both locations are written with a magic byte (0xAA)
+ *   and later it is checked which location has changed; if the byte was written
+ *   to the previous bounce buffer, it is copied to the start of the new bounce
+ *   buffer.
+ *
+ * - PTR was not updated with the new bounce buffer location. DMA increments the
+ *   PTR content, so it is possible that SW wrote the new value between DMA's
+ *   read and modify, and DMA then overwrote the value written by the driver. In
+ *   that case reception continues into the previous bounce buffer and the swap
+ *   procedure needs to be repeated.
+ */
+static int bounce_buf_swap(const struct device *dev, uint8_t *prev_bounce_buf)
+{
+	const struct uarte_nrfx_config *cfg = dev->config;
+	struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data;
+	uint32_t prev_buf_cnt, new_cnt, cnt, ptr;
+	uint32_t prev_buf_inc = 1;
+	int key;
+
+	key = irq_lock();
+	/* Clear events that indicate a byte boundary and set PTR. If the events are set
+	 * after PTR is set then we know that setting PTR collided with a byte boundary.
+	 */
+	nrf_uarte_event_clear(cfg->uarte_regs, NRF_UARTE_EVENT_RXSTARTED);
+	nrf_uarte_event_clear(cfg->uarte_regs, NRF_UARTE_EVENT_RXDRDY);
+	cfg->uarte_regs->DMA.RX.PTR = (uint32_t)cbwt_data->curr_bounce_buf;
+	cnt = get_byte_cnt(cfg->timer_regs);
+
+	if (!nrf_uarte_event_check(cfg->uarte_regs, NRF_UARTE_EVENT_RXDRDY) &&
+	    !nrf_uarte_event_check(cfg->uarte_regs, NRF_UARTE_EVENT_RXSTARTED)) {
+		/* RXDRDY did not happen when PTR was set. The safest case: PTR was
+		 * updated correctly and the last byte will be received into the
+		 * previous buffer.
+		 */
+		new_cnt = 0;
+		prev_buf_cnt = cnt - cbwt_data->last_cnt;
+		goto no_collision;
+	}
+
+	/* Setting PTR collided with a byte boundary; we need to detect what happened. */
+	while (!nrf_uarte_event_check(cfg->uarte_regs, NRF_UARTE_EVENT_RXSTARTED)) {
+	}
+
+	/* Read the pointer when there is no new byte coming. */
+	do {
+		cnt = get_byte_cnt(cfg->timer_regs);
+		ptr = cfg->uarte_regs->DMA.RX.PTR;
+	} while (cnt != get_byte_cnt(cfg->timer_regs));
+
+	new_cnt = ptr - (uint32_t)cbwt_data->curr_bounce_buf;
+	prev_buf_cnt = cnt - cbwt_data->last_cnt;
+
+	if (new_cnt == 0) {
+		/* The new PTR is not incremented. It was written after the LIST
+		 * post-ENDRX incrementation.
+		 */
+	} else if (new_cnt == 1) {
+		/* The new PTR is incremented. It is possible that the data was already
+		 * copied to the new location, or that it was written to the tail of the
+		 * previous bounce buffer. We try to detect which happened.
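+		 * Both candidate locations were pre-filled with the magic byte by
+		 * prepare_bounce_buf(), so anomaly_byte_handle() can later check
+		 * which one changed.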
+ */ + prev_buf_inc = 0; + cbwt_data->anomaly_byte_addr = &prev_bounce_buf[cbwt_data->bounce_off + prev_buf_cnt]; + } else if (new_cnt <= cfg->bounce_buf_len) { + prev_buf_inc = 0; + prev_buf_cnt = cnt - cbwt_data->last_cnt - (new_cnt - 1); + } else { + /* New PTR value is not set. Re-set PTR is needed. Transfer continues to + * previous buffer whole buffer swapping need to be repeat. + */ + irq_unlock(key); + return -EAGAIN; + } + +no_collision: + cbwt_data->bounce_limit = cbwt_data->bounce_off + prev_buf_cnt + prev_buf_inc; + __ASSERT(cbwt_data->bounce_limit < cfg->bounce_buf_len, + "Too high limit (%d, max:%d), increase latency", + cbwt_data->bounce_limit, cfg->bounce_buf_len); + irq_unlock(key); + + return prev_buf_cnt; +} + +static size_t get_swap_len(const struct device *dev) +{ + const struct uarte_nrfx_config *cfg = dev->config; +#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + + return cbwt_data->bounce_buf_swap_len; +#else + return cfg->bounce_buf_swap_len; +#endif +} + +static void bounce_buf_switch(const struct device *dev) +{ + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + int new_data = cbwt_data->cc_swap - cbwt_data->last_cnt; + uint8_t *prev_bounce_buf = cbwt_data->curr_bounce_buf; + int prev_cnt; + + /* Fill user buffer with all pending data. */ + if (!update_usr_buf(dev, new_data < 0 ? 0 : new_data, false, true)) { + rx_disable(dev, false); + return; + } + + cbwt_data->curr_bounce_buf = (cbwt_data->curr_bounce_buf == cfg->bounce_buf[0]) ? + cfg->bounce_buf[1] : cfg->bounce_buf[0]; + prepare_bounce_buf(dev, cbwt_data->curr_bounce_buf, get_swap_len(dev), + cfg->bounce_buf_len); + + /* Swapping may need retry. */ + while ((prev_cnt = bounce_buf_swap(dev, prev_bounce_buf)) < 0) { + } + + /* Update user buffer with data that was received during swapping. */ + if (update_usr_buf(dev, prev_cnt, false, true)) { + /* Set compare event for next moment when bounce buffers need to be swapped. */ + cbwt_data->cc_swap += get_swap_len(dev); + __ASSERT(cbwt_data->cc_swap > get_byte_cnt(cfg->timer_regs), + "Setting CC too late next:%d cnt:%d", + cbwt_data->cc_swap, get_byte_cnt(cfg->timer_regs)); + nrf_timer_cc_set(cfg->timer_regs, UARTE_TIMER_BUF_SWITCH_CH, cbwt_data->cc_swap); + } else { + /* Stop RX. */ + rx_disable(dev, false); + } +} + +static void usr_buf_complete(const struct device *dev) +{ + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + uint32_t rem = async_rx->buf_len - cbwt_data->usr_wr_off; + + __ASSERT_NO_MSG(rem <= (get_byte_cnt(cfg->timer_regs) - cbwt_data->last_cnt)); + + if (!update_usr_buf(dev, rem, true, true)) { + /* Stop RX if there is no next buffer. 
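+		 * update_usr_buf() returns false when the user buffer was completed
+		 * and no next buffer was provided.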
*/ + rx_disable(dev, false); + } +} + +static void notify_new_data(const struct device *dev, bool buf_req) +{ + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + uint32_t cnt = get_byte_cnt(cfg->timer_regs); + uint32_t new_data = cnt - cbwt_data->last_cnt; + + (void)update_usr_buf(dev, new_data, true, buf_req); +} + +static void cbwt_rx_timeout(struct k_timer *timer) +{ + const struct device *dev = k_timer_user_data_get(timer); + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + struct uarte_async_rx *async_rx = &data->async->rx; + + if (nrf_uarte_event_check(cfg->uarte_regs, NRF_UARTE_EVENT_RXDRDY)) { + nrf_uarte_event_clear(cfg->uarte_regs, NRF_UARTE_EVENT_RXDRDY); + async_rx->idle_cnt = 0; + } else { + async_rx->idle_cnt++; + if (async_rx->idle_cnt == (RX_TIMEOUT_DIV - 1)) { + if (cfg->flags & UARTE_CFG_FLAG_VAR_IRQ) { + if (cbwt_data->in_irq) { + /* TIMER or UARTE interrupt preempted. Lets try again + * later. + */ + k_timer_start(timer, async_rx->timeout, K_NO_WAIT); + return; + } + irq_disable(cfg->uarte_irqn); + irq_disable(cfg->timer_irqn); + } + + nrf_uarte_int_enable(cfg->uarte_regs, NRF_UARTE_INT_RXDRDY_MASK); + notify_new_data(dev, true); + + if (cfg->flags & UARTE_CFG_FLAG_VAR_IRQ) { + irq_enable(cfg->uarte_irqn); + irq_enable(cfg->timer_irqn); + } + return; + } + } + + k_timer_start(timer, async_rx->timeout, K_NO_WAIT); +} + +static void cbwt_rx_flush_handle(const struct device *dev) +{ + const struct uarte_nrfx_config *cfg = dev->config; + NRF_UARTE_Type *uarte = get_uarte_instance(dev); + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + uint32_t rem_data = get_byte_cnt(cfg->timer_regs) - cbwt_data->last_cnt; + uint32_t bbuf_rem_data = cbwt_data->bounce_limit - cbwt_data->bounce_off; + uint32_t amount; + uint8_t *dst; + + nrf_uarte_rx_buffer_set(uarte, cfg->rx_flush_buf, UARTE_HW_RX_FIFO_SIZE); + nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED); + nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_FLUSHRX); + while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) { + /* empty */ + } + + nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); + nrf_uarte_rx_maxcnt_set(uarte, 1); + if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) { + /* FIFO is empty. */ + return; + } + + nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED); + amount = nrf_uarte_rx_amount_get(uarte); + + if (rem_data <= bbuf_rem_data) { + /* instead of -1 it should be -amount but RXDRDY event is not generated + * for bytes following first that goes to FIFO they are generated during flushing. + */ + dst = &cfg->bounce_buf[cbwt_data->bounce_idx][cbwt_data->bounce_off + rem_data - 1]; + } else { + /* See comment in if clause. 
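+		 * Here the remaining flushed data belongs to the newly swapped
+		 * (current) bounce buffer.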
*/ + dst = &cbwt_data->curr_bounce_buf[rem_data - bbuf_rem_data - 1]; + } + + if (IS_ENABLED(UARTE_ANY_CACHE) && (cfg->flags & UARTE_CFG_FLAG_CACHEABLE)) { + sys_cache_data_invd_range(cfg->rx_flush_buf, amount); + sys_cache_data_invd_range(dst, amount); + } + + memcpy(dst, cfg->rx_flush_buf, amount); +} + +static void cbwt_rxto_isr(const struct device *dev, bool do_flush) +{ + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + + if (async_rx->buf) { + notify_new_data(dev, false); + } + + if (async_rx->buf) { + rx_buf_release(dev, async_rx->buf); + async_rx->buf = NULL; + } - nrfx_timer_enable(timer); + if (async_rx->next_buf) { + rx_buf_release(dev, async_rx->next_buf); + async_rx->next_buf = NULL; + } - for (int i = 0; i < data->async->rx.flush_cnt; i++) { - nrfx_timer_increment(timer); - } - } - return; + if (do_flush) { + cbwt_rx_flush_handle(dev); } -#endif - if (IS_ENABLED(UARTE_ANY_NONE_ASYNC) && !config->disable_rx) { - nrf_uarte_rx_buffer_set(uarte, config->poll_in_byte, 1); - nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); - nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX); -#if defined(UARTE_INTERRUPT_DRIVEN) && defined(CONFIG_PM_DEVICE) - if (data->int_driven && data->int_driven->rx_irq_enabled) { - nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDRX_MASK); - } -#endif + if (async_rx->discard_fifo) { + cbwt_data->discard_fifo = async_rx->discard_fifo; + async_rx->discard_fifo = false; } + nrf_timer_task_trigger(cfg->timer_regs, NRF_TIMER_TASK_STOP); + rx_disable_finalize(dev); } -static void uarte_enable_locked(const struct device *dev, uint32_t act_mask) +static bool timer_ch_evt_check_clear(NRF_TIMER_Type *timer, uint32_t ch) { - struct uarte_nrfx_data *data = dev->data; - bool already_active = (data->flags & UARTE_FLAG_LOW_POWER) != 0; + nrf_timer_event_t evt = nrf_timer_compare_event_get(ch); - data->flags |= act_mask; - if (already_active) { - /* Second direction already enabled so UARTE is enabled. */ - return; + if (nrf_timer_event_check(timer, evt)) { + nrf_timer_event_clear(timer, evt); + return true; } - uarte_periph_enable(dev); + return false; } -/* At this point we should have irq locked and any previous transfer completed. - * Transfer can be started, no need to wait for completion. - */ -static void tx_start(const struct device *dev, const uint8_t *buf, size_t len) +static void timer_isr(const void *arg) { - const struct uarte_nrfx_config *config = dev->config; - NRF_UARTE_Type *uarte = get_uarte_instance(dev); + const struct device *dev = arg; + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + static const uint32_t flags_to_check = UARTE_FLAG_RX_BUF_REQ | + UARTE_FLAG_TRIG_RXTO | + UARTE_FLAG_LATE_CC; + uint32_t flags = atomic_and(&data->flags, ~flags_to_check); -#if defined(CONFIG_PM_DEVICE) && !defined(CONFIG_PM_DEVICE_RUNTIME) - enum pm_device_state state; + cbwt_data->in_irq = true; - (void)pm_device_state_get(dev, &state); - if (state != PM_DEVICE_STATE_ACTIVE) { - return; + if (timer_ch_evt_check_clear(cfg->timer_regs, UARTE_TIMER_USR_CNT_CH) || + (flags & UARTE_FLAG_LATE_CC)) { + usr_buf_complete(dev); } -#endif - if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) { - sys_cache_data_flush_range((void *)buf, len); + /* Must be after user buf complet CC handling. 
*/ + if (timer_ch_evt_check_clear(cfg->timer_regs, UARTE_TIMER_BUF_SWITCH_CH)) { + bounce_buf_switch(dev); } - nrf_uarte_tx_buffer_set(uarte, buf, len); - if (!IS_ENABLED(UARTE_HAS_ENDTX_STOPTX_SHORT)) { - nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX); + if (flags & UARTE_FLAG_RX_BUF_REQ) { + rx_buf_req(dev); } - nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_TXSTOPPED); - if (LOW_POWER_ENABLED(config)) { - uarte_enable_locked(dev, UARTE_FLAG_LOW_POWER_TX); + if (flags & UARTE_FLAG_TRIG_RXTO) { + cbwt_rxto_isr(dev, false); } - nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTTX); + + cbwt_data->in_irq = false; } -#if defined(UARTE_ANY_ASYNC) -static void rx_timeout(struct k_timer *timer); -static void tx_timeout(struct k_timer *timer); +static void cbwt_rx_enable(const struct device *dev, bool with_timeout) +{ + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + uint32_t rem_data; + uint32_t len = async_rx->buf_len; + uint32_t rx_int_mask = NRF_UARTE_INT_RXTO_MASK | + (with_timeout ? NRF_UARTE_INT_RXDRDY_MASK : 0); + + if (cbwt_data->discard_fifo) { + rem_data = 0; + cbwt_data->discard_fifo = false; + } else { + rem_data = get_byte_cnt(cfg->timer_regs) - cbwt_data->last_cnt; + } -#if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) -static void timer_handler(nrf_timer_event_t event_type, void *p_context) { } + cbwt_data->usr_rd_off = 0; + cbwt_data->usr_wr_off = 0; -static int uarte_nrfx_rx_counting_init(const struct device *dev) + if (rem_data >= len) { + atomic_or(&data->flags, UARTE_FLAG_TRIG_RXTO); + NRFX_IRQ_PENDING_SET(cfg->timer_irqn); + return; + } else if (rem_data) { + (void)update_usr_buf(dev, rem_data, false, true); + len -= rem_data; + } + + prepare_bounce_buf(dev, cfg->bounce_buf[0], get_swap_len(dev), cfg->bounce_buf_len); + + cbwt_data->last_cnt = 0; + cbwt_data->bounce_off = 0; + cbwt_data->bounce_idx = 0; + cbwt_data->curr_bounce_buf = cfg->bounce_buf[0]; + cbwt_data->bounce_limit = cfg->bounce_buf_len; + /* Enable ArrayList. */ + nrf_uarte_shorts_enable(cfg->uarte_regs, NRF_UARTE_SHORT_ENDRX_STARTRX); + nrf_uarte_event_clear(cfg->uarte_regs, NRF_UARTE_EVENT_RXDRDY); + nrf_uarte_int_enable(cfg->uarte_regs, rx_int_mask); + nrf_uarte_rx_ptr_set(cfg->uarte_regs, cbwt_data->curr_bounce_buf); + + nrf_timer_event_clear(cfg->timer_regs, + nrf_timer_compare_event_get(UARTE_TIMER_BUF_SWITCH_CH)); + nrf_timer_event_clear(cfg->timer_regs, + nrf_timer_compare_event_get(UARTE_TIMER_USR_CNT_CH)); + nrf_timer_int_enable(cfg->timer_regs, + nrf_timer_compare_int_get(UARTE_TIMER_BUF_SWITCH_CH) | + nrf_timer_compare_int_get(UARTE_TIMER_USR_CNT_CH)); + nrf_timer_task_trigger(cfg->timer_regs, NRF_TIMER_TASK_CLEAR); + nrf_timer_task_trigger(cfg->timer_regs, NRF_TIMER_TASK_START); + cbwt_data->cc_usr = len; + cbwt_data->cc_swap = get_swap_len(dev); + nrf_timer_cc_set(cfg->timer_regs, UARTE_TIMER_BUF_SWITCH_CH, get_swap_len(dev)); + nrf_timer_cc_set(cfg->timer_regs, UARTE_TIMER_USR_CNT_CH, len); + + atomic_or(&data->flags, UARTE_FLAG_RX_BUF_REQ); + nrf_uarte_task_trigger(cfg->uarte_regs, NRF_UARTE_TASK_STARTRX); + NRFX_IRQ_PENDING_SET(cfg->timer_irqn); +} + +static int cbwt_uarte_async_init(const struct device *dev) { - struct uarte_nrfx_data *data = dev->data; + /* As this approach does not use nrfx_timer driver but only HAL special setup + * function is used. 
+ */ const struct uarte_nrfx_config *cfg = dev->config; - NRF_UARTE_Type *uarte = get_uarte_instance(dev); - int ret; + struct uarte_async_rx_cbwt *cbwt_data = cfg->cbwt_data; + static const uint32_t rx_int_mask = NRF_UARTE_INT_ERROR_MASK | + NRF_UARTE_INT_RXTO_MASK; + uint32_t evt = nrf_uarte_event_address_get(cfg->uarte_regs, NRF_UARTE_EVENT_RXDRDY); + uint32_t tsk = nrf_timer_task_address_get(cfg->timer_regs, NRF_TIMER_TASK_COUNT); + nrfx_err_t ret; - if (HW_RX_COUNTING_ENABLED(cfg)) { - nrfx_timer_config_t tmr_config = NRFX_TIMER_DEFAULT_CONFIG( - NRF_TIMER_BASE_FREQUENCY_GET(cfg->timer.p_reg)); - uint32_t evt_addr = nrf_uarte_event_address_get(uarte, NRF_UARTE_EVENT_RXDRDY); - uint32_t tsk_addr = nrfx_timer_task_address_get(&cfg->timer, NRF_TIMER_TASK_COUNT); + nrf_timer_mode_set(cfg->timer_regs, NRF_TIMER_MODE_COUNTER); + nrf_timer_bit_width_set(cfg->timer_regs, NRF_TIMER_BIT_WIDTH_32); - tmr_config.mode = NRF_TIMER_MODE_COUNTER; - tmr_config.bit_width = NRF_TIMER_BIT_WIDTH_32; - ret = nrfx_timer_init(&cfg->timer, - &tmr_config, - timer_handler); - if (ret != NRFX_SUCCESS) { - LOG_ERR("Timer already initialized"); - return -EINVAL; - } else { - nrfx_timer_clear(&cfg->timer); - } + ret = nrfx_gppi_channel_alloc(&cbwt_data->ppi_ch); + if (ret != NRFX_SUCCESS) { + return -ENOMEM; + } - ret = nrfx_gppi_channel_alloc(&data->async->rx.cnt.ppi); - if (ret != NRFX_SUCCESS) { - LOG_ERR("Failed to allocate PPI Channel"); - nrfx_timer_uninit(&cfg->timer); - return -EINVAL; - } + nrfx_gppi_channel_endpoints_setup(cbwt_data->ppi_ch, evt, tsk); + nrfx_gppi_channels_enable(BIT(cbwt_data->ppi_ch)); - nrfx_gppi_channel_endpoints_setup(data->async->rx.cnt.ppi, evt_addr, tsk_addr); - nrfx_gppi_channels_enable(BIT(data->async->rx.cnt.ppi)); - } else { - nrf_uarte_int_enable(uarte, NRF_UARTE_INT_RXDRDY_MASK); - } +#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE + cbwt_data->bounce_buf_swap_len = cfg->bounce_buf_swap_len; +#endif + + /* Enable EasyDMA LIST feature (it is exposed in SPIM but not in UARTE). */ + *(volatile uint32_t *)((uint32_t)cfg->uarte_regs + 0x714) = 1; + nrf_uarte_rx_maxcnt_set(cfg->uarte_regs, 1); + nrf_uarte_int_enable(cfg->uarte_regs, rx_int_mask); return 0; } -#endif /* !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) */ +#endif /* CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER */ static int uarte_async_init(const struct device *dev) { @@ -868,6 +1701,17 @@ static int uarte_async_init(const struct device *dev) ((IS_ENABLED(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) && !IS_ENABLED(UARTE_HAS_FRAME_TIMEOUT)) ? 
NRF_UARTE_INT_RXDRDY_MASK : 0); + k_timer_init(&data->async->rx.timer, rx_timeout, NULL); + k_timer_user_data_set(&data->async->rx.timer, (void *)dev); + k_timer_init(&data->async->tx.timer, tx_timeout, NULL); + k_timer_user_data_set(&data->async->tx.timer, (void *)dev); + +#ifdef CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER + if (IS_CBWT(dev)) { + return cbwt_uarte_async_init(dev); + } +#endif + #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) int ret = uarte_nrfx_rx_counting_init(dev); @@ -878,11 +1722,6 @@ static int uarte_async_init(const struct device *dev) nrf_uarte_int_enable(uarte, rx_int_mask); - k_timer_init(&data->async->rx.timer, rx_timeout, NULL); - k_timer_user_data_set(&data->async->rx.timer, (void *)dev); - k_timer_init(&data->async->tx.timer, tx_timeout, NULL); - k_timer_user_data_set(&data->async->tx.timer, (void *)dev); - return 0; } @@ -1009,15 +1848,6 @@ static int uarte_nrfx_tx_abort(const struct device *dev) return 0; } -static void user_callback(const struct device *dev, struct uart_event *evt) -{ - struct uarte_nrfx_data *data = dev->data; - - if (data->async->user_callback) { - data->async->user_callback(dev, evt, data->async->user_data); - } -} - static void notify_uart_rx_rdy(const struct device *dev, size_t len) { struct uarte_nrfx_data *data = dev->data; @@ -1031,29 +1861,6 @@ static void notify_uart_rx_rdy(const struct device *dev, size_t len) user_callback(dev, &evt); } -static void rx_buf_release(const struct device *dev, uint8_t *buf) -{ - struct uart_event evt = { - .type = UART_RX_BUF_RELEASED, - .data.rx_buf.buf = buf, - }; - - user_callback(dev, &evt); -} - -static void notify_rx_disable(const struct device *dev) -{ - struct uart_event evt = { - .type = UART_RX_DISABLED, - }; - - user_callback(dev, (struct uart_event *)&evt); - - if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) { - pm_device_runtime_put_async(dev, K_NO_WAIT); - } -} - #ifdef UARTE_HAS_FRAME_TIMEOUT static uint32_t us_to_bauds(uint32_t baudrate, int32_t timeout) { @@ -1063,6 +1870,7 @@ static uint32_t us_to_bauds(uint32_t baudrate, int32_t timeout) } #endif + static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf, size_t len, int32_t timeout) @@ -1071,6 +1879,7 @@ static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf, struct uarte_async_rx *async_rx = &data->async->rx; const struct uarte_nrfx_config *cfg = dev->config; NRF_UARTE_Type *uarte = get_uarte_instance(dev); + bool with_timeout = timeout != SYS_FOREVER_US; if (cfg->disable_rx) { __ASSERT(false, "TX only UARTE instance"); @@ -1086,35 +1895,45 @@ static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf, } #ifdef CONFIG_HAS_NORDIC_DMM - uint8_t *dma_buf; - int ret = 0; + if (!IS_CBWT(dev)) { + uint8_t *dma_buf; + int ret = 0; - ret = dmm_buffer_in_prepare(cfg->mem_reg, buf, len, (void **)&dma_buf); - if (ret < 0) { - return ret; - } + ret = dmm_buffer_in_prepare(cfg->mem_reg, buf, len, (void **)&dma_buf); + if (ret < 0) { + return ret; + } - async_rx->usr_buf = buf; - buf = dma_buf; + async_rx->usr_buf = buf; + buf = dma_buf; + } #endif -#ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX +#if defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) || \ + defined(CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER) + #ifdef UARTE_HAS_FRAME_TIMEOUT - if (timeout != SYS_FOREVER_US) { + if (!IS_CBWT(dev) && with_timeout) { uint32_t baudrate = COND_CODE_1(CONFIG_UART_USE_RUNTIME_CONFIGURE, - (data->uart_config.baudrate), (cfg->baudrate)); - - async_rx->timeout = K_USEC(timeout); + (data->uart_config.baudrate), 
(cfg->baudrate)); nrf_uarte_frame_timeout_set(uarte, us_to_bauds(baudrate, timeout)); nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_FRAME_TIMEOUT_STOPRX); + } +#endif +#if !defined(UARTE_HAS_FRAME_TIMEOUT) || defined(CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER) + async_rx->idle_cnt = 0; +#endif + + if (with_timeout) { + if (!IS_CBWT(dev) && IS_ENABLED(UARTE_HAS_FRAME_TIMEOUT)) { + async_rx->timeout = K_USEC(timeout); + } else { + async_rx->timeout = with_timeout ? + K_USEC(timeout / RX_TIMEOUT_DIV) : K_NO_WAIT; + } } else { async_rx->timeout = K_NO_WAIT; } -#else - async_rx->timeout = (timeout == SYS_FOREVER_US) ? - K_NO_WAIT : K_USEC(timeout / RX_TIMEOUT_DIV); - async_rx->idle_cnt = 0; -#endif /* UARTE_HAS_FRAME_TIMEOUT */ #else async_rx->timeout_us = timeout; async_rx->timeout_slab = timeout / RX_TIMEOUT_DIV; @@ -1142,7 +1961,19 @@ static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf, } } pm_device_runtime_get(dev); + } else if (LOW_POWER_ENABLED(cfg)) { + unsigned int key = irq_lock(); + + uarte_enable_locked(dev, UARTE_FLAG_LOW_POWER_RX); + irq_unlock(key); + } + +#ifdef CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER + if (IS_CBWT(dev)) { + cbwt_rx_enable(dev, with_timeout); + return 0; } +#endif if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME) || LOW_POWER_ENABLED(cfg)) { if (async_rx->flush_cnt) { @@ -1180,7 +2011,7 @@ static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf, return 0; } else { #ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX - if (!K_TIMEOUT_EQ(async_rx->timeout, K_NO_WAIT)) { + if (with_timeout) { nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXDRDY); k_timer_start(&async_rx->timer, async_rx->timeout, K_NO_WAIT); @@ -1205,13 +2036,6 @@ static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf, async_rx->enabled = true; - if (LOW_POWER_ENABLED(cfg)) { - unsigned int key = irq_lock(); - - uarte_enable_locked(dev, UARTE_FLAG_LOW_POWER_RX); - irq_unlock(key); - } - nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX); return 0; @@ -1230,29 +2054,33 @@ static int uarte_nrfx_rx_buf_rsp(const struct device *dev, uint8_t *buf, err = -EACCES; } else if (async_rx->next_buf == NULL) { #ifdef CONFIG_HAS_NORDIC_DMM - uint8_t *dma_buf; - const struct uarte_nrfx_config *config = dev->config; + if (!IS_CBWT(dev)) { + uint8_t *dma_buf; + const struct uarte_nrfx_config *config = dev->config; - err = dmm_buffer_in_prepare(config->mem_reg, buf, len, (void **)&dma_buf); - if (err < 0) { - return err; + err = dmm_buffer_in_prepare(config->mem_reg, buf, len, (void **)&dma_buf); + if (err < 0) { + return err; + } + async_rx->next_usr_buf = buf; + buf = dma_buf; } - async_rx->next_usr_buf = buf; - buf = dma_buf; #endif async_rx->next_buf = buf; async_rx->next_buf_len = len; - nrf_uarte_rx_buffer_set(uarte, buf, len); - /* If buffer is shorter than RX FIFO then there is a risk that due - * to interrupt handling latency ENDRX event is not handled on time - * and due to ENDRX_STARTRX short data will start to be overwritten. - * In that case short is not enabled and ENDRX event handler will - * manually start RX for that buffer. Thanks to RX FIFO there is - * 5 byte time for doing that. If interrupt latency is higher and - * there is no HWFC in both cases data will be lost or corrupted. 
- */ - if (len >= UARTE_HW_RX_FIFO_SIZE) { - nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX); + if (!IS_CBWT(dev)) { + nrf_uarte_rx_buffer_set(uarte, buf, len); + /* If buffer is shorter than RX FIFO then there is a risk that due + * to interrupt handling latency ENDRX event is not handled on time + * and due to ENDRX_STARTRX short data will start to be overwritten. + * In that case short is not enabled and ENDRX event handler will + * manually start RX for that buffer. Thanks to RX FIFO there is + * 5 byte time for doing that. If interrupt latency is higher and + * there is no HWFC in both cases data will be lost or corrupted. + */ + if (len >= UARTE_HW_RX_FIFO_SIZE) { + nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX); + } } err = 0; } else { @@ -1280,35 +2108,6 @@ static int uarte_nrfx_callback_set(const struct device *dev, return 0; } -static int uarte_nrfx_rx_disable(const struct device *dev) -{ - struct uarte_nrfx_data *data = dev->data; - struct uarte_async_rx *async_rx = &data->async->rx; - NRF_UARTE_Type *uarte = get_uarte_instance(dev); - int key; - - if (async_rx->buf == NULL) { - return -EFAULT; - } - - k_timer_stop(&async_rx->timer); - - key = irq_lock(); - - if (async_rx->next_buf != NULL) { - nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX); - nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED); - } - - async_rx->enabled = false; - async_rx->discard_fifo = true; - - nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX); - irq_unlock(key); - - return 0; -} - static void tx_timeout(struct k_timer *timer) { const struct device *dev = k_timer_user_data_get(timer); @@ -1331,6 +2130,13 @@ static void rx_timeout(struct k_timer *timer) NRF_UARTE_Type *uarte = get_uarte_instance(dev); #ifdef UARTE_HAS_FRAME_TIMEOUT +#ifdef CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER + if (IS_CBWT(dev)) { + cbwt_rx_timeout(timer); + return; + } +#endif + struct uarte_nrfx_data *data = dev->data; struct uarte_async_rx *async_rx = &data->async->rx; bool rxdrdy = nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXDRDY); @@ -1477,7 +2283,7 @@ static void error_isr(const struct device *dev) nrf_uarte_errorsrc_clear(uarte, err); user_callback(dev, &evt); - (void) uarte_nrfx_rx_disable(dev); + (void)rx_disable(dev, false); } static void rxstarted_isr(const struct device *dev) @@ -1696,7 +2502,6 @@ static void rxto_isr(const struct device *dev) * In the second case, additionally, data from the UARTE internal RX * FIFO need to be discarded. 
*/ - async_rx->enabled = false; if (async_rx->discard_fifo) { async_rx->discard_fifo = false; #if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) @@ -1724,15 +2529,7 @@ static void rxto_isr(const struct device *dev) #endif nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXDRDY); #endif - - if (LOW_POWER_ENABLED(config)) { - uint32_t key = irq_lock(); - - uarte_disable_locked(dev, UARTE_FLAG_LOW_POWER_RX); - irq_unlock(key); - } - - notify_rx_disable(dev); + rx_disable_finalize(dev); } static void txstopped_isr(const struct device *dev) @@ -1822,10 +2619,12 @@ static void txstopped_isr(const struct device *dev) static void rxdrdy_isr(const struct device *dev) { -#if !defined(UARTE_HAS_FRAME_TIMEOUT) +#if !defined(UARTE_HAS_FRAME_TIMEOUT) || defined(CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER) struct uarte_nrfx_data *data = dev->data; -#if defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) +#if defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) || \ + defined(CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER) + NRF_UARTE_Type *uarte = get_uarte_instance(dev); data->async->rx.idle_cnt = 0; @@ -1857,8 +2656,9 @@ static void uarte_nrfx_isr_async(const void *arg) struct uarte_async_rx *async_rx = &data->async->rx; uint32_t imask = nrf_uarte_int_enable_check(uarte, UINT32_MAX); - if (!(HW_RX_COUNTING_ENABLED(config) || IS_ENABLED(UARTE_HAS_FRAME_TIMEOUT)) - && event_check_clear(uarte, NRF_UARTE_EVENT_RXDRDY, NRF_UARTE_INT_RXDRDY_MASK, imask)) { + if ((IS_CBWT(dev) || + !(HW_RX_COUNTING_ENABLED(config) || IS_ENABLED(UARTE_HAS_FRAME_TIMEOUT))) && + event_check_clear(uarte, NRF_UARTE_EVENT_RXDRDY, NRF_UARTE_INT_RXDRDY_MASK, imask)) { rxdrdy_isr(dev); } @@ -1886,6 +2686,12 @@ static void uarte_nrfx_isr_async(const void *arg) rxstarted_isr(dev); } +#ifdef CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER + if (IS_CBWT(dev) && + event_check_clear(uarte, NRF_UARTE_EVENT_RXTO, NRF_UARTE_INT_RXTO_MASK, imask)) { + cbwt_rxto_isr(dev, true); + } else +#endif /* RXTO must be handled after ENDRX which should notify the buffer. * Skip if ENDRX is set when RXTO is set. 
It means that * ENDRX occurred after check for ENDRX in isr which may happen when @@ -1910,7 +2716,8 @@ static void uarte_nrfx_isr_async(const void *arg) txstopped_isr(dev); } - if (atomic_and(&data->flags, ~UARTE_FLAG_TRIG_RXTO) & UARTE_FLAG_TRIG_RXTO) { + if (!IS_CBWT(dev) && + (atomic_and(&data->flags, ~UARTE_FLAG_TRIG_RXTO) & UARTE_FLAG_TRIG_RXTO)) { #ifdef CONFIG_HAS_NORDIC_DMM int ret; @@ -1925,7 +2732,7 @@ static void uarte_nrfx_isr_async(const void *arg) rx_buf_release(dev, async_rx->buf); async_rx->buf_len = 0; async_rx->buf = NULL; - notify_rx_disable(dev); + rx_disable_finalize(dev); } } @@ -2521,6 +3328,46 @@ static int uarte_instance_init(const struct device *dev, return pm_device_driver_init(dev, uarte_nrfx_pm_action); } + +#define UARTE_TIMER_REG(idx) (NRF_TIMER_Type *)DT_REG_ADDR(DT_PHANDLE(UARTE(idx), timer)) + +#define UARTE_TIMER_IRQN(idx) DT_IRQN(DT_PHANDLE(UARTE(idx), timer)) + +#define UARTE_TIMER_IRQ_PRIO(idx) DT_IRQ(DT_PHANDLE(UARTE(idx), timer), priority) + +#define UARTE_COUNT_BYTES_WITH_TIMER_CONFIG(idx) \ + IF_ENABLED(UARTE_HAS_PROP(idx, timer), \ + (.timer_regs = UARTE_TIMER_REG(idx), \ + .timer_irqn = UARTE_TIMER_IRQN(idx), \ + .uarte_irqn = DT_IRQN(UARTE(idx)), \ + .bounce_buf = { \ + uart##idx##_bounce_buf, \ + &uart##idx##_bounce_buf[sizeof(uart##idx##_bounce_buf) / 2] \ + }, \ + .bounce_buf_len = sizeof(uart##idx##_bounce_buf) / 2, \ + .bounce_buf_swap_len = UARTE_BUF_SWAP_LEN(sizeof(uart##idx##_bounce_buf) / 2,\ + UARTE_US_TO_BYTES(UARTE_PROP(idx, current_speed))), \ + .cbwt_data = &uart##idx##_bounce_data,)) + +#define UARTE_COUNT_BYTES_WITH_TIMER_VALIDATE_CONFIG(idx) \ + __ASSERT_NO_MSG(UARTE_TIMER_IRQ_PRIO(idx) == DT_IRQ(UARTE(idx), priority)) + +#define UARTE_TIMER_IRQ_CONNECT(idx, func) \ + IF_ENABLED(UTIL_AND(IS_ENABLED(CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER), \ + UARTE_HAS_PROP(idx, timer)), \ + (UARTE_COUNT_BYTES_WITH_TIMER_VALIDATE_CONFIG(idx); \ + IRQ_CONNECT(UARTE_TIMER_IRQN(idx), UARTE_TIMER_IRQ_PRIO(idx), func, \ + DEVICE_DT_GET(UARTE(idx)), 0); \ + irq_enable(UARTE_TIMER_IRQN(idx));)) + +/* Macro sets flag to indicate that uart use different interrupt priority than the system clock. */ +#define UARTE_HAS_VAR_PRIO(idx) \ + COND_CODE_1(UTIL_AND(IS_ENABLED(CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER), \ + UARTE_HAS_PROP(idx, timer)), \ + ((DT_IRQ(UARTE(idx), priority) != DT_IRQ(DT_NODELABEL(grtc), priority)) ? \ + UARTE_CFG_FLAG_VAR_IRQ : 0), (0)) + + #define UARTE_GET_ISR(idx) \ COND_CODE_1(CONFIG_UART_##idx##_ASYNC, (uarte_nrfx_isr_async), (uarte_nrfx_isr_int)) @@ -2536,16 +3383,18 @@ static int uarte_instance_init(const struct device *dev, )) /* Depending on configuration standard or direct IRQ is connected. 
 */
-#define UARTE_IRQ_CONNECT(idx, irqn, prio) \
-	COND_CODE_1(CONFIG_UART_NRFX_UARTE_NO_IRQ, (), \
-		(COND_CODE_1(CONFIG_UART_NRFX_UARTE_DIRECT_ISR, \
-			(IRQ_DIRECT_CONNECT(irqn, prio, uarte_##idx##_direct_isr, 0)), \
-			(IRQ_CONNECT(irqn, prio, UARTE_GET_ISR(idx), DEVICE_DT_GET(UARTE(idx)), 0)))))
+#define UARTE_IRQ_CONNECT(idx, irqn, prio) \
+	COND_CODE_1(CONFIG_UART_NRFX_UARTE_NO_IRQ, (), \
+		(COND_CODE_1(CONFIG_UART_NRFX_UARTE_DIRECT_ISR, \
+			(IRQ_DIRECT_CONNECT(irqn, prio, uarte_##idx##_direct_isr, 0)), \
+			(IRQ_CONNECT(irqn, prio, UARTE_GET_ISR(idx), \
+				     DEVICE_DT_GET(UARTE(idx)), 0)))))
 
 #define UARTE_IRQ_CONFIGURE(idx) \
 	do { \
 		UARTE_IRQ_CONNECT(idx, DT_IRQN(UARTE(idx)), DT_IRQ(UARTE(idx), priority)); \
 		irq_enable(DT_IRQN(UARTE(idx))); \
+		UARTE_TIMER_IRQ_CONNECT(idx, timer_isr) \
 	} while (false)
 
 /* Low power mode is used when disable_rx is not defined or in async mode if
@@ -2635,6 +3484,12 @@ static int uarte_instance_init(const struct device *dev,
 	UARTE_INT_DRIVEN(idx); \
 	PINCTRL_DT_DEFINE(UARTE(idx)); \
 	IF_ENABLED(CONFIG_UART_##idx##_ASYNC, ( \
+		IF_ENABLED(UTIL_AND(IS_ENABLED(CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER), \
+				    UARTE_HAS_PROP(idx, timer)), \
+			(static uint8_t uart##idx##_bounce_buf[CONFIG_UART_NRFX_UARTE_BOUNCE_BUF_LEN] \
+				DMM_MEMORY_SECTION(UARTE(idx)); \
+			 static struct uarte_async_rx_cbwt uart##idx##_bounce_data; \
+			)) \
 		static uint8_t \
 			uarte##idx##_tx_cache[CONFIG_UART_ASYNC_TX_CACHE_SIZE] \
 				DMM_MEMORY_SECTION(UARTE(idx)); \
@@ -2676,6 +3531,10 @@ static int uarte_instance_init(const struct device *dev,
 			(IS_ENABLED(CONFIG_UART_NRFX_UARTE_SPURIOUS_RXTO_WORKAROUND) && \
 			 INSTANCE_IS_HIGH_SPEED(_, /*empty*/, idx, _) ? \
 			 UARTE_CFG_FLAG_SPURIOUS_RXTO : 0) | \
+			((IS_ENABLED(UARTE_BAUDRATE_RETENTION_WORKAROUND) && \
+			  UARTE_IS_CACHEABLE(idx)) ? \
+			 UARTE_CFG_FLAG_VOLATILE_BAUDRATE : 0) | \
+			UARTE_HAS_VAR_PRIO(idx) | \
 			USE_LOW_POWER(idx), \
 		UARTE_DISABLE_RX_INIT(UARTE(idx)), \
 		.poll_out_byte = &uarte##idx##_poll_out_byte, \
@@ -2683,6 +3542,8 @@ static int uarte_instance_init(const struct device *dev,
 		IF_ENABLED(CONFIG_UART_##idx##_ASYNC, \
 			(.tx_cache = uarte##idx##_tx_cache, \
 			 .rx_flush_buf = uarte##idx##_flush_buf,)) \
+		IF_ENABLED(CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER, \
+			(UARTE_COUNT_BYTES_WITH_TIMER_CONFIG(idx))) \
 		IF_ENABLED(CONFIG_UART_##idx##_NRF_HW_ASYNC, \
 			(.timer = NRFX_TIMER_INSTANCE( \
 				CONFIG_UART_##idx##_NRF_HW_ASYNC_TIMER),)) \
@@ -2729,5 +3590,4 @@ static int uarte_instance_init(const struct device *dev,
 
 #define COND_UART_NRF_UARTE_DEVICE(unused, prefix, i, _) \
 	IF_ENABLED(CONFIG_HAS_HW_NRF_UARTE##prefix##i, (UART_NRF_UARTE_DEVICE(prefix##i);))
-
 UARTE_FOR_EACH_INSTANCE(COND_UART_NRF_UARTE_DEVICE, (), ())
diff --git a/dts/bindings/serial/nordic,nrf-uarte.yaml b/dts/bindings/serial/nordic,nrf-uarte.yaml
index e6ba4c0b2147..72f509676926 100644
--- a/dts/bindings/serial/nordic,nrf-uarte.yaml
+++ b/dts/bindings/serial/nordic,nrf-uarte.yaml
@@ -14,3 +14,11 @@ properties:
     type: boolean
     description: |
       UARTE has RX frame timeout HW feature.
+
+  timer:
+    type: phandle
+    description: |
+      Timer instance used to count received bytes. Due to issues with the frame
+      timeout feature, it is required for reliable reception when flow control
+      is not used and a new byte can appear on the line after the frame timeout
+      expires but before it is handled.
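+
+      For example, dedicating timer21 to uart21 (the pairing used by the test
+      overlays in this patch):
+
+        &uart21 {
+          timer = <&timer21>;
+        };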
diff --git a/tests/drivers/uart/uart_async_api/boards/nrf54l15dk_nrf54l15_cpuapp.overlay b/tests/drivers/uart/uart_async_api/boards/nrf54l15dk_nrf54l15_cpuapp.overlay index a1e29cbf0ffc..d8995e369711 100644 --- a/tests/drivers/uart/uart_async_api/boards/nrf54l15dk_nrf54l15_cpuapp.overlay +++ b/tests/drivers/uart/uart_async_api/boards/nrf54l15dk_nrf54l15_cpuapp.overlay @@ -42,6 +42,7 @@ dut: &uart21 { pinctrl-1 = <&uart21_sleep_alt>; pinctrl-names = "default", "sleep"; current-speed = <115200>; + timer = <&timer21>; }; dut2: &uart00 { diff --git a/tests/drivers/uart/uart_async_dual/Kconfig b/tests/drivers/uart/uart_async_dual/Kconfig index 6e80ec3a7957..f0087aa48e3b 100644 --- a/tests/drivers/uart/uart_async_dual/Kconfig +++ b/tests/drivers/uart/uart_async_dual/Kconfig @@ -16,5 +16,13 @@ config PM_RUNTIME_IN_TEST select PM_DEVICE select PM_DEVICE_RUNTIME +config TEST_CHOPPED_TX
+ bool "Test chopped TX data"
+ default y
+ help
+ When enabled, test cases that transmit TX packets in random chunks are
+ performed. Some driver implementations do not support the case where new
+ TX data collides with handling of the RX timeout.
+ # Include Zephyr's Kconfig source "Kconfig" diff --git a/tests/drivers/uart/uart_async_dual/boards/nrf54h20dk_nrf54h20_common.dtsi b/tests/drivers/uart/uart_async_dual/boards/nrf54h20dk_nrf54h20_common.dtsi index 79dac0ca9607..84ad9194e62a 100644 --- a/tests/drivers/uart/uart_async_dual/boards/nrf54h20dk_nrf54h20_common.dtsi +++ b/tests/drivers/uart/uart_async_dual/boards/nrf54h20dk_nrf54h20_common.dtsi @@ -60,6 +60,16 @@ }; }; +&timer134 {
+ status = "reserved";
+};
+
+&dppic135 {
+ owned-channels = <0>;
+ source-channels = <0>;
+ status = "okay";
+};
+
 dut: &uart134 { status = "okay"; current-speed = <115200>; @@ -67,6 +77,7 @@ dut: &uart134 { pinctrl-1 = <&uart134_alt_sleep>; pinctrl-names = "default", "sleep"; hw-flow-control; + timer = <&timer134>; zephyr,pm-device-runtime-auto; }; @@ -80,12 +91,23 @@ dut_aux: &uart137 { zephyr,pm-device-runtime-auto; }; +&dppic120 {
+ owned-channels = <0>;
+ source-channels = <0>;
+ status = "okay";
+};
+
+&timer120 {
+ status = "reserved";
+};
+
 dut2: &uart120 { pinctrl-0 = <&uart120_default_alt>; pinctrl-1 = <&uart120_sleep_alt>; pinctrl-names = "default", "sleep"; current-speed = <115200>; hw-flow-control; + timer = <&timer120>; zephyr,pm-device-runtime-auto; }; diff --git a/tests/drivers/uart/uart_async_dual/boards/nrf54l15dk_nrf54l15_cpuapp.overlay b/tests/drivers/uart/uart_async_dual/boards/nrf54l15dk_nrf54l15_cpuapp.overlay index cd0649411531..69c5c1830961 100644 --- a/tests/drivers/uart/uart_async_dual/boards/nrf54l15dk_nrf54l15_cpuapp.overlay +++ b/tests/drivers/uart/uart_async_dual/boards/nrf54l15dk_nrf54l15_cpuapp.overlay @@ -38,6 +38,10 @@ }; }; +&timer21 {
+ status = "reserved";
+};
+
 dut: &uart21 { status = "okay"; current-speed = <115200>; @@ -45,6 +49,8 @@ dut: &uart21 { pinctrl-1 = <&uart21_sleep>; pinctrl-names = "default", "sleep"; hw-flow-control; + timer = <&timer21>; + zephyr,pm-device-runtime-auto; }; dut_aux: &uart22 { @@ -54,6 +60,7 @@ dut_aux: &uart22 { pinctrl-1 = <&uart22_sleep>; pinctrl-names = "default", "sleep"; hw-flow-control; + zephyr,pm-device-runtime-auto; }; &timer20 { diff --git a/tests/drivers/uart/uart_async_dual/src/main.c b/tests/drivers/uart/uart_async_dual/src/main.c index 37ce73120acb..ccc418d14195 100644 --- a/tests/drivers/uart/uart_async_dual/src/main.c +++ b/tests/drivers/uart/uart_async_dual/src/main.c @@ -28,9 +28,10 @@ LOG_MODULE_REGISTER(test); #endif #define TX_TIMEOUT 100000
-#define RX_TIMEOUT 2000 +#define RX_TIMEOUT_BYTES 50 #define MAX_PACKET_LEN 128 +#define MIN_PACKET_LEN 10 struct dut_data { const struct device *dev; @@ -60,6 +61,25 @@ ZTEST_DMEM struct dut_data duts[] = { #endif }; +/* Array that contains potential payload. It is used to memcmp against incoming packets. */ +static const uint8_t test_buf[256] = { + 255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, + 240, 239, 238, 237, 236, 235, 234, 233, 232, 231, 230, 229, 228, 227, 226, + 225, 224, 223, 222, 221, 220, 219, 218, 217, 216, 215, 214, 213, 212, 211, + 210, 209, 208, 207, 206, 205, 204, 203, 202, 201, 200, 199, 198, 197, 196, + 195, 194, 193, 192, 191, 190, 189, 188, 187, 186, 185, 184, 183, 182, 181, + 180, 179, 178, 177, 176, 175, 174, 173, 172, 171, 170, 169, 168, 167, 166, + 165, 164, 163, 162, 161, 160, 159, 158, 157, 156, 155, 154, 153, 152, 151, + 150, 149, 148, 147, 146, 145, 144, 143, 142, 141, 140, 139, 138, 137, 136, + 135, 134, 133, 132, 131, 130, 129, 128, 127, 126, 125, 124, 123, 122, 121, + 120, 119, 118, 117, 116, 115, 114, 113, 112, 111, 110, 109, 108, 107, 106, + 105, 104, 103, 102, 101, 100, 99, 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, + 88, 87, 86, 85, 84, 83, 82, 81, 80, 79, 78, 77, 76, 75, 74, 73, 72, 71, 70, + 69, 68, 67, 66, 65, 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, + 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, + 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, + 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}; + static void pm_check(const struct device *dev, const struct device *second_dev, bool exp_on, int line) { @@ -100,6 +120,7 @@ static const struct device *tx_dev; enum test_tx_mode { TX_BULK, TX_PACKETS, + TX_CHOPPED, }; struct test_tx_data { @@ -111,6 +132,8 @@ struct test_tx_data { volatile bool cont; volatile enum test_tx_mode mode; struct k_sem sem; + uint32_t idx; + uint32_t rx_timeout; }; enum test_rx_state { @@ -121,17 +144,24 @@ enum test_rx_state { enum test_rx_mode { RX_CONT, RX_DIS, + RX_ALL, }; +typedef bool (*test_on_rx_rdy_t)(const struct device *dev, uint8_t *buf, size_t len); + struct test_rx_data { uint8_t hdr[1]; uint8_t buf[256]; uint32_t rx_cnt; + uint32_t payload_idx; enum test_rx_state state; enum test_rx_mode mode; volatile bool cont; bool buf_req; struct k_sem sem; + uint32_t timeout; + uint32_t buf_idx; + test_on_rx_rdy_t on_rx_rdy; }; static struct test_tx_data tx_data; @@ -143,8 +173,8 @@ static void fill_tx(struct test_tx_data *data) uint32_t len; int err; - if (data->mode == TX_PACKETS) { - err = k_sem_take(&data->sem, K_MSEC(100)); + if (data->mode != TX_BULK) { + err = k_sem_take(&data->sem, K_MSEC(200)); if (err < 0 && !data->cont) { return; } @@ -153,9 +183,10 @@ static void fill_tx(struct test_tx_data *data) uint8_t len = sys_rand8_get(); len = len % MAX_PACKET_LEN; - len = MAX(2, len); + len = MAX(MIN_PACKET_LEN, len); data->packet_len = len; + data->idx = 0; for (int i = 0; i < len; i++) { data->buf[i] = len - i; } @@ -163,12 +194,11 @@ static void fill_tx(struct test_tx_data *data) return; } - while ((len = ring_buf_put_claim(&data->rbuf, &buf, 255)) > 1) { + while ((len = ring_buf_put_claim(&data->rbuf, &buf, 255)) > 0) { uint8_t r = (sys_rand8_get() % MAX_PACKET_LEN) % len; - uint8_t packet_len = MAX(r, 2); - uint8_t rem = len - packet_len; + uint8_t packet_len = MAX(r, MIN_PACKET_LEN); - packet_len = (rem < 3) ? len : packet_len; + packet_len = (len <= MIN_PACKET_LEN) ? 
len : packet_len; buf[0] = packet_len; for (int i = 1; i < packet_len; i++) { buf[i] = packet_len - i; @@ -189,7 +219,7 @@ static void try_tx(const struct device *dev, bool irq) return; } - if ((tx_data.mode == TX_PACKETS) && (tx_data.packet_len > 0)) { + if (tx_data.mode == TX_PACKETS) { uint8_t len = tx_data.packet_len; tx_data.packet_len = 0; @@ -199,19 +229,50 @@ static void try_tx(const struct device *dev, bool irq) err, irq, tx_data.cont); return; } - zassert_true(tx_data.mode == TX_BULK); - if (!atomic_cas(&tx_data.busy, 0, 1)) { + if (tx_data.mode == TX_BULK) { + if (!atomic_cas(&tx_data.busy, 0, 1)) { + return; + } + + len = ring_buf_get_claim(&tx_data.rbuf, &buf, 255); + if (len > 0) { + err = uart_tx(dev, buf, len, TX_TIMEOUT); + zassert_equal(err, 0, + "Unexpected err:%d irq:%d cont:%d\n", + err, irq, tx_data.cont); + } else { + tx_data.busy = 0; + } return; } - len = ring_buf_get_claim(&tx_data.rbuf, &buf, 255); - if (len > 0) { - err = uart_tx(dev, buf, len, TX_TIMEOUT); - zassert_equal(err, 0, - "Unexpected err:%d irq:%d cont:%d\n", - err, irq, tx_data.cont); + zassert_true(tx_data.mode == TX_CHOPPED); + + uint32_t rem = tx_data.packet_len - tx_data.idx; + + if (tx_data.packet_len > 12) { + len = sys_rand8_get() % (tx_data.packet_len / 4); + } else { + len = 0; } + len = MAX(3, len); + len = MIN(rem, len); + + buf = &tx_data.buf[tx_data.idx]; + tx_data.idx += len; + + err = uart_tx(dev, buf, len, TX_TIMEOUT); + zassert_equal(err, 0, + "Unexpected err:%d irq:%d cont:%d\n", + err, irq, tx_data.cont); +} + +static void tx_backoff(uint32_t rx_timeout) +{ + uint32_t delay = (rx_timeout / 2) + (sys_rand32_get() % rx_timeout); + + k_busy_wait(delay); } static void on_tx_done(const struct device *dev, struct uart_event *evt) @@ -221,61 +282,117 @@ static void on_tx_done(const struct device *dev, struct uart_event *evt) return; } + if (tx_data.mode == TX_CHOPPED) { + if (tx_data.idx == tx_data.packet_len) { + k_sem_give(&tx_data.sem); + } else { + + tx_backoff(tx_data.rx_timeout); + try_tx(dev, true); + } + return; + } + /* Finish previous data chunk and start new if any pending. 
*/ ring_buf_get_finish(&tx_data.rbuf, evt->data.tx.len); atomic_set(&tx_data.busy, 0); try_tx(dev, true); } -static void on_rx_rdy(const struct device *dev, struct uart_event *evt) +static bool on_rx_rdy_rx_all(const struct device *dev, uint8_t *buf, size_t len) { - uint32_t len = evt->data.rx.len; - uint32_t off = evt->data.rx.offset; - int err; + bool ok; - if (!rx_data.cont) { - return; + if (rx_data.payload_idx == 0) { + rx_data.payload_idx = buf[0] - 1; + buf++; + len--; } - rx_data.rx_cnt += evt->data.rx.len; - if (evt->data.rx.buf == rx_data.hdr) { - rx_data.state = RX_PAYLOAD; - if ((rx_data.mode == RX_CONT) && rx_data.buf_req) { - size_t l = rx_data.hdr[0] - 1; + ok = memcmp(buf, &test_buf[255 - rx_data.payload_idx], len) == 0; + rx_data.payload_idx -= len; - zassert_true(l > 0); - rx_data.buf_req = false; - err = uart_rx_buf_rsp(dev, rx_data.buf, rx_data.hdr[0] - 1); - } - } else { - /* Payload received */ - rx_data.state = RX_HDR; - zassert_equal(len, rx_data.hdr[0] - 1); + return ok; +} - for (int i = 0; i < len; i++) { - bool ok = evt->data.rx.buf[off + i] == (uint8_t)(len - i); +static bool on_rx_rdy_hdr(const struct device *dev, uint8_t *buf, size_t len); - if (!ok) { - LOG_ERR("Unexpected data at %d, exp:%02x got:%02x", - i, len - i, evt->data.rx.buf[off + i]); - } +static bool on_rx_rdy_payload(const struct device *dev, uint8_t *buf, size_t len) +{ + bool ok; + int err; - zassert_true(ok, "Unexpected data at %d, exp:%02x got:%02x", - i, len - i, evt->data.rx.buf[off + i]); + ok = memcmp(buf, &test_buf[255 - rx_data.payload_idx], len) == 0; + if (!ok) { + for (int i = 0; i < len; i++) { + if (buf[i] != test_buf[255 - rx_data.payload_idx + i]) { + zassert_true(false, "Byte %d expected: %02x got: %02x", + i, test_buf[255 - rx_data.payload_idx + i], buf[i]); + } + } + rx_data.cont = false; + tx_data.cont = false; + zassert_true(ok); + return false; + } + + rx_data.payload_idx -= len; + + if (rx_data.payload_idx == 0) { + rx_data.state = RX_HDR; + rx_data.on_rx_rdy = on_rx_rdy_hdr; if ((rx_data.mode == RX_CONT) && rx_data.buf_req) { rx_data.buf_req = false; err = uart_rx_buf_rsp(dev, rx_data.hdr, 1); + zassert_equal(err, 0); } } + + return true; +} + +static bool on_rx_rdy_hdr(const struct device *dev, uint8_t *buf, size_t len) +{ + int err; + + zassert_equal(buf, rx_data.hdr); + zassert_equal(len, 1); + if (rx_data.hdr[0] == 1) { + /* single byte packet.
*/ + if ((rx_data.mode == RX_CONT) && rx_data.buf_req) { + err = uart_rx_buf_rsp(dev, rx_data.hdr, 1); + zassert_equal(err, 0); + } + return true; + } + + zassert_equal(rx_data.payload_idx, 0); + rx_data.on_rx_rdy = on_rx_rdy_payload; + rx_data.payload_idx = rx_data.hdr[0] - 1; + rx_data.state = RX_PAYLOAD; + if ((rx_data.mode == RX_CONT) && rx_data.buf_req) { + size_t l = rx_data.hdr[0] - 1; + + zassert_true(l > 0); + rx_data.buf_req = false; + err = uart_rx_buf_rsp(dev, rx_data.buf, buf[0] - 1); + } + + return true; +} + +static void on_rx_buf_req(const struct device *dev) +{ + if (rx_data.mode != RX_ALL) { + rx_data.buf_req = true; + return; + } + + size_t len = sizeof(rx_data.buf) / 2; + uint8_t *buf = &rx_data.buf[len * rx_data.buf_idx]; + + rx_data.buf_idx = (rx_data.buf_idx + 1) & 0x1; + uart_rx_buf_rsp(dev, buf, len); } static void on_rx_dis(const struct device *dev, struct uart_event *evt, void *user_data) @@ -283,8 +400,17 @@ ARG_UNUSED(evt); struct test_rx_data *data = user_data; int err; - uint8_t *buf = (data->state == RX_HDR) ? data->hdr : data->buf; - uint32_t len = (data->state == RX_HDR) ? 1 : (data->hdr[0] - 1); + uint8_t *buf; + uint32_t len; + + if (data->mode == RX_ALL) { + buf = data->buf; + len = sizeof(data->buf) / 2; + } else { + buf = (data->state == RX_HDR) ? data->hdr : data->buf; + len = (data->state == RX_HDR) ? 1 : (data->hdr[0] - 1); + data->buf_idx = 1; + } data->buf_req = false; @@ -292,9 +418,8 @@ return; } - zassert_true(len > 0); - err = uart_rx_enable(dev, buf, len, RX_TIMEOUT); + err = uart_rx_enable(dev, buf, len, data->timeout); zassert_equal(err, 0, "Unexpected err:%d", err); } @@ -324,14 +449,15 @@ break; case UART_RX_RDY: zassert_true(dev == rx_dev); - on_rx_rdy(dev, evt); + rx_data.on_rx_rdy(dev, &evt->data.rx.buf[evt->data.rx.offset], evt->data.rx.len); + rx_data.rx_cnt += evt->data.rx.len; break; case UART_RX_BUF_RELEASED: zassert_true(dev == rx_dev); break; case UART_RX_BUF_REQUEST: - rx_data.buf_req = true; zassert_true(dev == rx_dev); + on_rx_buf_req(dev); break; case UART_RX_DISABLED: zassert_true(dev == rx_dev); @@ -346,7 +472,7 @@ } } -static void config_baudrate(uint32_t rate) +static void config_baudrate(uint32_t rate, bool hwfc) { struct uart_config config; int err; @@ -354,6 +480,7 @@ err = uart_config_get(rx_dev, &config); zassert_equal(err, 0, "Unexpected err:%d", err); + config.flow_ctrl = hwfc ? UART_CFG_FLOW_CTRL_RTS_CTS : UART_CFG_FLOW_CTRL_NONE; config.baudrate = rate; err = uart_configure(rx_dev, &config); @@ -365,6 +492,26 @@ } } +static void report_progress(uint32_t start) +{ + static const uint32_t inc = CONFIG_UART_ASYNC_DUAL_TEST_TIMEOUT / 20; + static uint32_t next; + static uint32_t progress; + + if ((k_uptime_get_32() - start < inc) && progress) { + /* Reset state. */ + next = inc; + progress = 0; + } + + if (k_uptime_get_32() > (start + next)) { + progress += 5; + TC_PRINT("\r%d%%", progress); + next += inc; + } +} + + /* Test runs the following scenario. The transmitter sends packets which * have a 1-byte header carrying the length, followed by the payload. The transmitter can send * packets in two modes: bulk, where data is sent in chunks without gaps between @@ -376,12 +523,14 @@ * * The test runs the busy simulator if it is enabled in the configuration. */ -static void var_packet_hwfc(uint32_t baudrate, bool tx_packets, bool cont) +static void var_packet(uint32_t baudrate, enum test_tx_mode tx_mode, + enum test_rx_mode rx_mode, bool hwfc) { int err; uint32_t load = 0; + uint32_t start = k_uptime_get_32(); - config_baudrate(baudrate); + config_baudrate(baudrate, hwfc); if (IS_ENABLED(CONFIG_TEST_BUSY_SIM)) { uint32_t active_avg = (baudrate == 1000000) ? 5 : 30; @@ -393,13 +542,15 @@ memset(&tx_data, 0, sizeof(tx_data)); memset(&rx_data, 0, sizeof(rx_data)); tx_data.cont = true; - tx_data.mode = tx_packets ? TX_PACKETS : TX_BULK; - k_sem_init(&tx_data.sem, tx_packets ? 1 : 0, 1); + tx_data.mode = tx_mode; + k_sem_init(&tx_data.sem, (tx_mode != TX_BULK) ? 1 : 0, 1); + rx_data.timeout = (RX_TIMEOUT_BYTES * 1000000 * 10) / baudrate; + tx_data.rx_timeout = rx_data.timeout; rx_data.cont = true; rx_data.rx_cnt = 0; - rx_data.state = RX_HDR; - rx_data.mode = cont ? RX_CONT : RX_DIS; + rx_data.on_rx_rdy = rx_mode == RX_ALL ? on_rx_rdy_rx_all : on_rx_rdy_hdr; + rx_data.mode = rx_mode; ring_buf_init(&tx_data.rbuf, sizeof(tx_data.buf), tx_data.buf); @@ -420,8 +571,10 @@ while (tx_data.cont || rx_data.cont) { fill_tx(&tx_data); k_msleep(1); + report_progress(start); try_tx(tx_dev, false); } + TC_PRINT("\n"); if (IS_ENABLED(CONFIG_CPU_LOAD)) { load = cpu_load_get(true); @@ -436,62 +589,82 @@ /* Flush all TX data that may be already started.
*/ k_msleep(10); - (void)uart_rx_enable(rx_dev, rx_data.buf, sizeof(rx_data.buf), RX_TIMEOUT); + (void)uart_rx_enable(rx_dev, rx_data.buf, sizeof(rx_data.buf), rx_data.timeout); k_msleep(10); (void)uart_rx_disable(rx_dev); k_msleep(10); TC_PRINT("Received %d bytes for %d ms, CPU load:%d.%d\n", rx_data.rx_cnt, CONFIG_UART_ASYNC_DUAL_TEST_TIMEOUT, load / 10, load % 10); - zassert_true(rx_data.rx_cnt > 1000, "Unexected RX cnt: %d", rx_data.rx_cnt); + zassert_true(rx_data.rx_cnt > 1000, "Unexpected RX cnt: %d", rx_data.rx_cnt); } ZTEST(uart_async_dual, test_var_packets_tx_bulk_dis_hwfc) { /* TX in bulk mode, RX in DIS mode, 115k2 */ - var_packet_hwfc(115200, false, false); + var_packet(115200, TX_BULK, RX_DIS, true); } ZTEST(uart_async_dual, test_var_packets_tx_bulk_cont_hwfc) { /* TX in bulk mode, RX in CONT mode, 115k2 */ - var_packet_hwfc(115200, false, true); + var_packet(115200, TX_BULK, RX_CONT, true); } ZTEST(uart_async_dual, test_var_packets_tx_bulk_dis_hwfc_1m) { /* TX in bulk mode, RX in DIS mode, 1M */ - var_packet_hwfc(1000000, false, false); + var_packet(1000000, TX_BULK, RX_DIS, true); } ZTEST(uart_async_dual, test_var_packets_tx_bulk_cont_hwfc_1m) { /* TX in bulk mode, RX in CONT mode, 1M */ - var_packet_hwfc(1000000, false, true); + var_packet(1000000, TX_BULK, RX_CONT, true); } ZTEST(uart_async_dual, test_var_packets_dis_hwfc) { /* TX in packet mode, RX in DIS mode, 115k2 */ - var_packet_hwfc(115200, true, false); + var_packet(115200, TX_PACKETS, RX_DIS, true); } ZTEST(uart_async_dual, test_var_packets_cont_hwfc) { /* TX in packet mode, RX in CONT mode, 115k2 */ - var_packet_hwfc(115200, true, true); + var_packet(115200, TX_PACKETS, RX_CONT, true); } ZTEST(uart_async_dual, test_var_packets_dis_hwfc_1m) { /* TX in packet mode, RX in DIS mode, 1M */ - var_packet_hwfc(1000000, true, false); + var_packet(1000000, TX_PACKETS, RX_DIS, true); } ZTEST(uart_async_dual, test_var_packets_cont_hwfc_1m) { /* TX in packet mode, RX in CONT mode, 1M */ - var_packet_hwfc(1000000, true, true); + var_packet(1000000, TX_PACKETS, RX_CONT, true); +} + +ZTEST(uart_async_dual, test_var_packets_chopped_all) +{ + if (!IS_ENABLED(CONFIG_TEST_CHOPPED_TX)) { + ztest_test_skip(); + } + + /* TX in chopped mode, RX in receive ALL mode, 115k2 */ + var_packet(115200, TX_CHOPPED, RX_ALL, false); +} + +ZTEST(uart_async_dual, test_var_packets_chopped_all_1m) +{ + if (!IS_ENABLED(CONFIG_TEST_CHOPPED_TX)) { + ztest_test_skip(); + } + + /* TX in chopped mode, RX in receive ALL mode, 1M */ + var_packet(1000000, TX_CHOPPED, RX_ALL, false); } static void hci_like_callback(const struct device *dev, struct uart_event *evt, void *user_data) @@ -540,7 +713,7 @@ static bool rx(uint8_t *buf, size_t len) { int err; - err = uart_rx_enable(rx_dev, buf, len, RX_TIMEOUT); + err = uart_rx_enable(rx_dev, buf, len, rx_data.timeout); zassert_equal(err, 0, "Unexpected err:%d", err); err = k_sem_take(&rx_data.sem, K_MSEC(100)); @@ -653,6 +826,7 @@ static void hci_like_rx(void) uint8_t len; bool cont; bool explicit_pm = IS_ENABLED(CONFIG_PM_RUNTIME_IN_TEST); + uint32_t start = k_uptime_get_32(); while (1) { if (explicit_pm) { @@ -704,7 +878,9 @@ static void hci_like_rx(void) PM_CHECK(rx_dev, tx_dev, false); check_payload(rx_data.buf, len); + report_progress(start); } + TC_PRINT("\n"); } #define HCI_LIKE_TX_STACK_SIZE 2048 @@ -725,7 +901,7 @@ static void hci_like_test(uint32_t baudrate) int err; uint32_t load = 0; - config_baudrate(baudrate); + config_baudrate(baudrate, true); if (IS_ENABLED(CONFIG_TEST_BUSY_SIM)) { uint32_t active_avg = 
(baudrate == 1000000) ? 10 : 50; @@ -739,6 +915,7 @@ static void hci_like_test(uint32_t baudrate) tx_data.cnt = 0; tx_data.cont = true; rx_data.cont = true; + rx_data.timeout = (RX_TIMEOUT_BYTES * 1000000 * 10) / baudrate; k_sem_init(&tx_data.sem, 1, 1); k_sem_init(&rx_data.sem, 0, 1); @@ -776,7 +953,7 @@ static void hci_like_test(uint32_t baudrate) k_msleep(10); PM_CHECK(tx_dev, rx_dev, false); - (void)uart_rx_enable(rx_dev, rx_data.buf, sizeof(rx_data.buf), RX_TIMEOUT); + (void)uart_rx_enable(rx_dev, rx_data.buf, sizeof(rx_data.buf), rx_data.timeout); k_msleep(1); (void)uart_rx_disable(rx_dev); diff --git a/tests/drivers/uart/uart_async_dual/sysbuild/vpr_launcher/boards/nrf54h20dk_nrf54h20_cpuapp.overlay b/tests/drivers/uart/uart_async_dual/sysbuild/vpr_launcher/boards/nrf54h20dk_nrf54h20_cpuapp.overlay index fcdc838a54e4..65a2c52016e6 100644 --- a/tests/drivers/uart/uart_async_dual/sysbuild/vpr_launcher/boards/nrf54h20dk_nrf54h20_cpuapp.overlay +++ b/tests/drivers/uart/uart_async_dual/sysbuild/vpr_launcher/boards/nrf54h20dk_nrf54h20_cpuapp.overlay @@ -9,3 +9,15 @@ status = "reserved"; interrupt-parent = <&cpuppr_clic>; }; + +&timer134 { + interrupt-parent = <&cpuppr_clic>; +}; + +&dppic135 { + child-owned-channels = <0>; +}; + +&uart136 { + current-speed = <1000000>; +}; diff --git a/tests/drivers/uart/uart_async_dual/testcase.yaml b/tests/drivers/uart/uart_async_dual/testcase.yaml index 1a2d811bbfe9..1722d5e5e7d1 100644 --- a/tests/drivers/uart/uart_async_dual/testcase.yaml +++ b/tests/drivers/uart/uart_async_dual/testcase.yaml @@ -73,3 +73,18 @@ tests: - nrf52_bsim extra_configs: - CONFIG_PM_RUNTIME_IN_TEST=y + drivers.uart.async_dual.no_tx_chopped: + harness: ztest + harness_config: + fixture: uart_loopback + depends_on: gpio + platform_allow: + - nrf54l15dk/nrf54l15/cpuapp + - nrf54h20dk/nrf54h20/cpuapp + - nrf54h20dk/nrf54h20/cpurad + - nrf54h20dk/nrf54h20/cpuppr + - nrf9160dk/nrf9160 + - nrf52_bsim + extra_configs: + - CONFIG_TEST_CHOPPED_TX=n + - CONFIG_UARTE_NRFX_UARTE_COUNT_BYTES_WITH_TIMER=n diff --git a/west.yml b/west.yml index a6fb40716cea..8ccc70070fa5 100644 --- a/west.yml +++ b/west.yml @@ -200,7 +200,7 @@ manifest: groups: - hal - name: hal_nordic - revision: 9587b1dcb83d24ab74e89837843a5f7d573f7059 + revision: pull/306/head path: modules/hal/nordic groups: - hal
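For reference, the packet format exercised by the dual-UART test above: the first byte carries the total packet length and every following byte counts down from length - 1, which is why any received chunk can be validated against the descending 255..0 test_buf table. A minimal host-side sketch of this framing follows; build_packet and verify_payload are hypothetical helpers, not part of the test sources.

#include <stdint.h>
#include <stdio.h>

/* Fill a packet the way the test's fill_tx() does: buf[0] holds the
 * total length and each byte counts down (buf[i] == len - i).
 */
static void build_packet(uint8_t *buf, uint8_t len)
{
	for (uint8_t i = 0; i < len; i++) {
		buf[i] = len - i;
	}
}

/* Receiver-side check in the spirit of on_rx_rdy_payload(): a payload
 * chunk must match the expected descending sequence.
 */
static int verify_payload(const uint8_t *payload, uint8_t payload_len)
{
	for (uint8_t i = 0; i < payload_len; i++) {
		if (payload[i] != (uint8_t)(payload_len - i)) {
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	uint8_t pkt[32];
	uint8_t len = 16;

	build_packet(pkt, len);
	/* Header carries the total length; payload is len - 1 bytes. */
	printf("hdr=%u payload ok=%d\n", pkt[0],
	       verify_payload(&pkt[1], len - 1) == 0);
	return 0;
}

Because every payload byte encodes its own distance from the end of the packet, the receiver can verify partial chunks (as delivered by UART_RX_RDY events) without buffering the whole packet first.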