@@ -6,6 +6,7 @@

 #ifdef CONFIG_UART_ASYNC_API
 #include <zephyr/drivers/dma.h>
+#include <wrap_max32_dma.h>
 #endif
 #include <zephyr/drivers/pinctrl.h>
 #include <zephyr/drivers/uart.h>
@@ -43,9 +44,15 @@ struct max32_uart_config {
 };

 #ifdef CONFIG_UART_ASYNC_API
+#define MAX32_UART_TX_CACHE_NUM 2
 struct max32_uart_async_tx {
 	const uint8_t *buf;
+	const uint8_t *src;
 	size_t len;
+	uint8_t cache[MAX32_UART_TX_CACHE_NUM][CONFIG_UART_TX_CACHE_LEN];
+	uint8_t cache_id;
+	struct dma_block_config dma_blk;
+	int32_t timeout;
 	struct k_work_delayable timeout_work;
 };

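Together these additions set up a double-buffered ("ping-pong") TX path: `cache` provides two `CONFIG_UART_TX_CACHE_LEN`-byte bounce buffers in SRAM, `cache_id` selects the buffer the DMA is currently draining, `src` tracks the read position in the caller's buffer, and `dma_blk` and `timeout` persist the transfer parameters so the completion callback can set up the next chunk.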
@@ -86,6 +93,10 @@ struct max32_uart_data {
 static void uart_max32_isr(const struct device *dev);
 #endif

+#ifdef CONFIG_UART_ASYNC_API
+static int uart_max32_tx_dma_load(const struct device *dev, uint8_t *buf, size_t len);
+#endif
+
 static void api_poll_out(const struct device *dev, unsigned char c)
 {
 	const struct max32_uart_config *cfg = dev->config;
@@ -492,13 +503,22 @@ static void async_user_callback(const struct device *dev, struct uart_event *evt)
 	}
 }

+static uint32_t load_tx_cache(const uint8_t *src, size_t len, uint8_t *dest)
+{
+	memcpy(dest, src, MIN(len, CONFIG_UART_TX_CACHE_LEN));
+
+	return MIN(len, CONFIG_UART_TX_CACHE_LEN);
+}
+
 static void uart_max32_async_tx_callback(const struct device *dma_dev, void *user_data,
 					 uint32_t channel, int status)
 {
 	const struct device *dev = user_data;
 	const struct max32_uart_config *config = dev->config;
 	struct max32_uart_data *data = dev->data;
+	struct max32_uart_async_tx *tx = &data->async.tx;
 	struct dma_status dma_stat;
+	int ret;

 	unsigned int key = irq_lock();

@@ -509,17 +529,73 @@ static void uart_max32_async_tx_callback(const struct device *dma_dev, void *user_data,
 		return;
 	}

-	k_work_cancel_delayable(&data->async.tx.timeout_work);
+	k_work_cancel_delayable(&tx->timeout_work);
 	Wrap_MXC_UART_DisableTxDMA(config->regs);

 	irq_unlock(key);

-	struct uart_event tx_done = {
-		.type = status == 0 ? UART_TX_DONE : UART_TX_ABORTED,
-		.data.tx.buf = data->async.tx.buf,
-		.data.tx.len = data->async.tx.len,
-	};
-	async_user_callback(dev, &tx_done);
+	tx->len -= tx->dma_blk.block_size;
+	if (tx->len > 0) {
+		tx->cache_id = !(tx->cache_id);
+		ret = uart_max32_tx_dma_load(dev, tx->cache[tx->cache_id],
+					     MIN(tx->len, CONFIG_UART_TX_CACHE_LEN));
+		if (ret < 0) {
+			LOG_ERR("Error configuring Tx DMA (%d)", ret);
+			return;
+		}
+
+		ret = dma_start(config->tx_dma.dev, config->tx_dma.channel);
+		if (ret < 0) {
+			LOG_ERR("Error starting Tx DMA (%d)", ret);
+			return;
+		}
+
+		async_timer_start(&tx->timeout_work, tx->timeout);
+
+		Wrap_MXC_UART_SetTxDMALevel(config->regs, 2);
+		Wrap_MXC_UART_EnableTxDMA(config->regs);
+
+		/* Load next chunk as well */
+		if (tx->len > CONFIG_UART_TX_CACHE_LEN) {
+			tx->src += load_tx_cache(tx->src, tx->len - CONFIG_UART_TX_CACHE_LEN,
+						 tx->cache[!(tx->cache_id)]);
+		}
+	} else {
+		struct uart_event tx_done = {
+			.type = status == 0 ? UART_TX_DONE : UART_TX_ABORTED,
+			.data.tx.buf = tx->buf,
+			.data.tx.len = tx->len,
+		};
+		async_user_callback(dev, &tx_done);
+	}
+}
+
+static int uart_max32_tx_dma_load(const struct device *dev, uint8_t *buf, size_t len)
+{
+	int ret;
+	const struct max32_uart_config *config = dev->config;
+	struct max32_uart_data *data = dev->data;
+	struct dma_config dma_cfg = {0};
+	struct dma_block_config *dma_blk = &data->async.tx.dma_blk;
+
+	dma_cfg.channel_direction = MEMORY_TO_PERIPHERAL;
+	dma_cfg.dma_callback = uart_max32_async_tx_callback;
+	dma_cfg.user_data = (void *)dev;
+	dma_cfg.dma_slot = config->tx_dma.slot;
+	dma_cfg.block_count = 1;
+	dma_cfg.source_data_size = 1U;
+	dma_cfg.source_burst_length = 1U;
+	dma_cfg.dest_data_size = 1U;
+	dma_cfg.head_block = dma_blk;
+	dma_blk->block_size = len;
+	dma_blk->source_address = (uint32_t)buf;
+
+	ret = dma_config(config->tx_dma.dev, config->tx_dma.channel, &dma_cfg);
+	if (ret < 0) {
+		return ret;
+	}
+
+	return 0;
 }

 static int api_callback_set(const struct device *dev, uart_callback_t callback, void *user_data)
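Two details in the refactor above are worth noting: the DMA block config now lives in `data->async.tx.dma_blk` rather than on the `api_tx()` stack, so it remains valid when the completion callback reads `tx->dma_blk.block_size` long after `api_tx()` has returned; and on each completion the callback flips `cache_id`, restarts the DMA from the other cache buffer, and only then refills the now-idle buffer with the following chunk while the transfer runs.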
@@ -537,15 +613,14 @@ static int api_tx(const struct device *dev, const uint8_t *buf, size_t len, int32_t timeout)
 	struct max32_uart_data *data = dev->data;
 	const struct max32_uart_config *config = dev->config;
 	struct dma_status dma_stat;
-	struct dma_config dma_cfg = {0};
-	struct dma_block_config dma_blk = {0};
 	int ret;
+	bool use_cache = false;
 	unsigned int key = irq_lock();

 	if (config->tx_dma.channel == 0xFF) {
 		LOG_ERR("Tx DMA channel is not configured");
-		irq_unlock(key);
-		return -ENOTSUP;
+		ret = -ENOTSUP;
+		goto unlock;
 	}

 	ret = dma_get_status(config->tx_dma.dev, config->tx_dma.channel, &dma_stat);
@@ -557,38 +632,37 @@ static int api_tx(const struct device *dev, const uint8_t *buf, size_t len, int32_t timeout)

 	data->async.tx.buf = buf;
 	data->async.tx.len = len;
+	data->async.tx.src = data->async.tx.buf;
+
+	if (((uint32_t)buf < MXC_SRAM_MEM_BASE) ||
+	    (((uint32_t)buf + len) > (MXC_SRAM_MEM_BASE + MXC_SRAM_MEM_SIZE))) {
+		use_cache = true;
+		len = load_tx_cache(data->async.tx.src, MIN(len, CONFIG_UART_TX_CACHE_LEN),
+				    data->async.tx.cache[0]);
+		data->async.tx.src += len;
+		data->async.tx.cache_id = 0;
+	}

-	dma_cfg.channel_direction = MEMORY_TO_PERIPHERAL;
-	dma_cfg.dma_callback = uart_max32_async_tx_callback;
-	dma_cfg.user_data = (void *)dev;
-	dma_cfg.dma_slot = config->tx_dma.slot;
-	dma_cfg.block_count = 1;
-	dma_cfg.source_data_size = 1U;
-	dma_cfg.source_burst_length = 1U;
-	dma_cfg.dest_data_size = 1U;
-	dma_cfg.head_block = &dma_blk;
-	dma_blk.block_size = len;
-	dma_blk.source_address = (uint32_t)buf;
-
-	ret = dma_config(config->tx_dma.dev, config->tx_dma.channel, &dma_cfg);
+	ret = uart_max32_tx_dma_load(dev, use_cache ? data->async.tx.cache[0] : ((uint8_t *)buf),
+				     len);
 	if (ret < 0) {
 		LOG_ERR("Error configuring Tx DMA (%d)", ret);
-		irq_unlock(key);
-		return ret;
+		goto unlock;
 	}

 	ret = dma_start(config->tx_dma.dev, config->tx_dma.channel);
 	if (ret < 0) {
 		LOG_ERR("Error starting Tx DMA (%d)", ret);
-		irq_unlock(key);
-		return ret;
+		goto unlock;
 	}

+	data->async.tx.timeout = timeout;
 	async_timer_start(&data->async.tx.timeout_work, timeout);

 	Wrap_MXC_UART_SetTxDMALevel(config->regs, 2);
 	Wrap_MXC_UART_EnableTxDMA(config->regs);

+unlock:
 	irq_unlock(key);

 	return ret;
@@ -709,6 +783,12 @@ static void uart_max32_async_rx_callback(const struct device *dma_dev, void *user_data,
 	unsigned int key = irq_lock();

 	dma_get_status(config->rx_dma.dev, config->rx_dma.channel, &dma_stat);
+
+	if (dma_stat.pending_length > 0) {
+		irq_unlock(key);
+		return;
+	}
+
 	total_rx = async->rx.len - dma_stat.pending_length;

 	api_irq_rx_disable(dev);
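The new `pending_length` guard turns the RX callback into a no-op while the channel still reports undrained bytes, so the `UART_RX_RDY` accounting below only runs for a completed transfer.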
@@ -717,7 +797,6 @@ static void uart_max32_async_rx_callback(const struct device *dma_dev, void *user_data,

 	if (total_rx > async->rx.offset) {
 		async->rx.counter = total_rx - async->rx.offset;
-
 		struct uart_event rdy_event = {
 			.type = UART_RX_RDY,
 			.data.rx.buf = async->rx.buf,
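For callers, the practical effect of this change is that async TX no longer requires the buffer to live in SRAM: data outside the `MXC_SRAM_MEM_BASE`..`MXC_SRAM_MEM_BASE + MXC_SRAM_MEM_SIZE` window, such as a string constant in flash, is now bounced through the SRAM caches chunk by chunk. A minimal caller sketch, assuming `CONFIG_UART_ASYNC_API=y`; the `uart0` node label, callback, and timeout value are illustrative and not part of this diff:

#include <zephyr/device.h>
#include <zephyr/drivers/uart.h>

/* Lives in .rodata (flash), i.e. outside the SRAM window. */
static const char msg[] = "hello from flash";

static void tx_cb(const struct device *dev, struct uart_event *evt, void *user_data)
{
	if (evt->type == UART_TX_DONE) {
		/* All chunks have been drained through the TX caches. */
	}
}

void send_from_flash(void)
{
	const struct device *uart = DEVICE_DT_GET(DT_NODELABEL(uart0));

	uart_callback_set(uart, tx_cb, NULL);
	/* The async API timeout is in microseconds. */
	uart_tx(uart, (const uint8_t *)msg, sizeof(msg) - 1, 100000);
}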