 #endif
 LOG_MODULE_REGISTER(usbd_cdc_acm, CONFIG_USBD_CDC_ACM_LOG_LEVEL);

-UDC_BUF_POOL_DEFINE(cdc_acm_ep_pool,
-                    DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) * 2,
-                    USBD_MAX_BULK_MPS, sizeof(struct udc_buf_info), NULL);
-
 #define CDC_ACM_DEFAULT_LINECODING      {sys_cpu_to_le32(115200), 0, 0, 8}
 #define CDC_ACM_DEFAULT_INT_EP_MPS      16
 #define CDC_ACM_INTERVAL_DEFAULT        10000UL
@@ -131,8 +127,15 @@ struct cdc_acm_uart_data {

 static void cdc_acm_irq_rx_enable(const struct device *dev);

-struct net_buf *cdc_acm_buf_alloc(const uint8_t ep)
+#if CONFIG_USBD_CDC_ACM_BUF_POOL
+UDC_BUF_POOL_DEFINE(cdc_acm_ep_pool,
+                    DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) * 2,
+                    USBD_MAX_BULK_MPS, sizeof(struct udc_buf_info), NULL);
+
+static struct net_buf *cdc_acm_buf_alloc(struct usbd_class_data *const c_data,
+                                         const uint8_t ep)
 {
+        ARG_UNUSED(c_data);
         struct net_buf *buf = NULL;
         struct udc_buf_info *bi;

@@ -146,6 +149,17 @@ struct net_buf *cdc_acm_buf_alloc(const uint8_t ep)

         return buf;
 }
+#else
+/*
+ * The required buffer is 128 bytes per instance on a full-speed device. Use
+ * common (UDC) buffer, as this results in a smaller footprint.
+ */
+static struct net_buf *cdc_acm_buf_alloc(struct usbd_class_data *const c_data,
+                                         const uint8_t ep)
+{
+        return usbd_ep_buf_alloc(c_data, ep, USBD_MAX_BULK_MPS);
+}
+#endif /* CONFIG_USBD_CDC_ACM_BUF_POOL */

 #if CONFIG_USBD_CDC_ACM_WORKQUEUE
 static struct k_work_q cdc_acm_work_q;
@@ -635,7 +649,7 @@ static void cdc_acm_tx_fifo_handler(struct k_work *work)
                 return;
         }

-        buf = cdc_acm_buf_alloc(cdc_acm_get_bulk_in(c_data));
+        buf = cdc_acm_buf_alloc(c_data, cdc_acm_get_bulk_in(c_data));
         if (buf == NULL) {
                 atomic_clear_bit(&data->state, CDC_ACM_TX_FIFO_BUSY);
                 cdc_acm_work_schedule(&data->tx_fifo_work, K_MSEC(1));
@@ -669,7 +683,6 @@ static void cdc_acm_rx_fifo_handler(struct k_work *work)
         const struct cdc_acm_uart_config *cfg;
         struct usbd_class_data *c_data;
         struct net_buf *buf;
-        uint8_t ep;
         int ret;

         data = CONTAINER_OF(work, struct cdc_acm_uart_data, rx_fifo_work);
@@ -692,8 +705,7 @@ static void cdc_acm_rx_fifo_handler(struct k_work *work)
                 return;
         }

-        ep = cdc_acm_get_bulk_out(c_data);
-        buf = cdc_acm_buf_alloc(ep);
+        buf = cdc_acm_buf_alloc(c_data, cdc_acm_get_bulk_out(c_data));
         if (buf == NULL) {
                 return;
         }
@@ -703,7 +715,8 @@ static void cdc_acm_rx_fifo_handler(struct k_work *work)

         ret = usbd_ep_enqueue(c_data, buf);
         if (ret) {
-                LOG_ERR("Failed to enqueue net_buf for 0x%02x", ep);
+                LOG_ERR("Failed to enqueue net_buf for 0x%02x",
+                        cdc_acm_get_bulk_out(c_data));
                 net_buf_unref(buf);
         }
 }
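
The hunk above elides the middle of the pool-backed cdc_acm_buf_alloc(). For readers unfamiliar with UDC_BUF_POOL_DEFINE(), the sketch below shows the usual shape of such an allocator: take a net_buf from the dedicated pool and tag it with the target endpoint through the buffer's udc_buf_info user data. The pool name, buffer count/size, and function name here are hypothetical, and this is only an illustration of the pattern, not the driver's actual (elided) function body.

/*
 * Illustrative sketch only: an allocator backed by UDC_BUF_POOL_DEFINE().
 * Pool name, buffer count/size and function name are hypothetical; the
 * driver's real (elided) implementation may differ.
 */
#include <zephyr/kernel.h>
#include <zephyr/drivers/usb/udc_buf.h>

/* Two buffers of up to 512 bytes (high-speed bulk MPS), with a
 * struct udc_buf_info as per-buffer user data and no destroy callback.
 */
UDC_BUF_POOL_DEFINE(example_ep_pool, 2, 512, sizeof(struct udc_buf_info), NULL);

static struct net_buf *example_buf_alloc(const uint8_t ep)
{
        struct udc_buf_info *bi;
        struct net_buf *buf;

        /* Take a buffer from the dedicated pool without blocking. */
        buf = net_buf_alloc(&example_ep_pool, K_NO_WAIT);
        if (buf == NULL) {
                return NULL;
        }

        /* Record the endpoint address the buffer is destined for. */
        bi = udc_get_buf_info(buf);
        bi->ep = ep;

        return buf;
}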