@@ -1341,7 +1341,7 @@ static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
 	if (stmmac_xdp_is_enabled(priv))
 		return XDP_PACKET_HEADROOM;
 
-	return 0;
+	return NET_SKB_PAD;
 }
 
 static int stmmac_set_bfsize(int mtu, int bufsize)
@@ -2040,22 +2040,26 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
 	struct stmmac_channel *ch = &priv->channel[queue];
 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
 	struct page_pool_params pp_params = { 0 };
-	unsigned int num_pages;
+	unsigned int dma_buf_sz_pad, num_pages;
 	unsigned int napi_id;
 	int ret;
 
+	dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
+			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
+
 	rx_q->queue_index = queue;
 	rx_q->priv_data = priv;
+	rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
 
 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
 	pp_params.pool_size = dma_conf->dma_rx_size;
-	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
-	pp_params.order = ilog2(num_pages);
+	pp_params.order = order_base_2(num_pages);
 	pp_params.nid = dev_to_node(priv->device);
 	pp_params.dev = priv->device;
 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
 	pp_params.offset = stmmac_rx_offset(priv);
-	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
+	pp_params.max_len = dma_conf->dma_buf_sz;
 
 	rx_q->page_pool = page_pool_create(&pp_params);
 	if (IS_ERR(rx_q->page_pool)) {
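Note on the sizing above: the padded length now covers the RX headroom, the packet buffer, and the skb_shared_info tailroom, and order_base_2() rounds the resulting page count up to the next power of two, whereas the old ilog2() rounded it down (for example, a 3-page requirement would have produced an order-1, 2-page allocation). A minimal user-space sketch of the same arithmetic, using assumed typical 64-bit values for NET_SKB_PAD, PAGE_SIZE, and the aligned skb_shared_info size (illustrative constants, not taken from the patch):

/* Illustrative only: mirrors the dma_buf_sz_pad/order computation with
 * assumed constants (NET_SKB_PAD = 64, PAGE_SIZE = 4096, and roughly
 * 320 bytes for SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) on a
 * typical 64-bit build). */
#include <stdio.h>

#define ASSUMED_NET_SKB_PAD	64u
#define ASSUMED_PAGE_SIZE	4096u
#define ASSUMED_SHINFO_SIZE	320u

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Smallest power-of-two exponent covering n, like the kernel's order_base_2() */
static unsigned int order_base_2(unsigned int n)
{
	unsigned int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned int dma_buf_sz = 1536;	/* e.g. the buffer size commonly used for a 1500-byte MTU */
	unsigned int dma_buf_sz_pad, num_pages;

	/* headroom + packet area + tailroom for skb_shared_info */
	dma_buf_sz_pad = ASSUMED_NET_SKB_PAD + dma_buf_sz + ASSUMED_SHINFO_SIZE;
	num_pages = DIV_ROUND_UP(dma_buf_sz_pad, ASSUMED_PAGE_SIZE);

	printf("padded size %u -> %u page(s), page_pool order %u\n",
	       dma_buf_sz_pad, num_pages, order_base_2(num_pages));
	return 0;
}

With PP_FLAG_DMA_SYNC_DEV set, pp_params.offset plus pp_params.max_len bounds the region page_pool syncs for the device, so max_len now matches just the packet area the hardware can write.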
@@ -5504,10 +5508,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 
 		/* Buffer is good. Go on. */
 
-		prefetch(page_address(buf->page) + buf->page_offset);
-		if (buf->sec_page)
-			prefetch(page_address(buf->sec_page));
-
 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
 		len += buf1_len;
 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
@@ -5529,6 +5529,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 
 			dma_sync_single_for_cpu(priv->device, buf->addr,
 						buf1_len, dma_dir);
+			net_prefetch(page_address(buf->page) +
+				     buf->page_offset);
 
 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
@@ -5582,22 +5584,26 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 		}
 
 		if (!skb) {
+			unsigned int head_pad_len;
+
 			/* XDP program may expand or reduce tail */
 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
 
-			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
+			skb = napi_build_skb(page_address(buf->page),
+					     rx_q->napi_skb_frag_size);
 			if (!skb) {
+				page_pool_recycle_direct(rx_q->page_pool,
+							 buf->page);
 				rx_dropped++;
 				count++;
 				goto drain_data;
 			}
 
 			/* XDP program may adjust header */
-			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
+			head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
+			skb_reserve(skb, head_pad_len);
 			skb_put(skb, buf1_len);
-
-			/* Data payload copied into SKB, page ready for recycle */
-			page_pool_recycle_direct(rx_q->page_pool, buf->page);
+			skb_mark_for_recycle(skb);
 			buf->page = NULL;
 		} else if (buf1_len) {
 			dma_sync_single_for_cpu(priv->device, buf->addr,
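The net effect of this hunk: the non-XDP path no longer allocates a fresh skb and copies the payload into it; it builds the skb in place over the page-pool buffer and lets the skb return the page to the pool when it is freed. A hedged kernel-context sketch of that pattern (hypothetical helper name rx_build_skb_example, not the driver's exact code; the page_pool header path is assumed as on recent kernels):

/* Illustrative sketch only: build an skb directly on a page_pool buffer
 * instead of napi_alloc_skb() + copy + immediate recycle. */
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>	/* assumed header path (kernels with split page_pool headers) */

static struct sk_buff *rx_build_skb_example(struct page_pool *pool,
					    struct page *page,
					    unsigned int frag_size,
					    unsigned int headroom,
					    unsigned int pkt_len)
{
	/* frag_size must include tailroom for struct skb_shared_info;
	 * this is what rx_q->napi_skb_frag_size caches in the earlier hunk. */
	struct sk_buff *skb = napi_build_skb(page_address(page), frag_size);

	if (!skb) {
		/* The page still belongs to the driver: put it back in the pool. */
		page_pool_recycle_direct(pool, page);
		return NULL;
	}

	skb_reserve(skb, headroom);	/* skip headroom (and any XDP head adjustment) */
	skb_put(skb, pkt_len);		/* expose the received payload */
	skb_mark_for_recycle(skb);	/* recycle the page when the skb is freed */

	return skb;
}

In the hunk itself, that headroom is head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start, i.e. the buffer's page_offset (stmmac_rx_offset(), now NET_SKB_PAD per the first hunk) plus whatever head adjustment an XDP program made.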