@@ -26,6 +26,11 @@
 #include "zcrx.h"
 #include "rsrc.h"
 
+static inline struct io_zcrx_ifq *io_pp_to_ifq(struct page_pool *pp)
+{
+	return pp->mp_priv;
+}
+
 #define IO_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
 
 static void __io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
@@ -586,7 +591,7 @@ static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
 
 static netmem_ref io_pp_zc_alloc_netmems(struct page_pool *pp, gfp_t gfp)
 {
-	struct io_zcrx_ifq *ifq = pp->mp_priv;
+	struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);
 
 	/* pp should already be ensuring that */
 	if (unlikely(pp->alloc.count))
@@ -618,7 +623,7 @@ static bool io_pp_zc_release_netmem(struct page_pool *pp, netmem_ref netmem)
 
 static int io_pp_zc_init(struct page_pool *pp)
 {
-	struct io_zcrx_ifq *ifq = pp->mp_priv;
+	struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);
 
 	if (WARN_ON_ONCE(!ifq))
 		return -EINVAL;
@@ -637,7 +642,7 @@ static int io_pp_zc_init(struct page_pool *pp)
 
 static void io_pp_zc_destroy(struct page_pool *pp)
 {
-	struct io_zcrx_ifq *ifq = pp->mp_priv;
+	struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);
	struct io_zcrx_area *area = ifq->area;
 
 	if (WARN_ON_ONCE(area->free_count != area->nia.num_niovs))
@@ -792,7 +797,7 @@ static int io_zcrx_recv_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
 
 	niov = netmem_to_net_iov(frag->netmem);
 	if (niov->pp->mp_ops != &io_uring_pp_zc_ops ||
-	    niov->pp->mp_priv != ifq)
+	    io_pp_to_ifq(niov->pp) != ifq)
 		return -EFAULT;
 
 	if (!io_zcrx_queue_cqe(req, niov, ifq, off + skb_frag_off(frag), len))
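
The diff above folds every open-coded pp->mp_priv dereference into the new io_pp_to_ifq() helper, so the untyped pointer is read in exactly one place. A minimal standalone sketch of that accessor pattern, using simplified stand-in definitions rather than the real kernel struct page_pool and struct io_zcrx_ifq, might look like this:

#include <stdio.h>

/* Illustrative stand-ins only; the real kernel types carry many more
 * fields and live in the io_uring and page_pool headers. */
struct io_zcrx_ifq {
	int if_rxq;
};

struct page_pool {
	void *mp_priv;		/* memory-provider private data */
};

/* Single typed accessor: callers never touch the void pointer directly,
 * so the conversion from mp_priv lives in one spot. */
static inline struct io_zcrx_ifq *io_pp_to_ifq(struct page_pool *pp)
{
	return pp->mp_priv;
}

int main(void)
{
	struct io_zcrx_ifq ifq = { .if_rxq = 1 };
	struct page_pool pp = { .mp_priv = &ifq };

	/* Equivalent of the converted call sites in the diff. */
	printf("ifq rxq = %d\n", io_pp_to_ifq(&pp)->if_rxq);
	return 0;
}

Centralising the lookup this way keeps any later change to how the ifq is stored or validated out of the individual call sites.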