Skip to content

Commit d84366b

Browse files
committed
Merge branch 'net-make-memory-provider-install-close-paths-more-common'
Jakub Kicinski says: ==================== net: make memory provider install / close paths more common We seem to be fixing bugs in config path for devmem which also exist in the io_uring ZC path. Let's try to make the two paths more common, otherwise this is bound to keep happening. Found by code inspection and compile tested only. v1: https://lore.kernel.org/20250331194201.2026422-1-kuba@kernel.org ==================== Link: https://patch.msgid.link/20250403013405.2827250-1-kuba@kernel.org Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2 parents 053f3ff + 34f71de commit d84366b

File tree

4 files changed

+62
-65
lines changed

4 files changed

+62
-65
lines changed

include/net/page_pool/memory_provider.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
#include <net/page_pool/types.h>
77

88
struct netdev_rx_queue;
9+
struct netlink_ext_ack;
910
struct sk_buff;
1011

1112
struct memory_provider_ops {
@@ -24,8 +25,13 @@ void net_mp_niov_clear_page_pool(struct net_iov *niov);
2425

2526
int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
2627
struct pp_memory_provider_params *p);
28+
int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
29+
const struct pp_memory_provider_params *p,
30+
struct netlink_ext_ack *extack);
2731
void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
2832
struct pp_memory_provider_params *old_p);
33+
void __net_mp_close_rxq(struct net_device *dev, unsigned int rxq_idx,
34+
const struct pp_memory_provider_params *old_p);
2935

3036
/**
3137
* net_mp_netmem_place_in_cache() - give a netmem to a page pool

net/core/devmem.c

Lines changed: 15 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,6 @@
88
*/
99

1010
#include <linux/dma-buf.h>
11-
#include <linux/ethtool_netlink.h>
1211
#include <linux/genalloc.h>
1312
#include <linux/mm.h>
1413
#include <linux/netdevice.h>
@@ -117,21 +116,19 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
117116
struct netdev_rx_queue *rxq;
118117
unsigned long xa_idx;
119118
unsigned int rxq_idx;
120-
int err;
121119

122120
if (binding->list.next)
123121
list_del(&binding->list);
124122

125123
xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
126-
WARN_ON(rxq->mp_params.mp_priv != binding);
127-
128-
rxq->mp_params.mp_priv = NULL;
129-
rxq->mp_params.mp_ops = NULL;
124+
const struct pp_memory_provider_params mp_params = {
125+
.mp_priv = binding,
126+
.mp_ops = &dmabuf_devmem_ops,
127+
};
130128

131129
rxq_idx = get_netdev_rx_queue_index(rxq);
132130

133-
err = netdev_rx_queue_restart(binding->dev, rxq_idx);
134-
WARN_ON(err && err != -ENETDOWN);
131+
__net_mp_close_rxq(binding->dev, rxq_idx, &mp_params);
135132
}
136133

137134
xa_erase(&net_devmem_dmabuf_bindings, binding->id);
@@ -143,57 +140,28 @@ int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
143140
struct net_devmem_dmabuf_binding *binding,
144141
struct netlink_ext_ack *extack)
145142
{
143+
struct pp_memory_provider_params mp_params = {
144+
.mp_priv = binding,
145+
.mp_ops = &dmabuf_devmem_ops,
146+
};
146147
struct netdev_rx_queue *rxq;
147148
u32 xa_idx;
148149
int err;
149150

150-
if (rxq_idx >= dev->real_num_rx_queues) {
151-
NL_SET_ERR_MSG(extack, "rx queue index out of range");
152-
return -ERANGE;
153-
}
154-
155-
if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
156-
NL_SET_ERR_MSG(extack, "tcp-data-split is disabled");
157-
return -EINVAL;
158-
}
159-
160-
if (dev->cfg->hds_thresh) {
161-
NL_SET_ERR_MSG(extack, "hds-thresh is not zero");
162-
return -EINVAL;
163-
}
151+
err = __net_mp_open_rxq(dev, rxq_idx, &mp_params, extack);
152+
if (err)
153+
return err;
164154

165155
rxq = __netif_get_rx_queue(dev, rxq_idx);
166-
if (rxq->mp_params.mp_ops) {
167-
NL_SET_ERR_MSG(extack, "designated queue already memory provider bound");
168-
return -EEXIST;
169-
}
170-
171-
#ifdef CONFIG_XDP_SOCKETS
172-
if (rxq->pool) {
173-
NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
174-
return -EBUSY;
175-
}
176-
#endif
177-
178156
err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
179157
GFP_KERNEL);
180158
if (err)
181-
return err;
182-
183-
rxq->mp_params.mp_priv = binding;
184-
rxq->mp_params.mp_ops = &dmabuf_devmem_ops;
185-
186-
err = netdev_rx_queue_restart(dev, rxq_idx);
187-
if (err)
188-
goto err_xa_erase;
159+
goto err_close_rxq;
189160

190161
return 0;
191162

192-
err_xa_erase:
193-
rxq->mp_params.mp_priv = NULL;
194-
rxq->mp_params.mp_ops = NULL;
195-
xa_erase(&binding->bound_rxqs, xa_idx);
196-
163+
err_close_rxq:
164+
__net_mp_close_rxq(dev, rxq_idx, &mp_params);
197165
return err;
198166
}
199167

net/core/netdev-genl.c

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -874,12 +874,6 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info)
874874
goto err_unlock;
875875
}
876876

877-
if (dev_xdp_prog_count(netdev)) {
878-
NL_SET_ERR_MSG(info->extack, "unable to bind dmabuf to device with XDP program attached");
879-
err = -EEXIST;
880-
goto err_unlock;
881-
}
882-
883877
binding = net_devmem_bind_dmabuf(netdev, dmabuf_fd, info->extack);
884878
if (IS_ERR(binding)) {
885879
err = PTR_ERR(binding);

net/core/netdev_rx_queue.c

Lines changed: 41 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
// SPDX-License-Identifier: GPL-2.0-or-later
22

3+
#include <linux/ethtool_netlink.h>
34
#include <linux/netdevice.h>
45
#include <net/netdev_lock.h>
56
#include <net/netdev_queues.h>
@@ -86,47 +87,74 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx)
8687
}
8788
EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");
8889

89-
static int __net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
90-
struct pp_memory_provider_params *p)
90+
int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
91+
const struct pp_memory_provider_params *p,
92+
struct netlink_ext_ack *extack)
9193
{
9294
struct netdev_rx_queue *rxq;
9395
int ret;
9496

9597
if (!netdev_need_ops_lock(dev))
9698
return -EOPNOTSUPP;
9799

98-
if (ifq_idx >= dev->real_num_rx_queues)
100+
if (rxq_idx >= dev->real_num_rx_queues)
99101
return -EINVAL;
100-
ifq_idx = array_index_nospec(ifq_idx, dev->real_num_rx_queues);
102+
rxq_idx = array_index_nospec(rxq_idx, dev->real_num_rx_queues);
101103

102-
rxq = __netif_get_rx_queue(dev, ifq_idx);
103-
if (rxq->mp_params.mp_ops)
104+
if (rxq_idx >= dev->real_num_rx_queues) {
105+
NL_SET_ERR_MSG(extack, "rx queue index out of range");
106+
return -ERANGE;
107+
}
108+
if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
109+
NL_SET_ERR_MSG(extack, "tcp-data-split is disabled");
110+
return -EINVAL;
111+
}
112+
if (dev->cfg->hds_thresh) {
113+
NL_SET_ERR_MSG(extack, "hds-thresh is not zero");
114+
return -EINVAL;
115+
}
116+
if (dev_xdp_prog_count(dev)) {
117+
NL_SET_ERR_MSG(extack, "unable to custom memory provider to device with XDP program attached");
104118
return -EEXIST;
119+
}
120+
121+
rxq = __netif_get_rx_queue(dev, rxq_idx);
122+
if (rxq->mp_params.mp_ops) {
123+
NL_SET_ERR_MSG(extack, "designated queue already memory provider bound");
124+
return -EEXIST;
125+
}
126+
#ifdef CONFIG_XDP_SOCKETS
127+
if (rxq->pool) {
128+
NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
129+
return -EBUSY;
130+
}
131+
#endif
105132

106133
rxq->mp_params = *p;
107-
ret = netdev_rx_queue_restart(dev, ifq_idx);
134+
ret = netdev_rx_queue_restart(dev, rxq_idx);
108135
if (ret) {
109136
rxq->mp_params.mp_ops = NULL;
110137
rxq->mp_params.mp_priv = NULL;
111138
}
112139
return ret;
113140
}
114141

115-
int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
142+
int net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
116143
struct pp_memory_provider_params *p)
117144
{
118145
int ret;
119146

120147
netdev_lock(dev);
121-
ret = __net_mp_open_rxq(dev, ifq_idx, p);
148+
ret = __net_mp_open_rxq(dev, rxq_idx, p, NULL);
122149
netdev_unlock(dev);
123150
return ret;
124151
}
125152

126-
static void __net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
127-
struct pp_memory_provider_params *old_p)
153+
void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
154+
const struct pp_memory_provider_params *old_p)
128155
{
129156
struct netdev_rx_queue *rxq;
157+
int err;
130158

131159
if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues))
132160
return;
@@ -146,7 +174,8 @@ static void __net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
146174

147175
rxq->mp_params.mp_ops = NULL;
148176
rxq->mp_params.mp_priv = NULL;
149-
WARN_ON(netdev_rx_queue_restart(dev, ifq_idx));
177+
err = netdev_rx_queue_restart(dev, ifq_idx);
178+
WARN_ON(err && err != -ENETDOWN);
150179
}
151180

152181
void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,

0 commit comments

Comments (0)