
Commit 8a2b61e

edumazet authored and kuba-moo committed
net: no longer assume RTNL is held in flush_all_backlogs()
flush_all_backlogs() uses per-cpu and static data to hold its temporary data, on the assumption that it is called under RTNL protection.

The following patch in the series will break this assumption.

Instead, use a dynamically allocated piece of memory. In the unlikely case the allocation fails, fall back to memory allocated at boot time.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Jesse Brandeburg <jbrandeburg@cloudflare.com>
Link: https://patch.msgid.link/20250114205531.967841-3-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
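To make the approach concrete, here is a minimal user-space sketch of the same allocate-or-fall-back pattern, not the kernel code itself: try a dynamic allocation sized for a trailing per-CPU array, and if it fails, reuse a buffer allocated once at startup, serialized by a mutex. All names below (flush_ctx, nr_cpu_ids_sim, fallback_ctx, do-nothing "work" slots) are illustrative stand-ins, not kernel APIs.

/* Sketch of: dynamic allocation with a pre-allocated, mutex-protected fallback. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct flush_ctx {
	unsigned long cpus_mask;	/* stand-in for cpumask_t */
	int work[];			/* stand-in for struct work_struct w[] */
};

static unsigned int nr_cpu_ids_sim = 8;		/* assumed CPU count */
static struct flush_ctx *fallback_ctx;		/* allocated once at startup */
static pthread_mutex_t fallback_lock = PTHREAD_MUTEX_INITIALIZER;

static struct flush_ctx *flush_ctx_alloc(void)
{
	/* header plus one slot per possible CPU, like struct_size_t() computes */
	return malloc(sizeof(struct flush_ctx) + nr_cpu_ids_sim * sizeof(int));
}

static void flush_all(void)
{
	struct flush_ctx *ptr = flush_ctx_alloc();
	unsigned int cpu;

	if (!ptr) {			/* unlikely: use the shared fallback */
		pthread_mutex_lock(&fallback_lock);
		ptr = fallback_ctx;
	}

	ptr->cpus_mask = 0;
	for (cpu = 0; cpu < nr_cpu_ids_sim; cpu++) {
		ptr->work[cpu] = (int)cpu;	/* queue per-CPU work here */
		ptr->cpus_mask |= 1UL << cpu;	/* remember which CPUs to wait on */
	}

	if (ptr != fallback_ctx)
		free(ptr);
	else
		pthread_mutex_unlock(&fallback_lock);
}

int main(void)
{
	fallback_ctx = flush_ctx_alloc();	/* mirrors the init-time fallback */
	if (!fallback_ctx)
		return 1;
	flush_all();
	puts("flushed");
	free(fallback_ctx);
	return 0;
}

Because callers no longer share static data, the function no longer requires RTNL; concurrent callers that all hit the allocation-failure path simply serialize on the fallback mutex.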
1 parent 0734d7c

1 file changed: +35, -18 lines

net/core/dev.c

Lines changed: 35 additions & 18 deletions

@@ -6013,8 +6013,6 @@ void netif_receive_skb_list(struct list_head *head)
 }
 EXPORT_SYMBOL(netif_receive_skb_list);
 
-static DEFINE_PER_CPU(struct work_struct, flush_works);
-
 /* Network device is going away, flush any packets still pending */
 static void flush_backlog(struct work_struct *work)
 {
@@ -6071,36 +6069,54 @@ static bool flush_required(int cpu)
 	return true;
 }
 
+struct flush_backlogs {
+	cpumask_t flush_cpus;
+	struct work_struct w[];
+};
+
+static struct flush_backlogs *flush_backlogs_alloc(void)
+{
+	return kmalloc(struct_size_t(struct flush_backlogs, w, nr_cpu_ids),
+		       GFP_KERNEL);
+}
+
+static struct flush_backlogs *flush_backlogs_fallback;
+static DEFINE_MUTEX(flush_backlogs_mutex);
+
 static void flush_all_backlogs(void)
 {
-	static cpumask_t flush_cpus;
+	struct flush_backlogs *ptr = flush_backlogs_alloc();
 	unsigned int cpu;
 
-	/* since we are under rtnl lock protection we can use static data
-	 * for the cpumask and avoid allocating on stack the possibly
-	 * large mask
-	 */
-	ASSERT_RTNL();
+	if (!ptr) {
+		mutex_lock(&flush_backlogs_mutex);
+		ptr = flush_backlogs_fallback;
+	}
+	cpumask_clear(&ptr->flush_cpus);
 
 	cpus_read_lock();
 
-	cpumask_clear(&flush_cpus);
 	for_each_online_cpu(cpu) {
 		if (flush_required(cpu)) {
-			queue_work_on(cpu, system_highpri_wq,
-				      per_cpu_ptr(&flush_works, cpu));
-			cpumask_set_cpu(cpu, &flush_cpus);
+			INIT_WORK(&ptr->w[cpu], flush_backlog);
+			queue_work_on(cpu, system_highpri_wq, &ptr->w[cpu]);
+			__cpumask_set_cpu(cpu, &ptr->flush_cpus);
 		}
 	}
 
 	/* we can have in flight packet[s] on the cpus we are not flushing,
 	 * synchronize_net() in unregister_netdevice_many() will take care of
-	 * them
+	 * them.
 	 */
-	for_each_cpu(cpu, &flush_cpus)
-		flush_work(per_cpu_ptr(&flush_works, cpu));
+	for_each_cpu(cpu, &ptr->flush_cpus)
+		flush_work(&ptr->w[cpu]);
 
 	cpus_read_unlock();
+
+	if (ptr != flush_backlogs_fallback)
+		kfree(ptr);
+	else
+		mutex_unlock(&flush_backlogs_mutex);
 }
 
 static void net_rps_send_ipi(struct softnet_data *remsd)
@@ -12313,12 +12329,13 @@ static int __init net_dev_init(void)
 	 * Initialise the packet receive queues.
 	 */
 
+	flush_backlogs_fallback = flush_backlogs_alloc();
+	if (!flush_backlogs_fallback)
+		goto out;
+
 	for_each_possible_cpu(i) {
-		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
 		struct softnet_data *sd = &per_cpu(softnet_data, i);
 
-		INIT_WORK(flush, flush_backlog);
-
 		skb_queue_head_init(&sd->input_pkt_queue);
 		skb_queue_head_init(&sd->process_queue);
 #ifdef CONFIG_XFRM_OFFLOAD
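A note on the sizing helper: struct_size_t(struct flush_backlogs, w, nr_cpu_ids) in flush_backlogs_alloc() computes the size of the header plus nr_cpu_ids trailing struct work_struct entries, with overflow checking. A rough user-space sketch of that arithmetic follows; the stand-in types and the CPU count are illustrative assumptions, and the kernel macro's overflow saturation is omitted.

/* Sketch of the flexible-array sizing performed for the allocation above. */
#include <stddef.h>
#include <stdio.h>

struct work_item { void (*func)(void *); };	/* stand-in for struct work_struct */

struct flush_backlogs_sim {
	unsigned long flush_cpus;	/* stand-in for cpumask_t */
	struct work_item w[];		/* flexible array member */
};

int main(void)
{
	size_t nr_cpu_ids = 64;		/* illustrative CPU count */
	size_t sz = offsetof(struct flush_backlogs_sim, w) +
		    nr_cpu_ids * sizeof(struct work_item);

	printf("allocation size: %zu bytes\n", sz);
	return 0;
}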
