
Commit 97eb35f

kkdwivedi authored and Alexei Starovoitov committed
bpf: Introduce rqspinlock kfuncs
Introduce four new kfuncs, bpf_res_spin_lock and bpf_res_spin_unlock, and their irqsave/irqrestore variants, which wrap the rqspinlock APIs. bpf_res_spin_lock returns a conditional result depending on whether the lock was acquired: NULL is returned when lock acquisition succeeds, non-NULL upon failure. On failure, the memory pointed to by the returned pointer can be dereferenced after the NULL check to obtain the error code.

Instead of reusing the old bpf_spin_lock type, introduce a new type with the same layout and alignment but a different name, to avoid type confusion.

Preemption is disabled upon successful lock acquisition; IRQs are not. Special kfuncs can be introduced later to allow disabling IRQs when taking a spin lock. Resilient locks are safe against AA deadlocks, hence not disabling IRQs currently does not allow a violation of kernel safety.

The __irq_flag annotation is used to accept IRQ flags for the IRQ variants, with the same semantics as the existing bpf_local_irq_{save,restore}.

These kfuncs will require additional verifier-side support in subsequent commits to allow programs to hold multiple locks at the same time.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20250316040541.108729-23-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
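As a usage illustration (not part of this commit), a BPF program could embed the new lock type in a map value and bail out when acquisition fails. The includes, map, value layout, and extern kfunc declarations below are assumptions made for this sketch, not definitions from the patch:

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical BPF-side declarations of the new kfuncs; the verifier
 * models the failure return via KF_RET_NULL, so a non-zero result
 * signals that the lock was not acquired.
 */
extern int bpf_res_spin_lock(struct bpf_res_spin_lock *lock) __ksym __weak;
extern void bpf_res_spin_unlock(struct bpf_res_spin_lock *lock) __ksym __weak;

/* Illustrative map value embedding the lock next to the data it guards. */
struct val_elem {
	struct bpf_res_spin_lock lock;
	u64 counter;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct val_elem);
} val_map SEC(".maps");

SEC("tc")
int inc_counter(struct __sk_buff *ctx)
{
	struct val_elem *v;
	int key = 0;

	v = bpf_map_lookup_elem(&val_map, &key);
	if (!v)
		return 0;
	/* A non-zero result means acquisition failed (a deadlock or
	 * timeout was detected); the critical section must be skipped.
	 */
	if (bpf_res_spin_lock(&v->lock))
		return 0;
	v->counter++;
	bpf_res_spin_unlock(&v->lock);
	return 0;
}

char _license[] SEC("license") = "GPL";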
1 parent 4797931 commit 97eb35f

3 files changed: 82 additions & 0 deletions

include/asm-generic/rqspinlock.h

Lines changed: 7 additions & 0 deletions
@@ -23,6 +23,13 @@ struct rqspinlock {
 	};
 };
 
+/* Even though this is same as struct rqspinlock, we need to emit a distinct
+ * type in BTF for BPF programs.
+ */
+struct bpf_res_spin_lock {
+	u32 val;
+};
+
 struct qspinlock;
 #ifdef CONFIG_QUEUED_SPINLOCKS
 typedef struct qspinlock rqspinlock_t;

include/linux/bpf.h

Lines changed: 1 addition & 0 deletions
@@ -30,6 +30,7 @@
 #include <linux/static_call.h>
 #include <linux/memcontrol.h>
 #include <linux/cfi.h>
+#include <asm/rqspinlock.h>
 
 struct bpf_verifier_env;
 struct bpf_verifier_log;

kernel/bpf/rqspinlock.c

Lines changed: 74 additions & 0 deletions
@@ -15,6 +15,8 @@
 
 #include <linux/smp.h>
 #include <linux/bug.h>
+#include <linux/bpf.h>
+#include <linux/err.h>
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
@@ -661,3 +663,75 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 EXPORT_SYMBOL_GPL(resilient_queued_spin_lock_slowpath);
 
 #endif /* CONFIG_QUEUED_SPINLOCKS */
+
+__bpf_kfunc_start_defs();
+
+__bpf_kfunc int bpf_res_spin_lock(struct bpf_res_spin_lock *lock)
+{
+	int ret;
+
+	BUILD_BUG_ON(sizeof(rqspinlock_t) != sizeof(struct bpf_res_spin_lock));
+	BUILD_BUG_ON(__alignof__(rqspinlock_t) != __alignof__(struct bpf_res_spin_lock));
+
+	preempt_disable();
+	ret = res_spin_lock((rqspinlock_t *)lock);
+	if (unlikely(ret)) {
+		preempt_enable();
+		return ret;
+	}
+	return 0;
+}
+
+__bpf_kfunc void bpf_res_spin_unlock(struct bpf_res_spin_lock *lock)
+{
+	res_spin_unlock((rqspinlock_t *)lock);
+	preempt_enable();
+}
+
+__bpf_kfunc int bpf_res_spin_lock_irqsave(struct bpf_res_spin_lock *lock, unsigned long *flags__irq_flag)
+{
+	u64 *ptr = (u64 *)flags__irq_flag;
+	unsigned long flags;
+	int ret;
+
+	preempt_disable();
+	local_irq_save(flags);
+	ret = res_spin_lock((rqspinlock_t *)lock);
+	if (unlikely(ret)) {
+		local_irq_restore(flags);
+		preempt_enable();
+		return ret;
+	}
+	*ptr = flags;
+	return 0;
+}
+
+__bpf_kfunc void bpf_res_spin_unlock_irqrestore(struct bpf_res_spin_lock *lock, unsigned long *flags__irq_flag)
+{
+	u64 *ptr = (u64 *)flags__irq_flag;
+	unsigned long flags = *ptr;
+
+	res_spin_unlock((rqspinlock_t *)lock);
+	local_irq_restore(flags);
+	preempt_enable();
+}
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(rqspinlock_kfunc_ids)
+BTF_ID_FLAGS(func, bpf_res_spin_lock, KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_res_spin_unlock)
+BTF_ID_FLAGS(func, bpf_res_spin_lock_irqsave, KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_res_spin_unlock_irqrestore)
+BTF_KFUNCS_END(rqspinlock_kfunc_ids)
+
+static const struct btf_kfunc_id_set rqspinlock_kfunc_set = {
+	.owner = THIS_MODULE,
+	.set = &rqspinlock_kfunc_ids,
+};
+
+static __init int rqspinlock_register_kfuncs(void)
+{
+	return register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &rqspinlock_kfunc_set);
+}
+late_initcall(rqspinlock_register_kfuncs);
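The irqsave/irqrestore variants follow the same pattern from the program side, with the saved IRQ state passed by pointer as in bpf_local_irq_{save,restore}. A minimal sketch under the same assumptions as the earlier example (the includes, map, and extern declarations are illustrative, not from this commit):

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical BPF-side declarations of the IRQ-saving variants. */
extern int bpf_res_spin_lock_irqsave(struct bpf_res_spin_lock *lock,
				     unsigned long *flags) __ksym __weak;
extern void bpf_res_spin_unlock_irqrestore(struct bpf_res_spin_lock *lock,
					   unsigned long *flags) __ksym __weak;

/* Illustrative map value embedding the lock next to the data it guards. */
struct irq_elem {
	struct bpf_res_spin_lock lock;
	u64 hits;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct irq_elem);
} irq_map SEC(".maps");

SEC("tc")
int inc_with_irqs_off(struct __sk_buff *ctx)
{
	struct irq_elem *e;
	unsigned long flags;
	int key = 0;

	e = bpf_map_lookup_elem(&irq_map, &key);
	if (!e)
		return 0;
	/* On success, IRQs remain disabled and flags holds the saved
	 * state until the matching irqrestore call below.
	 */
	if (bpf_res_spin_lock_irqsave(&e->lock, &flags))
		return 0;
	e->hits++;
	bpf_res_spin_unlock_irqrestore(&e->lock, &flags);
	return 0;
}

char _license[] SEC("license") = "GPL";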
