@@ -15,6 +15,8 @@
 
 #include <linux/smp.h>
 #include <linux/bug.h>
+#include <linux/bpf.h>
+#include <linux/err.h>
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
@@ -661,3 +663,75 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 EXPORT_SYMBOL_GPL(resilient_queued_spin_lock_slowpath);
 
 #endif /* CONFIG_QUEUED_SPINLOCKS */
+
+__bpf_kfunc_start_defs();
+
+__bpf_kfunc int bpf_res_spin_lock(struct bpf_res_spin_lock *lock)
+{
+	int ret;
+
+	BUILD_BUG_ON(sizeof(rqspinlock_t) != sizeof(struct bpf_res_spin_lock));
+	BUILD_BUG_ON(__alignof__(rqspinlock_t) != __alignof__(struct bpf_res_spin_lock));
+
+	preempt_disable();
+	ret = res_spin_lock((rqspinlock_t *)lock);
+	if (unlikely(ret)) {
+		preempt_enable();
+		return ret;
+	}
+	return 0;
+}
+
+__bpf_kfunc void bpf_res_spin_unlock(struct bpf_res_spin_lock *lock)
+{
+	res_spin_unlock((rqspinlock_t *)lock);
+	preempt_enable();
+}
+
+__bpf_kfunc int bpf_res_spin_lock_irqsave(struct bpf_res_spin_lock *lock, unsigned long *flags__irq_flag)
+{
+	u64 *ptr = (u64 *)flags__irq_flag;
+	unsigned long flags;
+	int ret;
+
+	preempt_disable();
+	local_irq_save(flags);
+	ret = res_spin_lock((rqspinlock_t *)lock);
+	if (unlikely(ret)) {
+		local_irq_restore(flags);
+		preempt_enable();
+		return ret;
+	}
+	*ptr = flags;
+	return 0;
+}
+
+__bpf_kfunc void bpf_res_spin_unlock_irqrestore(struct bpf_res_spin_lock *lock, unsigned long *flags__irq_flag)
+{
+	u64 *ptr = (u64 *)flags__irq_flag;
+	unsigned long flags = *ptr;
+
+	res_spin_unlock((rqspinlock_t *)lock);
+	local_irq_restore(flags);
+	preempt_enable();
+}
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(rqspinlock_kfunc_ids)
+BTF_ID_FLAGS(func, bpf_res_spin_lock, KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_res_spin_unlock)
+BTF_ID_FLAGS(func, bpf_res_spin_lock_irqsave, KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_res_spin_unlock_irqrestore)
+BTF_KFUNCS_END(rqspinlock_kfunc_ids)
+
+static const struct btf_kfunc_id_set rqspinlock_kfunc_set = {
+	.owner = THIS_MODULE,
+	.set = &rqspinlock_kfunc_ids,
+};
+
+static __init int rqspinlock_register_kfuncs(void)
+{
+	return register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &rqspinlock_kfunc_set);
+}
+late_initcall(rqspinlock_register_kfuncs);
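
For context, here is a minimal sketch of how a BPF program might use the lock/unlock pair exposed above. It assumes verifier support for struct bpf_res_spin_lock fields in map values (added elsewhere in this series) and a libbpf-style build with vmlinux.h; the map name, section name, and program below are illustrative, not part of this commit. Unlike bpf_spin_lock(), bpf_res_spin_lock() can fail (for example when a deadlock or timeout is detected), so the program must check its return value before touching the protected data.

/* Illustrative BPF program (not part of this commit): acquire a resilient
 * spin lock embedded in a map value, bump a counter, and release it.
 */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

/* kfunc prototypes are resolved against kernel BTF at load time */
extern int bpf_res_spin_lock(struct bpf_res_spin_lock *lock) __ksym;
extern void bpf_res_spin_unlock(struct bpf_res_spin_lock *lock) __ksym;

struct elem {
	struct bpf_res_spin_lock lock;
	__u64 counter;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} counters SEC(".maps");

SEC("tc")
int bump_counter(struct __sk_buff *ctx)
{
	int key = 0;
	struct elem *e = bpf_map_lookup_elem(&counters, &key);

	if (!e)
		return 0;
	/* Acquisition may fail (deadlock/timeout detected), so check it. */
	if (bpf_res_spin_lock(&e->lock))
		return 0;
	e->counter++;
	bpf_res_spin_unlock(&e->lock);
	return 0;
}

char _license[] SEC("license") = "GPL";

The _irqsave/_irqrestore pair would follow the same pattern but additionally pass a pointer to an on-stack flags word, mirroring the local_irq_save()/local_irq_restore() calls in the kfuncs above.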