From 124fb897b4908d7bcd0612bc8765445e01e81cce Mon Sep 17 00:00:00 2001 From: Mathieu Choplain Date: Mon, 21 Jul 2025 14:09:47 +0200 Subject: [PATCH 1/3] kernel: assert no spinlock is held on swap when !USE_SWITCH The do_swap() routine used when CONFIG_USE_SWITCH=y asserts that the caller thread does not hold any spinlock when CONFIG_SPIN_VALIDATE is enabled. However, there is no similar check in place when CONFIG_USE_SWITCH=n. Copy this assertion into the USE_SWITCH=n implementation of z_swap_irqlock(). Signed-off-by: Mathieu Choplain --- kernel/include/kswap.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/kernel/include/kswap.h b/kernel/include/kswap.h index 0d790ed3a17d6..cff3efab6e90e 100644 --- a/kernel/include/kswap.h +++ b/kernel/include/kswap.h @@ -203,6 +203,16 @@ static inline int z_swap_irqlock(unsigned int key) { int ret; z_check_stack_sentinel(); + +#ifdef CONFIG_SPIN_VALIDATE + /* Refer to comment in do_swap() above for details */ +# ifndef CONFIG_ARM64 + __ASSERT(arch_irq_unlocked(key) || + _current->base.thread_state & (_THREAD_DUMMY | _THREAD_DEAD), + "Context switching while holding lock!"); +# endif /* CONFIG_ARM64 */ +#endif /* CONFIG_SPIN_VALIDATE */ + ret = arch_swap(key); return ret; } From d74173355c5e5ba931bd356c4adbf40c53c7d61f Mon Sep 17 00:00:00 2001 From: Mathieu Choplain Date: Wed, 23 Jul 2025 15:56:16 +0200 Subject: [PATCH 2/3] kernel: irq: update irq_lock() documentation regarding context switches Upon context switch, the virtual "interrupt lock" acquired by irq_lock() must not be "held". However, the current documentation for irq_lock() says that it is perfectly valid to hold it (!), and that a suspended thread will hold the "interrupt lock" upon being scheduled again (!!). Update the documentation to remove the outdated section and indicate that context switching while holding the interrupt lock is not allowed. 
Signed-off-by: Mathieu Choplain --- include/zephyr/irq.h | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/include/zephyr/irq.h b/include/zephyr/irq.h index 7cbff4d581c77..b51c2234dc391 100644 --- a/include/zephyr/irq.h +++ b/include/zephyr/irq.h @@ -239,16 +239,13 @@ irq_disconnect_dynamic(unsigned int irq, unsigned int priority, * (for example, ARM) will fail silently if invoked from user mode instead * of generating an exception. * - * @note - * This routine can be called by ISRs or by threads. If it is called by a - * thread, the interrupt lock is thread-specific; this means that interrupts - * remain disabled only while the thread is running. If the thread performs an - * operation that allows another thread to run (for example, giving a semaphore - * or sleeping for N milliseconds), the interrupt lock no longer applies and - * interrupts may be re-enabled while other processing occurs. When the thread - * once again becomes the current thread, the kernel re-establishes its - * interrupt lock; this ensures the thread won't be interrupted until it has - * explicitly released the interrupt lock it established. + * This routine can be called by ISRs and threads. + * + * @warning + * As long as all recursive calls to irq_lock() have not been balanced with + * corresponding irq_unlock() calls, the caller "holds the interrupt lock". + * + * "Holding the interrupt lock" when a context switch occurs is illegal. * * @warning * The lock-out key should never be used to manually re-enable interrupts From a17388118c4aeb595deb4af1bee672243468a196 Mon Sep 17 00:00:00 2001 From: Mathieu Choplain Date: Wed, 23 Jul 2025 16:17:55 +0200 Subject: [PATCH 3/3] kernel: spinlock: update k_spin_lock() documentation wrt context switch Threads must not attempt to context switch if they are holding a spinlock. Add this information to the documentation for k_spin_lock(). 
Signed-off-by: Mathieu Choplain --- include/zephyr/spinlock.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/zephyr/spinlock.h b/include/zephyr/spinlock.h index 451e91cbd5475..e6f444ff05fdc 100644 --- a/include/zephyr/spinlock.h +++ b/include/zephyr/spinlock.h @@ -175,6 +175,9 @@ static ALWAYS_INLINE void z_spinlock_validate_post(struct k_spinlock *l) * in uniprocessor contexts such that the locking reduces to an * interrupt mask operation. * + * @warning + * Holding a spinlock when a context switch occurs is illegal. + * * @param l A pointer to the spinlock to lock * @return A key value that must be passed to k_spin_unlock() when the * lock is released.