Skip to content

Commit 916c9f7

Browse files
committed
context: introduce spinlock that gives up after a few iterations
Introduces a spinlocking mutex that only offers access to its internals via a "try_unlock" method which spins a small finite number of times before unlocking. We use a spinlock because, in the minimal dependency set we support, there are no synchronization primitives except atomics, so that's the only form of mutex we can create. However, there are a number of problems with spinlocks -- see this article (from Kix in #346) for some of them: https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html To avoid these problems, we give up after a few spins. The way we will use this in the context object is: 1. When initializing the global context, if we can't get the lock, we just initialize a new stack-local context and use that. (A parallel thread must be initializing the context, which is wasteful but harmless.) 2. Once we unlock the context, we copy it onto the stack and re-lock it in order to minimize the time holding the lock. (The exception is during initialization where we hold the lock for the whole initialization, in the hopes that other threads will block on us instead of doing their own initialization.) If we rerandomize, we do this on the stack-local copy and then only re-lock to copy it back. 3. If we fail to get the lock to copy the rerandomized context back, we just don't copy it. The result is that we wasted some time rerandomizing without any benefit, which is not the end of the world. The spinlock was implemented with help from ChatGPT o3 and the unit tests with help from Claude 4 (though in both cases I did significant refactoring and review by hand).
1 parent 0977949 commit 916c9f7

File tree

2 files changed

+229
-0
lines changed

2 files changed

+229
-0
lines changed

src/context/mod.rs

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,9 @@ use crate::{Error, Secp256k1};
1313
#[cfg_attr(feature = "std", path = "internal_std.rs")]
1414
mod internal;
1515

16+
#[cfg(test)] // will have a better feature-gate in next commit
17+
mod spinlock;
18+
1619
#[cfg(feature = "std")]
1720
pub use internal::{rerandomize_global_context, with_global_context, with_raw_global_context};
1821

src/context/spinlock.rs

Lines changed: 226 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,226 @@
1+
// SPDX-License-Identifier: CC0-1.0
2+
3+
use core::cell::UnsafeCell;
4+
use core::hint::spin_loop;
5+
use core::ops::{Deref, DerefMut};
6+
use core::sync::atomic::{AtomicBool, Ordering};
7+
8+
/// Maximum number of CAS attempts `try_lock` makes before giving up.
const MAX_SPINLOCK_ATTEMPTS: usize = 128;

// Best-Effort Spinlock
//
// To obtain exclusive access, call [`Self::try_lock`], which will spinlock
// for some small number of iterations before giving up. By trying again in
// a loop, you can emulate a "true" spinlock that will only yield once it
// has access. However, this would be very dangerous, especially in a nostd
// environment, because if we are pre-empted by an interrupt handler while
// the lock is held, and that interrupt handler attempts to take the lock,
// then we deadlock.
//
// Instead, the strategy we take within this module is to simply create a
// new stack-local context object if we are unable to obtain a lock on the
// global one. This is slow and loses the defense-in-depth "rerandomization"
// anti-sidechannel measure, but it is better than deadlocking.
pub struct SpinLock<T> {
    /// Set to `true` exactly while a [`SpinLockGuard`] for this lock is alive.
    flag: AtomicBool,
    // Invariant: `data` is only ever accessed through a `SpinLockGuard`,
    // i.e. while `flag` is `true` and only by the unique guard holder.
    data: UnsafeCell<T>,
}

// Required by rustc if we have a static of this type.
// Safety: `data` is accessed only while the `flag` is held.
unsafe impl<T: Send> Sync for SpinLock<T> {}

#[cfg(test)]
impl SpinLock<u64> {
    /// Test-only constructor; the initial value 100 is asserted on by the unit tests.
    pub const fn new() -> Self { Self { flag: AtomicBool::new(false), data: UnsafeCell::new(100) } }
}

impl<T> SpinLock<T> {
    /// Attempts to acquire the lock, returning an RAII guard on success.
    ///
    /// Spins for at most `MAX_SPINLOCK_ATTEMPTS` iterations and returns
    /// `None` if the lock could not be acquired in that time. Callers
    /// should fall back to a lock-free strategy in that case rather than
    /// retrying in a loop (see the module-level comment for why).
    pub fn try_lock(&self) -> Option<SpinLockGuard<'_, T>> {
        for _ in 0..MAX_SPINLOCK_ATTEMPTS {
            // Acquire on success so accesses to `data` cannot be reordered
            // before the lock is taken; Relaxed on failure since we learn
            // nothing except "try again".
            if self.flag.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_ok()
            {
                return Some(SpinLockGuard { lock: self });
            }
            spin_loop();
        }
        None
    }

    /// Unlocks the data held by the spinlock.
    ///
    /// # Safety
    ///
    /// Once this method is called, no access to the data within the spinlock
    /// should be possible.
    ///
    /// (This method is private so we can enforce this safety condition here.)
    #[inline(always)]
    unsafe fn unlock(&self) { self.flag.store(false, Ordering::Release); }
}

/// Drops the lock when it goes out of scope.
pub struct SpinLockGuard<'a, T> {
    lock: &'a SpinLock<T>,
}

impl<T> Deref for SpinLockGuard<'_, T> {
    type Target = T;
    fn deref(&self) -> &Self::Target {
        // Safe: we hold the lock.
        unsafe { &*self.lock.data.get() }
    }
}

impl<T> DerefMut for SpinLockGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // Safe: mutable access is unique while the guard lives.
        unsafe { &mut *self.lock.data.get() }
    }
}

impl<T> Drop for SpinLockGuard<'_, T> {
    fn drop(&mut self) {
        // SAFETY: access to the data within the spinlock is only possible through
        // the `SpinLockGuard` which is being destructed.
        unsafe {
            self.lock.unlock();
        }
    }
}
94+
95+
#[cfg(test)]
mod tests {
    //! Single-threaded sanity tests for `SpinLock`.
    //!
    //! These use the test-only `SpinLock::<u64>::new`, which initializes
    //! the protected value to 100.

    use super::*;

    // Locking, releasing via `drop`, and re-locking all succeed when there
    // is no contention.
    #[test]
    fn basic_lock_unlock() {
        let spinlock = SpinLock::new();

        let guard = spinlock.try_lock().expect("Should be able to acquire lock");
        assert_eq!(*guard, 100);
        drop(guard);

        let guard2 = spinlock.try_lock().expect("Should be able to reacquire lock");
        assert_eq!(*guard2, 100);
    }

    // Writes made through one guard are visible to the next lock holder.
    #[test]
    fn modify_data() {
        let spinlock = SpinLock::new();

        {
            let mut guard = spinlock.try_lock().expect("Should be able to acquire lock");
            *guard = 42;
        }

        let guard = spinlock.try_lock().expect("Should be able to reacquire lock");
        assert_eq!(*guard, 42);
    }

    // While a guard is alive, `try_lock` spins out and returns `None`
    // instead of deadlocking.
    #[test]
    fn contention_single_thread() {
        let spinlock = SpinLock::new();

        let _guard1 = spinlock.try_lock().expect("Should be able to acquire lock");
        let result = spinlock.try_lock();
        assert!(result.is_none(), "Should not be able to acquire lock twice");
    }

    #[test]
    fn guard_deref() {
        let spinlock = SpinLock::new();
        let guard = spinlock.try_lock().expect("Should be able to acquire lock");

        // Test Deref
        assert_eq!(*guard, 100);

        // Test that we can read the inner `u64` out through the guard.
        let value: u64 = *guard;
        assert_eq!(value, 100);
    }

    #[test]
    fn guard_deref_mut() {
        let spinlock = SpinLock::new();
        let mut guard = spinlock.try_lock().expect("Should be able to acquire lock");

        // Test DerefMut
        *guard += 50;
        assert_eq!(*guard, 150);

        // Modify through the guard using a `u64` method (via auto-deref).
        *guard = guard.wrapping_add(10);
        assert_eq!(*guard, 160);
    }
}
160+
161+
#[cfg(all(test, feature = "std"))]
mod std_tests {
    //! Multi-threaded tests for `SpinLock`; require `std` for `thread`/`Arc`.

    use std::sync::Arc;
    use std::thread;
    use std::time::Duration;

    use super::*;

    // Three staggered threads each add their index to the protected value;
    // with the stagger there should be essentially no lock contention.
    #[test]
    fn multiple_threads_no_contention() {
        let shared = Arc::new(SpinLock::new());

        let mut workers = Vec::new();
        for delta in 1..=3 {
            let lock_ref = Arc::clone(&shared);
            workers.push(thread::spawn(move || {
                let mut slot = lock_ref.try_lock().expect("Should acquire lock");
                *slot += delta;
                // Guard is released when the closure ends.
            }));

            // Sleep between spawning threads. In practice this does not seem to be
            // necessary, at least on Andrew's system.
            thread::sleep(Duration::from_millis(10));
        }

        for worker in workers {
            worker.join().expect("Thread should complete successfully");
        }

        let total = shared.try_lock().expect("Should be able to acquire lock");
        // 100 (initial) + 1 + 2 + 3.
        assert_eq!(*total, 106);
    }

    // Same accumulation, but each thread holds the lock across a sleep so
    // the others are forced to spin out and retry.
    #[test]
    fn multiple_threads_contention() {
        let shared = Arc::new(SpinLock::new());

        let workers: Vec<_> = (1..=3)
            .map(|delta| {
                let lock_ref = Arc::clone(&shared);
                thread::spawn(move || loop {
                    if let Some(mut slot) = lock_ref.try_lock() {
                        *slot += delta;
                        // Sleep while holding lock.
                        thread::sleep(Duration::from_millis(10));
                        break;
                    }
                    //panic!("uncomment me to check that sometimes contention happens");
                })
            })
            .collect();

        for worker in workers {
            worker.join().expect("Thread should complete successfully");
        }

        let total = shared.try_lock().expect("Should be able to acquire lock");
        assert_eq!(*total, 106);
    }
}

0 commit comments

Comments
 (0)