Commit 2c95fdf

Add RawMutex from parking_lot
1 parent aa1912d commit 2c95fdf

3 files changed: +233, -453 lines

src/libstd/sys_common/mutex.rs

Lines changed: 8 additions & 8 deletions

@@ -8,14 +8,14 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use sys::mutex as imp;
+use super::parking_lot::raw_mutex::RawMutex;
 
 /// An OS-based mutual exclusion lock.
 ///
 /// This is the thinnest cross-platform wrapper around OS mutexes. All usage of
 /// this mutex is unsafe and it is recommended to instead use the safe wrapper
 /// at the top level of the crate instead of this type.
-pub struct Mutex(imp::Mutex);
+pub struct Mutex(RawMutex);
 
 unsafe impl Sync for Mutex {}
 
@@ -28,7 +28,7 @@ impl Mutex {
     /// mutex is ever used reentrantly, i.e., `raw_lock` or `try_lock`
     /// are called by the thread currently holding the lock.
     #[unstable(feature = "sys_internals", issue = "0")] // FIXME: min_const_fn
-    pub const fn new() -> Mutex { Mutex(imp::Mutex::new()) }
+    pub const fn new() -> Mutex { Mutex(RawMutex::INIT) }
 
     /// Prepare the mutex for use.
     ///
@@ -37,7 +37,7 @@ impl Mutex {
     /// Calling it in parallel with or after any operation (including another
     /// `init()`) is undefined behavior.
     #[inline]
-    pub unsafe fn init(&mut self) { self.0.init() }
+    pub unsafe fn init(&mut self) { }
 
     /// Locks the mutex blocking the current thread until it is available.
     ///
@@ -77,19 +77,19 @@ impl Mutex {
     /// Behavior is undefined if there are current or will be future users of
     /// this mutex.
     #[inline]
-    pub unsafe fn destroy(&self) { self.0.destroy() }
+    pub unsafe fn destroy(&self) { }
 }
 
 // not meant to be exported to the outside world, just the containing module
-pub fn raw(mutex: &Mutex) -> &imp::Mutex { &mutex.0 }
+pub fn raw(mutex: &Mutex) -> &RawMutex { &mutex.0 }
 
 #[must_use]
 /// A simple RAII utility for the above Mutex without the poisoning semantics.
-pub struct MutexGuard<'a>(&'a imp::Mutex);
+pub struct MutexGuard<'a>(&'a RawMutex);
 
 impl<'a> Drop for MutexGuard<'a> {
     #[inline]
     fn drop(&mut self) {
-        unsafe { self.0.unlock(); }
+        self.0.unlock();
     }
 }
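
The net effect on this wrapper: RawMutex is a single const-initializable atomic byte, so `new()` remains a `const fn` while `init()` and `destroy()` become no-ops; there is no OS mutex object to set up or tear down, and the guard's `unlock()` call no longer needs an `unsafe` block. A minimal standalone sketch of the same wrapper-plus-guard shape, with an ordinary spinlock standing in for RawMutex (the names RawLock, Guard, and the free function lock() are illustrative, not from the commit):

use std::sync::atomic::{AtomicBool, Ordering};

// Stand-in for RawMutex: a const-initializable spinlock (illustrative only).
pub struct RawLock(AtomicBool);

impl RawLock {
    pub const INIT: RawLock = RawLock(AtomicBool::new(false));

    pub fn lock(&self) {
        // Spin until we flip the flag from unlocked to locked.
        while self
            .0
            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            std::hint::spin_loop();
        }
    }

    pub fn unlock(&self) {
        self.0.store(false, Ordering::Release);
    }
}

// The same RAII shape as MutexGuard above: unlock on drop, no poisoning.
#[must_use]
pub struct Guard<'a>(&'a RawLock);

impl<'a> Drop for Guard<'a> {
    fn drop(&mut self) {
        self.0.unlock();
    }
}

pub fn lock(raw: &RawLock) -> Guard<'_> {
    raw.lock();
    Guard(raw)
}

// Because INIT is a constant, a static needs no init() and no destroy().
static LOCK: RawLock = RawLock::INIT;

fn main() {
    let _g = lock(&LOCK);
    // critical section; the lock is released when _g drops
}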

src/libstd/sys_common/parking_lot/raw_mutex.rs

Lines changed: 225 additions & 4 deletions

@@ -5,8 +5,17 @@
 // http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-type U8 = u8;
-use super::super::parking_lot_core::UnparkToken;
+use sync::atomic::{Ordering, AtomicU8, ATOMIC_U8_INIT};
+use time::Instant;
+use super::super::parking_lot_core::{
+    self,
+    deadlock,
+    ParkResult,
+    SpinWait,
+    UnparkResult,
+    UnparkToken,
+    DEFAULT_PARK_TOKEN
+};
 
 // UnparkToken used to indicate that the target thread should attempt to
 // lock the mutex again as soon as it is unparked.
@@ -16,5 +25,217 @@ pub const TOKEN_NORMAL: UnparkToken = UnparkToken(0);
 // thread directly without unlocking it.
 pub const TOKEN_HANDOFF: UnparkToken = UnparkToken(1);
 
-const LOCKED_BIT: U8 = 1;
-const PARKED_BIT: U8 = 2;
+const LOCKED_BIT: u8 = 1;
+const PARKED_BIT: u8 = 2;
+
+/// Raw mutex type backed by the parking lot.
+pub struct RawMutex {
+    state: AtomicU8,
+}
+
+impl RawMutex {
+    /// Initial value for an unlocked mutex.
+    pub const INIT: RawMutex = RawMutex {
+        state: ATOMIC_U8_INIT,
+    };
+
+    /// Acquires this mutex, blocking the current thread until it is able to do so.
+    #[inline]
+    pub fn lock(&self) {
+        if self
+            .state
+            .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
+            .is_err()
+        {
+            self.lock_slow(None);
+        }
+        unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+    }
+
+    /// Attempts to acquire this mutex without blocking.
+    #[inline]
+    pub fn try_lock(&self) -> bool {
+        let mut state = self.state.load(Ordering::Relaxed);
+        loop {
+            if state & LOCKED_BIT != 0 {
+                return false;
+            }
+            match self.state.compare_exchange_weak(
+                state,
+                state | LOCKED_BIT,
+                Ordering::Acquire,
+                Ordering::Relaxed,
+            ) {
+                Ok(_) => {
+                    unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+                    return true;
+                }
+                Err(x) => state = x,
+            }
+        }
+    }
+
+    /// Unlocks this mutex.
+    #[inline]
+    pub fn unlock(&self) {
+        unsafe { deadlock::release_resource(self as *const _ as usize) };
+        if self
+            .state
+            .compare_exchange_weak(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
+            .is_ok()
+        {
+            return;
+        }
+        self.unlock_slow(false);
+    }
+}
+
+impl RawMutex {
+    // Used by Condvar when requeuing threads to us, must be called while
+    // holding the queue lock.
+    #[inline]
+    pub(crate) fn mark_parked_if_locked(&self) -> bool {
+        let mut state = self.state.load(Ordering::Relaxed);
+        loop {
+            if state & LOCKED_BIT == 0 {
+                return false;
+            }
+            match self.state.compare_exchange_weak(
+                state,
+                state | PARKED_BIT,
+                Ordering::Relaxed,
+                Ordering::Relaxed,
+            ) {
+                Ok(_) => return true,
+                Err(x) => state = x,
+            }
+        }
+    }
+
+    // Used by Condvar when requeuing threads to us, must be called while
+    // holding the queue lock.
+    #[inline]
+    pub(crate) fn mark_parked(&self) {
+        self.state.fetch_or(PARKED_BIT, Ordering::Relaxed);
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn lock_slow(&self, timeout: Option<Instant>) -> bool {
+        let mut spinwait = SpinWait::new();
+        let mut state = self.state.load(Ordering::Relaxed);
+        loop {
+            // Grab the lock if it isn't locked, even if there is a queue on it
+            if state & LOCKED_BIT == 0 {
+                match self.state.compare_exchange_weak(
+                    state,
+                    state | LOCKED_BIT,
+                    Ordering::Acquire,
+                    Ordering::Relaxed,
+                ) {
+                    Ok(_) => return true,
+                    Err(x) => state = x,
+                }
+                continue;
+            }
+
+            // If there is no queue, try spinning a few times
+            if state & PARKED_BIT == 0 && spinwait.spin() {
+                state = self.state.load(Ordering::Relaxed);
+                continue;
+            }
+
+            // Set the parked bit
+            if state & PARKED_BIT == 0 {
+                if let Err(x) = self.state.compare_exchange_weak(
+                    state,
+                    state | PARKED_BIT,
+                    Ordering::Relaxed,
+                    Ordering::Relaxed,
+                ) {
+                    state = x;
+                    continue;
+                }
+            }
+
+            // Park our thread until we are woken up by an unlock
+            unsafe {
+                let addr = self as *const _ as usize;
+                let validate = || self.state.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT;
+                let before_sleep = || {};
+                let timed_out = |_, was_last_thread| {
+                    // Clear the parked bit if we were the last parked thread
+                    if was_last_thread {
+                        self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
+                    }
+                };
+                match parking_lot_core::park(
+                    addr,
+                    validate,
+                    before_sleep,
+                    timed_out,
+                    DEFAULT_PARK_TOKEN,
+                    timeout,
+                ) {
+                    // The thread that unparked us passed the lock on to us
+                    // directly without unlocking it.
+                    ParkResult::Unparked(TOKEN_HANDOFF) => return true,
+
+                    // We were unparked normally, try acquiring the lock again
+                    ParkResult::Unparked(_) => (),
+
+                    // The validation function failed, try locking again
+                    ParkResult::Invalid => (),
+
+                    // Timeout expired
+                    ParkResult::TimedOut => return false,
+                }
+            }
+
+            // Loop back and try locking again
+            spinwait.reset();
+            state = self.state.load(Ordering::Relaxed);
+        }
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn unlock_slow(&self, force_fair: bool) {
+        // Unlock directly if there are no parked threads
+        if self
+            .state
+            .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
+            .is_ok()
+        {
+            return;
+        }
+
+        // Unpark one thread and leave the parked bit set if there might
+        // still be parked threads on this address.
+        unsafe {
+            let addr = self as *const _ as usize;
+            let callback = |result: UnparkResult| {
+                // If we are using a fair unlock then we should keep the
+                // mutex locked and hand it off to the unparked thread.
+                if result.unparked_threads != 0 && (force_fair || result.be_fair) {
+                    // Clear the parked bit if there are no more parked
+                    // threads.
+                    if !result.have_more_threads {
+                        self.state.store(LOCKED_BIT, Ordering::Relaxed);
+                    }
+                    return TOKEN_HANDOFF;
+                }
+
+                // Clear the locked bit, and the parked bit as well if there
+                // are no more parked threads.
+                if result.have_more_threads {
+                    self.state.store(PARKED_BIT, Ordering::Release);
+                } else {
+                    self.state.store(0, Ordering::Release);
+                }
+                TOKEN_NORMAL
+            };
+            parking_lot_core::unpark_one(addr, callback);
+        }
+    }
+}
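
Taken together, the diff implements a two-bit state machine: LOCKED_BIT (bit 0) means the mutex is held, PARKED_BIT (bit 1) means at least one thread is queued on this mutex's address. Uncontended lock and unlock are a single compare-exchange on that byte; only contended paths touch the parking lot, and unlock_slow() wakes exactly one thread, optionally handing the lock over without unlocking it (TOKEN_HANDOFF). The sketch below is a deliberately simplified, self-contained model of that protocol using only std: a Mutex<VecDeque<Thread>> plays the role of the parking-lot bucket, and spinning (SpinWait), timeouts, deadlock detection, and fair handoff are all omitted. ToyMutex and everything in it is hypothetical, for illustration only:

use std::collections::VecDeque;
use std::sync::atomic::{AtomicU8, Ordering};
use std::sync::{Arc, Mutex};
use std::thread::{self, Thread};

const LOCKED_BIT: u8 = 1; // bit 0: the mutex is held
const PARKED_BIT: u8 = 2; // bit 1: at least one thread is queued (or about to be)

pub struct ToyMutex {
    state: AtomicU8,
    // Stand-in for the parking-lot bucket the real code finds by address.
    queue: Mutex<VecDeque<Thread>>,
}

impl ToyMutex {
    pub fn new() -> ToyMutex {
        ToyMutex { state: AtomicU8::new(0), queue: Mutex::new(VecDeque::new()) }
    }

    pub fn lock(&self) {
        loop {
            let state = self.state.load(Ordering::Relaxed);
            // Grab the lock if it isn't held, even if other threads are queued.
            if state & LOCKED_BIT == 0 {
                if self.state
                    .compare_exchange(state, state | LOCKED_BIT,
                                      Ordering::Acquire, Ordering::Relaxed)
                    .is_ok()
                {
                    return;
                }
                continue;
            }
            // Announce a waiter; the CAS fails if the holder unlocks first.
            if state & PARKED_BIT == 0
                && self.state
                    .compare_exchange(state, state | PARKED_BIT,
                                      Ordering::Relaxed, Ordering::Relaxed)
                    .is_err()
            {
                continue;
            }
            // Queue ourselves, re-validating under the queue lock exactly as
            // the validate() closure passed to parking_lot_core::park() does.
            {
                let mut q = self.queue.lock().unwrap();
                if self.state.load(Ordering::Relaxed) != LOCKED_BIT | PARKED_BIT {
                    continue; // raced with an unlock: retry from the top
                }
                q.push_back(thread::current());
            }
            // unpark() before park() leaves a token, so no wakeup is lost.
            thread::park();
        }
    }

    pub fn unlock(&self) {
        // Fast path: no waiters recorded, just clear LOCKED_BIT.
        if self.state
            .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
            .is_ok()
        {
            return;
        }
        // Slow path: release the lock, keep PARKED_BIT only while the queue
        // is non-empty, then wake one thread (cf. unlock_slow()).
        let waiter = {
            let mut q = self.queue.lock().unwrap();
            let waiter = q.pop_front();
            let rest = if q.is_empty() { 0 } else { PARKED_BIT };
            self.state.store(rest, Ordering::Release);
            waiter
        };
        if let Some(t) = waiter {
            t.unpark();
        }
    }
}

fn main() {
    let m = Arc::new(ToyMutex::new());
    let threads: Vec<_> = (0..4).map(|_| {
        let m = Arc::clone(&m);
        thread::spawn(move || { m.lock(); /* critical section */ m.unlock(); })
    }).collect();
    for t in threads { t.join().unwrap(); }
}

The real lock_slow() additionally spins briefly (SpinWait) before setting PARKED_BIT, accepts an optional Instant deadline (lock() passes None), and returns immediately when woken with TOKEN_HANDOFF because the unlocking thread never released the lock; unlock_slow(force_fair) uses that handoff path to keep barging threads from starving parked ones.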
