// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

- type U8 = u8;
- use super::super::parking_lot_core::UnparkToken;
+ use sync::atomic::{Ordering, AtomicU8, ATOMIC_U8_INIT};
+ use time::Instant;
+ use super::super::parking_lot_core::{
+     self,
+     deadlock,
+     ParkResult,
+     SpinWait,
+     UnparkResult,
+     UnparkToken,
+     DEFAULT_PARK_TOKEN,
+ };

// UnparkToken used to indicate that the target thread should attempt to
// lock the mutex again as soon as it is unparked.
@@ -16,5 +25,217 @@ pub const TOKEN_NORMAL: UnparkToken = UnparkToken(0);
// thread directly without unlocking it.
pub const TOKEN_HANDOFF: UnparkToken = UnparkToken(1);

- const LOCKED_BIT: U8 = 1;
- const PARKED_BIT: U8 = 2;
+ const LOCKED_BIT: u8 = 1;
+ const PARKED_BIT: u8 = 2;
+
+ /// Raw mutex type backed by the parking lot.
+ pub struct RawMutex {
+     state: AtomicU8,
+ }
+
+ impl RawMutex {
+     /// Initial value for an unlocked mutex.
+     pub const INIT: RawMutex = RawMutex {
+         state: ATOMIC_U8_INIT,
+     };
+
+     /// Acquires this mutex, blocking the current thread until it is able to do so.
+     #[inline]
+     pub fn lock(&self) {
+         if self
+             .state
+             .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
+             .is_err()
+         {
+             self.lock_slow(None);
+         }
+         unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+     }
+
+     /// Attempts to acquire this mutex without blocking.
+     #[inline]
+     pub fn try_lock(&self) -> bool {
+         let mut state = self.state.load(Ordering::Relaxed);
+         loop {
+             if state & LOCKED_BIT != 0 {
+                 return false;
+             }
+             match self.state.compare_exchange_weak(
+                 state,
+                 state | LOCKED_BIT,
+                 Ordering::Acquire,
+                 Ordering::Relaxed,
+             ) {
+                 Ok(_) => {
+                     unsafe { deadlock::acquire_resource(self as *const _ as usize) };
+                     return true;
+                 }
+                 Err(x) => state = x,
+             }
+         }
+     }
+
+     /// Unlocks this mutex.
+     #[inline]
+     pub fn unlock(&self) {
+         unsafe { deadlock::release_resource(self as *const _ as usize) };
+         if self
+             .state
+             .compare_exchange_weak(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
+             .is_ok()
+         {
+             return;
+         }
+         self.unlock_slow(false);
+     }
+ }
+
+ impl RawMutex {
+     // Used by Condvar when requeuing threads to us; must be called while
+     // holding the queue lock.
+     #[inline]
+     pub(crate) fn mark_parked_if_locked(&self) -> bool {
+         let mut state = self.state.load(Ordering::Relaxed);
+         loop {
+             if state & LOCKED_BIT == 0 {
+                 return false;
+             }
+             match self.state.compare_exchange_weak(
+                 state,
+                 state | PARKED_BIT,
+                 Ordering::Relaxed,
+                 Ordering::Relaxed,
+             ) {
+                 Ok(_) => return true,
+                 Err(x) => state = x,
+             }
+         }
+     }
+
+     // Used by Condvar when requeuing threads to us; must be called while
+     // holding the queue lock.
+     #[inline]
+     pub(crate) fn mark_parked(&self) {
+         self.state.fetch_or(PARKED_BIT, Ordering::Relaxed);
+     }
+
+     #[cold]
+     #[inline(never)]
+     fn lock_slow(&self, timeout: Option<Instant>) -> bool {
+         let mut spinwait = SpinWait::new();
+         let mut state = self.state.load(Ordering::Relaxed);
+         loop {
+             // Grab the lock if it isn't locked, even if there is a queue on it
+             if state & LOCKED_BIT == 0 {
+                 match self.state.compare_exchange_weak(
+                     state,
+                     state | LOCKED_BIT,
+                     Ordering::Acquire,
+                     Ordering::Relaxed,
+                 ) {
+                     Ok(_) => return true,
+                     Err(x) => state = x,
+                 }
+                 continue;
+             }
+
+             // If there is no queue, try spinning a few times
+             if state & PARKED_BIT == 0 && spinwait.spin() {
+                 state = self.state.load(Ordering::Relaxed);
+                 continue;
+             }
+
+             // Set the parked bit
+             if state & PARKED_BIT == 0 {
+                 if let Err(x) = self.state.compare_exchange_weak(
+                     state,
+                     state | PARKED_BIT,
+                     Ordering::Relaxed,
+                     Ordering::Relaxed,
+                 ) {
+                     state = x;
+                     continue;
+                 }
+             }
+
+             // Park our thread until we are woken up by an unlock
+             unsafe {
+                 let addr = self as *const _ as usize;
+                 let validate = || self.state.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT;
+                 let before_sleep = || {};
+                 let timed_out = |_, was_last_thread| {
+                     // Clear the parked bit if we were the last parked thread
+                     if was_last_thread {
+                         self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
+                     }
+                 };
+                 match parking_lot_core::park(
+                     addr,
+                     validate,
+                     before_sleep,
+                     timed_out,
+                     DEFAULT_PARK_TOKEN,
+                     timeout,
+                 ) {
+                     // The thread that unparked us passed the lock on to us
+                     // directly without unlocking it.
+                     ParkResult::Unparked(TOKEN_HANDOFF) => return true,
+
+                     // We were unparked normally, try acquiring the lock again
+                     ParkResult::Unparked(_) => (),
+
+                     // The validation function failed, try locking again
+                     ParkResult::Invalid => (),
+
+                     // Timeout expired
+                     ParkResult::TimedOut => return false,
+                 }
+             }
+
+             // Loop back and try locking again
+             spinwait.reset();
+             state = self.state.load(Ordering::Relaxed);
+         }
+     }
+
+     #[cold]
+     #[inline(never)]
+     fn unlock_slow(&self, force_fair: bool) {
+         // Unlock directly if there are no parked threads
+         if self
+             .state
+             .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
+             .is_ok()
+         {
+             return;
+         }
+
+         // Unpark one thread and leave the parked bit set if there might
+         // still be parked threads on this address.
+         unsafe {
+             let addr = self as *const _ as usize;
+             let callback = |result: UnparkResult| {
+                 // If we are using a fair unlock then we should keep the
+                 // mutex locked and hand it off to the unparked thread.
+                 if result.unparked_threads != 0 && (force_fair || result.be_fair) {
+                     // Clear the parked bit if there are no more parked
+                     // threads.
+                     if !result.have_more_threads {
+                         self.state.store(LOCKED_BIT, Ordering::Relaxed);
+                     }
+                     return TOKEN_HANDOFF;
+                 }
+
+                 // Clear the locked bit, and the parked bit as well if there
+                 // are no more parked threads.
+                 if result.have_more_threads {
+                     self.state.store(PARKED_BIT, Ordering::Release);
+                 } else {
+                     self.state.store(0, Ordering::Release);
+                 }
+                 TOKEN_NORMAL
+             };
+             parking_lot_core::unpark_one(addr, callback);
+         }
+     }
+ }
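
For orientation (not part of the patch): the mutex above packs its entire state into one AtomicU8. lock tries a single weak compare-and-swap of 0 -> LOCKED_BIT and only falls into lock_slow on failure; lock_slow spins briefly, then sets PARKED_BIT and parks the thread through parking_lot_core, so that unlock knows whether anyone needs waking. The sketch below is a deliberately simplified analogue using only std, not the patched code: ToyMutex and with_lock are hypothetical names, and the slow path merely yields instead of parking, but the one-byte state and the fast path have the same shape as in the diff.

use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicU8, Ordering};
use std::thread;

const LOCKED_BIT: u8 = 1;

// Hypothetical, simplified analogue of RawMutex: same one-byte state and
// fast path, but the slow path yields instead of parking.
struct ToyMutex<T> {
    state: AtomicU8,
    data: UnsafeCell<T>,
}

// Safety: `data` is only accessed while `state` holds LOCKED_BIT.
unsafe impl<T: Send> Sync for ToyMutex<T> {}

impl<T> ToyMutex<T> {
    const fn new(value: T) -> Self {
        ToyMutex {
            state: AtomicU8::new(0),
            data: UnsafeCell::new(value),
        }
    }

    fn with_lock<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
        // Fast path: the same 0 -> LOCKED_BIT CAS as RawMutex::lock.
        while self
            .state
            .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            // Stand-in for lock_slow: the real code sets PARKED_BIT and
            // parks the thread through parking_lot_core instead of spinning.
            thread::yield_now();
        }
        let result = f(unsafe { &mut *self.data.get() });
        // Stand-in for unlock: the real unlock_slow may also hand the lock
        // off to a parked thread via TOKEN_HANDOFF.
        self.state.store(0, Ordering::Release);
        result
    }
}

fn main() {
    static COUNTER: ToyMutex<u32> = ToyMutex::new(0);
    let handles: Vec<_> = (0..4)
        .map(|_| {
            thread::spawn(|| {
                for _ in 0..1_000 {
                    COUNTER.with_lock(|n| *n += 1);
                }
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
    // All increments were serialized by the lock, so this prints 4000.
    println!("counter = {}", COUNTER.with_lock(|n| *n));
}

Unlike the patched code, the toy version is not panic-safe (a panicking closure leaves the lock held) and burns CPU under contention, which is exactly the cost the parking-lot slow path is designed to avoid.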