import .Base: unsafe_convert, lock, trylock, unlock, islocked, wait, notify, AbstractLock

export SpinLock
-
+public PaddedSpinLock
# Important Note: these low-level primitives defined here
#   are typically not for general usage

@@ -12,33 +12,68 @@ export SpinLock
# #########################################

"""
-    SpinLock()
+    abstract type AbstractSpinLock <: AbstractLock end

-Create a non-reentrant, test-and-test-and-set spin lock.
+A non-reentrant, test-and-test-and-set spin lock.
Recursive use will result in a deadlock.
This kind of lock should only be used around code that takes little time
to execute and does not block (e.g. perform I/O).
In general, [`ReentrantLock`](@ref) should be used instead.

Each [`lock`](@ref) must be matched with an [`unlock`](@ref).
-If [`!islocked(lck::SpinLock)`](@ref islocked) holds, [`trylock(lck)`](@ref trylock)
+If [`!islocked(lck::AbstractSpinLock)`](@ref islocked) holds, [`trylock(lck)`](@ref trylock)
succeeds unless there are other tasks attempting to hold the lock "at the same time."

Test-and-test-and-set spin locks are quickest up to about 30ish
contending threads. If you have more contention than that, different
synchronization approaches should be considered.
"""
-mutable struct SpinLock <: AbstractLock
+abstract type AbstractSpinLock <: AbstractLock end
+
+"""
+    SpinLock() <: AbstractSpinLock
+
+Spinlocks are not padded, and so may suffer from false sharing.
+See also [`PaddedSpinLock`](@ref).
+
+See the documentation for [`AbstractSpinLock`](@ref) regarding correct usage.
+"""
+mutable struct SpinLock <: AbstractSpinLock
    # we make this much larger than necessary to minimize false-sharing
    @atomic owned::Int
    SpinLock() = new(0)
end

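A usage sketch (illustrative only, not part of this diff): the rules in the docstring above — every `lock` matched by an `unlock`, no recursive locking, only short non-blocking critical sections — look like this with the existing `Base.Threads.SpinLock` API.

```julia
using Base.Threads: SpinLock, @threads

const counter_lock = SpinLock()
const counter = Ref(0)

# Every lock must be matched with an unlock; try/finally keeps the pair balanced
# even if the (short, non-blocking) critical section throws.
@threads for i in 1:10_000
    lock(counter_lock)
    try
        counter[] += 1
    finally
        unlock(counter_lock)
    end
end

# trylock returns immediately: true if the lock was acquired, false otherwise.
if trylock(counter_lock)
    try
        println("counter = ", counter[])
    finally
        unlock(counter_lock)
    end
end
```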
+# TODO: Determine the cache line size using e.g., CPUID. Meanwhile, this is correct for most
+# processors.
+const CACHE_LINE_SIZE = 64
+
+"""
+    PaddedSpinLock() <: AbstractSpinLock
+
+PaddedSpinLocks are padded so that each is guaranteed to be on its own cache line, to avoid
+false sharing.
+See also [`SpinLock`](@ref).
+
+See the documentation for [`AbstractSpinLock`](@ref) regarding correct usage.
+"""
+mutable struct PaddedSpinLock <: AbstractSpinLock
+    # we make this much larger than necessary to minimize false-sharing
+    _padding_before::NTuple{max(0, CACHE_LINE_SIZE - sizeof(Int)), UInt8}
+    @atomic owned::Int
+    _padding_after::NTuple{max(0, CACHE_LINE_SIZE - sizeof(Int)), UInt8}
+    function PaddedSpinLock()
+        l = new()
+        @atomic l.owned = 0
+        return l
+    end
+end
+
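A sketch of the intended use (it assumes this patch is applied; `PaddedSpinLock` is declared `public` but not exported, so it is brought in from the `Threads` module). The shard count and names below are made up for illustration.

```julia
using Base.Threads: @threads
using Base.Threads: PaddedSpinLock   # added by this change

const NSHARDS = 8
shard_locks  = [PaddedSpinLock() for _ in 1:NSHARDS]
shard_counts = zeros(Int, NSHARDS)

# Each PaddedSpinLock occupies at least a full cache line, so tasks spinning on
# different shards' locks do not bounce a shared cache line between cores, which
# can happen when many small, unpadded SpinLocks end up allocated next to each
# other. (The counts vector itself is unpadded and kept only for illustration.)
@threads for i in 1:100_000
    s = 1 + i % NSHARDS
    lock(shard_locks[s])
    try
        shard_counts[s] += 1
    finally
        unlock(shard_locks[s])
    end
end
```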
# Note: this cannot assert that the lock is held by the correct thread, because we do not
# track which thread locked it. Users beware.
-Base.assert_havelock(l::SpinLock) = islocked(l) ? nothing : Base.concurrency_violation()
+Base.assert_havelock(l::AbstractSpinLock) = islocked(l) ? nothing : Base.concurrency_violation()

-function lock(l::SpinLock)
+function lock(l::AbstractSpinLock)
    while true
        if @inline trylock(l)
            return
@@ -49,7 +84,7 @@ function lock(l::SpinLock)
    end
end

-function trylock(l::SpinLock)
+function trylock(l::AbstractSpinLock)
    if l.owned == 0
        GC.disable_finalizers()
        p = @atomicswap :acquire l.owned = 1
@@ -61,7 +96,7 @@ function trylock(l::SpinLock)
    return false
end

-function unlock(l::SpinLock)
+function unlock(l::AbstractSpinLock)
    if (@atomicswap :release l.owned = 0) == 0
        error("unlock count must match lock count")
    end
@@ -70,6 +105,6 @@ function unlock(l::SpinLock)
    return
end

-function islocked(l::SpinLock)
+function islocked(l::AbstractSpinLock)
    return (@atomic :monotonic l.owned) != 0
end
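One caveat worth restating (illustrative, not part of this diff): `islocked` reads `owned` with only `:monotonic` ordering, so it is a diagnostic — useful for assertions such as `assert_havelock` — not a synchronization tool. A check-then-lock sequence is racy; non-blocking acquisition should go through `trylock`, sketched here with the existing `SpinLock`.

```julia
using Base.Threads: SpinLock

l = SpinLock()

# Racy: another task can take the lock between the islocked check and lock().
#   if !islocked(l)
#       lock(l)
#   end

# Correct non-blocking pattern: trylock performs the test and the :acquire swap,
# returning false instead of spinning when the lock is already held.
if trylock(l)
    try
        @info "lock acquired without spinning"
    finally
        unlock(l)
    end
else
    @info "lock busy; skip the work, queue it, or call lock(l) to spin"
end
```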