@@ -3,22 +3,14 @@ use crate::sync::{CacheAligned, Lock, LockGuard};
3
3
use std:: borrow:: Borrow ;
4
4
use std:: collections:: hash_map:: RawEntryMut ;
5
5
use std:: hash:: { Hash , Hasher } ;
6
- use std:: mem;
7
6
8
- #[ cfg( parallel_compiler) ]
9
- // 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700,
10
- // but this should be tested on higher core count CPUs. How the `Sharded` type gets used
11
- // may also affect the ideal number of shards.
12
- const SHARD_BITS : usize = 5 ;
13
-
14
- #[ cfg( not( parallel_compiler) ) ]
15
7
// With sharding disabled, everything lives in a single shard.
// (The parallel-compiler variant previously used 5 bits / 32 shards.)
const SHARD_BITS: usize = 0;

/// Number of shards in a `Sharded<T>`; always a power of two.
pub const SHARDS: usize = 1 << SHARD_BITS;
/// An array of cache-line aligned inner locked structures with convenience methods.
20
12
pub struct Sharded < T > {
21
- shards : [ CacheAligned < Lock < T > > ; SHARDS ] ,
13
+ shard : Lock < T > ,
22
14
}
23
15
24
16
impl < T : Default > Default for Sharded < T > {
@@ -28,34 +20,43 @@ impl<T: Default> Default for Sharded<T> {
28
20
}
29
21
}
30
22
31
- impl < T > Sharded < T > {
23
+ impl < T : Default > Sharded < T > {
32
24
#[ inline]
33
25
pub fn new ( mut value : impl FnMut ( ) -> T ) -> Self {
34
- Sharded { shards : [ ( ) ; SHARDS ] . map ( | ( ) | CacheAligned ( Lock :: new ( value ( ) ) ) ) }
26
+ Sharded { shard : Lock :: new ( value ( ) ) }
35
27
}
36
28
37
29
/// The shard is selected by hashing `val` with `FxHasher`.
38
30
#[ inline]
39
- pub fn get_shard_by_value < K : Hash + ?Sized > ( & self , val : & K ) -> & Lock < T > {
40
- if SHARDS == 1 { & self . shards [ 0 ] . 0 } else { self . get_shard_by_hash ( make_hash ( val) ) }
31
+ pub fn with_get_shard_by_value < K : Hash + ?Sized , F : FnOnce ( & mut T ) -> R , R > (
32
+ & self ,
33
+ _val : & K ,
34
+ f : F ,
35
+ ) -> R {
36
+ self . shard . with_lock ( f)
41
37
}
42
38
43
39
#[ inline]
44
- pub fn get_shard_by_hash ( & self , hash : u64 ) -> & Lock < T > {
45
- & self . shards [ get_shard_index_by_hash ( hash ) ] . 0
40
+ pub fn with_get_shard_by_hash < F : FnOnce ( & mut T ) -> R , R > ( & self , _hash : u64 , f : F ) -> R {
41
+ self . shard . with_lock ( f )
46
42
}
47
43
48
44
#[ inline]
49
- pub fn get_shard_by_index ( & self , i : usize ) -> & Lock < T > {
50
- & self . shards [ i] . 0
45
+ pub fn get_shard_by_value < K : Hash + ?Sized > ( & self , _val : & K ) -> & Lock < T > {
46
+ & self . shard
47
+ }
48
+
49
+ #[ inline]
50
+ pub fn get_shard_by_hash ( & self , _hash : u64 ) -> & Lock < T > {
51
+ & self . shard
51
52
}
52
53
53
54
pub fn lock_shards ( & self ) -> Vec < LockGuard < ' _ , T > > {
54
- ( 0 .. SHARDS ) . map ( |i| self . shards [ i ] . 0 . lock ( ) ) . collect ( )
55
+ vec ! [ self . shard . lock( ) ]
55
56
}
56
57
57
58
pub fn try_lock_shards ( & self ) -> Option < Vec < LockGuard < ' _ , T > > > {
58
- ( 0 .. SHARDS ) . map ( |i| self . shards [ i ] . 0 . try_lock ( ) ) . collect ( )
59
+ Some ( vec ! [ self . shard . try_lock( ) ? ] )
59
60
}
60
61
}
61
62
@@ -75,17 +76,18 @@ impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> {
75
76
Q : Hash + Eq ,
76
77
{
77
78
let hash = make_hash ( value) ;
78
- let mut shard = self . get_shard_by_hash ( hash) . lock ( ) ;
79
- let entry = shard. raw_entry_mut ( ) . from_key_hashed_nocheck ( hash, value) ;
80
-
81
- match entry {
82
- RawEntryMut :: Occupied ( e) => * e. key ( ) ,
83
- RawEntryMut :: Vacant ( e) => {
84
- let v = make ( ) ;
85
- e. insert_hashed_nocheck ( hash, v, ( ) ) ;
86
- v
79
+ self . with_get_shard_by_hash ( hash, |shard| {
80
+ let entry = shard. raw_entry_mut ( ) . from_key_hashed_nocheck ( hash, value) ;
81
+
82
+ match entry {
83
+ RawEntryMut :: Occupied ( e) => * e. key ( ) ,
84
+ RawEntryMut :: Vacant ( e) => {
85
+ let v = make ( ) ;
86
+ e. insert_hashed_nocheck ( hash, v, ( ) ) ;
87
+ v
88
+ }
87
89
}
88
- }
90
+ } )
89
91
}
90
92
91
93
#[ inline]
@@ -95,17 +97,18 @@ impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> {
95
97
Q : Hash + Eq ,
96
98
{
97
99
let hash = make_hash ( & value) ;
98
- let mut shard = self . get_shard_by_hash ( hash) . lock ( ) ;
99
- let entry = shard. raw_entry_mut ( ) . from_key_hashed_nocheck ( hash, & value) ;
100
-
101
- match entry {
102
- RawEntryMut :: Occupied ( e) => * e. key ( ) ,
103
- RawEntryMut :: Vacant ( e) => {
104
- let v = make ( value) ;
105
- e. insert_hashed_nocheck ( hash, v, ( ) ) ;
106
- v
100
+ self . with_get_shard_by_hash ( hash, |shard| {
101
+ let entry = shard. raw_entry_mut ( ) . from_key_hashed_nocheck ( hash, & value) ;
102
+
103
+ match entry {
104
+ RawEntryMut :: Occupied ( e) => * e. key ( ) ,
105
+ RawEntryMut :: Vacant ( e) => {
106
+ let v = make ( value) ;
107
+ e. insert_hashed_nocheck ( hash, v, ( ) ) ;
108
+ v
109
+ }
107
110
}
108
- }
111
+ } )
109
112
}
110
113
}
111
114
@@ -117,9 +120,11 @@ pub trait IntoPointer {
117
120
impl < K : Eq + Hash + Copy + IntoPointer > ShardedHashMap < K , ( ) > {
118
121
pub fn contains_pointer_to < T : Hash + IntoPointer > ( & self , value : & T ) -> bool {
119
122
let hash = make_hash ( & value) ;
120
- let shard = self . get_shard_by_hash ( hash) . lock ( ) ;
121
- let value = value. into_pointer ( ) ;
122
- shard. raw_entry ( ) . from_hash ( hash, |entry| entry. into_pointer ( ) == value) . is_some ( )
123
+
124
+ self . with_get_shard_by_hash ( hash, |shard| {
125
+ let value = value. into_pointer ( ) ;
126
+ shard. raw_entry ( ) . from_hash ( hash, |entry| entry. into_pointer ( ) == value) . is_some ( )
127
+ } )
123
128
}
124
129
}
125
130
@@ -130,17 +135,19 @@ pub fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 {
130
135
state. finish ( )
131
136
}
132
137
138
+ /*
133
139
/// Get a shard with a pre-computed hash value. If `get_shard_by_value` is
134
140
/// ever used in combination with `get_shard_by_hash` on a single `Sharded`
135
141
/// instance, then `hash` must be computed with `FxHasher`. Otherwise,
136
142
/// `hash` can be computed with any hasher, so long as that hasher is used
137
143
/// consistently for each `Sharded` instance.
138
144
#[inline]
139
145
#[allow(clippy::modulo_one)]
140
- pub fn get_shard_index_by_hash ( hash : u64 ) -> usize {
146
+ fn get_shard_index_by_hash(hash: u64) -> usize {
141
147
let hash_len = mem::size_of::<usize>();
142
148
// Ignore the top 7 bits as hashbrown uses these and get the next SHARD_BITS highest bits.
143
149
// hashbrown also uses the lowest bits, so we can't use those
144
150
let bits = (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize;
145
151
bits % SHARDS
146
152
}
153
+ */
0 commit comments