@@ -134,15 +134,22 @@ fn h1(hash: u64) -> usize {
134
134
hash as usize
135
135
}
136
136
137
+ // Constant used by the h2 function, which grabs the top 7 bits of the hash.
138
+ const MIN_HASH_LEN : usize = if mem:: size_of :: < usize > ( ) < mem:: size_of :: < u64 > ( ) {
139
+ mem:: size_of :: < usize > ( )
140
+ } else {
141
+ mem:: size_of :: < u64 > ( )
142
+ } ;
143
+
137
144
/// Secondary hash function, saved in the low 7 bits of the control byte.
138
145
#[ inline]
139
146
#[ allow( clippy:: cast_possible_truncation) ]
140
147
fn h2 ( hash : u64 ) -> u8 {
141
148
// Grab the top 7 bits of the hash. While the hash is normally a full 64-bit
142
149
// value, some hash functions (such as FxHash) produce a usize result
143
150
// instead, which means that the top 32 bits are 0 on 32-bit platforms.
144
- let hash_len = usize :: min ( mem :: size_of :: < usize > ( ) , mem :: size_of :: < u64 > ( ) ) ;
145
- let top7 = hash >> ( hash_len * 8 - 7 ) ;
151
+ // So we use the MIN_HASH_LEN constant to handle this.
152
+ let top7 = hash >> ( MIN_HASH_LEN * 8 - 7 ) ;
146
153
( top7 & 0x7f ) as u8 // truncation
147
154
}
148
155
@@ -230,11 +237,15 @@ struct TableLayout {
230
237
231
238
impl TableLayout {
232
239
#[ inline]
233
- fn new < T > ( ) -> Self {
240
+ const fn new < T > ( ) -> Self {
234
241
let layout = Layout :: new :: < T > ( ) ;
235
242
Self {
236
243
size : layout. size ( ) ,
237
- ctrl_align : usize:: max ( layout. align ( ) , Group :: WIDTH ) ,
244
+ ctrl_align : if layout. align ( ) > Group :: WIDTH {
245
+ layout. align ( )
246
+ } else {
247
+ Group :: WIDTH
248
+ } ,
238
249
}
239
250
}
240
251
@@ -261,16 +272,6 @@ impl TableLayout {
261
272
}
262
273
}
263
274
264
- /// Returns a Layout which describes the allocation required for a hash table,
265
- /// and the offset of the control bytes in the allocation.
266
- /// (the offset is also one past last element of buckets)
267
- ///
268
- /// Returns `None` if an overflow occurs.
269
- #[ cfg_attr( feature = "inline-more" , inline) ]
270
- fn calculate_layout < T > ( buckets : usize ) -> Option < ( Layout , usize ) > {
271
- TableLayout :: new :: < T > ( ) . calculate_layout_for ( buckets)
272
- }
273
-
274
275
/// A reference to a hash table bucket containing a `T`.
275
276
///
276
277
/// This is usually just a pointer to the element itself. However if the element
@@ -296,9 +297,11 @@ impl<T> Clone for Bucket<T> {
296
297
}
297
298
298
299
impl < T > Bucket < T > {
300
+ const IS_ZERO_SIZED_TYPE : bool = mem:: size_of :: < T > ( ) == 0 ;
301
+
299
302
#[ inline]
300
303
unsafe fn from_base_index ( base : NonNull < T > , index : usize ) -> Self {
301
- let ptr = if mem :: size_of :: < T > ( ) == 0 {
304
+ let ptr = if Self :: IS_ZERO_SIZED_TYPE {
302
305
// won't overflow because index must be less than length
303
306
( index + 1 ) as * mut T
304
307
} else {
@@ -310,15 +313,15 @@ impl<T> Bucket<T> {
310
313
}
311
314
#[ inline]
312
315
unsafe fn to_base_index ( & self , base : NonNull < T > ) -> usize {
313
- if mem :: size_of :: < T > ( ) == 0 {
316
+ if Self :: IS_ZERO_SIZED_TYPE {
314
317
self . ptr . as_ptr ( ) as usize - 1
315
318
} else {
316
319
offset_from ( base. as_ptr ( ) , self . ptr . as_ptr ( ) )
317
320
}
318
321
}
319
322
#[ inline]
320
323
pub fn as_ptr ( & self ) -> * mut T {
321
- if mem :: size_of :: < T > ( ) == 0 {
324
+ if Self :: IS_ZERO_SIZED_TYPE {
322
325
// Just return an arbitrary ZST pointer which is properly aligned
323
326
mem:: align_of :: < T > ( ) as * mut T
324
327
} else {
@@ -327,7 +330,7 @@ impl<T> Bucket<T> {
327
330
}
328
331
#[ inline]
329
332
unsafe fn next_n ( & self , offset : usize ) -> Self {
330
- let ptr = if mem :: size_of :: < T > ( ) == 0 {
333
+ let ptr = if Self :: IS_ZERO_SIZED_TYPE {
331
334
( self . ptr . as_ptr ( ) as usize + offset) as * mut T
332
335
} else {
333
336
self . ptr . as_ptr ( ) . sub ( offset)
@@ -419,6 +422,9 @@ impl<T> RawTable<T, Global> {
419
422
}
420
423
421
424
impl < T , A : Allocator + Clone > RawTable < T , A > {
425
+ const TABLE_LAYOUT : TableLayout = TableLayout :: new :: < T > ( ) ;
426
+ const DATA_NEEDS_DROP : bool = mem:: needs_drop :: < T > ( ) ;
427
+
422
428
/// Creates a new empty hash table without allocating any memory, using the
423
429
/// given allocator.
424
430
///
@@ -447,7 +453,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
447
453
Ok ( Self {
448
454
table : RawTableInner :: new_uninitialized (
449
455
alloc,
450
- TableLayout :: new :: < T > ( ) ,
456
+ Self :: TABLE_LAYOUT ,
451
457
buckets,
452
458
fallibility,
453
459
) ?,
@@ -465,7 +471,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
465
471
Ok ( Self {
466
472
table : RawTableInner :: fallible_with_capacity (
467
473
alloc,
468
- TableLayout :: new :: < T > ( ) ,
474
+ Self :: TABLE_LAYOUT ,
469
475
capacity,
470
476
fallibility,
471
477
) ?,
@@ -499,7 +505,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
499
505
/// Deallocates the table without dropping any entries.
500
506
#[ cfg_attr( feature = "inline-more" , inline) ]
501
507
unsafe fn free_buckets ( & mut self ) {
502
- self . table . free_buckets ( TableLayout :: new :: < T > ( ) ) ;
508
+ self . table . free_buckets ( Self :: TABLE_LAYOUT ) ;
503
509
}
504
510
505
511
/// Returns pointer to one past last element of data table.
@@ -599,7 +605,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
599
605
}
600
606
601
607
unsafe fn drop_elements ( & mut self ) {
602
- if mem :: needs_drop :: < T > ( ) && !self . is_empty ( ) {
608
+ if Self :: DATA_NEEDS_DROP && !self . is_empty ( ) {
603
609
for item in self . iter ( ) {
604
610
item. drop ( ) ;
605
611
}
@@ -687,8 +693,8 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
687
693
additional,
688
694
& |table, index| hasher ( table. bucket :: < T > ( index) . as_ref ( ) ) ,
689
695
fallibility,
690
- TableLayout :: new :: < T > ( ) ,
691
- if mem :: needs_drop :: < T > ( ) {
696
+ Self :: TABLE_LAYOUT ,
697
+ if Self :: DATA_NEEDS_DROP {
692
698
Some ( mem:: transmute ( ptr:: drop_in_place :: < T > as unsafe fn ( * mut T ) ) )
693
699
} else {
694
700
None
@@ -710,7 +716,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
710
716
capacity,
711
717
& |table, index| hasher ( table. bucket :: < T > ( index) . as_ref ( ) ) ,
712
718
fallibility,
713
- TableLayout :: new :: < T > ( ) ,
719
+ Self :: TABLE_LAYOUT ,
714
720
)
715
721
}
716
722
}
@@ -1027,10 +1033,11 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
1027
1033
None
1028
1034
} else {
1029
1035
// Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
1030
- let ( layout, ctrl_offset) = match calculate_layout :: < T > ( self . table . buckets ( ) ) {
1031
- Some ( lco) => lco,
1032
- None => unsafe { hint:: unreachable_unchecked ( ) } ,
1033
- } ;
1036
+ let ( layout, ctrl_offset) =
1037
+ match Self :: TABLE_LAYOUT . calculate_layout_for ( self . table . buckets ( ) ) {
1038
+ Some ( lco) => lco,
1039
+ None => unsafe { hint:: unreachable_unchecked ( ) } ,
1040
+ } ;
1034
1041
Some ( (
1035
1042
unsafe { NonNull :: new_unchecked ( self . table . ctrl . as_ptr ( ) . sub ( ctrl_offset) ) } ,
1036
1043
layout,
@@ -1739,7 +1746,7 @@ impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
1739
1746
// to make sure we drop only the elements that have been
1740
1747
// cloned so far.
1741
1748
let mut guard = guard ( ( 0 , & mut * self ) , |( index, self_) | {
1742
- if mem :: needs_drop :: < T > ( ) && !self_. is_empty ( ) {
1749
+ if Self :: DATA_NEEDS_DROP && !self_. is_empty ( ) {
1743
1750
for i in 0 ..=* index {
1744
1751
if self_. is_bucket_full ( i) {
1745
1752
self_. bucket ( i) . drop ( ) ;
@@ -2027,6 +2034,8 @@ pub struct RawIter<T> {
2027
2034
}
2028
2035
2029
2036
impl < T > RawIter < T > {
2037
+ const DATA_NEEDS_DROP : bool = mem:: needs_drop :: < T > ( ) ;
2038
+
2030
2039
/// Refresh the iterator so that it reflects a removal from the given bucket.
2031
2040
///
2032
2041
/// For the iterator to remain valid, this method must be called once
@@ -2144,7 +2153,7 @@ impl<T> RawIter<T> {
2144
2153
}
2145
2154
2146
2155
unsafe fn drop_elements ( & mut self ) {
2147
- if mem :: needs_drop :: < T > ( ) && self . len ( ) != 0 {
2156
+ if Self :: DATA_NEEDS_DROP && self . len ( ) != 0 {
2148
2157
for item in self {
2149
2158
item. drop ( ) ;
2150
2159
}
0 commit comments