@@ -781,7 +781,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
                 // - we know there is enough space in the table.
                 // - all elements are unique.
                 let index = new_table.find_insert_slot(hash);
-                new_table.set_ctrl(index, h2(hash));
+                new_table.set_ctrl_h2(index, hash);
                 new_table.bucket(index).copy_from_nonoverlapping(&item);
             }
@@ -814,7 +814,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
             let bucket = self.bucket(index);
             self.table.growth_left -= special_is_empty(old_ctrl) as usize;
-            self.table.set_ctrl(index, h2(hash));
+            self.table.set_ctrl_h2(index, hash);
             bucket.write(value);
             self.table.items += 1;
             bucket
@@ -871,7 +871,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
             let old_ctrl = *self.table.ctrl(index);
             self.table.growth_left -= special_is_empty(old_ctrl) as usize;

-            self.table.set_ctrl(index, h2(hash));
+            self.table.set_ctrl_h2(index, hash);
             bucket.write(value);
             self.table.items += 1;
             bucket
@@ -1214,14 +1214,14 @@ impl<A: Allocator + Clone> RawTableInner<A> {
                 (pos.wrapping_sub(self.probe_seq(hash).pos) & self.bucket_mask) / Group::WIDTH
             };
             if likely(probe_index(i) == probe_index(new_i)) {
-                self.set_ctrl(i, h2(hash));
+                self.set_ctrl_h2(i, hash);
                 return Slot::Skip;
             }

             // We are moving the current item to a new position. Write
             // our H2 to the control byte of the new position.
             let prev_ctrl = *self.ctrl(new_i);
-            self.set_ctrl(new_i, h2(hash));
+            self.set_ctrl_h2(new_i, hash);
             if prev_ctrl == EMPTY {
                 self.set_ctrl(i, EMPTY);
                 Slot::Empty(new_i)
@@ -1244,6 +1244,13 @@ impl<A: Allocator + Clone> RawTableInner<A> {
         }
     }

+    /// Sets a control byte to the hash, and possibly also the replicated control byte at
+    /// the end of the array.
+    #[inline]
+    unsafe fn set_ctrl_h2(&self, index: usize, hash: u64) {
+        self.set_ctrl(index, h2(hash))
+    }
+
     /// Sets a control byte, and possibly also the replicated control byte at
     /// the end of the array.
     #[inline]
@@ -1577,7 +1584,7 @@ impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
                 // - we know there is enough space in the table.
                 // - all elements are unique.
                 let index = guard_self.table.find_insert_slot(hash);
-                guard_self.table.set_ctrl(index, h2(hash));
+                guard_self.table.set_ctrl_h2(index, hash);
                 guard_self.bucket(index).write(item);
             }
         }
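
For readers wondering why the doc comments on `set_ctrl` and the new `set_ctrl_h2` mention a "replicated control byte at the end of the array": the first `Group::WIDTH` control bytes are mirrored past the last bucket so that group-wide loads near the end of the table can wrap around without branching. The sketch below reproduces only that index arithmetic under stated assumptions; the names and the hard-coded group width are illustrative, not items from the crate.

```rust
// Illustrative sketch of the control-byte mirroring that set_ctrl /
// set_ctrl_h2 maintain. GROUP_WIDTH and ctrl_indices are assumptions for
// this example, not the crate's actual items.
const GROUP_WIDTH: usize = 16; // e.g. the SSE2 group width

/// Returns both control-byte indices that must be kept in sync for a table
/// whose capacity is a power of two and `bucket_mask == capacity - 1`.
fn ctrl_indices(index: usize, bucket_mask: usize) -> (usize, usize) {
    // If index >= GROUP_WIDTH the mirror resolves to `index` itself;
    // otherwise it lands in the replicated region past the last bucket.
    let mirror = (index.wrapping_sub(GROUP_WIDTH) & bucket_mask) + GROUP_WIDTH;
    (index, mirror)
}

fn main() {
    let bucket_mask = 31; // 32 buckets
    assert_eq!(ctrl_indices(3, bucket_mask), (3, 35)); // low index: also written past the end
    assert_eq!(ctrl_indices(20, bucket_mask), (20, 20)); // high index: written only once
}
```

As the diff shows, `set_ctrl_h2` simply forwards `h2(hash)` to `set_ctrl`, so every call site that derives a control byte from a hash now goes through one helper instead of repeating the `h2(hash)` conversion.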