@@ -796,7 +796,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
     {
         let index = self.bucket_index(&bucket);
         let old_ctrl = *self.table.ctrl(index);
-        debug_assert!(self.is_full(index));
+        debug_assert!(self.is_bucket_full(index));
         let old_growth_left = self.table.growth_left;
         let item = self.remove(bucket);
         if let Some(new_item) = f(item) {
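For context, this hunk sits in a remove-and-reinsert helper: the entry at `bucket` is taken out of the table, handed to the closure `f`, and put back only if the closure returns `Some`. A minimal sketch of that pattern over a plain `Option<T>` slot (a hypothetical stand-in, not the diffed `RawTable` API):

```rust
// Remove/transform/reinsert over an Option<T> slot (hypothetical helper
// illustrating the pattern; the real code operates on raw buckets).
fn replace_slot_with<T, F>(slot: &mut Option<T>, f: F) -> bool
where
    F: FnOnce(T) -> Option<T>,
{
    match slot.take().map(f) {
        Some(Some(new_item)) => {
            *slot = Some(new_item); // the closure kept the entry
            true
        }
        _ => false, // slot was empty, or the closure dropped the entry
    }
}
```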
@@ -929,9 +929,13 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
     }

     /// Checks whether the bucket at `index` is full.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure `index` is less than the number of buckets.
     #[inline]
-    pub fn is_full(&self, index: usize) -> bool {
-        self.table.is_full(index)
+    pub unsafe fn is_bucket_full(&self, index: usize) -> bool {
+        self.table.is_bucket_full(index)
     }

     /// Returns an iterator over every element in the table. It is up to
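Since `is_bucket_full` is now `unsafe`, callers take on the bounds obligation spelled out in the new `# Safety` section. A minimal caller sketch, assuming hashbrown's `raw` feature and this patched API (`count_full` is a hypothetical function, not part of the diff):

```rust
use hashbrown::raw::RawTable;

// Count occupied buckets, upholding the documented safety contract.
fn count_full<T>(table: &RawTable<T>) -> usize {
    let mut n = 0;
    for i in 0..table.buckets() {
        // SAFETY: `i` is always less than `table.buckets()`, which is
        // exactly what `is_bucket_full` requires of its caller.
        if unsafe { table.is_bucket_full(i) } {
            n += 1;
        }
    }
    n
}
```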
@@ -1154,7 +1158,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
                     // table. This second scan is guaranteed to find an empty
                     // slot (due to the load factor) before hitting the trailing
                     // control bytes (containing EMPTY).
-                    if unlikely(self.is_full(result)) {
+                    if unlikely(self.is_bucket_full(result)) {
                         debug_assert!(self.bucket_mask < Group::WIDTH);
                         debug_assert_ne!(probe_seq.pos, 0);
                         return Group::load_aligned(self.ctrl(0))
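The comment block in this hunk carries the key reasoning: in tables smaller than `Group::WIDTH`, the control array is padded with trailing `EMPTY` bytes, so a masked group match can point at a bucket that is actually occupied, and the code rescans from index 0 instead. A toy model of that situation, with plain bytes standing in for SIMD group loads (constants and helper are illustrative only):

```rust
// Toy model: a 4-bucket table whose control area is padded to a group
// width of 8 with trailing EMPTY bytes. A match on a trailing byte, once
// masked by bucket_mask, can land on a full bucket; scanning from the
// start then recovers a genuinely empty slot, because the load factor
// keeps at least one bucket empty.
const EMPTY: u8 = 0b1000_0000;

fn first_empty(ctrl: &[u8], buckets: usize) -> Option<usize> {
    (0..buckets).find(|&i| ctrl[i] == EMPTY)
}

fn main() {
    // Buckets 0, 2, 3 are full (arbitrary hash bytes); bucket 1 is empty.
    let ctrl = [0x11, EMPTY, 0x22, 0x33, EMPTY, EMPTY, EMPTY, EMPTY];
    // A match on trailing byte 7, masked by bucket_mask = 3, would point
    // at full bucket 3; the rescan from 0 finds the real empty slot.
    assert_eq!(first_empty(&ctrl, 4), Some(1));
}
```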
@@ -1336,10 +1340,14 @@ impl<A: Allocator + Clone> RawTableInner<A> {
     }

     /// Checks whether the bucket at `index` is full.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure `index` is less than the number of buckets.
     #[inline]
-    fn is_full(&self, index: usize) -> bool {
+    unsafe fn is_bucket_full(&self, index: usize) -> bool {
         debug_assert!(index < self.buckets());
-        is_full(unsafe { *self.ctrl(index) })
+        is_full(*self.ctrl(index))
     }

     #[inline]
@@ -1440,7 +1448,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {

         // Copy all elements to the new table.
         for i in 0..self.buckets() {
-            if !self.is_full(i) {
+            if !self.is_bucket_full(i) {
                 continue;
             }

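A small sketch of what this loop does during a resize, with an `Option`-based table standing in for `RawTableInner` (illustrative only; the real code re-places each item by hash, which is elided here): empty and deleted slots are skipped, and only full buckets are carried over to the new allocation.

```rust
// Resize copy sketch: move only occupied slots into the new storage.
fn copy_full<T>(old: &mut [Option<T>], new: &mut Vec<Option<T>>) {
    for slot in old.iter_mut() {
        if let Some(item) = slot.take() {
            new.push(Some(item)); // real code reinserts by hash
        }
    }
}
```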
@@ -1585,7 +1593,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {

     #[inline]
     unsafe fn erase(&mut self, index: usize) {
-        debug_assert!(self.is_full(index));
+        debug_assert!(self.is_bucket_full(index));
         let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
         let empty_before = Group::load(self.ctrl(index_before)).match_empty();
         let empty_after = Group::load(self.ctrl(index)).match_empty();
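The two group loads above feed the erase decision that this hunk cuts off before: roughly, if the non-`EMPTY` runs on both sides of `index` together span a whole group, some probe sequence may have walked straight through this slot, so it must be tombstoned (`DELETED`) rather than reset to `EMPTY`. A toy bitmask version of that test, assuming a group width of 8 and one bit per slot (bit set = slot empty):

```rust
// Toy model of the EMPTY-vs-DELETED decision (illustrative; the real code
// works on SIMD group bitmasks, not u8). `empty_before` covers the group
// ending just before `index`, `empty_after` the group starting at `index`.
fn needs_tombstone(empty_before: u8, empty_after: u8) -> bool {
    // Non-empty run just before `index` plus non-empty run from `index`
    // onward: if together they cover a full group, a probe chain may pass
    // through this slot, so it must become DELETED instead of EMPTY.
    empty_before.leading_zeros() + empty_after.trailing_zeros() >= 8
}
```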
@@ -1735,7 +1743,7 @@ impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
             let mut guard = guard((0, &mut *self), |(index, self_)| {
                 if mem::needs_drop::<T>() && !self_.is_empty() {
                     for i in 0..=*index {
-                        if self_.is_full(i) {
+                        if self_.is_bucket_full(i) {
                             self_.bucket(i).drop();
                         }
                     }
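The `guard` here is the scopeguard pattern for panic safety during clone: the tuple tracks how many buckets have been processed, so the drop handler can clean up a partial clone if `T::clone` panics midway. A minimal sketch of the same pattern with the `scopeguard` crate and a `Vec` standing in for the table (`clone_into` is a hypothetical function, not part of the diff):

```rust
use scopeguard::{guard, ScopeGuard};

// Clone `src` into `dst`, undoing a partial clone if any `clone()` panics.
fn clone_into(dst: &mut Vec<String>, src: &[String]) {
    dst.clear();
    let mut g = guard((0usize, &mut *dst), |(done, dst)| {
        // Runs when the guard is dropped (e.g. on unwind): keep only the
        // entries that were fully cloned.
        dst.truncate(done);
    });
    for s in src {
        g.1.push(s.clone()); // `clone()` may panic
        g.0 += 1; // advance only after the entry is safely in place
    }
    // Success: defuse the guard so the cleanup closure never runs.
    let _ = ScopeGuard::into_inner(g);
}
```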