@@ -796,7 +796,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
     {
         let index = self.bucket_index(&bucket);
         let old_ctrl = *self.table.ctrl(index);
-        debug_assert!(is_full(old_ctrl));
+        debug_assert!(self.is_bucket_full(index));
        let old_growth_left = self.table.growth_left;
        let item = self.remove(bucket);
        if let Some(new_item) = f(item) {
@@ -928,6 +928,16 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
        self.table.bucket_mask + 1
    }

+    /// Checks whether the bucket at `index` is full.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure `index` is less than the number of buckets.
+    #[inline]
+    pub unsafe fn is_bucket_full(&self, index: usize) -> bool {
+        self.table.is_bucket_full(index)
+    }
+
    /// Returns an iterator over every element in the table. It is up to
    /// the caller to ensure that the `RawTable` outlives the `RawIter`.
    /// Because we cannot make the `next` method unsafe on the `RawIter`
@@ -1148,7 +1158,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
            // table. This second scan is guaranteed to find an empty
            // slot (due to the load factor) before hitting the trailing
            // control bytes (containing EMPTY).
-            if unlikely(is_full(*self.ctrl(result))) {
+            if unlikely(self.is_bucket_full(result)) {
                debug_assert!(self.bucket_mask < Group::WIDTH);
                debug_assert_ne!(probe_seq.pos, 0);
                return Group::load_aligned(self.ctrl(0))
@@ -1329,6 +1339,17 @@ impl<A: Allocator + Clone> RawTableInner<A> {
        self.bucket_mask + 1
    }

+    /// Checks whether the bucket at `index` is full.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure `index` is less than the number of buckets.
+    #[inline]
+    unsafe fn is_bucket_full(&self, index: usize) -> bool {
+        debug_assert!(index < self.buckets());
+        is_full(*self.ctrl(index))
+    }
+
    #[inline]
    fn num_ctrl_bytes(&self) -> usize {
        self.bucket_mask + 1 + Group::WIDTH
@@ -1427,7 +1448,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {

        // Copy all elements to the new table.
        for i in 0..self.buckets() {
-            if !is_full(*self.ctrl(i)) {
+            if !self.is_bucket_full(i) {
                continue;
            }

@@ -1573,7 +1594,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {

    #[inline]
    unsafe fn erase(&mut self, index: usize) {
-        debug_assert!(is_full(*self.ctrl(index)));
+        debug_assert!(self.is_bucket_full(index));
        let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
        let empty_before = Group::load(self.ctrl(index_before)).match_empty();
        let empty_after = Group::load(self.ctrl(index)).match_empty();
@@ -1723,7 +1744,7 @@ impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
        let mut guard = guard((0, &mut *self), |(index, self_)| {
            if mem::needs_drop::<T>() && !self_.is_empty() {
                for i in 0..=*index {
-                    if is_full(*self_.table.ctrl(i)) {
+                    if self_.is_bucket_full(i) {
                        self_.bucket(i).drop();
                    }
                }
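
For context, here is a minimal, self-contained sketch of the pattern this diff introduces: call sites stop repeating the raw control-byte read `is_full(*self.ctrl(i))` and instead go through a single `is_bucket_full` helper that pairs the read with a debug-time bounds check. The `MiniTable` type, its field layout, and the simplified control-byte encoding below are illustrative assumptions for this sketch only, not hashbrown's actual internals.

// Sketch only: a toy SwissTable-style control-byte array, not hashbrown itself.

/// Control byte marking an empty slot (top bit set).
const EMPTY: u8 = 0b1000_0000;

/// A full slot stores a 7-bit hash fragment, so its top bit is clear.
fn is_full(ctrl: u8) -> bool {
    ctrl & 0x80 == 0
}

struct MiniTable {
    /// One control byte per bucket (trailing group padding omitted for brevity).
    ctrl: Vec<u8>,
}

impl MiniTable {
    fn buckets(&self) -> usize {
        self.ctrl.len()
    }

    /// Checks whether the bucket at `index` is full.
    ///
    /// The caller must keep `index` below the number of buckets; the
    /// debug assertion catches violations in debug builds.
    fn is_bucket_full(&self, index: usize) -> bool {
        debug_assert!(index < self.buckets());
        is_full(self.ctrl[index])
    }
}

fn main() {
    let table = MiniTable {
        ctrl: vec![EMPTY, 0x1f, EMPTY, 0x42],
    };
    // Call sites now read as intent ("is this bucket full?") rather than as a
    // raw control-byte inspection, and every call inherits the bounds check.
    let full: Vec<usize> = (0..table.buckets())
        .filter(|&i| table.is_bucket_full(i))
        .collect();
    assert_eq!(full, [1, 3]);
}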