@@ -796,7 +796,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
     {
         let index = self.bucket_index(&bucket);
         let old_ctrl = *self.table.ctrl(index);
-        debug_assert!(is_full(old_ctrl));
+        debug_assert!(self.is_full(index));
         let old_growth_left = self.table.growth_left;
         let item = self.remove(bucket);
         if let Some(new_item) = f(item) {
@@ -928,6 +928,12 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
         self.table.bucket_mask + 1
     }

+    /// Checks whether the bucket at `index` is full.
+    #[inline]
+    pub fn is_full(&self, index: usize) -> bool {
+        self.table.is_full(index)
+    }
+
     /// Returns an iterator over every element in the table. It is up to
     /// the caller to ensure that the `RawTable` outlives the `RawIter`.
     /// Because we cannot make the `next` method unsafe on the `RawIter`
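A minimal usage sketch of the new public method (assuming hashbrown built with the `raw` feature and this change applied; the identity hasher and the value are placeholders for illustration only):

    use hashbrown::raw::RawTable;

    fn main() {
        let mut table: RawTable<u64> = RawTable::new();
        let hasher = |v: &u64| *v; // toy hasher, illustration only
        let bucket = table.insert(hasher(&42), 42, hasher);
        // bucket_index is part of the unsafe raw API.
        let index = unsafe { table.bucket_index(&bucket) };
        assert!(table.is_full(index));
    }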
@@ -1148,7 +1154,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
                     // table. This second scan is guaranteed to find an empty
                     // slot (due to the load factor) before hitting the trailing
                     // control bytes (containing EMPTY).
-                    if unlikely(is_full(*self.ctrl(result))) {
+                    if unlikely(self.is_full(result)) {
                         debug_assert!(self.bucket_mask < Group::WIDTH);
                         debug_assert_ne!(probe_seq.pos, 0);
                         return Group::load_aligned(self.ctrl(0))
@@ -1329,6 +1335,13 @@ impl<A: Allocator + Clone> RawTableInner<A> {
         self.bucket_mask + 1
     }

+    /// Checks whether the bucket at `index` is full.
+    #[inline]
+    fn is_full(&self, index: usize) -> bool {
+        debug_assert!(index < self.buckets());
+        is_full(unsafe { *self.ctrl(index) })
+    }
+
     #[inline]
     fn num_ctrl_bytes(&self) -> usize {
         self.bucket_mask + 1 + Group::WIDTH
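The inner method wraps the existing free function `is_full` on the raw control byte and adds a bounds check in debug builds. For reference, a standalone sketch of the control-byte convention it relies on (mirroring hashbrown's scheme, where only full slots have the high bit clear):

    /// Sketch of hashbrown's control-byte constants.
    const EMPTY: u8 = 0b1111_1111; // slot was never used
    const DELETED: u8 = 0b1000_0000; // tombstone left by an erase

    /// A full slot stores the top 7 bits of the hash, so its high bit is 0.
    fn is_full(ctrl: u8) -> bool {
        ctrl & 0x80 == 0
    }

    fn main() {
        assert!(!is_full(EMPTY));
        assert!(!is_full(DELETED));
        assert!(is_full(0b0101_1010)); // any byte with the high bit clear
    }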
@@ -1427,7 +1440,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {

         // Copy all elements to the new table.
         for i in 0..self.buckets() {
-            if !is_full(*self.ctrl(i)) {
+            if !self.is_full(i) {
                 continue;
             }
@@ -1572,7 +1585,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {

     #[inline]
     unsafe fn erase(&mut self, index: usize) {
-        debug_assert!(is_full(*self.ctrl(index)));
+        debug_assert!(self.is_full(index));
         let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
         let empty_before = Group::load(self.ctrl(index_before)).match_empty();
         let empty_after = Group::load(self.ctrl(index)).match_empty();
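The two `match_empty` group loads feed the tombstone decision just below this hunk: if the contiguous run of non-empty control bytes around `index` spans at least `Group::WIDTH`, a probe sequence could have walked past this group, so the slot must become DELETED rather than EMPTY. A sketch of that decision, following hashbrown's implementation (these lines fall outside the hunk shown, so treat them as an approximation rather than the verbatim source):

    let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
        DELETED
    } else {
        self.growth_left += 1;
        EMPTY
    };
    self.set_ctrl(index, ctrl);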
@@ -1722,7 +1735,7 @@ impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
         let mut guard = guard((0, &mut *self), |(index, self_)| {
             if mem::needs_drop::<T>() && !self_.is_empty() {
                 for i in 0..=*index {
-                    if is_full(*self_.table.ctrl(i)) {
+                    if self_.is_full(i) {
                         self_.bucket(i).drop();
                     }
                 }