@@ -1002,19 +1002,56 @@ impl<T> RawTable<T> {
    }

    /// Returns an iterator which removes all elements from the table without
-    /// freeing the memory. It is up to the caller to ensure that the `RawTable`
-    /// outlives the `RawDrain`. Because we cannot make the `next` method unsafe
-    /// on the `RawDrain`, we have to make the `drain` method unsafe.
+    /// freeing the memory.
+    ///
+    /// It is up to the caller to ensure that the `RawTable` outlives the `RawDrain`.
+    /// Because we cannot make the `next` method unsafe on the `RawDrain`,
+    /// we have to make the `drain` method unsafe.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn drain(&mut self) -> RawDrain<'_, T> {
+        let iter = self.iter();
+        self.drain_iter_from(iter)
+    }
+
+    /// Returns an iterator which removes all elements from the table without
+    /// freeing the memory.
+    ///
+    /// It is up to the caller to ensure that the `RawTable` outlives the `RawDrain`.
+    /// Because we cannot make the `next` method unsafe on the `RawDrain`,
+    /// we have to make the `drain_iter_from` method unsafe.
+    ///
+    /// Iteration starts at the provided iterator's current location.
+    /// You must ensure that the iterator covers all items that remain in the table.
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T> {
+        debug_assert_eq!(iter.len(), self.len());
        RawDrain {
-            iter: self.iter(),
+            iter,
            table: ManuallyDrop::new(mem::replace(self, Self::new())),
            orig_table: NonNull::from(self),
            marker: PhantomData,
        }
    }

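
// --- Illustration (editor's sketch, not part of this diff) ---
// A minimal use of `drain_iter_from`, assuming the `raw` feature and a table
// of `u64` values; the helper name is hypothetical. `drain` is now just this
// pattern with the table's own fresh iterator.
fn drain_all(table: &mut RawTable<u64>) -> Vec<u64> {
    unsafe {
        // Safety: `iter` is taken from `table` itself, so it covers every
        // remaining item, and `table` outlives the returned `RawDrain`.
        let iter = table.iter();
        table.drain_iter_from(iter).collect()
    }
}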
+    /// Returns an iterator which consumes all elements from the table.
+    ///
+    /// It is up to the caller to ensure that the `RawTable` outlives the `RawIntoIter`.
+    /// Because we cannot make the `next` method unsafe on the `RawIntoIter`,
+    /// we have to make the `into_iter_from` method unsafe.
+    ///
+    /// Iteration starts at the provided iterator's current location.
+    /// You must ensure that the iterator covers all items that remain in the table.
+    pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T> {
+        debug_assert_eq!(iter.len(), self.len());
+
+        let alloc = self.into_alloc();
+        RawIntoIter {
+            iter,
+            alloc,
+            marker: PhantomData,
+        }
+    }
+
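
// --- Illustration (editor's sketch, not part of this diff) ---
// Consuming a table by value from its own full-range iterator; the
// `IntoIterator` impl in the next hunk is reduced to exactly this pattern.
// The helper name is hypothetical.
fn first_value(table: RawTable<u64>) -> Option<u64> {
    unsafe {
        // Safety: `iter` covers every item still in `table`.
        let iter = table.iter();
        table.into_iter_from(iter).next()
    }
}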

    /// Converts the table into a raw allocation. The contents of the table
    /// should be dropped using a `RawIter` before freeing the allocation.
    #[cfg_attr(feature = "inline-more", inline)]
@@ -1250,12 +1287,7 @@ impl<T> IntoIterator for RawTable<T> {
    fn into_iter(self) -> RawIntoIter<T> {
        unsafe {
            let iter = self.iter();
-            let alloc = self.into_alloc();
-            RawIntoIter {
-                iter,
-                alloc,
-                marker: PhantomData,
-            }
+            self.into_iter_from(iter)
        }
    }
}
@@ -1408,6 +1440,124 @@ pub struct RawIter<T> {
    items: usize,
}

+impl<T> RawIter<T> {
+    /// Refresh the iterator so that it reflects a removal from the given bucket.
+    ///
+    /// For the iterator to remain valid, this method must be called once
+    /// for each removed bucket before `next` is called again.
+    ///
+    /// This method should be called _before_ the removal is made. It is not necessary to call this
+    /// method if you are removing an item that this iterator yielded in the past.
+    #[cfg(feature = "raw")]
+    pub fn reflect_remove(&mut self, b: &Bucket<T>) {
+        self.reflect_toggle_full(b, false);
+    }
+
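
// --- Illustration (editor's sketch, not part of this diff) ---
// Keeping a `RawIter` valid across a removal it did not yield itself.
// Assumes the `raw` feature and the raw API's `find` and `erase_no_drop`;
// the helper name and the key `42` are hypothetical stand-ins.
unsafe fn remove_during_scan(table: &mut RawTable<u64>, iter: &mut RawIter<u64>, hash: u64) {
    if let Some(bucket) = table.find(hash, |v| *v == 42) {
        // Once per removed bucket, *before* the removal is made, or the
        // iterator's cached group and item count go stale.
        iter.reflect_remove(&bucket);
        table.erase_no_drop(&bucket);
    }
}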
+    /// Refresh the iterator so that it reflects an insertion into the given bucket.
+    ///
+    /// For the iterator to remain valid, this method must be called once
+    /// for each insert before `next` is called again.
+    ///
+    /// This method does not guarantee that an insertion of a bucket with a greater
+    /// index than the last one yielded will be reflected in the iterator.
+    ///
+    /// This method should be called _after_ the given insert is made.
+    #[cfg(feature = "raw")]
+    pub fn reflect_insert(&mut self, b: &Bucket<T>) {
+        self.reflect_toggle_full(b, true);
+    }
+
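
// --- Illustration (editor's sketch, not part of this diff) ---
// Reflecting an insertion so an in-flight iterator stays usable. Assumes the
// `raw` feature, an `insert(hash, value, hasher)`-shaped raw API, and spare
// capacity: an insert that reallocates moves every bucket and invalidates the
// iterator outright, which reflecting cannot repair. The helper name is
// hypothetical.
unsafe fn insert_during_scan(table: &mut RawTable<u64>, iter: &mut RawIter<u64>, hash: u64, v: u64) {
    let bucket = table.insert(hash, v, |_| hash);
    // Called *after* the insert is made, per the doc comment above.
    iter.reflect_insert(&bucket);
}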
+    /// Refresh the iterator so that it reflects a change to the state of the given bucket.
+    #[cfg(feature = "raw")]
+    fn reflect_toggle_full(&mut self, b: &Bucket<T>, is_insert: bool) {
+        unsafe {
+            if b.as_ptr() > self.iter.data.as_ptr() {
+                // The iterator has already passed the bucket's group.
+                // So the toggle isn't relevant to this iterator.
+                return;
+            }
+
+            if self.iter.next_ctrl < self.iter.end
+                && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr()
+            {
+                // The iterator has not yet reached the bucket's group.
+                // We don't need to reload anything, but we do need to adjust the item count.
+
+                if cfg!(debug_assertions) {
+                    // Double-check that the user isn't lying to us by checking the bucket state.
+                    // To do that, we need to find its control byte. We know that self.iter.data is
+                    // at self.iter.next_ctrl - Group::WIDTH, so we work from there:
+                    let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr());
+                    let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset);
+                    // This method should be called _before_ a removal, or _after_ an insert,
+                    // so in both cases the ctrl byte should indicate that the bucket is full.
+                    assert!(is_full(*ctrl));
+                }
+
+                if is_insert {
+                    self.items += 1;
+                } else {
+                    self.items -= 1;
+                }
+
+                return;
+            }
+
+            // The iterator is at the bucket group that the toggled bucket is in.
+            // We need to do two things:
+            //
+            //  - Determine if the iterator already yielded the toggled bucket.
+            //    If it did, we're done.
+            //  - Otherwise, update the iterator's cached group so that it won't
+            //    yield a to-be-removed bucket, or _will_ yield a to-be-added bucket.
+            //    We'll also need to update the item count accordingly.
+            if let Some(index) = self.iter.current_group.lowest_set_bit() {
+                let next_bucket = self.iter.data.next_n(index);
+                if b.as_ptr() > next_bucket.as_ptr() {
+                    // The toggled bucket is "before" the bucket the iterator would yield next. We
+                    // therefore don't need to do anything --- the iterator has already passed the
+                    // bucket in question.
+                    //
+                    // The item count must already be correct, since a removal or insert "prior" to
+                    // the iterator's position wouldn't affect the item count.
+                } else {
+                    // The removed bucket is an upcoming bucket. We need to make sure it does _not_
+                    // get yielded, and also that it's no longer included in the item count.
+                    //
+                    // NOTE: We can't just reload the group here, both since that might reflect
+                    // inserts we've already passed, and because that might inadvertently unset the
+                    // bits for _other_ removals. If we do that, we'd have to also decrement the
+                    // item count for those other bits that we unset. But the presumably subsequent
+                    // call to reflect for those buckets might _also_ decrement the item count.
+                    // Instead, we _just_ flip the bit for the particular bucket the caller asked
+                    // us to reflect.
+                    let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr());
+                    let was_full = self.iter.current_group.flip(our_bit);
+                    debug_assert_ne!(was_full, is_insert);
+
+                    if is_insert {
+                        self.items += 1;
+                    } else {
+                        self.items -= 1;
+                    }
+
+                    if cfg!(debug_assertions) {
+                        if b.as_ptr() == next_bucket.as_ptr() {
+                            // The removed bucket should no longer be next.
+                            debug_assert_ne!(self.iter.current_group.lowest_set_bit(), Some(index));
+                        } else {
+                            // We should not have changed what bucket comes next.
+                            debug_assert_eq!(self.iter.current_group.lowest_set_bit(), Some(index));
+                        }
+                    }
+                }
+            } else {
+                // We must have already iterated past the removed item.
+            }
+        }
+    }
+}
+
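
// --- Illustration (editor's sketch, not part of this diff) ---
// Why `reflect_toggle_full` flips a single cached bit instead of re-reading
// the whole control group, with a plain bitmask standing in for
// `current_group`:
fn flip_one_bit_sketch() {
    let mut cached: u8 = 0b0110; // buckets 1 and 2 still to be yielded
    cached ^= 1 << 2; // reflect the removal of bucket 2: flip only its bit
    assert_eq!(cached, 0b0010); // bucket 1 stays pending; the count drops exactly once
    // Re-reading the live group could also clear bits for buckets removed
    // earlier, whose item-count decrements would then be applied twice.
}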
impl<T> Clone for RawIter<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn clone(&self) -> Self {