@@ -1372,6 +1372,20 @@ impl<T, A: Allocator> RawTable<T, A> {
1372
1372
self . table . iter ( )
1373
1373
}
1374
1374
1375
    /// Returns an iterator over occupied buckets that could match a given hash.
    ///
    /// `RawTable` only stores 7 bits of the hash value, so this iterator may
    /// return items that have a hash value different than the one provided. You
    /// should always validate the returned values before using them.
    ///
    /// # Safety
    ///
    /// It is up to the caller to ensure that the `RawTable` outlives the
    /// `RawIterHash`. Because we cannot make the `next` method unsafe on the
    /// `RawIterHash` struct, we have to make the `iter_hash` method unsafe.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<T> {
        RawIterHash::new(self, hash)
    }
1375
1389
/// Returns an iterator which removes all elements from the table without
1376
1390
/// freeing the memory.
1377
1391
#[ cfg_attr( feature = "inline-more" , inline) ]
@@ -3996,6 +4010,122 @@ impl<T, A: Allocator> Iterator for RawDrain<'_, T, A> {
3996
4010
// Marker impls: `RawDrain` reports an exact remaining length through its
// `size_hint`, and continues to return `None` once exhausted.
impl<T, A: Allocator> ExactSizeIterator for RawDrain<'_, T, A> {}
impl<T, A: Allocator> FusedIterator for RawDrain<'_, T, A> {}
3998
4012
4013
/// Iterator over occupied buckets that could match a given hash.
///
/// `RawTable` only stores 7 bits of the hash value, so this iterator may return
/// items that have a hash value different than the one provided. You should
/// always validate the returned values before using them.
///
/// For maximum flexibility this iterator is not bound by a lifetime, but you
/// must observe several rules when using it:
/// - You must not free the hash table while iterating (including via growing/shrinking).
/// - It is fine to erase a bucket that has been yielded by the iterator.
/// - Erasing a bucket that has not yet been yielded by the iterator may still
///   result in the iterator yielding that bucket.
/// - It is unspecified whether an element inserted after the iterator was
///   created will be yielded by that iterator.
/// - The order in which the iterator yields buckets is unspecified and may
///   change in the future.
pub struct RawIterHash<T> {
    // Type-erased probing state; `T` only matters for the `Bucket<T>` values
    // materialized in `Iterator::next`.
    inner: RawIterHashInner,
    // Records the element type without owning a `T`.
    _marker: PhantomData<T>,
}
4033
+
4034
/// Type-erased state for probing a table's control bytes for a hash match.
struct RawIterHashInner {
    // See `RawTableInner`'s corresponding fields for details.
    // We can't store a `*const RawTableInner` as it would get
    // invalidated by the user calling `&mut` methods on `RawTable`.
    bucket_mask: usize,
    ctrl: NonNull<u8>,

    // The top 7 bits of the hash.
    h2_hash: u8,

    // The sequence of groups to probe in the search.
    probe_seq: ProbeSeq,

    // The control-byte group currently being scanned.
    group: Group,

    // The elements within the group with a matching h2-hash.
    bitmask: BitMaskIter,
}
4052
+
4053
+ impl < T > RawIterHash < T > {
4054
+ #[ cfg_attr( feature = "inline-more" , inline) ]
4055
+ unsafe fn new < A : Allocator > ( table : & RawTable < T , A > , hash : u64 ) -> Self {
4056
+ RawIterHash {
4057
+ inner : RawIterHashInner :: new ( & table. table , hash) ,
4058
+ _marker : PhantomData ,
4059
+ }
4060
+ }
4061
+ }
4062
+
4063
+ impl RawIterHashInner {
4064
+ #[ cfg_attr( feature = "inline-more" , inline) ]
4065
+ unsafe fn new ( table : & RawTableInner , hash : u64 ) -> Self {
4066
+ let h2_hash = h2 ( hash) ;
4067
+ let probe_seq = table. probe_seq ( hash) ;
4068
+ let group = Group :: load ( table. ctrl ( probe_seq. pos ) ) ;
4069
+ let bitmask = group. match_byte ( h2_hash) . into_iter ( ) ;
4070
+
4071
+ RawIterHashInner {
4072
+ bucket_mask : table. bucket_mask ,
4073
+ ctrl : table. ctrl ,
4074
+ h2_hash,
4075
+ probe_seq,
4076
+ group,
4077
+ bitmask,
4078
+ }
4079
+ }
4080
+ }
4081
+
4082
+ impl < T > Iterator for RawIterHash < T > {
4083
+ type Item = Bucket < T > ;
4084
+
4085
+ fn next ( & mut self ) -> Option < Bucket < T > > {
4086
+ unsafe {
4087
+ match self . inner . next ( ) {
4088
+ Some ( index) => {
4089
+ // Can't use `RawTable::bucket` here as we don't have
4090
+ // an actual `RawTable` reference to use.
4091
+ debug_assert ! ( index <= self . inner. bucket_mask) ;
4092
+ let bucket = Bucket :: from_base_index ( self . inner . ctrl . cast ( ) , index) ;
4093
+ Some ( bucket)
4094
+ }
4095
+ None => None ,
4096
+ }
4097
+ }
4098
+ }
4099
+ }
4100
+
4101
impl Iterator for RawIterHashInner {
    type Item = usize;

    fn next(&mut self) -> Option<Self::Item> {
        unsafe {
            loop {
                // First drain the candidates found in the current group: each
                // set bit is a slot whose control byte matched `h2_hash`.
                if let Some(bit) = self.bitmask.next() {
                    let index = (self.probe_seq.pos + bit) & self.bucket_mask;
                    return Some(index);
                }
                // An EMPTY slot in the group terminates the probe chain: an
                // insertion with this hash could never have been placed past it.
                if likely(self.group.match_empty().any_bit_set()) {
                    return None;
                }
                // Advance to the next group in the probe sequence and refresh
                // the group/bitmask state for the next loop iteration.
                self.probe_seq.move_next(self.bucket_mask);

                // Can't use `RawTableInner::ctrl` here as we don't have
                // an actual `RawTableInner` reference to use.
                let index = self.probe_seq.pos;
                debug_assert!(index < self.bucket_mask + 1 + Group::WIDTH);
                let group_ctrl = self.ctrl.as_ptr().add(index);

                self.group = Group::load(group_ctrl);
                self.bitmask = self.group.match_byte(self.h2_hash).into_iter();
            }
        }
    }
}
4128
+
3999
4129
pub ( crate ) struct RawExtractIf < ' a , T , A : Allocator > {
4000
4130
pub iter : RawIter < T > ,
4001
4131
pub table : & ' a mut RawTable < T , A > ,
0 commit comments