@@ -5,6 +5,7 @@ use core::iter::FusedIterator;
 use core::marker::PhantomData;
 use core::mem;
 use core::mem::ManuallyDrop;
+use core::ops::Range;
 use core::ptr::NonNull;
 use scopeguard::guard;
 
@@ -23,6 +24,17 @@ fn unlikely(b: bool) -> bool {
     b
 }
 
+#[cfg(feature = "nightly")]
+#[inline]
+unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
+    to.offset_from(from) as usize
+}
+#[cfg(not(feature = "nightly"))]
+#[inline]
+unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
+    (to as usize - from as usize) / mem::size_of::<T>()
+}
+
 // Use the SSE2 implementation if possible: it allows us to scan 16 buckets at
 // once instead of 8.
 #[cfg(all(
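For context, a standalone sketch (not part of the patch) of what the stable fallback above computes: the byte distance between two pointers into the same allocation, divided by the element size, which matches the nightly `offset_from` intrinsic for a non-zero-sized `T`.

// Illustrative only: mirrors the stable fallback for pointers into the
// same allocation and a non-zero-sized element type.
fn main() {
    let buf = [10u32, 20, 30, 40];
    let from = buf.as_ptr();
    let to = unsafe { from.add(3) };
    let index = (to as usize - from as usize) / core::mem::size_of::<u32>();
    assert_eq!(index, 3);
}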
@@ -299,16 +311,8 @@ impl<T> RawTable<T> {
 
     /// Returns the index of a bucket from a `Bucket`.
     #[inline]
-    #[cfg(feature = "nightly")]
     unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
-        bucket.ptr.as_ptr().offset_from(self.data.as_ptr()) as usize
-    }
-
-    /// Returns the index of a bucket from a `Bucket`.
-    #[inline]
-    #[cfg(not(feature = "nightly"))]
-    unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
-        (bucket.ptr.as_ptr() as usize - self.data.as_ptr() as usize) / mem::size_of::<T>()
+        offset_from(bucket.ptr.as_ptr(), self.data.as_ptr())
     }
 
     /// Returns a pointer to a control byte.
@@ -704,14 +708,8 @@ impl<T> RawTable<T> {
     /// struct, we have to make the `iter` method unsafe.
     #[inline]
     pub unsafe fn iter(&self) -> RawIter<T> {
-        let current_group = Group::load_aligned(self.ctrl.as_ptr())
-            .match_empty_or_deleted()
-            .invert();
         RawIter {
-            data: self.data.as_ptr(),
-            ctrl: self.ctrl.as_ptr(),
-            current_group,
-            end: self.ctrl(self.bucket_mask),
+            iter: RawIterRange::new(self.ctrl.as_ptr(), self.data.as_ptr(), 0..self.buckets()),
             items: self.items,
         }
     }
@@ -819,33 +817,75 @@ impl<T> IntoIterator for RawTable<T> {
     }
 }
 
-/// Iterator which returns a raw pointer to every full bucket in the table.
-pub struct RawIter<T> {
+/// Iterator over a sub-range of a table. Unlike `RawIter`, this iterator does
+/// not track an item count.
+pub struct RawIterRange<T> {
     // Using *const here for covariance
     data: *const T,
    ctrl: *const u8,
     current_group: BitMask,
     end: *const u8,
-    items: usize,
 }
 
-unsafe impl<T> Send for RawIter<T> where T: Send {}
-unsafe impl<T> Sync for RawIter<T> where T: Sync {}
+impl<T> RawIterRange<T> {
+    /// Returns a `RawIterRange` covering a subset of a table.
+    ///
+    /// The start offset must be aligned to the group width.
+    #[inline]
+    unsafe fn new(
+        ctrl: *const u8,
+        data: *const T,
+        range: Range<usize>,
+    ) -> RawIterRange<T> {
+        debug_assert_eq!(range.start % Group::WIDTH, 0);
+        let ctrl = ctrl.add(range.start);
+        let data = data.add(range.start);
+        let end = ctrl.add(range.end);
+        let current_group = Group::load_aligned(ctrl).match_empty_or_deleted().invert();
+        RawIterRange {
+            data,
+            ctrl,
+            current_group,
+            end,
+        }
+    }
 
-impl<T> Clone for RawIter<T> {
+    /// Splits a `RawIterRange` into two halves.
+    ///
+    /// This returns `None` for the second half if the total range is no
+    /// larger than the group width.
+    #[inline]
+    #[cfg(feature = "rayon")]
+    pub unsafe fn split(mut self) -> (RawIterRange<T>, Option<RawIterRange<T>>) {
+        let len = offset_from(self.end, self.ctrl);
+        debug_assert!(len.is_power_of_two());
+        if len <= Group::WIDTH {
+            (self, None)
+        } else {
+            debug_assert_eq!(len % (Group::WIDTH * 2), 0);
+            let mid = len / 2;
+            let tail = RawIterRange::new(self.ctrl, self.data, mid..len);
+            self.end = self.ctrl.add(mid);
+            (self, Some(tail))
+        }
+    }
+}
+
+unsafe impl<T> Send for RawIterRange<T> where T: Send {}
+unsafe impl<T> Sync for RawIterRange<T> where T: Sync {}
+
+impl<T> Clone for RawIterRange<T> {
     #[inline]
     fn clone(&self) -> Self {
-        RawIter {
+        RawIterRange {
             data: self.data,
             ctrl: self.ctrl,
             current_group: self.current_group,
             end: self.end,
-            items: self.items,
         }
     }
 }
 
-impl<T> Iterator for RawIter<T> {
+impl<T> Iterator for RawIterRange<T> {
     type Item = Bucket<T>;
 
     #[inline]
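To make the halving logic in `split` concrete, here is a simplified model over a plain index range (names such as `split_range` and `GROUP_WIDTH` are illustrative stand-ins, not the crate's API): a power-of-two range is cut in half until a piece fits within a single group, at which point no tail is returned.

// Simplified model of the splitting strategy; not the crate's API.
const GROUP_WIDTH: usize = 16; // stand-in for Group::WIDTH

fn split_range(
    range: core::ops::Range<usize>,
) -> (core::ops::Range<usize>, Option<core::ops::Range<usize>>) {
    let len = range.end - range.start;
    debug_assert!(len.is_power_of_two());
    if len <= GROUP_WIDTH {
        (range, None) // too small to split further
    } else {
        let mid = range.start + len / 2;
        (range.start..mid, Some(mid..range.end))
    }
}

fn main() {
    assert_eq!(split_range(0..64), (0..32, Some(32..64)));
    assert_eq!(split_range(0..16), (0..16, None));
}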
@@ -854,16 +894,11 @@ impl<T> Iterator for RawIter<T> {
         loop {
             if let Some(index) = self.current_group.lowest_set_bit() {
                 self.current_group = self.current_group.remove_lowest_bit();
-                self.items -= 1;
                 return Some(Bucket::from_ptr(self.data.add(index)));
             }
 
             self.ctrl = self.ctrl.add(Group::WIDTH);
             if self.ctrl >= self.end {
-                // We don't check against items == 0 here to allow the
-                // compiler to optimize away the item count entirely if the
-                // iterator length is never queried.
-                debug_assert_eq!(self.items, 0);
                 return None;
             }
 
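The loop above walks a bitmask of full slots within the current group. A minimal sketch of that bit-walking pattern, using a plain `u16` in place of the crate's `BitMask` type:

// Illustrative sketch of the lowest_set_bit / remove_lowest_bit walk.
fn main() {
    let mut mask: u16 = 0b0010_1101; // full slots at indices 0, 2, 3 and 5
    let mut indices = Vec::new();
    while mask != 0 {
        let index = mask.trailing_zeros() as usize; // lowest_set_bit()
        mask &= mask - 1;                           // remove_lowest_bit()
        indices.push(index);
    }
    assert_eq!(indices, [0, 2, 3, 5]);
}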
@@ -875,6 +910,51 @@ impl<T> Iterator for RawIter<T> {
         }
     }
 
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        // We don't have an item count, so just guess based on the range size.
+        (0, Some(unsafe { offset_from(self.end, self.ctrl) }))
+    }
+}
+
+impl<T> FusedIterator for RawIterRange<T> {}
+
+/// Iterator which returns a raw pointer to every full bucket in the table.
+pub struct RawIter<T> {
+    pub iter: RawIterRange<T>,
+    items: usize,
+}
+
+impl<T> Clone for RawIter<T> {
+    #[inline]
+    fn clone(&self) -> Self {
+        RawIter {
+            iter: self.iter.clone(),
+            items: self.items,
+        }
+    }
+}
+
+impl<T> Iterator for RawIter<T> {
+    type Item = Bucket<T>;
+
+    #[inline]
+    fn next(&mut self) -> Option<Bucket<T>> {
+        match self.iter.next() {
+            Some(b) => {
+                self.items -= 1;
+                Some(b)
+            }
+            None => {
+                // We don't check against items == 0 here to allow the
+                // compiler to optimize away the item count entirely if the
+                // iterator length is never queried.
+                debug_assert_eq!(self.items, 0);
+                None
+            }
+        }
+    }
+
     #[inline]
     fn size_hint(&self) -> (usize, Option<usize>) {
         (self.items, Some(self.items))
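The new `RawIter` is a thin counting wrapper: the range iterator finds the buckets, while the wrapper keeps the exact item count so `size_hint` stays precise. A minimal, self-contained sketch of that pattern (the `Counted` type and its names are hypothetical, not part of the patch):

// Hypothetical stand-in for the RawIter-over-RawIterRange pattern.
struct Counted<I> {
    inner: I,
    items: usize,
}

impl<I: Iterator> Iterator for Counted<I> {
    type Item = I::Item;

    fn next(&mut self) -> Option<I::Item> {
        let next = self.inner.next();
        if next.is_some() {
            self.items -= 1;
        } else {
            // Mirrors the debug check above: the count must reach zero exactly
            // when the inner iterator runs out.
            debug_assert_eq!(self.items, 0);
        }
        next
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.items, Some(self.items)) // exact, unlike the inner range's hint
    }
}

fn main() {
    let mut it = Counted { inner: vec![1, 2, 3].into_iter(), items: 3 };
    assert_eq!(it.size_hint(), (3, Some(3)));
    assert_eq!(it.next(), Some(1));
    assert_eq!(it.size_hint(), (2, Some(2)));
}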