@@ -925,6 +925,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
             eq(self.bucket(index).as_ref())
         });
 
+        // Avoid `Option::map` because it bloats LLVM IR.
         match result {
            Some(index) => Some(unsafe { self.bucket(index) }),
            None => None,
@@ -1255,30 +1256,29 @@ impl<A: Allocator + Clone> RawTableInner<A> {
         }
     }
 
-    /// Searches for an element in the table.
+    /// Searches for an element in the table. This uses dynamic dispatch to reduce the amount of
+    /// code generated, but it is eliminated by LLVM optimizations.
     #[inline]
     fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option<usize> {
-        unsafe {
-            let h2_hash = h2(hash);
-            let mut probe_seq = self.probe_seq(hash);
-
-            loop {
-                let group = Group::load(self.ctrl(probe_seq.pos));
+        let h2_hash = h2(hash);
+        let mut probe_seq = self.probe_seq(hash);
 
-                for bit in group.match_byte(h2_hash) {
-                    let index = (probe_seq.pos + bit) & self.bucket_mask;
+        loop {
+            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
 
-                    if likely(eq(index)) {
-                        return Some(index);
-                    }
-                }
+            for bit in group.match_byte(h2_hash) {
+                let index = (probe_seq.pos + bit) & self.bucket_mask;
 
-                if likely(group.match_empty().any_bit_set()) {
-                    return None;
+                if likely(eq(index)) {
+                    return Some(index);
                 }
+            }
 
-                probe_seq.move_next(self.bucket_mask);
+            if likely(group.match_empty().any_bit_set()) {
+                return None;
             }
+
+            probe_seq.move_next(self.bucket_mask);
         }
     }
 
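As a side note, here is a minimal, self-contained sketch (not hashbrown's actual code; the table, names, and hashing are made up for illustration) of the pattern the new doc comment describes: the search loop lives in a non-generic function taking `&mut dyn FnMut(usize) -> bool`, so it is compiled once, while only a thin generic shim is monomorphized per caller, and the indirect call is expected to be optimized away by LLVM as the comment states.

// Hypothetical, simplified table: `slots` holds per-slot hashes; `eq` reports
// whether the entry at a given index is the one the caller is looking for.
fn find_inner(slots: &[u64], needle_hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option<usize> {
    // Only one non-generic copy of this loop ends up in the binary.
    for (index, &slot_hash) in slots.iter().enumerate() {
        if slot_hash == needle_hash && eq(index) {
            return Some(index);
        }
    }
    None
}

// Thin generic wrapper: only this small shim is duplicated per closure type.
fn find<F: FnMut(usize) -> bool>(slots: &[u64], needle_hash: u64, mut eq: F) -> Option<usize> {
    find_inner(slots, needle_hash, &mut eq)
}

fn main() {
    let hashes = [3u64, 7, 7, 9];
    let keys = ["a", "b", "c", "d"];
    // Find the entry whose hash is 7 and whose key is "c".
    assert_eq!(find(&hashes, 7, |i| keys[i] == "c"), Some(2));
}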