@@ -8,6 +8,7 @@ use core::mem::ManuallyDrop;
8
8
use core:: ops:: Range ;
9
9
use core:: ptr:: NonNull ;
10
10
use scopeguard:: guard;
11
+ use CollectionAllocErr ;
11
12
12
13
// Branch prediction hint. This is currently only available on nightly but it
13
14
// consistently improves performance by 10-15%.
@@ -61,6 +62,32 @@ mod bitmask;
61
62
use self :: bitmask:: BitMask ;
62
63
use self :: imp:: Group ;
63
64
65
/// Whether memory allocation errors should return an error or abort.
enum Fallibility {
    /// Allocation failures are reported to the caller via `Err`.
    Fallible,
    /// Allocation failures panic (capacity overflow) or call
    /// `handle_alloc_error` (allocation error).
    Infallible,
}
70
+
71
+ impl Fallibility {
72
+ /// Error to return on capacity overflow.
73
+ #[ inline]
74
+ fn capacity_overflow ( & self ) -> CollectionAllocErr {
75
+ match * self {
76
+ Fallibility :: Fallible => CollectionAllocErr :: CapacityOverflow ,
77
+ Fallibility :: Infallible => panic ! ( "Hash table capacity overflow" ) ,
78
+ }
79
+ }
80
+
81
+ /// Error to return on allocation error.
82
+ #[ inline]
83
+ fn alloc_err ( & self , layout : Layout ) -> CollectionAllocErr {
84
+ match * self {
85
+ Fallibility :: Fallible => CollectionAllocErr :: AllocErr ,
86
+ Fallibility :: Infallible => handle_alloc_error ( layout) ,
87
+ }
88
+ }
89
+ }
90
+
64
91
/// Control byte value for an empty bucket.
65
92
const EMPTY : u8 = 0b11111111 ;
66
93
@@ -129,18 +156,23 @@ impl Iterator for ProbeSeq {
129
156
130
157
/// Returns the number of buckets needed to hold the given number of items,
/// taking the maximum load factor into account.
///
/// Returns `None` if an overflow occurs.
#[inline]
fn capacity_to_buckets(cap: usize) -> Option<usize> {
    // Small tables just need one spare bucket; larger tables require 1/8 of
    // the buckets to be empty (87.5% maximum load).
    //
    // Be careful when modifying this, calculate_layout relies on the
    // overflow check here.
    let adjusted_cap = match cap {
        0..=7 => cap + 1,
        _ => cap.checked_mul(8)? / 7,
    };

    // Any overflows will have been caught by the checked_mul.
    Some(adjusted_cap.next_power_of_two())
}
145
177
146
178
/// Returns the maximum effective capacity for the given bucket mask, taking
@@ -156,6 +188,8 @@ fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
156
188
157
189
/// Returns a `Layout` which describes the allocation required for a hash
/// table, and the offset of the buckets in the allocation.
///
/// Returns `None` if an overflow occurs.
159
193
#[ inline]
160
194
#[ cfg( feature = "nightly" ) ]
161
195
fn calculate_layout < T > ( buckets : usize ) -> Option < ( Layout , usize ) > {
@@ -169,10 +203,10 @@ fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
169
203
// We add `Group::WIDTH` control bytes at the end of the array which
170
204
// replicate the bytes at the start of the array and thus avoids the need to
171
205
// perform bounds-checking while probing.
172
- let ctrl = Layout :: array :: < u8 > ( buckets + Group :: WIDTH )
173
- . ok ( ) ?
174
- . align_to ( Group :: WIDTH )
175
- . ok ( ) ? ;
206
+ //
207
+ // There is no possible overflow here since buckets is a power of two and
208
+ // Group::WIDTH is a small number.
209
+ let ctrl = unsafe { Layout :: from_size_align_unchecked ( buckets + Group :: WIDTH , Group :: WIDTH ) } ;
176
210
177
211
ctrl. extend ( data) . ok ( )
178
212
}
@@ -188,12 +222,11 @@ fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
188
222
let data_align = usize:: max ( mem:: align_of :: < T > ( ) , Group :: WIDTH ) ;
189
223
let data_offset = ( buckets + Group :: WIDTH ) . checked_add ( data_align - 1 ) ? & !( data_align - 1 ) ;
190
224
let len = data_offset. checked_add ( mem:: size_of :: < T > ( ) . checked_mul ( buckets) ?) ?;
191
- unsafe {
192
- Some ( (
193
- Layout :: from_size_align_unchecked ( len, data_align) ,
194
- data_offset,
195
- ) )
196
- }
225
+
226
+ Some ( (
227
+ unsafe { Layout :: from_size_align_unchecked ( len, data_align) } ,
228
+ data_offset,
229
+ ) )
197
230
}
198
231
199
232
/// A reference to a hash table bucket containing a `T`.
@@ -271,28 +304,36 @@ impl<T> RawTable<T> {
271
304
///
272
305
/// The control bytes are left uninitialized.
273
306
#[ inline]
274
- unsafe fn new_uninitialized ( buckets : usize ) -> RawTable < T > {
307
+ unsafe fn new_uninitialized (
308
+ buckets : usize ,
309
+ fallability : Fallibility ,
310
+ ) -> Result < RawTable < T > , CollectionAllocErr > {
275
311
let ( layout, data_offset) =
276
- calculate_layout :: < T > ( buckets) . expect ( "Hash table capacity overflow" ) ;
277
- let ctrl = NonNull :: new ( alloc ( layout) ) . unwrap_or_else ( || handle_alloc_error ( layout) ) ;
312
+ calculate_layout :: < T > ( buckets) . ok_or_else ( || fallability . capacity_overflow ( ) ) ? ;
313
+ let ctrl = NonNull :: new ( alloc ( layout) ) . ok_or_else ( || fallability . alloc_err ( layout) ) ? ;
278
314
let data = NonNull :: new_unchecked ( ctrl. as_ptr ( ) . add ( data_offset) as * mut T ) ;
279
- RawTable {
315
+ Ok ( RawTable {
280
316
data,
281
317
ctrl,
282
318
bucket_mask : buckets - 1 ,
283
319
items : 0 ,
284
320
growth_left : bucket_mask_to_capacity ( buckets - 1 ) ,
285
- }
321
+ } )
286
322
}
287
323
288
- /// Allocates a new hash table with at least enough capacity for inserting
289
- /// the given number of elements without reallocating.
290
- pub fn with_capacity ( capacity : usize ) -> RawTable < T > {
324
+ /// Attempts to allocate a new hash table with at least enough capacity
325
+ /// for inserting the given number of elements without reallocating.
326
+ fn try_with_capacity (
327
+ capacity : usize ,
328
+ fallability : Fallibility ,
329
+ ) -> Result < RawTable < T > , CollectionAllocErr > {
291
330
if capacity == 0 {
292
- RawTable :: new ( )
331
+ Ok ( RawTable :: new ( ) )
293
332
} else {
294
333
unsafe {
295
- let result = RawTable :: new_uninitialized ( capacity_to_buckets ( capacity) ) ;
334
+ let buckets =
335
+ capacity_to_buckets ( capacity) . ok_or_else ( || fallability. capacity_overflow ( ) ) ?;
336
+ let result = RawTable :: new_uninitialized ( buckets, fallability) ?;
296
337
result
297
338
. ctrl ( 0 )
298
339
. write_bytes ( EMPTY , result. buckets ( ) + Group :: WIDTH ) ;
@@ -306,11 +347,18 @@ impl<T> RawTable<T> {
306
347
. write_bytes ( DELETED , Group :: WIDTH - result. buckets ( ) ) ;
307
348
}
308
349
309
- result
350
+ Ok ( result)
310
351
}
311
352
}
312
353
}
313
354
355
+ /// Allocates a new hash table with at least enough capacity for inserting
356
+ /// the given number of elements without reallocating.
357
+ pub fn with_capacity ( capacity : usize ) -> RawTable < T > {
358
+ RawTable :: try_with_capacity ( capacity, Fallibility :: Infallible )
359
+ . unwrap_or_else ( |_| unsafe { hint:: unreachable_unchecked ( ) } )
360
+ }
361
+
314
362
/// Deallocates the table without dropping any entries.
315
363
#[ inline]
316
364
unsafe fn free_buckets ( & mut self ) {
@@ -473,8 +521,9 @@ impl<T> RawTable<T> {
473
521
#[ inline]
474
522
pub fn shrink_to ( & mut self , min_size : usize , hasher : impl Fn ( & T ) -> u64 ) {
475
523
let min_size = usize:: max ( self . items , min_size) ;
476
- if bucket_mask_to_capacity ( self . bucket_mask ) >= min_size * 2 {
477
- self . resize ( min_size, hasher) ;
524
+ if self . bucket_mask != 0 && bucket_mask_to_capacity ( self . bucket_mask ) >= min_size * 2 {
525
+ self . resize ( min_size, hasher, Fallibility :: Infallible )
526
+ . unwrap_or_else ( |_| unsafe { hint:: unreachable_unchecked ( ) } ) ;
478
527
}
479
528
}
480
529
@@ -483,25 +532,47 @@ impl<T> RawTable<T> {
483
532
#[ inline]
484
533
pub fn reserve ( & mut self , additional : usize , hasher : impl Fn ( & T ) -> u64 ) {
485
534
if additional > self . growth_left {
486
- self . reserve_rehash ( additional, hasher) ;
535
+ self . reserve_rehash ( additional, hasher, Fallibility :: Infallible )
536
+ . unwrap_or_else ( |_| unsafe { hint:: unreachable_unchecked ( ) } ) ;
537
+ }
538
+ }
539
+
540
+ /// Tries to ensure that at least `additional` items can be inserted into
541
+ /// the table without reallocation.
542
+ #[ inline]
543
+ pub fn try_reserve (
544
+ & mut self ,
545
+ additional : usize ,
546
+ hasher : impl Fn ( & T ) -> u64 ,
547
+ ) -> Result < ( ) , CollectionAllocErr > {
548
+ if additional > self . growth_left {
549
+ self . reserve_rehash ( additional, hasher, Fallibility :: Fallible )
550
+ } else {
551
+ Ok ( ( ) )
487
552
}
488
553
}
489
554
490
- /// Out-of-line slow path for `reserve`.
555
+ /// Out-of-line slow path for `reserve` and `try_reserve` .
491
556
#[ cold]
492
557
#[ inline( never) ]
493
- fn reserve_rehash ( & mut self , additional : usize , hasher : impl Fn ( & T ) -> u64 ) {
558
+ fn reserve_rehash (
559
+ & mut self ,
560
+ additional : usize ,
561
+ hasher : impl Fn ( & T ) -> u64 ,
562
+ fallability : Fallibility ,
563
+ ) -> Result < ( ) , CollectionAllocErr > {
494
564
let new_items = self
495
565
. items
496
566
. checked_add ( additional)
497
- . expect ( "Hash table capacity overflow" ) ;
567
+ . ok_or_else ( || fallability . capacity_overflow ( ) ) ? ;
498
568
499
569
// Rehash in-place without re-allocating if we have plenty of spare
500
570
// capacity that is locked up due to DELETED entries.
501
571
if new_items < bucket_mask_to_capacity ( self . bucket_mask ) / 2 {
502
572
self . rehash_in_place ( hasher) ;
573
+ Ok ( ( ) )
503
574
} else {
504
- self . resize ( new_items, hasher) ;
575
+ self . resize ( new_items, hasher, fallability )
505
576
}
506
577
}
507
578
@@ -607,12 +678,17 @@ impl<T> RawTable<T> {
607
678
608
679
/// Allocates a new table of a different size and moves the contents of the
609
680
/// current table into it.
610
- fn resize ( & mut self , capacity : usize , hasher : impl Fn ( & T ) -> u64 ) {
681
+ fn resize (
682
+ & mut self ,
683
+ capacity : usize ,
684
+ hasher : impl Fn ( & T ) -> u64 ,
685
+ fallability : Fallibility ,
686
+ ) -> Result < ( ) , CollectionAllocErr > {
611
687
unsafe {
612
688
debug_assert ! ( self . items <= capacity) ;
613
689
614
690
// Allocate and initialize the new table.
615
- let mut new_table = RawTable :: with_capacity ( capacity) ;
691
+ let mut new_table = RawTable :: try_with_capacity ( capacity, fallability ) ? ;
616
692
new_table. growth_left -= self . items ;
617
693
new_table. items = self . items ;
618
694
@@ -642,6 +718,8 @@ impl<T> RawTable<T> {
642
718
// the items will not be dropped (since they have been moved into the
643
719
// new table).
644
720
mem:: swap ( self , & mut new_table) ;
721
+
722
+ Ok ( ( ) )
645
723
}
646
724
}
647
725
@@ -762,7 +840,10 @@ impl<T: Clone> Clone for RawTable<T> {
762
840
Self :: new ( )
763
841
} else {
764
842
unsafe {
765
- let mut new_table = ManuallyDrop :: new ( Self :: new_uninitialized ( self . buckets ( ) ) ) ;
843
+ let mut new_table = ManuallyDrop :: new (
844
+ Self :: new_uninitialized ( self . buckets ( ) , Fallibility :: Infallible )
845
+ . unwrap_or_else ( |_| hint:: unreachable_unchecked ( ) ) ,
846
+ ) ;
766
847
767
848
// Copy the control bytes unchanged. We do this in a single pass
768
849
self . ctrl ( 0 )
0 commit comments