
Commit afb75dc

Merge pull request #245 from cuviper/more-reserves

Add more reservation abilities

2 parents 2251730 + 882c8f9

File tree: 7 files changed (+213 -17 lines)


RELEASES.md

Lines changed: 5 additions & 0 deletions
```diff
@@ -24,6 +24,11 @@
   `par_sort_by_cached_key` methods which perform stable sorts in place
   using a key extraction function.
 
+- `IndexMap` and `IndexSet` now have `reserve_exact`, `try_reserve`, and
+  `try_reserve_exact` methods that correspond to the same methods on `Vec`.
+  However, exactness only applies to the direct capacity for items, while the
+  raw hash table still follows its own rules for capacity and load factor.
+
 - The `hashbrown` dependency has been updated to version 0.13.
 
 - 1.9.1
```
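
For orientation, here is a minimal usage sketch of the API this release note describes (hypothetical values, assuming a version of indexmap containing this commit):

```rust
use indexmap::IndexMap;

fn main() {
    let mut map: IndexMap<u64, String> = IndexMap::new();

    // Fallible reservation: returns an error instead of aborting the
    // process when the allocator fails or the capacity computation
    // would overflow.
    if let Err(e) = map.try_reserve(1_000) {
        eprintln!("could not reserve: {}", e);
    }
    assert!(map.capacity() >= 1_000);

    // "Exact" reservation: the entries vector is not deliberately
    // over-allocated, but the raw hash table still rounds up per its
    // load factor, so the final capacity is only a lower bound.
    map.reserve_exact(10);
}
```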

src/lib.rs

Lines changed: 55 additions & 0 deletions
```diff
@@ -191,3 +191,58 @@ trait Entries {
     where
         F: FnOnce(&mut [Self::Entry]);
 }
+
+/// The error type for `try_reserve` methods.
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct TryReserveError {
+    kind: TryReserveErrorKind,
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+enum TryReserveErrorKind {
+    // The standard library's kind is currently opaque to us, otherwise we could unify this.
+    Std(alloc::collections::TryReserveError),
+    CapacityOverflow,
+    AllocError { layout: alloc::alloc::Layout },
+}
+
+// These are not `From` so we don't expose them in our public API.
+impl TryReserveError {
+    fn from_alloc(error: alloc::collections::TryReserveError) -> Self {
+        Self {
+            kind: TryReserveErrorKind::Std(error),
+        }
+    }
+
+    fn from_hashbrown(error: hashbrown::TryReserveError) -> Self {
+        Self {
+            kind: match error {
+                hashbrown::TryReserveError::CapacityOverflow => {
+                    TryReserveErrorKind::CapacityOverflow
+                }
+                hashbrown::TryReserveError::AllocError { layout } => {
+                    TryReserveErrorKind::AllocError { layout }
+                }
+            },
+        }
+    }
+}
+
+impl core::fmt::Display for TryReserveError {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        let reason = match &self.kind {
+            TryReserveErrorKind::Std(e) => return core::fmt::Display::fmt(e, f),
+            TryReserveErrorKind::CapacityOverflow => {
+                " because the computed capacity exceeded the collection's maximum"
+            }
+            TryReserveErrorKind::AllocError { .. } => {
+                " because the memory allocator returned an error"
+            }
+        };
+        f.write_str("memory allocation failed")?;
+        f.write_str(reason)
+    }
+}
+
+#[cfg(feature = "std")]
+impl std::error::Error for TryReserveError {}
```
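
A sketch of how this unified error type surfaces to callers — the `grow` helper below is hypothetical, not part of the crate; whether the failure originates in hashbrown's raw table or in the entries `Vec`, the caller sees one `TryReserveError`:

```rust
use indexmap::IndexMap;

// Hypothetical helper: converts any reservation failure into a String
// via the `Display` impl defined above.
fn grow(map: &mut IndexMap<String, Vec<u8>>, n: usize) -> Result<(), String> {
    map.try_reserve(n).map_err(|e| e.to_string())
}

fn main() {
    let mut map = IndexMap::new();
    // A `usize::MAX` request overflows the capacity computation, so this
    // returns the capacity-overflow kind rather than panicking.
    let err = grow(&mut map, usize::MAX).unwrap_err();
    assert!(err.starts_with("memory allocation failed"));
}
```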

src/map.rs

Lines changed: 32 additions & 1 deletion
```diff
@@ -31,7 +31,7 @@ use std::collections::hash_map::RandomState;
 use self::core::IndexMapCore;
 use crate::equivalent::Equivalent;
 use crate::util::{third, try_simplify_range};
-use crate::{Bucket, Entries, HashValue};
+use crate::{Bucket, Entries, HashValue, TryReserveError};
 
 /// A hash table where the iteration order of the key-value pairs is independent
 /// of the hash values of the keys.
@@ -319,6 +319,37 @@ where
         self.core.reserve(additional);
     }
 
+    /// Reserve capacity for `additional` more key-value pairs, without over-allocating.
+    ///
+    /// Unlike `reserve`, this does not deliberately over-allocate the entry capacity to avoid
+    /// frequent re-allocations. However, the underlying data structures may still have internal
+    /// capacity requirements, and the allocator itself may give more space than requested, so this
+    /// cannot be relied upon to be precisely minimal.
+    ///
+    /// Computes in **O(n)** time.
+    pub fn reserve_exact(&mut self, additional: usize) {
+        self.core.reserve_exact(additional);
+    }
+
+    /// Try to reserve capacity for `additional` more key-value pairs.
+    ///
+    /// Computes in **O(n)** time.
+    pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+        self.core.try_reserve(additional)
+    }
+
+    /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating.
+    ///
+    /// Unlike `try_reserve`, this does not deliberately over-allocate the entry capacity to avoid
+    /// frequent re-allocations. However, the underlying data structures may still have internal
+    /// capacity requirements, and the allocator itself may give more space than requested, so this
+    /// cannot be relied upon to be precisely minimal.
+    ///
+    /// Computes in **O(n)** time.
+    pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+        self.core.try_reserve_exact(additional)
+    }
+
     /// Shrink the capacity of the map as much as possible.
     ///
     /// Computes in **O(n)** time.
```
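
A quick sketch contrasting the two reservation flavors these docs describe (illustrative values only):

```rust
use indexmap::IndexMap;

fn main() {
    let mut a: IndexMap<u32, u32> = IndexMap::new();
    let mut b: IndexMap<u32, u32> = IndexMap::new();

    // `reserve` may round the entry capacity up to match the hash
    // table's own (load-factor-driven) capacity; `reserve_exact` asks
    // for exactly `additional` entry slots.
    a.reserve(17);
    b.reserve_exact(17);

    // Both guarantee room for at least 17 more pairs, but as the docs
    // above note, neither promises a precise final capacity.
    assert!(a.capacity() >= 17);
    assert!(b.capacity() >= 17);
}
```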

src/map/core.rs

Lines changed: 71 additions & 16 deletions
```diff
@@ -12,9 +12,9 @@ mod raw;
 use hashbrown::raw::RawTable;
 
 use crate::vec::{Drain, Vec};
-use core::cmp;
+use crate::TryReserveError;
 use core::fmt;
-use core::mem::replace;
+use core::mem;
 use core::ops::RangeBounds;
 
 use crate::equivalent::Equivalent;
@@ -62,18 +62,18 @@ where
     V: Clone,
 {
     fn clone(&self) -> Self {
-        let indices = self.indices.clone();
-        let mut entries = Vec::with_capacity(indices.capacity());
-        entries.clone_from(&self.entries);
-        IndexMapCore { indices, entries }
+        let mut new = Self::new();
+        new.clone_from(self);
+        new
     }
 
     fn clone_from(&mut self, other: &Self) {
         let hasher = get_hash(&other.entries);
         self.indices.clone_from_with_hasher(&other.indices, hasher);
         if self.entries.capacity() < other.entries.len() {
-            // If we must resize, match the indices capacity
-            self.reserve_entries();
+            // If we must resize, match the indices capacity.
+            let additional = other.entries.len() - self.entries.len();
+            self.reserve_entries(additional);
         }
         self.entries.clone_from(&other.entries);
     }
@@ -120,6 +120,9 @@ impl<K, V> Entries for IndexMapCore<K, V> {
 }
 
 impl<K, V> IndexMapCore<K, V> {
+    /// The maximum capacity before the `entries` allocation would exceed `isize::MAX`.
+    const MAX_ENTRIES_CAPACITY: usize = (isize::MAX as usize) / mem::size_of::<Bucket<K, V>>();
+
     #[inline]
     pub(crate) const fn new() -> Self {
         IndexMapCore {
@@ -143,7 +146,7 @@ impl<K, V> IndexMapCore<K, V> {
 
     #[inline]
     pub(crate) fn capacity(&self) -> usize {
-        cmp::min(self.indices.capacity(), self.entries.capacity())
+        Ord::min(self.indices.capacity(), self.entries.capacity())
     }
 
     pub(crate) fn clear(&mut self) {
@@ -193,15 +196,67 @@ impl<K, V> IndexMapCore<K, V> {
     /// Reserve capacity for `additional` more key-value pairs.
     pub(crate) fn reserve(&mut self, additional: usize) {
         self.indices.reserve(additional, get_hash(&self.entries));
-        self.reserve_entries();
+        // Only grow entries if necessary, since we also round up capacity.
+        if additional > self.entries.capacity() - self.entries.len() {
+            self.reserve_entries(additional);
+        }
+    }
+
+    /// Reserve entries capacity, rounded up to match the indices
+    fn reserve_entries(&mut self, additional: usize) {
+        // Use a soft-limit on the maximum capacity, but if the caller explicitly
+        // requested more, do it and let them have the resulting panic.
+        let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
+        let try_add = new_capacity - self.entries.len();
+        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
+            return;
+        }
+        self.entries.reserve_exact(additional);
     }
 
-    /// Reserve entries capacity to match the indices
-    fn reserve_entries(&mut self) {
-        let additional = self.indices.capacity() - self.entries.len();
+    /// Reserve capacity for `additional` more key-value pairs, without over-allocating.
+    pub(crate) fn reserve_exact(&mut self, additional: usize) {
+        self.indices.reserve(additional, get_hash(&self.entries));
         self.entries.reserve_exact(additional);
     }
 
+    /// Try to reserve capacity for `additional` more key-value pairs.
+    pub(crate) fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+        self.indices
+            .try_reserve(additional, get_hash(&self.entries))
+            .map_err(TryReserveError::from_hashbrown)?;
+        // Only grow entries if necessary, since we also round up capacity.
+        if additional > self.entries.capacity() - self.entries.len() {
+            self.try_reserve_entries(additional)
+        } else {
+            Ok(())
+        }
+    }
+
+    /// Try to reserve entries capacity, rounded up to match the indices
+    fn try_reserve_entries(&mut self, additional: usize) -> Result<(), TryReserveError> {
+        // Use a soft-limit on the maximum capacity, but if the caller explicitly
+        // requested more, do it and let them have the resulting error.
+        let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
+        let try_add = new_capacity - self.entries.len();
+        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
+            return Ok(());
+        }
+        self.entries
+            .try_reserve_exact(additional)
+            .map_err(TryReserveError::from_alloc)
+    }
+
+    /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating.
+    pub(crate) fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+        self.indices
+            .try_reserve(additional, get_hash(&self.entries))
+            .map_err(TryReserveError::from_hashbrown)?;
+        self.entries
+            .try_reserve_exact(additional)
+            .map_err(TryReserveError::from_alloc)
+    }
+
     /// Shrink the capacity of the map with a lower bound
     pub(crate) fn shrink_to(&mut self, min_capacity: usize) {
         self.indices
@@ -228,7 +283,7 @@ impl<K, V> IndexMapCore<K, V> {
         if i == self.entries.capacity() {
             // Reserve our own capacity synced to the indices,
             // rather than letting `Vec::push` just double it.
-            self.reserve_entries();
+            self.reserve_entries(1);
         }
         self.entries.push(Bucket { hash, key, value });
         i
@@ -248,7 +303,7 @@ impl<K, V> IndexMapCore<K, V> {
         K: Eq,
     {
         match self.get_index_of(hash, &key) {
-            Some(i) => (i, Some(replace(&mut self.entries[i].value, value))),
+            Some(i) => (i, Some(mem::replace(&mut self.entries[i].value, value))),
             None => (self.push(hash, key, value), None),
         }
     }
@@ -601,7 +656,7 @@ pub use self::raw::OccupiedEntry;
 impl<K, V> OccupiedEntry<'_, K, V> {
     /// Sets the value of the entry to `value`, and returns the entry's old value.
     pub fn insert(&mut self, value: V) -> V {
-        replace(self.get_mut(), value)
+        mem::replace(self.get_mut(), value)
     }
 
     /// Remove the key, value pair stored in the map for this entry, and return the value.
```
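
The soft-limit rounding in `reserve_entries` is the subtle part of this change. Below is a standalone sketch of that rule using plain integers and a hypothetical `target_reservation` function rather than the crate's internal types: try to grow the entries vector all the way to the (capped) indices capacity, and fall back to the caller's exact request when the soft target is not larger than the request (the real code also falls back if the speculative `try_reserve_exact` fails):

```rust
fn target_reservation(
    indices_capacity: usize,
    entries_len: usize,
    max_entries_capacity: usize, // isize::MAX / size_of::<Bucket<K, V>>()
    additional: usize,
) -> usize {
    // Cap the soft target so the entries allocation stays under isize::MAX.
    let new_capacity = Ord::min(indices_capacity, max_entries_capacity);
    let try_add = new_capacity - entries_len;
    if try_add > additional {
        try_add // soft target: round up to match the indices capacity
    } else {
        additional // the caller explicitly asked for more than the soft target
    }
}

fn main() {
    // hashbrown sized its table for 28 entries; the vec holds 10 and the
    // caller wants 5 more, so we round up to 18 rather than reserving 5.
    assert_eq!(target_reservation(28, 10, usize::MAX, 5), 18);
    // A request beyond the soft target is honored as-is.
    assert_eq!(target_reservation(28, 10, usize::MAX, 100), 100);
}
```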

src/map/tests.rs

Lines changed: 9 additions & 0 deletions
```diff
@@ -158,6 +158,15 @@ fn reserve() {
     assert_eq!(map.get(&capacity), Some(&std::usize::MAX));
 }
 
+#[test]
+fn try_reserve() {
+    let mut map = IndexMap::<usize, usize>::new();
+    assert_eq!(map.capacity(), 0);
+    assert_eq!(map.try_reserve(100), Ok(()));
+    assert!(map.capacity() >= 100);
+    assert!(map.try_reserve(usize::MAX).is_err());
+}
+
 #[test]
 fn shrink_to_fit() {
     let mut map = IndexMap::<usize, usize>::new();
```

src/set.rs

Lines changed: 32 additions & 0 deletions
```diff
@@ -11,6 +11,7 @@ pub use self::slice::Slice;
 
 #[cfg(feature = "rayon")]
 pub use crate::rayon::set as rayon;
+use crate::TryReserveError;
 
 #[cfg(feature = "std")]
 use std::collections::hash_map::RandomState;
@@ -266,6 +267,37 @@ where
         self.map.reserve(additional);
     }
 
+    /// Reserve capacity for `additional` more values, without over-allocating.
+    ///
+    /// Unlike `reserve`, this does not deliberately over-allocate the entry capacity to avoid
+    /// frequent re-allocations. However, the underlying data structures may still have internal
+    /// capacity requirements, and the allocator itself may give more space than requested, so this
+    /// cannot be relied upon to be precisely minimal.
+    ///
+    /// Computes in **O(n)** time.
+    pub fn reserve_exact(&mut self, additional: usize) {
+        self.map.reserve_exact(additional);
+    }
+
+    /// Try to reserve capacity for `additional` more values.
+    ///
+    /// Computes in **O(n)** time.
+    pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+        self.map.try_reserve(additional)
+    }
+
+    /// Try to reserve capacity for `additional` more values, without over-allocating.
+    ///
+    /// Unlike `try_reserve`, this does not deliberately over-allocate the entry capacity to avoid
+    /// frequent re-allocations. However, the underlying data structures may still have internal
+    /// capacity requirements, and the allocator itself may give more space than requested, so this
+    /// cannot be relied upon to be precisely minimal.
+    ///
+    /// Computes in **O(n)** time.
+    pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+        self.map.try_reserve_exact(additional)
+    }
+
     /// Shrink the capacity of the set as much as possible.
     ///
     /// Computes in **O(n)** time.
```
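
Since these set methods simply delegate to the underlying map, the same semantics apply on the `IndexSet` side; a brief sketch:

```rust
use indexmap::IndexSet;

fn main() {
    let mut set: IndexSet<u64> = IndexSet::new();
    // Fallible, and "exact" only for the entry storage; the raw table
    // still rounds up per its load factor.
    set.try_reserve_exact(64).expect("allocation failed");
    assert!(set.capacity() >= 64);
}
```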

src/set/tests.rs

Lines changed: 9 additions & 0 deletions
```diff
@@ -282,6 +282,15 @@ fn reserve() {
     assert_eq!(set.get(&capacity), Some(&capacity));
 }
 
+#[test]
+fn try_reserve() {
+    let mut set = IndexSet::<usize>::new();
+    assert_eq!(set.capacity(), 0);
+    assert_eq!(set.try_reserve(100), Ok(()));
+    assert!(set.capacity() >= 100);
+    assert!(set.try_reserve(usize::MAX).is_err());
+}
+
 #[test]
 fn shrink_to_fit() {
     let mut set = IndexSet::<usize>::new();
```
