Commit 0d40bae

Add reserve_exact, try_reserve, try_reserve_exact
1 parent 2251730 commit 0d40bae

File tree

6 files changed (+170, -1 lines)


src/lib.rs

Lines changed: 55 additions & 0 deletions
```diff
@@ -191,3 +191,58 @@ trait Entries {
     where
         F: FnOnce(&mut [Self::Entry]);
 }
+
+/// The error type for `try_reserve` methods.
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct TryReserveError {
+    kind: TryReserveErrorKind,
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+enum TryReserveErrorKind {
+    // The standard library's kind is currently opaque to us, otherwise we could unify this.
+    Std(alloc::collections::TryReserveError),
+    CapacityOverflow,
+    AllocError { layout: alloc::alloc::Layout },
+}
+
+// These are not `From` so we don't expose them in our public API.
+impl TryReserveError {
+    fn from_alloc(error: alloc::collections::TryReserveError) -> Self {
+        Self {
+            kind: TryReserveErrorKind::Std(error),
+        }
+    }
+
+    fn from_hashbrown(error: hashbrown::TryReserveError) -> Self {
+        Self {
+            kind: match error {
+                hashbrown::TryReserveError::CapacityOverflow => {
+                    TryReserveErrorKind::CapacityOverflow
+                }
+                hashbrown::TryReserveError::AllocError { layout } => {
+                    TryReserveErrorKind::AllocError { layout }
+                }
+            },
+        }
+    }
+}
+
+impl core::fmt::Display for TryReserveError {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        let reason = match &self.kind {
+            TryReserveErrorKind::Std(e) => return core::fmt::Display::fmt(e, f),
+            TryReserveErrorKind::CapacityOverflow => {
+                " because the computed capacity exceeded the collection's maximum"
+            }
+            TryReserveErrorKind::AllocError { .. } => {
+                " because the memory allocator returned an error"
+            }
+        };
+        f.write_str("memory allocation failed")?;
+        f.write_str(reason)
+    }
+}
+
+#[cfg(feature = "std")]
+impl std::error::Error for TryReserveError {}
```
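A minimal sketch of how this error type reaches callers, assuming the crate is consumed as `indexmap` and `TryReserveError` is exported from the crate root as defined above (illustrative, not part of the diff):

```rust
use indexmap::IndexMap;

fn main() {
    let mut map: IndexMap<u64, u64> = IndexMap::new();
    // Requesting usize::MAX additional entries overflows the capacity
    // computation, so hashbrown reports CapacityOverflow instead of aborting.
    if let Err(e) = map.try_reserve(usize::MAX) {
        // Via the Display impl above, this should print something like:
        // "memory allocation failed because the computed capacity exceeded
        // the collection's maximum"
        println!("{}", e);
    }
}
```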

src/map.rs

Lines changed: 32 additions & 1 deletion
```diff
@@ -31,7 +31,7 @@ use std::collections::hash_map::RandomState;
 use self::core::IndexMapCore;
 use crate::equivalent::Equivalent;
 use crate::util::{third, try_simplify_range};
-use crate::{Bucket, Entries, HashValue};
+use crate::{Bucket, Entries, HashValue, TryReserveError};
 
 /// A hash table where the iteration order of the key-value pairs is independent
 /// of the hash values of the keys.
@@ -319,6 +319,37 @@ where
         self.core.reserve(additional);
     }
 
+    /// Reserve capacity for `additional` more key-value pairs, without over-allocating.
+    ///
+    /// Unlike `reserve`, this does not deliberately over-allocate the entry capacity to avoid
+    /// frequent re-allocations. However, the underlying data structures may still have internal
+    /// capacity requirements, and the allocator itself may give more space than requested, so this
+    /// cannot be relied upon to be precisely minimal.
+    ///
+    /// Computes in **O(n)** time.
+    pub fn reserve_exact(&mut self, additional: usize) {
+        self.core.reserve_exact(additional);
+    }
+
+    /// Try to reserve capacity for `additional` more key-value pairs.
+    ///
+    /// Computes in **O(n)** time.
+    pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+        self.core.try_reserve(additional)
+    }
+
+    /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating.
+    ///
+    /// Unlike `try_reserve`, this does not deliberately over-allocate the entry capacity to avoid
+    /// frequent re-allocations. However, the underlying data structures may still have internal
+    /// capacity requirements, and the allocator itself may give more space than requested, so this
+    /// cannot be relied upon to be precisely minimal.
+    ///
+    /// Computes in **O(n)** time.
+    pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+        self.core.try_reserve_exact(additional)
+    }
+
     /// Shrink the capacity of the map as much as possible.
     ///
     /// Computes in **O(n)** time.
```
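A hedged usage sketch of the new reservation methods; the exact capacities depend on hashbrown and the allocator, so the assertions below are deliberately loose (illustrative, not part of the diff):

```rust
use indexmap::IndexMap;

fn main() {
    // Infallible variant: aborts the process on allocation failure.
    let mut a: IndexMap<String, u32> = IndexMap::new();
    a.reserve_exact(500); // no deliberate headroom beyond 500 entries
    assert!(a.capacity() >= 500);

    // Fallible variants: allocation failure comes back as a Result instead.
    let mut b: IndexMap<String, u32> = IndexMap::new();
    b.try_reserve(500).expect("failed to reserve");
    assert!(b.capacity() >= 500);
    b.try_reserve_exact(1_000).expect("failed to reserve");
    assert!(b.capacity() >= 1_000);
}
```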

src/map/core.rs

Lines changed: 33 additions & 0 deletions
```diff
@@ -12,6 +12,7 @@ mod raw;
 use hashbrown::raw::RawTable;
 
 use crate::vec::{Drain, Vec};
+use crate::TryReserveError;
 use core::cmp;
 use core::fmt;
 use core::mem::replace;
@@ -202,6 +203,38 @@ impl<K, V> IndexMapCore<K, V> {
         self.entries.reserve_exact(additional);
     }
 
+    /// Reserve capacity for `additional` more key-value pairs, without over-allocating.
+    pub(crate) fn reserve_exact(&mut self, additional: usize) {
+        self.indices.reserve(additional, get_hash(&self.entries));
+        self.entries.reserve_exact(additional);
+    }
+
+    /// Try to reserve capacity for `additional` more key-value pairs.
+    pub(crate) fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+        self.indices
+            .try_reserve(additional, get_hash(&self.entries))
+            .map_err(TryReserveError::from_hashbrown)?;
+        self.try_reserve_entries()
+    }
+
+    /// Try to reserve entries capacity to match the indices
+    fn try_reserve_entries(&mut self) -> Result<(), TryReserveError> {
+        let additional = self.indices.capacity() - self.entries.len();
+        self.entries
+            .try_reserve_exact(additional)
+            .map_err(TryReserveError::from_alloc)
+    }
+
+    /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating.
+    pub(crate) fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+        self.indices
+            .try_reserve(additional, get_hash(&self.entries))
+            .map_err(TryReserveError::from_hashbrown)?;
+        self.entries
+            .try_reserve_exact(additional)
+            .map_err(TryReserveError::from_alloc)
+    }
+
     /// Shrink the capacity of the map with a lower bound
     pub(crate) fn shrink_to(&mut self, min_capacity: usize) {
         self.indices
```
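Note the design of `try_reserve`: the index table is grown first, then `try_reserve_entries` grows the entries vector by `indices.capacity() - entries.len()`, i.e. up to whatever capacity the table actually obtained, keeping the two halves in step. A sketch of the observable effect, assuming `capacity()` reports the smaller of the two internal capacities (illustrative, not part of the diff):

```rust
use indexmap::IndexMap;

fn main() {
    let mut map: IndexMap<u32, u32> = IndexMap::new();
    map.try_reserve(100).unwrap();
    let cap = map.capacity();
    assert!(cap >= 100);

    // Both the index table and the entries vector were sized together, so
    // filling the map up to `cap` should not trigger another allocation.
    for i in 0..cap as u32 {
        map.insert(i, i);
    }
    assert_eq!(map.capacity(), cap);
}
```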

src/map/tests.rs

Lines changed: 9 additions & 0 deletions
```diff
@@ -158,6 +158,15 @@ fn reserve() {
     assert_eq!(map.get(&capacity), Some(&std::usize::MAX));
 }
 
+#[test]
+fn try_reserve() {
+    let mut map = IndexMap::<usize, usize>::new();
+    assert_eq!(map.capacity(), 0);
+    assert_eq!(map.try_reserve(100), Ok(()));
+    assert!(map.capacity() >= 100);
+    assert!(map.try_reserve(usize::MAX).is_err());
+}
+
 #[test]
 fn shrink_to_fit() {
     let mut map = IndexMap::<usize, usize>::new();
```

src/set.rs

Lines changed: 32 additions & 0 deletions
```diff
@@ -11,6 +11,7 @@ pub use self::slice::Slice;
 
 #[cfg(feature = "rayon")]
 pub use crate::rayon::set as rayon;
+use crate::TryReserveError;
 
 #[cfg(feature = "std")]
 use std::collections::hash_map::RandomState;
@@ -266,6 +267,37 @@ where
         self.map.reserve(additional);
     }
 
+    /// Reserve capacity for `additional` more values, without over-allocating.
+    ///
+    /// Unlike `reserve`, this does not deliberately over-allocate the entry capacity to avoid
+    /// frequent re-allocations. However, the underlying data structures may still have internal
+    /// capacity requirements, and the allocator itself may give more space than requested, so this
+    /// cannot be relied upon to be precisely minimal.
+    ///
+    /// Computes in **O(n)** time.
+    pub fn reserve_exact(&mut self, additional: usize) {
+        self.map.reserve_exact(additional);
+    }
+
+    /// Try to reserve capacity for `additional` more values.
+    ///
+    /// Computes in **O(n)** time.
+    pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+        self.map.try_reserve(additional)
+    }
+
+    /// Try to reserve capacity for `additional` more values, without over-allocating.
+    ///
+    /// Unlike `try_reserve`, this does not deliberately over-allocate the entry capacity to avoid
+    /// frequent re-allocations. However, the underlying data structures may still have internal
+    /// capacity requirements, and the allocator itself may give more space than requested, so this
+    /// cannot be relied upon to be precisely minimal.
+    ///
+    /// Computes in **O(n)** time.
+    pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+        self.map.try_reserve_exact(additional)
+    }
+
     /// Shrink the capacity of the set as much as possible.
     ///
     /// Computes in **O(n)** time.
```
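The set methods simply delegate to the inner `IndexMap`, so they carry the same contract. A short sketch (illustrative, not part of the diff):

```rust
use indexmap::IndexSet;

fn main() {
    let mut set: IndexSet<u32> = IndexSet::new();
    // Forwards to IndexMap::try_reserve on the inner map.
    set.try_reserve(100).expect("failed to reserve");
    assert!(set.capacity() >= 100);

    // Same delegation for the exact variant.
    set.try_reserve_exact(200).expect("failed to reserve");
    assert!(set.capacity() >= 200);
}
```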

src/set/tests.rs

Lines changed: 9 additions & 0 deletions
```diff
@@ -282,6 +282,15 @@ fn reserve() {
     assert_eq!(set.get(&capacity), Some(&capacity));
 }
 
+#[test]
+fn try_reserve() {
+    let mut set = IndexSet::<usize>::new();
+    assert_eq!(set.capacity(), 0);
+    assert_eq!(set.try_reserve(100), Ok(()));
+    assert!(set.capacity() >= 100);
+    assert!(set.try_reserve(usize::MAX).is_err());
+}
+
 #[test]
 fn shrink_to_fit() {
     let mut set = IndexSet::<usize>::new();
```
