Skip to content

Commit dbf20da

Browse files
committed
Be more careful about reserving entries capacity
Blindly following the indices capacity could make the entries allocation exceed the maximum layout size, `isize::MAX`, so we now cap the reserved capacity against that limit. Furthermore, even within that limit we can *try* the reservation first, and fall back to the minimum requested growth if that fails. This shouldn't matter in practice for 64-bit targets, but smaller targets could potentially run close to the maximum allocation size.
1 parent 0d40bae commit dbf20da

File tree

1 file changed

+34
-20
lines changed

1 file changed

+34
-20
lines changed

src/map/core.rs

Lines changed: 34 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -13,9 +13,8 @@ use hashbrown::raw::RawTable;
1313

1414
use crate::vec::{Drain, Vec};
1515
use crate::TryReserveError;
16-
use core::cmp;
1716
use core::fmt;
18-
use core::mem::replace;
17+
use core::mem;
1918
use core::ops::RangeBounds;
2019

2120
use crate::equivalent::Equivalent;
@@ -63,18 +62,18 @@ where
6362
V: Clone,
6463
{
6564
fn clone(&self) -> Self {
66-
let indices = self.indices.clone();
67-
let mut entries = Vec::with_capacity(indices.capacity());
68-
entries.clone_from(&self.entries);
69-
IndexMapCore { indices, entries }
65+
let mut new = Self::new();
66+
new.clone_from(self);
67+
new
7068
}
7169

7270
fn clone_from(&mut self, other: &Self) {
7371
let hasher = get_hash(&other.entries);
7472
self.indices.clone_from_with_hasher(&other.indices, hasher);
7573
if self.entries.capacity() < other.entries.len() {
76-
// If we must resize, match the indices capacity
77-
self.reserve_entries();
74+
// If we must resize, match the indices capacity.
75+
let additional = other.entries.len() - self.entries.len();
76+
self.reserve_entries(additional);
7877
}
7978
self.entries.clone_from(&other.entries);
8079
}
@@ -121,6 +120,9 @@ impl<K, V> Entries for IndexMapCore<K, V> {
121120
}
122121

123122
impl<K, V> IndexMapCore<K, V> {
123+
/// The maximum capacity before the `entries` allocation would exceed `isize::MAX`.
124+
const MAX_ENTRIES_CAPACITY: usize = (isize::MAX as usize) / mem::size_of::<Bucket<K, V>>();
125+
124126
#[inline]
125127
pub(crate) const fn new() -> Self {
126128
IndexMapCore {
@@ -144,7 +146,7 @@ impl<K, V> IndexMapCore<K, V> {
144146

145147
#[inline]
146148
pub(crate) fn capacity(&self) -> usize {
147-
cmp::min(self.indices.capacity(), self.entries.capacity())
149+
Ord::min(self.indices.capacity(), self.entries.capacity())
148150
}
149151

150152
pub(crate) fn clear(&mut self) {
@@ -194,12 +196,18 @@ impl<K, V> IndexMapCore<K, V> {
194196
/// Reserve capacity for `additional` more key-value pairs.
195197
pub(crate) fn reserve(&mut self, additional: usize) {
196198
self.indices.reserve(additional, get_hash(&self.entries));
197-
self.reserve_entries();
199+
self.reserve_entries(additional);
198200
}
199201

200-
/// Reserve entries capacity to match the indices
201-
fn reserve_entries(&mut self) {
202-
let additional = self.indices.capacity() - self.entries.len();
202+
/// Reserve entries capacity, rounded up to match the indices
203+
fn reserve_entries(&mut self, additional: usize) {
204+
// Use a soft-limit on the maximum capacity, but if the caller explicitly
205+
// requested more, do it and let them have the resulting panic.
206+
let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
207+
let try_add = new_capacity - self.entries.len();
208+
if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
209+
return;
210+
}
203211
self.entries.reserve_exact(additional);
204212
}
205213

@@ -214,12 +222,18 @@ impl<K, V> IndexMapCore<K, V> {
214222
self.indices
215223
.try_reserve(additional, get_hash(&self.entries))
216224
.map_err(TryReserveError::from_hashbrown)?;
217-
self.try_reserve_entries()
225+
self.try_reserve_entries(additional)
218226
}
219227

220-
/// Try to reserve entries capacity to match the indices
221-
fn try_reserve_entries(&mut self) -> Result<(), TryReserveError> {
222-
let additional = self.indices.capacity() - self.entries.len();
228+
/// Try to reserve entries capacity, rounded up to match the indices
229+
fn try_reserve_entries(&mut self, additional: usize) -> Result<(), TryReserveError> {
230+
// Use a soft-limit on the maximum capacity, but if the caller explicitly
231+
// requested more, do it and let them have the resulting error.
232+
let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
233+
let try_add = new_capacity - self.entries.len();
234+
if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
235+
return Ok(());
236+
}
223237
self.entries
224238
.try_reserve_exact(additional)
225239
.map_err(TryReserveError::from_alloc)
@@ -261,7 +275,7 @@ impl<K, V> IndexMapCore<K, V> {
261275
if i == self.entries.capacity() {
262276
// Reserve our own capacity synced to the indices,
263277
// rather than letting `Vec::push` just double it.
264-
self.reserve_entries();
278+
self.reserve_entries(1);
265279
}
266280
self.entries.push(Bucket { hash, key, value });
267281
i
@@ -281,7 +295,7 @@ impl<K, V> IndexMapCore<K, V> {
281295
K: Eq,
282296
{
283297
match self.get_index_of(hash, &key) {
284-
Some(i) => (i, Some(replace(&mut self.entries[i].value, value))),
298+
Some(i) => (i, Some(mem::replace(&mut self.entries[i].value, value))),
285299
None => (self.push(hash, key, value), None),
286300
}
287301
}
@@ -634,7 +648,7 @@ pub use self::raw::OccupiedEntry;
634648
impl<K, V> OccupiedEntry<'_, K, V> {
635649
/// Sets the value of the entry to `value`, and returns the entry's old value.
636650
pub fn insert(&mut self, value: V) -> V {
637-
replace(self.get_mut(), value)
651+
mem::replace(self.get_mut(), value)
638652
}
639653

640654
/// Remove the key, value pair stored in the map for this entry, and return the value.

0 commit comments

Comments (0)