
Commit 8fbb911

let SHARD = 1
1 parent f8c4ad3 commit 8fbb911

File tree

compiler/rustc_data_structures/Cargo.toml
compiler/rustc_data_structures/src/lib.rs
compiler/rustc_data_structures/src/sharded.rs

3 files changed: +53 -45 lines

compiler/rustc_data_structures/Cargo.toml

Lines changed: 2 additions & 2 deletions
@@ -14,7 +14,7 @@ indexmap = { version = "1.9.3" }
 jobserver_crate = { version = "0.1.13", package = "jobserver" }
 libc = "0.2"
 measureme = "10.0.0"
-rustc-rayon-core = { version = "0.5.0", optional = true }
+rustc-rayon-core = { version = "0.5.0" }
 rustc-rayon = { version = "0.5.0", optional = true }
 rustc_arena = { path = "../rustc_arena" }
 rustc_graphviz = { path = "../rustc_graphviz" }
@@ -51,4 +51,4 @@ features = [
 memmap2 = "0.2.1"

 [features]
-rustc_use_parallel_compiler = ["indexmap/rustc-rayon", "rustc-rayon", "rustc-rayon-core"]
+rustc_use_parallel_compiler = ["indexmap/rustc-rayon", "rustc-rayon"]

compiler/rustc_data_structures/src/lib.rs

Lines changed: 1 addition & 0 deletions
@@ -33,6 +33,7 @@
 #![feature(strict_provenance)]
 #![feature(ptr_alignment_type)]
 #![feature(macro_metavar_expr)]
+#![feature(mutex_unpoison)]
 #![allow(rustc::default_hash_types)]
 #![allow(rustc::potential_query_instability)]
 #![deny(rustc::untranslatable_diagnostic)]
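Note: the `mutex_unpoison` gate covers the unstable `Mutex::clear_poison` / `RwLock::clear_poison` APIs. As a rough illustration of what that unlocks (std types only, not code from this commit), a caller can recover a poisoned lock instead of propagating the panic:

use std::sync::{Mutex, MutexGuard};

// Lock a mutex, clearing the poison flag if a previous holder panicked.
fn lock_ignoring_poison<T>(m: &Mutex<T>) -> MutexGuard<'_, T> {
    m.lock().unwrap_or_else(|poisoned| {
        // `clear_poison` is gated behind #![feature(mutex_unpoison)] on nightly.
        m.clear_poison();
        poisoned.into_inner()
    })
}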

compiler/rustc_data_structures/src/sharded.rs

Lines changed: 50 additions & 43 deletions
@@ -3,22 +3,14 @@ use crate::sync::{CacheAligned, Lock, LockGuard};
 use std::borrow::Borrow;
 use std::collections::hash_map::RawEntryMut;
 use std::hash::{Hash, Hasher};
-use std::mem;

-#[cfg(parallel_compiler)]
-// 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700,
-// but this should be tested on higher core count CPUs. How the `Sharded` type gets used
-// may also affect the ideal number of shards.
-const SHARD_BITS: usize = 5;
-
-#[cfg(not(parallel_compiler))]
 const SHARD_BITS: usize = 0;

 pub const SHARDS: usize = 1 << SHARD_BITS;

 /// An array of cache-line aligned inner locked structures with convenience methods.
 pub struct Sharded<T> {
-    shards: [CacheAligned<Lock<T>>; SHARDS],
+    shard: Lock<T>,
 }

 impl<T: Default> Default for Sharded<T> {
@@ -28,34 +20,43 @@ impl<T: Default> Default for Sharded<T> {
     }
 }

-impl<T> Sharded<T> {
+impl<T: Default> Sharded<T> {
     #[inline]
     pub fn new(mut value: impl FnMut() -> T) -> Self {
-        Sharded { shards: [(); SHARDS].map(|()| CacheAligned(Lock::new(value()))) }
+        Sharded { shard: Lock::new(value()) }
     }

     /// The shard is selected by hashing `val` with `FxHasher`.
     #[inline]
-    pub fn get_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> &Lock<T> {
-        if SHARDS == 1 { &self.shards[0].0 } else { self.get_shard_by_hash(make_hash(val)) }
+    pub fn with_get_shard_by_value<K: Hash + ?Sized, F: FnOnce(&mut T) -> R, R>(
+        &self,
+        _val: &K,
+        f: F,
+    ) -> R {
+        self.shard.with_lock(f)
     }

     #[inline]
-    pub fn get_shard_by_hash(&self, hash: u64) -> &Lock<T> {
-        &self.shards[get_shard_index_by_hash(hash)].0
+    pub fn with_get_shard_by_hash<F: FnOnce(&mut T) -> R, R>(&self, _hash: u64, f: F) -> R {
+        self.shard.with_lock(f)
     }

     #[inline]
-    pub fn get_shard_by_index(&self, i: usize) -> &Lock<T> {
-        &self.shards[i].0
+    pub fn get_shard_by_value<K: Hash + ?Sized>(&self, _val: &K) -> &Lock<T> {
+        &self.shard
+    }
+
+    #[inline]
+    pub fn get_shard_by_hash(&self, _hash: u64) -> &Lock<T> {
+        &self.shard
     }

     pub fn lock_shards(&self) -> Vec<LockGuard<'_, T>> {
-        (0..SHARDS).map(|i| self.shards[i].0.lock()).collect()
+        vec![self.shard.lock()]
     }

     pub fn try_lock_shards(&self) -> Option<Vec<LockGuard<'_, T>>> {
-        (0..SHARDS).map(|i| self.shards[i].0.try_lock()).collect()
+        Some(vec![self.shard.try_lock()?])
     }
 }
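The rewritten accessors funnel through `Lock::with_lock`, a closure-based locking helper on the `Lock` type imported from `crate::sync` above; its real definition is not part of this diff. A minimal sketch of the pattern, using std's `Mutex` purely for illustration:

use std::sync::Mutex;

// Illustrative stand-in for rustc's `Lock`, not the actual implementation.
struct SimpleLock<T>(Mutex<T>);

impl<T> SimpleLock<T> {
    // Run `f` with exclusive access to the data; the guard is dropped on return,
    // so no lock guard can escape the closure.
    fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
        let mut guard = self.0.lock().unwrap();
        f(&mut *guard)
    }
}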

@@ -75,17 +76,18 @@ impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> {
         Q: Hash + Eq,
     {
         let hash = make_hash(value);
-        let mut shard = self.get_shard_by_hash(hash).lock();
-        let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, value);
-
-        match entry {
-            RawEntryMut::Occupied(e) => *e.key(),
-            RawEntryMut::Vacant(e) => {
-                let v = make();
-                e.insert_hashed_nocheck(hash, v, ());
-                v
+        self.with_get_shard_by_hash(hash, |shard| {
+            let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, value);
+
+            match entry {
+                RawEntryMut::Occupied(e) => *e.key(),
+                RawEntryMut::Vacant(e) => {
+                    let v = make();
+                    e.insert_hashed_nocheck(hash, v, ());
+                    v
+                }
             }
-        }
+        })
     }

     #[inline]
@@ -95,17 +97,18 @@ impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> {
         Q: Hash + Eq,
     {
         let hash = make_hash(&value);
-        let mut shard = self.get_shard_by_hash(hash).lock();
-        let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, &value);
-
-        match entry {
-            RawEntryMut::Occupied(e) => *e.key(),
-            RawEntryMut::Vacant(e) => {
-                let v = make(value);
-                e.insert_hashed_nocheck(hash, v, ());
-                v
+        self.with_get_shard_by_hash(hash, |shard| {
+            let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, &value);
+
+            match entry {
+                RawEntryMut::Occupied(e) => *e.key(),
+                RawEntryMut::Vacant(e) => {
+                    let v = make(value);
+                    e.insert_hashed_nocheck(hash, v, ());
+                    v
+                }
             }
-        }
+        })
     }
 }

@@ -117,9 +120,11 @@ pub trait IntoPointer {
 impl<K: Eq + Hash + Copy + IntoPointer> ShardedHashMap<K, ()> {
     pub fn contains_pointer_to<T: Hash + IntoPointer>(&self, value: &T) -> bool {
         let hash = make_hash(&value);
-        let shard = self.get_shard_by_hash(hash).lock();
-        let value = value.into_pointer();
-        shard.raw_entry().from_hash(hash, |entry| entry.into_pointer() == value).is_some()
+
+        self.with_get_shard_by_hash(hash, |shard| {
+            let value = value.into_pointer();
+            shard.raw_entry().from_hash(hash, |entry| entry.into_pointer() == value).is_some()
+        })
     }
 }

@@ -130,17 +135,19 @@ pub fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 {
     state.finish()
 }

+/*
 /// Get a shard with a pre-computed hash value. If `get_shard_by_value` is
 /// ever used in combination with `get_shard_by_hash` on a single `Sharded`
 /// instance, then `hash` must be computed with `FxHasher`. Otherwise,
 /// `hash` can be computed with any hasher, so long as that hasher is used
 /// consistently for each `Sharded` instance.
 #[inline]
 #[allow(clippy::modulo_one)]
-pub fn get_shard_index_by_hash(hash: u64) -> usize {
+fn get_shard_index_by_hash(hash: u64) -> usize {
     let hash_len = mem::size_of::<usize>();
     // Ignore the top 7 bits as hashbrown uses these and get the next SHARD_BITS highest bits.
     // hashbrown also uses the lowest bits, so we can't use those
     let bits = (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize;
     bits % SHARDS
 }
+*/
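
For context, a hypothetical call site of the new closure-based API (the map type, key, and `demo` function are illustrative, not taken from this commit):

use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sharded::Sharded;

fn demo() {
    // With SHARD_BITS = 0 there is a single shard, but callers keep the same shape.
    let cache: Sharded<FxHashMap<u32, String>> = Sharded::new(FxHashMap::default);

    // The key argument used to select a shard; it is now ignored (`_val`),
    // and all access happens inside the closure while the lock is held.
    let len = cache.with_get_shard_by_value(&42u32, |map| {
        map.insert(42, String::from("forty-two"));
        map.len()
    });
    assert_eq!(len, 1);
}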
