Skip to content

Commit 7ad9edc

Browse files
committed
use Sharded in caches
1 parent 00fa49f commit 7ad9edc

File tree

3 files changed

+24
-95
lines changed

3 files changed

+24
-95
lines changed

compiler/rustc_data_structures/src/sharded.rs

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -9,15 +9,11 @@ use std::mem;
99
#[cfg_attr(parallel_compiler, repr(align(64)))]
1010
struct CacheAligned<T>(T);
1111

12-
#[cfg(parallel_compiler)]
1312
// 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700,
1413
// but this should be tested on higher core count CPUs. How the `Sharded` type gets used
1514
// may also affect the ideal number of shards.
1615
const SHARD_BITS: usize = 5;
1716

18-
#[cfg(not(parallel_compiler))]
19-
const SHARD_BITS: usize = 0;
20-
2117
pub const SHARDS: usize = 1 << SHARD_BITS;
2218

2319
/// An array of cache-line aligned inner locked structures with convenience methods.

compiler/rustc_query_system/src/query/caches.rs

Lines changed: 10 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@ use crate::dep_graph::DepNodeIndex;
22

33
use rustc_data_structures::fx::FxHashMap;
44
use rustc_data_structures::sharded;
5-
#[cfg(parallel_compiler)]
65
use rustc_data_structures::sharded::Sharded;
76
use rustc_data_structures::sync::Lock;
87
use rustc_index::vec::{Idx, IndexVec};
@@ -37,10 +36,7 @@ impl<'tcx, K: Eq + Hash, V: 'tcx> CacheSelector<'tcx, V> for DefaultCacheSelecto
3736
}
3837

3938
pub struct DefaultCache<K, V> {
40-
#[cfg(parallel_compiler)]
4139
cache: Sharded<FxHashMap<K, (V, DepNodeIndex)>>,
42-
#[cfg(not(parallel_compiler))]
43-
cache: Lock<FxHashMap<K, (V, DepNodeIndex)>>,
4440
}
4541

4642
impl<K, V> Default for DefaultCache<K, V> {
@@ -60,40 +56,26 @@ where
6056
#[inline(always)]
6157
fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
6258
let key_hash = sharded::make_hash(key);
63-
#[cfg(parallel_compiler)]
6459
let lock = self.cache.get_shard_by_hash(key_hash).lock();
65-
#[cfg(not(parallel_compiler))]
66-
let lock = self.cache.lock();
60+
6761
let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);
6862

6963
if let Some((_, value)) = result { Some(*value) } else { None }
7064
}
7165

7266
#[inline]
7367
fn complete(&self, key: K, value: V, index: DepNodeIndex) {
74-
#[cfg(parallel_compiler)]
7568
let mut lock = self.cache.get_shard_by_value(&key).lock();
76-
#[cfg(not(parallel_compiler))]
77-
let mut lock = self.cache.lock();
69+
7870
// We may be overwriting another value. This is all right, since the dep-graph
7971
// will check that the fingerprint matches.
8072
lock.insert(key, (value, index));
8173
}
8274

8375
fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
84-
#[cfg(parallel_compiler)]
85-
{
86-
let shards = self.cache.lock_shards();
87-
for shard in shards.iter() {
88-
for (k, v) in shard.iter() {
89-
f(k, &v.0, v.1);
90-
}
91-
}
92-
}
93-
#[cfg(not(parallel_compiler))]
94-
{
95-
let map = self.cache.lock();
96-
for (k, v) in map.iter() {
76+
let shards = self.cache.lock_shards();
77+
for shard in shards.iter() {
78+
for (k, v) in shard.iter() {
9779
f(k, &v.0, v.1);
9880
}
9981
}
@@ -149,10 +131,7 @@ impl<'tcx, K: Idx, V: 'tcx> CacheSelector<'tcx, V> for VecCacheSelector<K> {
149131
}
150132

151133
pub struct VecCache<K: Idx, V> {
152-
#[cfg(parallel_compiler)]
153134
cache: Sharded<IndexVec<K, Option<(V, DepNodeIndex)>>>,
154-
#[cfg(not(parallel_compiler))]
155-
cache: Lock<IndexVec<K, Option<(V, DepNodeIndex)>>>,
156135
}
157136

158137
impl<K: Idx, V> Default for VecCache<K, V> {
@@ -171,38 +150,22 @@ where
171150

172151
#[inline(always)]
173152
fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
174-
#[cfg(parallel_compiler)]
175153
let lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
176-
#[cfg(not(parallel_compiler))]
177-
let lock = self.cache.lock();
154+
178155
if let Some(Some(value)) = lock.get(*key) { Some(*value) } else { None }
179156
}
180157

181158
#[inline]
182159
fn complete(&self, key: K, value: V, index: DepNodeIndex) {
183-
#[cfg(parallel_compiler)]
184160
let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
185-
#[cfg(not(parallel_compiler))]
186-
let mut lock = self.cache.lock();
161+
187162
lock.insert(key, (value, index));
188163
}
189164

190165
fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
191-
#[cfg(parallel_compiler)]
192-
{
193-
let shards = self.cache.lock_shards();
194-
for shard in shards.iter() {
195-
for (k, v) in shard.iter_enumerated() {
196-
if let Some(v) = v {
197-
f(&k, &v.0, v.1);
198-
}
199-
}
200-
}
201-
}
202-
#[cfg(not(parallel_compiler))]
203-
{
204-
let map = self.cache.lock();
205-
for (k, v) in map.iter_enumerated() {
166+
let shards = self.cache.lock_shards();
167+
for shard in shards.iter() {
168+
for (k, v) in shard.iter_enumerated() {
206169
if let Some(v) = v {
207170
f(&k, &v.0, v.1);
208171
}

compiler/rustc_query_system/src/query/plumbing.rs

Lines changed: 14 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,10 @@ use rustc_data_structures::stack::ensure_sufficient_stack;
1919
use rustc_data_structures::sync::Lock;
2020
#[cfg(parallel_compiler)]
2121
use rustc_data_structures::{cold_path, sharded::Sharded};
22-
use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
22+
use rustc_data_structures::profiling::TimingGuard;
23+
use rustc_data_structures::sharded::Sharded;
24+
use rustc_data_structures::stack::ensure_sufficient_stack;
25+
use rustc_data_structures::sync::{Lock, LockGuard};
use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
2326
use rustc_span::{Span, DUMMY_SP};
2427
use std::cell::Cell;
2528
use std::collections::hash_map::Entry;
@@ -31,10 +34,7 @@ use thin_vec::ThinVec;
3134
use super::QueryConfig;
3235

3336
pub struct QueryState<K, D: DepKind> {
34-
#[cfg(parallel_compiler)]
3537
active: Sharded<FxHashMap<K, QueryResult<D>>>,
36-
#[cfg(not(parallel_compiler))]
37-
active: Lock<FxHashMap<K, QueryResult<D>>>,
3838
}
3939

4040
/// Indicates the state of a query for a given key in a query map.
@@ -53,15 +53,8 @@ where
5353
D: DepKind,
5454
{
5555
pub fn all_inactive(&self) -> bool {
56-
#[cfg(parallel_compiler)]
57-
{
58-
let shards = self.active.lock_shards();
59-
shards.iter().all(|shard| shard.is_empty())
60-
}
61-
#[cfg(not(parallel_compiler))]
62-
{
63-
self.active.lock().is_empty()
64-
}
56+
let shards = self.active.lock_shards();
57+
shards.iter().all(|shard| shard.is_empty())
6558
}
6659

6760
pub fn try_collect_active_jobs<Qcx: Copy>(
@@ -70,27 +63,11 @@ where
7063
make_query: fn(Qcx, K) -> QueryStackFrame<D>,
7164
jobs: &mut QueryMap<D>,
7265
) -> Option<()> {
73-
#[cfg(parallel_compiler)]
74-
{
75-
// We use try_lock_shards here since we are called from the
76-
// deadlock handler, and this shouldn't be locked.
77-
let shards = self.active.try_lock_shards()?;
78-
for shard in shards.iter() {
79-
for (k, v) in shard.iter() {
80-
if let QueryResult::Started(ref job) = *v {
81-
let query = make_query(qcx, *k);
82-
jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
83-
}
84-
}
85-
}
86-
}
87-
#[cfg(not(parallel_compiler))]
88-
{
89-
// We use try_lock here since we are called from the
90-
// deadlock handler, and this shouldn't be locked.
91-
// (FIXME: Is this relevant for non-parallel compilers? It doesn't
92-
// really hurt much.)
93-
for (k, v) in self.active.try_lock()?.iter() {
66+
// We use try_lock_shards here since we are called from the
67+
// deadlock handler, and this shouldn't be locked.
68+
let shards = self.active.try_lock_shards()?;
69+
for shard in shards.iter() {
70+
for (k, v) in shard.iter() {
9471
if let QueryResult::Started(ref job) = *v {
9572
let query = make_query(qcx, *k);
9673
jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
@@ -182,10 +159,8 @@ where
182159
cache.complete(key, result, dep_node_index);
183160

184161
let job = {
185-
#[cfg(parallel_compiler)]
186162
let mut lock = state.active.get_shard_by_value(&key).lock();
187-
#[cfg(not(parallel_compiler))]
188-
let mut lock = state.active.lock();
163+
189164
match lock.remove(&key).unwrap() {
190165
QueryResult::Started(job) => job,
191166
QueryResult::Poisoned => panic!(),
@@ -207,10 +182,8 @@ where
207182
// Poison the query so jobs waiting on it panic.
208183
let state = self.state;
209184
let job = {
210-
#[cfg(parallel_compiler)]
211185
let mut shard = state.active.get_shard_by_value(&self.key).lock();
212-
#[cfg(not(parallel_compiler))]
213-
let mut shard = state.active.lock();
186+
214187
let job = match shard.remove(&self.key).unwrap() {
215188
QueryResult::Started(job) => job,
216189
QueryResult::Poisoned => panic!(),
@@ -323,11 +296,8 @@ where
323296
Qcx: QueryContext,
324297
{
325298
let state = query.query_state(qcx);
326-
#[cfg(parallel_compiler)]
327299
let mut state_lock = state.active.get_shard_by_value(&key).lock();
328-
#[cfg(not(parallel_compiler))]
329-
let mut state_lock = state.active.lock();
330-
300+
let state_lock = state.active.lock();
331301
// For the parallel compiler we need to check both the query cache and query state structures
332302
// while holding the state lock to ensure that 1) the query has not yet completed and 2) the
333303
// query is not still executing. Without checking the query cache here, we can end up

0 commit comments

Comments (0)