Commit 3b0715f

set parallel_compiler to default

1 parent 8fbb911

File tree: 8 files changed, +43 -67 lines

compiler/rustc_codegen_ssa/src/base.rs

Lines changed: 1 addition & 1 deletion
@@ -686,7 +686,7 @@ pub fn codegen_crate<B: ExtraBackendMethods>(
     // This likely is a temporary measure. Once we don't have to support the
     // non-parallel compiler anymore, we can compile CGUs end-to-end in
     // parallel and get rid of the complicated scheduling logic.
-    let mut pre_compiled_cgus = if tcx.sess.threads() > 1 {
+    let mut pre_compiled_cgus = if rustc_data_structures::sync::active() {
         tcx.sess.time("compile_first_CGU_batch", || {
             // Try to find one CGU to compile per thread.
             let cgus: Vec<_> = cgu_reuse
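
The gate for the pre-compiled CGU batch no longer re-derives parallelism from `tcx.sess.threads()`; it consults a process-global flag through `rustc_data_structures::sync::active()`. A minimal sketch of such a set-once flag, assuming a plain `AtomicBool` (the names `set` and `active` come from this diff; the real module's internals may differ):

    use std::sync::atomic::{AtomicBool, Ordering};

    // Whether the compiler session runs in thread-safe (parallel) mode.
    // Written once at startup, before any mode-dependent structure exists.
    static PARALLEL: AtomicBool = AtomicBool::new(false);

    pub fn set(parallel: bool) {
        PARALLEL.store(parallel, Ordering::Relaxed);
    }

    #[inline]
    pub fn active() -> bool {
        PARALLEL.load(Ordering::Relaxed)
    }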

compiler/rustc_driver_impl/src/lib.rs

Lines changed: 1 addition & 1 deletion
@@ -257,7 +257,7 @@ fn run_compiler(
     let sopts = config::build_session_options(&matches);
 
     // Set parallel mode before thread pool creation, which will create `Lock`s.
-    interface::set_thread_safe_mode(&sopts.unstable_opts);
+    interface::set_parallel_mode(&sopts.unstable_opts);
 
     if let Some(ref code) = matches.opt_str("explain") {
         handle_explain(diagnostics_registry(), code, sopts.error_format);
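
The comment above is the key ordering constraint: the mode must be fixed before the first `Lock` is created. A sketch of why, under the assumption that `Lock` picks its representation at construction time (a hypothetical simplification of the dyn-thread-safe scheme):

    use std::cell::RefCell;
    use std::sync::Mutex;

    fn parallel_active() -> bool { false } // stand-in for rustc_data_structures::sync::active()

    // A lock that is a cheap RefCell in single-threaded mode and a real Mutex
    // in parallel mode. The choice is baked in when the value is constructed,
    // so flipping the global flag afterwards would leave existing locks with
    // the wrong representation.
    enum Lock<T> {
        SingleThread(RefCell<T>),
        MultiThread(Mutex<T>),
    }

    impl<T> Lock<T> {
        fn new(value: T) -> Self {
            if parallel_active() {
                Lock::MultiThread(Mutex::new(value))
            } else {
                Lock::SingleThread(RefCell::new(value))
            }
        }
    }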

compiler/rustc_interface/src/interface.rs

Lines changed: 2 additions & 2 deletions
@@ -61,8 +61,8 @@ impl Compiler {
 }
 
 #[allow(rustc::bad_opt_access)]
-pub fn set_thread_safe_mode(sopts: &config::UnstableOptions) {
-    rustc_data_structures::sync::set_dyn_thread_safe_mode(sopts.threads > 1);
+pub fn set_parallel_mode(sopts: &config::UnstableOptions) {
+    rustc_data_structures::sync::set(sopts.threads > 1);
 }
 
 /// Converts strings provided as `--cfg [cfgspec]` into a `crate_cfg`.

compiler/rustc_interface/src/util.rs

Lines changed: 5 additions & 2 deletions
@@ -128,12 +128,15 @@ fn get_stack_size() -> Option<usize> {
     env::var_os("RUST_MIN_STACK").is_none().then_some(STACK_SIZE)
 }
 
-#[cfg(not(parallel_compiler))]
 pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
     edition: Edition,
     _threads: usize,
     f: F,
 ) -> R {
+    #[cfg(parallel_compiler)]
+    if _threads > 1 {
+        return run_in_threads_pool_with_globals(edition, _threads, f);
+    }
     // The "thread pool" is a single spawned thread in the non-parallel
     // compiler. We run on a spawned thread instead of the main thread (a) to
     // provide control over the stack size, and (b) to increase similarity with
@@ -163,7 +166,7 @@ pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
 }
 
 #[cfg(parallel_compiler)]
-pub(crate) fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
+pub(crate) fn run_in_threads_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
     edition: Edition,
     threads: usize,
     f: F,
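
Instead of two cfg-exclusive definitions, the single-thread entry point now exists in the parallel build too and forwards to the real pool only when more than one thread is requested. The fallback path runs everything on one spawned thread so the stack size can be controlled; a self-contained sketch of that part using only the standard library (rustc's real version additionally installs session globals):

    use std::thread;

    const STACK_SIZE: usize = 8 * 1024 * 1024; // assumed default; rustc computes this

    fn run_on_one_thread<F, R>(f: F) -> R
    where
        F: FnOnce() -> R + Send,
        R: Send,
    {
        // A scoped thread lets us borrow from the caller while still choosing
        // an explicit stack size via `thread::Builder`.
        thread::scope(|s| {
            thread::Builder::new()
                .stack_size(STACK_SIZE)
                .spawn_scoped(s, f)
                .expect("failed to spawn thread")
                .join()
                .expect("thread panicked")
        })
    }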

compiler/rustc_query_system/src/query/job.rs

Lines changed: 0 additions & 1 deletion
@@ -124,7 +124,6 @@ impl<D: DepKind> QueryJob<D> {
 }
 
 impl QueryJobId {
-    #[cfg(not(parallel_compiler))]
     pub(super) fn find_cycle_in_stack<D: DepKind>(
         &self,
         query_map: QueryMap<D>,
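
Dropping the `#[cfg(not(parallel_compiler))]` attribute makes `find_cycle_in_stack` available to the parallel build as well, where the new single-threaded fast path in plumbing.rs (below) needs it. The idea, sketched with simplified stand-in types (the real code walks `QueryJobId`s through a `QueryMap`):

    use std::collections::HashMap;

    type JobId = u64;

    struct JobInfo {
        parent: Option<JobId>,
        frame: String, // stands in for rustc's QueryStackFrame
    }

    // Walk from the currently executing job up through its parents. If we
    // reach `target` (the job we were about to wait on), the frames collected
    // on the way form the query cycle; otherwise no cycle runs through this
    // stack.
    fn find_cycle_in_stack(
        jobs: &HashMap<JobId, JobInfo>,
        current: Option<JobId>,
        target: JobId,
    ) -> Option<Vec<String>> {
        let mut cycle = Vec::new();
        let mut cursor = current;
        while let Some(id) = cursor {
            let info = jobs.get(&id)?;
            cycle.push(info.frame.clone());
            if id == target {
                cycle.reverse(); // report the cycle outermost-first
                return Some(cycle);
            }
            cursor = info.parent;
        }
        None
    }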

compiler/rustc_query_system/src/query/plumbing.rs

Lines changed: 32 additions & 56 deletions
@@ -12,28 +12,28 @@ use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobI
 use crate::query::SerializedDepNodeIndex;
 use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
 use crate::HandleCycleError;
+#[cfg(parallel_compiler)]
+use rustc_data_structures::cold_path;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sharded::Sharded;
 use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_data_structures::sync::Lock;
-#[cfg(parallel_compiler)]
-use rustc_data_structures::{cold_path, sharded::Sharded};
+
 use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
 use rustc_span::{Span, DUMMY_SP};
 use std::cell::Cell;
 use std::collections::hash_map::Entry;
 use std::fmt::Debug;
 use std::hash::Hash;
 use std::mem;
+use std::ops::DerefMut;
 use thin_vec::ThinVec;
 
 use super::QueryConfig;
 
 pub struct QueryState<K, D: DepKind> {
-    #[cfg(parallel_compiler)]
     active: Sharded<FxHashMap<K, QueryResult<D>>>,
-    #[cfg(not(parallel_compiler))]
-    active: Lock<FxHashMap<K, QueryResult<D>>>,
 }
 
 /// Indicates the state of a query for a given key in a query map.
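
With the cfg split gone, `QueryState::active` is a `Sharded` map in every build; the single-threaded case presumably pays only for one uncontended shard lock. A simplified sketch of what such a sharded map looks like (hypothetical: the real `rustc_data_structures::sharded::Sharded` uses rustc's own `Lock`, a fixed power-of-two shard count, and cache-line alignment):

    use std::collections::hash_map::RandomState;
    use std::hash::{BuildHasher, Hash, Hasher};
    use std::sync::{Mutex, MutexGuard};

    const SHARDS: usize = 32; // assumed; the real count is an implementation detail

    // A value split into independently locked shards, so threads touching
    // different keys rarely contend on the same lock.
    struct Sharded<T> {
        shards: Vec<Mutex<T>>,
        hasher: RandomState,
    }

    impl<T: Default> Sharded<T> {
        fn new() -> Self {
            Sharded {
                shards: (0..SHARDS).map(|_| Mutex::new(T::default())).collect(),
                hasher: RandomState::new(),
            }
        }

        // Route a key to its shard by hashing it.
        fn get_shard_by_value<K: Hash>(&self, key: &K) -> &Mutex<T> {
            let mut h = self.hasher.build_hasher();
            key.hash(&mut h);
            &self.shards[h.finish() as usize % SHARDS]
        }

        // Lock every shard at once, as `all_inactive` below does to check
        // that no query is in flight anywhere.
        fn lock_shards(&self) -> Vec<MutexGuard<'_, T>> {
            self.shards.iter().map(|s| s.lock().unwrap()).collect()
        }
    }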
@@ -52,15 +52,8 @@ where
     D: DepKind,
 {
     pub fn all_inactive(&self) -> bool {
-        #[cfg(parallel_compiler)]
-        {
-            let shards = self.active.lock_shards();
-            shards.iter().all(|shard| shard.is_empty())
-        }
-        #[cfg(not(parallel_compiler))]
-        {
-            self.active.lock().is_empty()
-        }
+        let shards = self.active.lock_shards();
+        shards.iter().all(|shard| shard.is_empty())
     }
 
     pub fn try_collect_active_jobs<Qcx: Copy>(
@@ -69,27 +62,11 @@ where
         make_query: fn(Qcx, K) -> QueryStackFrame<D>,
         jobs: &mut QueryMap<D>,
     ) -> Option<()> {
-        #[cfg(parallel_compiler)]
-        {
-            // We use try_lock_shards here since we are called from the
-            // deadlock handler, and this shouldn't be locked.
-            let shards = self.active.try_lock_shards()?;
-            for shard in shards.iter() {
-                for (k, v) in shard.iter() {
-                    if let QueryResult::Started(ref job) = *v {
-                        let query = make_query(qcx, *k);
-                        jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
-                    }
-                }
-            }
-        }
-        #[cfg(not(parallel_compiler))]
-        {
-            // We use try_lock here since we are called from the
-            // deadlock handler, and this shouldn't be locked.
-            // (FIXME: Is this relevant for non-parallel compilers? It doesn't
-            // really hurt much.)
-            for (k, v) in self.active.try_lock()?.iter() {
+        // We use try_lock_shards here since we are called from the
+        // deadlock handler, and this shouldn't be locked.
+        let shards = self.active.try_lock_shards()?;
+        for shard in shards.iter() {
+            for (k, v) in shard.iter() {
                 if let QueryResult::Started(ref job) = *v {
                     let query = make_query(qcx, *k);
                     jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
@@ -183,14 +160,10 @@ where
     cache.complete(key, result, dep_node_index);
 
     let job = {
-        #[cfg(parallel_compiler)]
-        let mut lock = state.active.get_shard_by_value(&key).lock();
-        #[cfg(not(parallel_compiler))]
-        let mut lock = state.active.lock();
-        match lock.remove(&key).unwrap() {
+        state.active.with_get_shard_by_value(&key, |lock| match lock.remove(&key).unwrap() {
             QueryResult::Started(job) => job,
             QueryResult::Poisoned => panic!(),
-        }
+        })
     };
 
     job.signal_complete();
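
The two cfg-gated lock acquisitions collapse into a single closure-based helper. Its name and call shape come from this diff; a plausible implementation, extending the hypothetical `Sharded` sketch shown earlier:

    use std::hash::Hash;

    impl<T: Default> Sharded<T> {
        // Lock the shard `key` hashes to, run `f` on its contents, and
        // release the lock when `f` returns.
        fn with_get_shard_by_value<K: Hash, R>(&self, key: &K, f: impl FnOnce(&mut T) -> R) -> R {
            f(&mut *self.get_shard_by_value(key).lock().unwrap())
        }
    }

Passing a closure also scopes the guard tightly: the `job` value can flow out of the block without the shard lock still being held.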
@@ -208,16 +181,14 @@ where
     // Poison the query so jobs waiting on it panic.
     let state = self.state;
     let job = {
-        #[cfg(parallel_compiler)]
-        let mut shard = state.active.get_shard_by_value(&self.key).lock();
-        #[cfg(not(parallel_compiler))]
-        let mut shard = state.active.lock();
-        let job = match shard.remove(&self.key).unwrap() {
-            QueryResult::Started(job) => job,
-            QueryResult::Poisoned => panic!(),
-        };
-        shard.insert(self.key, QueryResult::Poisoned);
-        job
+        state.active.with_get_shard_by_value(&self.key, |shard| {
+            let job = match shard.remove(&self.key).unwrap() {
+                QueryResult::Started(job) => job,
+                QueryResult::Poisoned => panic!(),
+            };
+            shard.insert(self.key, QueryResult::Poisoned);
+            job
+        })
     };
     // Also signal the completion of the job, so waiters
     // will continue execution.
@@ -254,7 +225,6 @@ where
 
 #[cold]
 #[inline(never)]
-#[cfg(not(parallel_compiler))]
 fn cycle_error<Q, Qcx>(
     query: Q,
     qcx: Qcx,
@@ -324,10 +294,8 @@ where
     Qcx: QueryContext,
 {
     let state = query.query_state(qcx);
-    #[cfg(parallel_compiler)]
     let mut state_lock = state.active.get_shard_by_value(&key).lock();
-    #[cfg(not(parallel_compiler))]
-    let mut state_lock = state.active.lock();
+    let lock = state_lock.deref_mut();
 
     // For the parallel compiler we need to check both the query cache and query state structures
     // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
@@ -344,7 +312,7 @@ where
 
     let current_job_id = qcx.current_query_job();
 
-    match state_lock.entry(key) {
+    match lock.entry(key) {
         Entry::Vacant(entry) => {
             // Nothing has computed or is computing the query, so we start a new job and insert it in the
             // state map.
@@ -370,6 +338,14 @@ where
         }
         #[cfg(parallel_compiler)]
         QueryResult::Started(job) => {
+            if std::intrinsics::likely(!rustc_data_structures::sync::active()) {
+                let id = job.id;
+                drop(state_lock);
+
+                // If we are single-threaded we know that we have cycle error,
+                // so we just return the error.
+                return cycle_error(query, qcx, id, span);
+            }
             // Get the latch out
             let latch = job.latch();
             drop(state_lock);
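
This branch is the heart of the runtime switch: in single-threaded mode no other thread can own a `QueryResult::Started` entry, so finding one means the current thread has re-entered a query it is already computing, which is by definition a cycle. The latch and deadlock-handler machinery is therefore skipped and `cycle_error` is called directly. `std::intrinsics::likely` is a nightly-only branch hint; marking `!sync::active()` as the likely case reflects that, even with `parallel_compiler` compiled in, running with a single thread is expected to remain the common configuration.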

src/bootstrap/config.rs

Lines changed: 1 addition & 1 deletion
@@ -1147,7 +1147,7 @@ impl Config {
         set(&mut config.use_lld, rust.use_lld);
         set(&mut config.lld_enabled, rust.lld);
         set(&mut config.llvm_tools_enabled, rust.llvm_tools);
-        config.rustc_parallel = rust.parallel_compiler.unwrap_or(false);
+        config.rustc_parallel = rust.parallel_compiler.unwrap_or(true);
         config.rustc_default_linker = rust.default_linker;
         config.musl_root = rust.musl_root.map(PathBuf::from);
         config.save_toolstates = rust.save_toolstates.map(PathBuf::from);
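
This one-line default flip is what the commit title refers to: when the bootstrap configuration leaves `parallel_compiler` unset under `[rust]`, rustc is now built with the parallel compiler compiled in, and opting out requires an explicit `false`. Whether a given run actually uses multiple threads is still decided at startup by `set_parallel_mode` above.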

src/librustdoc/lib.rs

Lines changed: 1 addition & 3 deletions
@@ -212,8 +212,6 @@ fn init_logging() {
         .with_verbose_exit(true)
         .with_verbose_entry(true)
         .with_indent_amount(2);
-    #[cfg(all(parallel_compiler, debug_assertions))]
-    let layer = layer.with_thread_ids(true).with_thread_names(true);
 
     use tracing_subscriber::layer::SubscriberExt;
     let subscriber = tracing_subscriber::Registry::default().with(filter).with(layer);
@@ -740,7 +738,7 @@ fn main_args(at_args: &[String]) -> MainResult {
     };
 
     // Set parallel mode before error handler creation, which will create `Lock`s.
-    interface::set_thread_safe_mode(&options.unstable_opts);
+    interface::set_parallel_mode(&options.unstable_opts);
 
     let diag = core::new_handler(
         options.error_format,
