@@ -12,28 +12,28 @@ use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobI
 use crate::query::SerializedDepNodeIndex;
 use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
 use crate::HandleCycleError;
+#[cfg(parallel_compiler)]
+use rustc_data_structures::cold_path;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sharded::Sharded;
 use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_data_structures::sync::Lock;
-#[cfg(parallel_compiler)]
-use rustc_data_structures::{cold_path, sharded::Sharded};
+
 use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
 use rustc_span::{Span, DUMMY_SP};
 use std::cell::Cell;
 use std::collections::hash_map::Entry;
 use std::fmt::Debug;
 use std::hash::Hash;
 use std::mem;
+use std::ops::DerefMut;
 use thin_vec::ThinVec;
 
 use super::QueryConfig;
 
 pub struct QueryState<K, D: DepKind> {
-    #[cfg(parallel_compiler)]
     active: Sharded<FxHashMap<K, QueryResult<D>>>,
-    #[cfg(not(parallel_compiler))]
-    active: Lock<FxHashMap<K, QueryResult<D>>>,
 }
 
 /// Indicates the state of a query for a given key in a query map.
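
With this change `QueryState::active` is always a `Sharded` map, so the struct and its users no longer need `cfg(parallel_compiler)` twins. For illustration only, here is a minimal stand-in for the idea (a hypothetical `ShardedMap` built on `std::sync::Mutex` with a fixed shard count; not the real `rustc_data_structures::sharded::Sharded`, which falls back to a single shard without the parallel compiler):

    use std::collections::hash_map::RandomState;
    use std::collections::HashMap;
    use std::hash::{BuildHasher, Hash, Hasher};
    use std::sync::Mutex;

    // Illustrative stand-in: one mutex-guarded map per shard, so unrelated
    // keys rarely contend on the same lock.
    const SHARDS: usize = 4;

    pub struct ShardedMap<K, V> {
        shards: [Mutex<HashMap<K, V>>; SHARDS],
        hasher: RandomState,
    }

    impl<K: Hash + Eq, V> ShardedMap<K, V> {
        pub fn new() -> Self {
            Self {
                shards: std::array::from_fn(|_| Mutex::new(HashMap::new())),
                hasher: RandomState::new(),
            }
        }

        // Lock only the shard that owns `key` and run `f` on its map.
        pub fn with_get_shard_by_value<R>(
            &self,
            key: &K,
            f: impl FnOnce(&mut HashMap<K, V>) -> R,
        ) -> R {
            let mut h = self.hasher.build_hasher();
            key.hash(&mut h);
            let idx = h.finish() as usize % SHARDS;
            f(&mut *self.shards[idx].lock().unwrap())
        }

        // Lock every shard in turn and check that all of them are empty,
        // mirroring what `QueryState::all_inactive` does after this change.
        pub fn all_empty(&self) -> bool {
            self.shards.iter().all(|s| s.lock().unwrap().is_empty())
        }
    }
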
@@ -52,15 +52,8 @@
     D: DepKind,
 {
     pub fn all_inactive(&self) -> bool {
-        #[cfg(parallel_compiler)]
-        {
-            let shards = self.active.lock_shards();
-            shards.iter().all(|shard| shard.is_empty())
-        }
-        #[cfg(not(parallel_compiler))]
-        {
-            self.active.lock().is_empty()
-        }
+        let shards = self.active.lock_shards();
+        shards.iter().all(|shard| shard.is_empty())
     }
 
     pub fn try_collect_active_jobs<Qcx: Copy>(
@@ -69,27 +62,11 @@ where
         make_query: fn(Qcx, K) -> QueryStackFrame<D>,
         jobs: &mut QueryMap<D>,
     ) -> Option<()> {
-        #[cfg(parallel_compiler)]
-        {
-            // We use try_lock_shards here since we are called from the
-            // deadlock handler, and this shouldn't be locked.
-            let shards = self.active.try_lock_shards()?;
-            for shard in shards.iter() {
-                for (k, v) in shard.iter() {
-                    if let QueryResult::Started(ref job) = *v {
-                        let query = make_query(qcx, *k);
-                        jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
-                    }
-                }
-            }
-        }
-        #[cfg(not(parallel_compiler))]
-        {
-            // We use try_lock here since we are called from the
-            // deadlock handler, and this shouldn't be locked.
-            // (FIXME: Is this relevant for non-parallel compilers? It doesn't
-            // really hurt much.)
-            for (k, v) in self.active.try_lock()?.iter() {
+        // We use try_lock_shards here since we are called from the
+        // deadlock handler, and this shouldn't be locked.
+        let shards = self.active.try_lock_shards()?;
+        for shard in shards.iter() {
+            for (k, v) in shard.iter() {
                 if let QueryResult::Started(ref job) = *v {
                     let query = make_query(qcx, *k);
                     jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
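
`try_collect_active_jobs` now always walks the shards with `try_lock_shards`, since the deadlock handler must never block on a lock that a stuck thread may already hold. Continuing the illustrative `ShardedMap` sketch above (an assumed helper, not rustc API), the same non-blocking scan looks like this:

    // Try each shard without blocking; give up (return None) if any shard is
    // currently locked, exactly because we may be running inside a deadlock
    // handler.
    impl<K: Copy, V: Clone> ShardedMap<K, V> {
        pub fn try_collect(&self) -> Option<Vec<(K, V)>> {
            let mut out = Vec::new();
            for shard in &self.shards {
                // `try_lock` fails immediately instead of waiting.
                let guard = shard.try_lock().ok()?;
                out.extend(guard.iter().map(|(k, v)| (*k, v.clone())));
            }
            Some(out)
        }
    }
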
@@ -183,14 +160,10 @@ where
         cache.complete(key, result, dep_node_index);
 
         let job = {
-            #[cfg(parallel_compiler)]
-            let mut lock = state.active.get_shard_by_value(&key).lock();
-            #[cfg(not(parallel_compiler))]
-            let mut lock = state.active.lock();
-            match lock.remove(&key).unwrap() {
+            state.active.with_get_shard_by_value(&key, |lock| match lock.remove(&key).unwrap() {
                 QueryResult::Started(job) => job,
                 QueryResult::Poisoned => panic!(),
-            }
+            })
         };
 
         job.signal_complete();
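
Job completion now funnels through `with_get_shard_by_value`, which locks the owning shard, runs a closure against it, and releases the lock on return, so the parallel and single-threaded call sites collapse into one path. A usage sketch against the illustrative `ShardedMap` above (the `JobState` and `take_finished_job` names are hypothetical):

    enum JobState {
        Started(u64), // stand-in for the real query job
        Poisoned,
    }

    // Remove the finished job for `key` while its shard is locked, panicking
    // if the entry was poisoned, mirroring the new call site in the diff.
    fn take_finished_job(active: &ShardedMap<u32, JobState>, key: u32) -> u64 {
        active.with_get_shard_by_value(&key, |shard| match shard.remove(&key).unwrap() {
            JobState::Started(id) => id,
            JobState::Poisoned => panic!("query for {key} was poisoned"),
        })
    }
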
@@ -208,16 +181,14 @@ where
         // Poison the query so jobs waiting on it panic.
         let state = self.state;
         let job = {
-            #[cfg(parallel_compiler)]
-            let mut shard = state.active.get_shard_by_value(&self.key).lock();
-            #[cfg(not(parallel_compiler))]
-            let mut shard = state.active.lock();
-            let job = match shard.remove(&self.key).unwrap() {
-                QueryResult::Started(job) => job,
-                QueryResult::Poisoned => panic!(),
-            };
-            shard.insert(self.key, QueryResult::Poisoned);
-            job
+            state.active.with_get_shard_by_value(&self.key, |shard| {
+                let job = match shard.remove(&self.key).unwrap() {
+                    QueryResult::Started(job) => job,
+                    QueryResult::Poisoned => panic!(),
+                };
+                shard.insert(self.key, QueryResult::Poisoned);
+                job
+            })
         };
         // Also signal the completion of the job, so waiters
         // will continue execution.
@@ -254,7 +225,6 @@ where
 
 #[cold]
 #[inline(never)]
-#[cfg(not(parallel_compiler))]
 fn cycle_error<Q, Qcx>(
     query: Q,
     qcx: Qcx,
@@ -324,10 +294,8 @@ where
     Qcx: QueryContext,
 {
     let state = query.query_state(qcx);
-    #[cfg(parallel_compiler)]
     let mut state_lock = state.active.get_shard_by_value(&key).lock();
-    #[cfg(not(parallel_compiler))]
-    let mut state_lock = state.active.lock();
+    let lock = state_lock.deref_mut();
 
     // For the parallel compiler we need to check both the query cache and query state structures
     // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
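
Taking `state_lock.deref_mut()` up front gives the rest of the function a plain `&mut` to the underlying map, so the code below does not care which guard type produced the lock. The same pattern with `std` types, purely as an illustration:

    use std::collections::HashMap;
    use std::ops::DerefMut;
    use std::sync::Mutex;

    fn demo() {
        let m: Mutex<HashMap<u32, &'static str>> = Mutex::new(HashMap::new());
        let mut guard = m.lock().unwrap();
        // Reborrow the guarded map once; `map` is a plain `&mut HashMap<_, _>`,
        // so everything after this line is guard-type agnostic.
        let map = guard.deref_mut();
        map.entry(1).or_insert("started");
        assert_eq!(map.get(&1), Some(&"started"));
    }
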
@@ -344,7 +312,7 @@ where
 
     let current_job_id = qcx.current_query_job();
 
-    match state_lock.entry(key) {
+    match lock.entry(key) {
         Entry::Vacant(entry) => {
             // Nothing has computed or is computing the query, so we start a new job and insert it in the
             // state map.
@@ -370,6 +338,14 @@ where
                 }
                 #[cfg(parallel_compiler)]
                 QueryResult::Started(job) => {
+                    if std::intrinsics::likely(!rustc_data_structures::sync::active()) {
+                        let id = job.id;
+                        drop(state_lock);
+
+                        // If we are single-threaded we know that we have cycle error,
+                        // so we just return the error.
+                        return cycle_error(query, qcx, id, span);
+                    }
                     // Get the latch out
                     let latch = job.latch();
                     drop(state_lock);
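
The added branch short-circuits when `rustc_data_structures::sync::active()` reports that no parallelism is in use: with a single thread, finding an already-started entry for the key can only mean the current thread re-entered the same query, so blocking on the job's latch would hang, and the code reports a cycle instead. A self-contained sketch of that reasoning (hypothetical names, not the rustc API):

    use std::collections::HashSet;

    // With only one thread, re-entering a query that is already "started" is
    // by definition a cycle; in a parallel build another thread may own the
    // job, so the real code would wait on its latch instead.
    fn enter_query(
        active: &mut HashSet<&'static str>,
        name: &'static str,
        threads_active: bool,
    ) -> Result<(), String> {
        if !active.insert(name) {
            if !threads_active {
                // Nobody else can finish this query for us; report the cycle now.
                return Err(format!("cycle detected while computing `{name}`"));
            }
            // Parallel case: block on the owning job's latch (omitted here).
        }
        Ok(())
    }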
0 commit comments