 //! improved.

 use std::cell::Cell;
-use std::collections::{HashMap, HashSet};
+use std::collections::{BTreeMap, HashMap, HashSet};
 use std::io;
 use std::marker;
 use std::mem;
@@ -119,11 +119,8 @@ struct DrainState<'a, 'cfg> {
     rustc_tokens: HashMap<JobId, Vec<Acquired>>,

     /// This represents the list of rustc jobs (processes) and associated
-    /// clients that are interested in receiving a token. Note that each process
-    /// may be present many times (if it has requested multiple tokens).
-    // We use a vec here as we don't want to order randomly which rustc we give
-    // tokens to.
-    to_send_clients: Vec<(JobId, Client)>,
+    /// clients that are interested in receiving a token.
+    to_send_clients: BTreeMap<JobId, Vec<Client>>,

     /// The list of jobs that we have not yet started executing, but have
     /// retrieved from the `queue`. We eagerly pull jobs off the main queue to
@@ -135,7 +132,7 @@ struct DrainState<'a, 'cfg> {
     finished: usize,
 }

-#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
 pub struct JobId(pub u32);

 impl std::fmt::Display for JobId {
@@ -357,7 +354,7 @@ impl<'a, 'cfg> JobQueue<'a, 'cfg> {

             tokens: Vec::new(),
             rustc_tokens: HashMap::new(),
-            to_send_clients: Vec::new(),
+            to_send_clients: BTreeMap::new(),
             pending_queue: Vec::new(),
             print: DiagnosticPrinter::new(cx.bcx.config),
             finished: 0,
@@ -428,11 +425,26 @@ impl<'a, 'cfg> DrainState<'a, 'cfg> {
         self.active.len() < self.tokens.len() + 1
     }

+    // The oldest job (i.e., least job ID) is the one we grant tokens to first.
+    fn pop_waiting_client(&mut self) -> (JobId, Client) {
+        // FIXME: replace this with BTreeMap::first_entry when that stabilizes.
+        let key = *self
+            .to_send_clients
+            .keys()
+            .next()
+            .expect("at least one waiter");
+        let clients = self.to_send_clients.get_mut(&key).unwrap();
+        let client = clients.pop().unwrap();
+        if clients.is_empty() {
+            self.to_send_clients.remove(&key);
+        }
+        (key, client)
+    }
+
     // If we managed to acquire some extra tokens, send them off to a waiting rustc.
     fn grant_rustc_token_requests(&mut self) -> CargoResult<()> {
         while !self.to_send_clients.is_empty() && self.has_extra_tokens() {
-            // Remove from the front so we grant the token to the oldest waiter
-            let (id, client) = self.to_send_clients.remove(0);
+            let (id, client) = self.pop_waiting_client();
             // This unwrap is guaranteed to succeed. `active` must be at least
             // length 1, as otherwise there can't be a client waiting to be sent
             // on, so tokens.len() must also be at least one.
@@ -494,12 +506,7 @@ impl<'a, 'cfg> DrainState<'a, 'cfg> {
                             // completely.
                             self.tokens.extend(rustc_tokens);
                         }
-                        while let Some(pos) =
-                            self.to_send_clients.iter().position(|(i, _)| *i == id)
-                        {
-                            // drain all the pending clients
-                            self.to_send_clients.remove(pos);
-                        }
+                        self.to_send_clients.remove(&id);
                         self.active.remove(&id).unwrap()
                     }
                     // ... otherwise if it hasn't finished we leave it
@@ -536,7 +543,10 @@ impl<'a, 'cfg> DrainState<'a, 'cfg> {
             Message::NeedsToken(id, client) => {
                 log::info!("queue token request");
                 jobserver_helper.request_token();
-                self.to_send_clients.push((id, client));
+                self.to_send_clients
+                    .entry(id)
+                    .or_insert_with(Vec::new)
+                    .push(client);
             }
             Message::ReleaseToken(id) => {
                 // Note that this pops off potentially a completely
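
Not part of the commit itself, but as a rough sketch of what the data-structure change buys: the standalone program below uses hypothetical stand-in types (a plain `JobId(u32)` and `String` in place of `jobserver::Client`) and a toy `pop_waiting_client` shaped like the helper added above, to show that a `BTreeMap` keyed by an `Ord`-deriving `JobId` always grants the next token to the waiter with the least job id.

// Sketch only, with stand-in types; not cargo's actual code.
use std::collections::BTreeMap;

// Deriving PartialOrd/Ord (as the patch does for cargo's JobId) is what
// lets JobId serve as an ordered BTreeMap key.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
struct JobId(u32);

// Same shape as the new `pop_waiting_client` helper, with `String`
// standing in for `jobserver::Client`.
fn pop_waiting_client(waiters: &mut BTreeMap<JobId, Vec<String>>) -> (JobId, String) {
    // `keys()` iterates in ascending order, so `next()` is the least JobId.
    let key = *waiters.keys().next().expect("at least one waiter");
    let clients = waiters.get_mut(&key).unwrap();
    let client = clients.pop().unwrap();
    if clients.is_empty() {
        waiters.remove(&key);
    }
    (key, client)
}

fn main() {
    let mut waiters: BTreeMap<JobId, Vec<String>> = BTreeMap::new();
    // Requests arrive out of order; job 3 asks for two tokens.
    for &(id, client) in [(7, "c"), (3, "a"), (3, "b"), (9, "d")].iter() {
        // Mirrors the `entry(id).or_insert_with(Vec::new).push(client)` pattern
        // used when handling Message::NeedsToken.
        waiters.entry(JobId(id)).or_insert_with(Vec::new).push(client.to_string());
    }
    // Tokens are granted to the lowest job id first: 3, 3, 7, 9.
    while !waiters.is_empty() {
        let (id, client) = pop_waiting_client(&mut waiters);
        println!("grant token to job {:?} via client {}", id, client);
    }
}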