@@ -12,6 +12,7 @@ use std::sync::{
     Mutex,
 };
 use std::time::Duration;
+use systemstat::{Platform, System};

 pub(super) struct Worker<'a, DB: WriteResults + Sync> {
     name: String,
@@ -55,16 +56,32 @@ impl<'a, DB: WriteResults + Sync> Worker<'a, DB> {
         &self.name
     }

-    pub(super) fn run(&self) -> Fallible<()> {
+    pub(super) fn run(&self, threads_count: usize) -> Fallible<()> {
         // This uses a `loop` instead of a `while let` to avoid locking the graph too much
         let mut guard = self.graph.lock().unwrap();
+        let system = System::new();

         loop {
             self.maybe_cleanup_target_dir()?;
             let walk_result = guard.next_task(self.ex, self.db, &self.name);
             match walk_result {
                 WalkResult::Task(id, task) => {
                     drop(guard);
                     info!("running task: {:?}", task);
+
+                    // Wait for 15 seconds before running if the 1 minute load
+                    // average exceeds the thread count. This tries to back off
+                    // from spawning too many jobs on the server, hopefully
+                    // improving performance.
+                    loop {
+                        let avg = system.load_average()?;
+
+                        if avg.one > threads_count as f32 {
+                            std::thread::sleep(std::time::Duration::new(15, 0));
+                        } else {
+                            break;
+                        }
+                    }
+
                     let res = task.run(
                         self.config,
                         self.workspace,
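For reference, a minimal standalone sketch of the load-average backoff added above, using the same `systemstat` calls (`System::new()`, `load_average()`, and the `one`-minute field). The `wait_for_load` helper, the `main` driver, and the thread count of 8 are illustrative assumptions, not part of this change.

```rust
// Illustrative sketch only: `wait_for_load` is a hypothetical helper
// that mirrors the backoff loop introduced in this commit.
use std::io;
use std::thread::sleep;
use std::time::Duration;

use systemstat::{Platform, System};

/// Poll every 15 seconds until the 1-minute load average drops to or
/// below `threads_count`, then return.
fn wait_for_load(system: &System, threads_count: usize) -> io::Result<()> {
    loop {
        let avg = system.load_average()?;
        if avg.one > threads_count as f32 {
            sleep(Duration::new(15, 0));
        } else {
            return Ok(());
        }
    }
}

fn main() -> io::Result<()> {
    let system = System::new();
    // Assumed worker count; in the real code this comes from the caller of `run`.
    wait_for_load(&system, 8)?;
    println!("load average is low enough, proceeding");
    Ok(())
}
```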