@@ -5465,9 +5465,33 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
 
-static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
+static void set_delayed(struct sched_entity *se)
+{
+	se->sched_delayed = 1;
+	for_each_sched_entity(se) {
+		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+		cfs_rq->h_nr_delayed++;
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+}
+
+static void clear_delayed(struct sched_entity *se)
 {
 	se->sched_delayed = 0;
+	for_each_sched_entity(se) {
+		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+		cfs_rq->h_nr_delayed--;
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+}
+
+static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
+{
+	clear_delayed(se);
 	if (sched_feat(DELAY_ZERO) && se->vlag > 0)
 		se->vlag = 0;
 }
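
The first hunk is the core of the change: set_delayed() and clear_delayed() replace the open-coded se->sched_delayed assignments and, besides flipping the per-entity flag, walk the ancestor hierarchy with for_each_sched_entity() to adjust each level's h_nr_delayed count, stopping once a throttled cfs_rq is reached (levels above a throttled group are moved in bulk by the throttle/unthrottle hunks below). As a rough illustration of that accounting pattern only — the toy_* types and the main() driver are simplified stand-ins invented here, not kernel code — a userspace sketch could look like this:

#include <assert.h>
#include <stdio.h>

/* Simplified stand-ins for struct cfs_rq / struct sched_entity. */
struct toy_cfs_rq {
	struct toy_cfs_rq *parent;   /* NULL at the root */
	int throttled;               /* models cfs_rq_throttled() */
	long h_nr_delayed;           /* delayed-dequeue entities below this level */
};

struct toy_entity {
	struct toy_cfs_rq *cfs_rq;   /* queue this entity lives on */
	int sched_delayed;
};

/*
 * Walk from the entity's queue toward the root, stopping after a throttled
 * level: counters above a throttled cfs_rq are adjusted in bulk by the
 * throttle/unthrottle paths, so they must not be touched twice.
 */
static void toy_set_delayed(struct toy_entity *se)
{
	se->sched_delayed = 1;
	for (struct toy_cfs_rq *rq = se->cfs_rq; rq; rq = rq->parent) {
		rq->h_nr_delayed++;
		if (rq->throttled)
			break;
	}
}

static void toy_clear_delayed(struct toy_entity *se)
{
	se->sched_delayed = 0;
	for (struct toy_cfs_rq *rq = se->cfs_rq; rq; rq = rq->parent) {
		rq->h_nr_delayed--;
		if (rq->throttled)
			break;
	}
}

int main(void)
{
	struct toy_cfs_rq root = { .parent = NULL };
	struct toy_cfs_rq group = { .parent = &root };
	struct toy_entity task = { .cfs_rq = &group };

	toy_set_delayed(&task);
	assert(group.h_nr_delayed == 1 && root.h_nr_delayed == 1);

	toy_clear_delayed(&task);
	assert(group.h_nr_delayed == 0 && root.h_nr_delayed == 0);

	printf("hierarchical delayed counters balance: %ld/%ld\n",
	       group.h_nr_delayed, root.h_nr_delayed);
	return 0;
}

The invariant the sketch checks is the one the patch appears to rely on: each delayed entity is counted exactly once on every unthrottled level above it, so the counts return to zero once the delay is cleared.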
@@ -5496,7 +5520,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 		if (sched_feat(DELAY_DEQUEUE) && delay &&
 		    !entity_eligible(cfs_rq, se)) {
 			update_load_avg(cfs_rq, se, 0);
-			se->sched_delayed = 1;
+			set_delayed(se);
 			return false;
 		}
 	}
@@ -5908,7 +5932,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long task_delta, idle_task_delta, dequeue = 1;
+	long task_delta, idle_task_delta, delayed_delta, dequeue = 1;
 	long rq_h_nr_running = rq->cfs.h_nr_running;
 
 	raw_spin_lock(&cfs_b->lock);
@@ -5941,6 +5965,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	task_delta = cfs_rq->h_nr_running;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
+	delayed_delta = cfs_rq->h_nr_delayed;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 		int flags;
@@ -5964,6 +5989,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running -= task_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
+		qcfs_rq->h_nr_delayed -= delayed_delta;
 
 		if (qcfs_rq->load.weight) {
 			/* Avoid re-evaluating load for this entity: */
@@ -5986,6 +6012,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running -= task_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
+		qcfs_rq->h_nr_delayed -= delayed_delta;
 	}
 
 	/* At this point se is NULL and we are at root level*/
@@ -6011,7 +6038,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long task_delta, idle_task_delta;
+	long task_delta, idle_task_delta, delayed_delta;
 	long rq_h_nr_running = rq->cfs.h_nr_running;
 
 	se = cfs_rq->tg->se[cpu_of(rq)];
@@ -6047,6 +6074,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	task_delta = cfs_rq->h_nr_running;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
+	delayed_delta = cfs_rq->h_nr_delayed;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 
@@ -6064,6 +6092,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running += task_delta;
 		qcfs_rq->idle_h_nr_running += idle_task_delta;
+		qcfs_rq->h_nr_delayed += delayed_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(qcfs_rq))
@@ -6081,6 +6110,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running += task_delta;
 		qcfs_rq->idle_h_nr_running += idle_task_delta;
+		qcfs_rq->h_nr_delayed += delayed_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(qcfs_rq))
@@ -6934,7 +6964,7 @@ requeue_delayed_entity(struct sched_entity *se)
 	}
 
 	update_load_avg(cfs_rq, se, 0);
-	se->sched_delayed = 0;
+	clear_delayed(se);
 }
 
 /*
@@ -6948,6 +6978,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
 	int idle_h_nr_running = task_has_idle_policy(p);
+	int h_nr_delayed = 0;
 	int task_new = !(flags & ENQUEUE_WAKEUP);
 	int rq_h_nr_running = rq->cfs.h_nr_running;
 	u64 slice = 0;
@@ -6974,6 +7005,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	if (p->in_iowait)
 		cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
 
+	if (task_new)
+		h_nr_delayed = !!se->sched_delayed;
+
 	for_each_sched_entity(se) {
 		if (se->on_rq) {
 			if (se->sched_delayed)
@@ -6996,6 +7030,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		cfs_rq->h_nr_running++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+		cfs_rq->h_nr_delayed += h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = 1;
@@ -7019,6 +7054,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		cfs_rq->h_nr_running++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+		cfs_rq->h_nr_delayed += h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = 1;
@@ -7081,13 +7117,16 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 	struct task_struct *p = NULL;
 	int idle_h_nr_running = 0;
 	int h_nr_running = 0;
+	int h_nr_delayed = 0;
 	struct cfs_rq *cfs_rq;
 	u64 slice = 0;
 
 	if (entity_is_task(se)) {
 		p = task_of(se);
 		h_nr_running = 1;
 		idle_h_nr_running = task_has_idle_policy(p);
+		if (!task_sleep && !task_delayed)
+			h_nr_delayed = !!se->sched_delayed;
 	} else {
 		cfs_rq = group_cfs_rq(se);
 		slice = cfs_rq_min_slice(cfs_rq);
@@ -7105,6 +7144,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 
 		cfs_rq->h_nr_running -= h_nr_running;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+		cfs_rq->h_nr_delayed -= h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = h_nr_running;
@@ -7143,6 +7183,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 
 		cfs_rq->h_nr_running -= h_nr_running;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+		cfs_rq->h_nr_delayed -= h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = h_nr_running;
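
Beyond the hierarchy walks, the remaining hunks thread a per-task h_nr_delayed contribution through enqueue_task_fair(), dequeue_entities() and the throttle/unthrottle paths so that migrating or throttling a still-delayed task keeps the counters balanced. As the conditions in the hunks above read (task_new on enqueue, !task_sleep && !task_delayed on dequeue), only a non-wakeup enqueue or a non-sleep, non-DEQUEUE_DELAYED dequeue of a delayed task shifts the count; the wakeup and deferred-dequeue cases go through clear_delayed() instead. A small self-contained model of just that decision — the helper names below are invented for illustration and are not part of the patch — might be:

#include <assert.h>
#include <stdbool.h>

/*
 * Illustrative model (not kernel code) of the conditions the hunks above
 * use to decide whether a task's enqueue or dequeue should shift the
 * hierarchy's h_nr_delayed counters.
 */

/* Enqueue: only a non-wakeup enqueue (e.g. migration) of a still-delayed
 * task adds to h_nr_delayed; a wakeup of a delayed task is handled by
 * requeue_delayed_entity(), which calls clear_delayed(). */
static int enqueue_delayed_contrib(bool task_new, bool sched_delayed)
{
	return task_new ? (int)sched_delayed : 0;
}

/* Dequeue: a plain dequeue (not going to sleep, not the deferred
 * DEQUEUE_DELAYED completion) of a delayed task removes its contribution;
 * the deferred completion goes through finish_delayed_dequeue_entity(). */
static int dequeue_delayed_contrib(bool task_sleep, bool task_delayed,
				   bool sched_delayed)
{
	return (!task_sleep && !task_delayed) ? (int)sched_delayed : 0;
}

int main(void)
{
	/* Migrating a delayed task onto a CPU bumps the counters once... */
	assert(enqueue_delayed_contrib(true, true) == 1);
	/* ...while a wakeup enqueue never does. */
	assert(enqueue_delayed_contrib(false, true) == 0);

	/* Migrating a delayed task off a CPU drops its contribution... */
	assert(dequeue_delayed_contrib(false, false, true) == 1);
	/* ...but the final DEQUEUE_DELAYED pass leaves it to clear_delayed(). */
	assert(dequeue_delayed_contrib(false, true, true) == 0);
	return 0;
}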