
Commit f0a0bd3

Waiman-Long authored and htejun committed

cgroup/cpuset: Code cleanup and comment update

Rename partition_xcpus_newstate() to isolated_cpus_update(),
update_partition_exclusive() to update_partition_exclusive_flag() and the
new_xcpus_state variable to isolcpus_updated to make their meanings more
explicit. Also add some comments to further clarify the code. No functional
change is expected.

Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>

1 parent 6da580e commit f0a0bd3
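
In short, the renames in this commit are:

    partition_xcpus_newstate()    ->  isolated_cpus_update()
    update_partition_exclusive()  ->  update_partition_exclusive_flag()
    new_xcpus_state (local var)   ->  isolcpus_updated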

File tree

1 file changed, +38 −23 lines


kernel/cgroup/cpuset.c

Lines changed: 38 additions & 23 deletions
@@ -65,7 +65,13 @@ static const char * const perr_strings[] = {
 };
 
 /*
- * Exclusive CPUs distributed out to sub-partitions of top_cpuset
+ * For local partitions, update to subpartitions_cpus & isolated_cpus is done
+ * in update_parent_effective_cpumask(). For remote partitions, it is done in
+ * the remote_partition_*() and remote_cpus_update() helpers.
+ */
+/*
+ * Exclusive CPUs distributed out to local or remote sub-partitions of
+ * top_cpuset
  */
 static cpumask_var_t subpartitions_cpus;
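
Note: the new comment block refers to two file-scope masks. subpartitions_cpus is
shown in the hunk above; its companion isolated_cpus, which tracks exclusive CPUs
in isolated partitions, is declared just below this hunk and is not part of this
diff. Roughly:

	/*
	 * Exclusive CPUs in isolated partitions
	 */
	static cpumask_var_t isolated_cpus;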

@@ -1089,9 +1095,14 @@ void cpuset_reset_sched_domains(void)
  *
  * Iterate through each task of @cs updating its cpus_allowed to the
  * effective cpuset's. As this function is called with cpuset_mutex held,
- * cpuset membership stays stable. For top_cpuset, task_cpu_possible_mask()
- * is used instead of effective_cpus to make sure all offline CPUs are also
- * included as hotplug code won't update cpumasks for tasks in top_cpuset.
+ * cpuset membership stays stable.
+ *
+ * For top_cpuset, task_cpu_possible_mask() is used instead of effective_cpus
+ * to make sure all offline CPUs are also included as hotplug code won't
+ * update cpumasks for tasks in top_cpuset.
+ *
+ * As task_cpu_possible_mask() can be task dependent in arm64, we have to
+ * do cpu masking per task instead of doing it once for all.
 */
 void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
 {
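
The last added paragraph is the key behavioral note: task_cpu_possible_mask() can
differ from task to task (e.g. 32-bit tasks on asymmetric arm64 systems), so the
cpumask must be computed per task rather than once per cpuset. A simplified sketch
of the loop shape this implies (the real function also special-cases top_cpuset
and percpu kthreads):

	void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
	{
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&cs->css, 0, &it);
		while ((task = css_task_iter_next(&it))) {
			/*
			 * task_cpu_possible_mask() may be task dependent, so
			 * intersect per task instead of computing one mask
			 * for the whole cpuset.
			 */
			cpumask_and(new_cpus, task_cpu_possible_mask(task),
				    cs->effective_cpus);
			set_cpus_allowed_ptr(task, new_cpus);
		}
		css_task_iter_end(&it);
	}
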
@@ -1151,7 +1162,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
  *
  * Return: 0 if successful, an error code otherwise
  */
-static int update_partition_exclusive(struct cpuset *cs, int new_prs)
+static int update_partition_exclusive_flag(struct cpuset *cs, int new_prs)
 {
 	bool exclusive = (new_prs > PRS_MEMBER);
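
For reference, the renamed function only toggles the CS_CPU_EXCLUSIVE flag, which
is why the "_flag" suffix makes the name more explicit. A sketch of its body
(approximate; helper names follow the cpuset.c of this era and may differ
slightly):

	static int update_partition_exclusive_flag(struct cpuset *cs, int new_prs)
	{
		bool exclusive = (new_prs > PRS_MEMBER);

		if (exclusive && !is_cpu_exclusive(cs)) {
			if (cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 1))
				return PERR_NOTEXCL;
		} else if (!exclusive && is_cpu_exclusive(cs)) {
			/* Turning off the exclusive flag shouldn't fail */
			cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 0);
		}
		return 0;
	}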

@@ -1234,12 +1245,12 @@ static void reset_partition_data(struct cpuset *cs)
 }
 
 /*
- * partition_xcpus_newstate - Exclusive CPUs state change
+ * isolated_cpus_update - Update the isolated_cpus mask
  * @old_prs: old partition_root_state
  * @new_prs: new partition_root_state
  * @xcpus: exclusive CPUs with state change
  */
-static void partition_xcpus_newstate(int old_prs, int new_prs, struct cpumask *xcpus)
+static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus)
 {
 	WARN_ON_ONCE(old_prs == new_prs);
 	if (new_prs == PRS_ISOLATED)
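
The rest of the function is unchanged and therefore not shown in the hunk, but it
is what actually maintains the mask: entering PRS_ISOLATED adds @xcpus to the
file-scope isolated_cpus cpumask, leaving it removes them. Roughly:

	static void isolated_cpus_update(int old_prs, int new_prs,
					 struct cpumask *xcpus)
	{
		WARN_ON_ONCE(old_prs == new_prs);
		if (new_prs == PRS_ISOLATED)
			/* Becoming isolated: mark these CPUs isolated */
			cpumask_or(isolated_cpus, isolated_cpus, xcpus);
		else
			/* No longer isolated: clear them */
			cpumask_andnot(isolated_cpus, isolated_cpus, xcpus);
	}
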
@@ -1273,8 +1284,8 @@ static bool partition_xcpus_add(int new_prs, struct cpuset *parent,
 
 	isolcpus_updated = (new_prs != parent->partition_root_state);
 	if (isolcpus_updated)
-		partition_xcpus_newstate(parent->partition_root_state, new_prs,
-					 xcpus);
+		isolated_cpus_update(parent->partition_root_state, new_prs,
+				     xcpus);
 
 	cpumask_andnot(parent->effective_cpus, parent->effective_cpus, xcpus);
 	return isolcpus_updated;
@@ -1304,8 +1315,8 @@ static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
 
 	isolcpus_updated = (old_prs != parent->partition_root_state);
 	if (isolcpus_updated)
-		partition_xcpus_newstate(old_prs, parent->partition_root_state,
-					 xcpus);
+		isolated_cpus_update(old_prs, parent->partition_root_state,
+				     xcpus);
 
 	cpumask_and(xcpus, xcpus, cpu_active_mask);
 	cpumask_or(parent->effective_cpus, parent->effective_cpus, xcpus);
@@ -1634,8 +1645,8 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
 	int old_prs, new_prs;
 	int part_error = PERR_NONE;	/* Partition error? */
 	int subparts_delta = 0;
-	struct cpumask *xcpus;		/* cs effective_xcpus */
 	int isolcpus_updated = 0;
+	struct cpumask *xcpus = user_xcpus(cs);
 	bool nocpu;
 
 	lockdep_assert_held(&cpuset_mutex);
@@ -1647,7 +1658,6 @@
 	 */
 	adding = deleting = false;
 	old_prs = new_prs = cs->partition_root_state;
-	xcpus = user_xcpus(cs);
 
 	if (cmd == partcmd_invalidate) {
 		if (is_prs_invalid(old_prs))
@@ -1861,7 +1871,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
 	 * CPU lists in cs haven't been updated yet. So defer it to later.
 	 */
 	if ((old_prs != new_prs) && (cmd != partcmd_update)) {
-		int err = update_partition_exclusive(cs, new_prs);
+		int err = update_partition_exclusive_flag(cs, new_prs);
 
 		if (err)
 			return err;
@@ -1899,7 +1909,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
 	update_unbound_workqueue_cpumask(isolcpus_updated);
 
 	if ((old_prs != new_prs) && (cmd == partcmd_update))
-		update_partition_exclusive(cs, new_prs);
+		update_partition_exclusive_flag(cs, new_prs);
 
 	if (adding || deleting) {
 		cpuset_update_tasks_cpumask(parent, tmp->addmask);
@@ -2829,7 +2839,7 @@ static int update_prstate(struct cpuset *cs, int new_prs)
 	int err = PERR_NONE, old_prs = cs->partition_root_state;
 	struct cpuset *parent = parent_cs(cs);
 	struct tmpmasks tmpmask;
-	bool new_xcpus_state = false;
+	bool isolcpus_updated = false;
 
 	if (old_prs == new_prs)
 		return 0;
@@ -2843,7 +2853,7 @@ static int update_prstate(struct cpuset *cs, int new_prs)
 	if (alloc_cpumasks(NULL, &tmpmask))
 		return -ENOMEM;
 
-	err = update_partition_exclusive(cs, new_prs);
+	err = update_partition_exclusive_flag(cs, new_prs);
 	if (err)
 		goto out;
@@ -2884,8 +2894,9 @@
 	} else if (old_prs && new_prs) {
 		/*
 		 * A change in load balance state only, no change in cpumasks.
+		 * Need to update isolated_cpus.
 		 */
-		new_xcpus_state = true;
+		isolcpus_updated = true;
 	} else {
 		/*
 		 * Switching back to member is always allowed even if it
@@ -2909,22 +2920,26 @@
 	 */
 	if (err) {
 		new_prs = -new_prs;
-		update_partition_exclusive(cs, new_prs);
+		update_partition_exclusive_flag(cs, new_prs);
 	}
 
 	spin_lock_irq(&callback_lock);
 	cs->partition_root_state = new_prs;
 	WRITE_ONCE(cs->prs_err, err);
 	if (!is_partition_valid(cs))
 		reset_partition_data(cs);
-	else if (new_xcpus_state)
-		partition_xcpus_newstate(old_prs, new_prs, cs->effective_xcpus);
+	else if (isolcpus_updated)
+		isolated_cpus_update(old_prs, new_prs, cs->effective_xcpus);
 	spin_unlock_irq(&callback_lock);
-	update_unbound_workqueue_cpumask(new_xcpus_state);
+	update_unbound_workqueue_cpumask(isolcpus_updated);
 
-	/* Force update if switching back to member */
+	/* Force update if switching back to member & update effective_xcpus */
 	update_cpumasks_hier(cs, &tmpmask, !new_prs);
 
+	/* A newly created partition must have effective_xcpus set */
+	WARN_ON_ONCE(!old_prs && (new_prs > 0)
+			&& cpumask_empty(cs->effective_xcpus));
+
 	/* Update sched domains and load balance flag */
 	update_partition_sd_lb(cs, old_prs);
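
A note on the isolcpus_updated flag that replaces new_xcpus_state: it feeds
update_unbound_workqueue_cpumask(), which re-excludes the current isolated_cpus
from the unbound-workqueue cpumask only when the isolated set actually changed.
A sketch of that helper (based on the surrounding cpuset.c, not part of this
diff):

	static void update_unbound_workqueue_cpumask(bool isolcpus_updated)
	{
		int ret;

		lockdep_assert_cpus_held();

		if (!isolcpus_updated)
			return;

		/* Exclude all isolated CPUs from unbound workqueues */
		ret = workqueue_unbound_exclude_cpumask(isolated_cpus);
		WARN_ON_ONCE(ret < 0);
	}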
