@@ -1406,6 +1406,7 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
 	list_add(&cs->remote_sibling, &remote_children);
 	spin_unlock_irq(&callback_lock);
 	update_unbound_workqueue_cpumask(isolcpus_updated);
+	cs->prs_err = 0;
 
 	/*
 	 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
@@ -1436,9 +1437,11 @@ static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
 	list_del_init(&cs->remote_sibling);
 	isolcpus_updated = partition_xcpus_del(cs->partition_root_state,
 					       NULL, tmp->new_cpus);
-	cs->partition_root_state = -cs->partition_root_state;
-	if (!cs->prs_err)
-		cs->prs_err = PERR_INVCPUS;
+	if (cs->prs_err)
+		cs->partition_root_state = -cs->partition_root_state;
+	else
+		cs->partition_root_state = PRS_MEMBER;
+
 	reset_partition_data(cs);
 	spin_unlock_irq(&callback_lock);
 	update_unbound_workqueue_cpumask(isolcpus_updated);
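
A minimal standalone sketch of the sign-flip convention in the hunk above, assuming the PRS_* numbering from kernel/cgroup/cpuset.c (PRS_MEMBER 0, PRS_ROOT 1, PRS_ISOLATED 2, with their negatives marking invalid roots): a disable that recorded an error keeps an invalid-state marker, while a clean disable drops the cpuset back to an ordinary member.

/*
 * Standalone sketch, not kernel code: assumes the PRS_* numbering
 * from kernel/cgroup/cpuset.c where negating a valid state yields
 * the corresponding invalid state.
 */
#include <stdio.h>

#define PRS_MEMBER		 0
#define PRS_ROOT		 1
#define PRS_ISOLATED		 2
#define PRS_INVALID_ROOT	-1
#define PRS_INVALID_ISOLATED	-2

/* Mirrors the branch added above in remote_partition_disable(). */
static int disabled_state(int prs, int prs_err)
{
	return prs_err ? -prs : PRS_MEMBER;
}

int main(void)
{
	printf("%d\n", disabled_state(PRS_ROOT, 0));	 /*  0 -> member */
	printf("%d\n", disabled_state(PRS_ISOLATED, 5)); /* -2 -> invalid isolated */
	return 0;
}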
@@ -1471,8 +1474,10 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask,
 
 	WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
 
-	if (cpumask_empty(newmask))
+	if (cpumask_empty(newmask)) {
+		cs->prs_err = PERR_CPUSEMPTY;
 		goto invalidate;
+	}
 
 	adding = cpumask_andnot(tmp->addmask, newmask, cs->effective_xcpus);
 	deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, newmask);
@@ -1482,10 +1487,15 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask,
 	 * not allocated to other partitions and there are effective_cpus
 	 * left in the top cpuset.
 	 */
-	if (adding && (!capable(CAP_SYS_ADMIN) ||
-		       cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
-		       cpumask_subset(top_cpuset.effective_cpus, tmp->addmask)))
-		goto invalidate;
+	if (adding) {
+		if (!capable(CAP_SYS_ADMIN))
+			cs->prs_err = PERR_ACCESS;
+		else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
+			 cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))
+			cs->prs_err = PERR_NOCPUS;
+		if (cs->prs_err)
+			goto invalidate;
+	}
 
 	spin_lock_irq(&callback_lock);
 	if (adding)
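
The hunk above replaces one compound test with a record-then-branch pattern, so the eventual invalidation carries a specific cause. A standalone sketch of that pattern follows; the predicate arguments are hypothetical stand-ins for capable(CAP_SYS_ADMIN) and the two cpumask tests, and the enum is a renumbered subset of the kernel's enum prs_errcode.

/* Standalone sketch of the record-then-branch error pattern; the
 * arguments are hypothetical stand-ins for capable(CAP_SYS_ADMIN)
 * and the cpumask checks in the hunk above. */
#include <stdio.h>

enum prs_errcode { PERR_NONE, PERR_ACCESS, PERR_NOCPUS };

static enum prs_errcode check_adding(int has_cap, int taken_by_other,
				     int would_empty_top)
{
	enum prs_errcode err = PERR_NONE;

	if (!has_cap)
		err = PERR_ACCESS;	/* permission failure */
	else if (taken_by_other || would_empty_top)
		err = PERR_NOCPUS;	/* cpus not distributable */
	return err;			/* nonzero -> invalidate */
}

int main(void)
{
	printf("%d\n", check_adding(1, 1, 0));	/* 2: PERR_NOCPUS */
	printf("%d\n", check_adding(0, 0, 0));	/* 1: PERR_ACCESS */
	return 0;
}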
@@ -1601,7 +1611,7 @@ static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
  * The partcmd_update command is used by update_cpumasks_hier() with newmask
  * NULL and update_cpumask() with newmask set. The partcmd_invalidate is used
  * by update_cpumask() with NULL newmask. In both cases, the callers won't
- * check for error and so partition_root_state and prs_error will be updated
+ * check for error and so partition_root_state and prs_err will be updated
  * directly.
  */
 static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
@@ -3739,6 +3749,7 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
 
 	if (remote && cpumask_empty(&new_cpus) &&
 	    partition_is_populated(cs, NULL)) {
+		cs->prs_err = PERR_HOTPLUG;
 		remote_partition_disable(cs, tmp);
 		compute_effective_cpumask(&new_cpus, cs, parent);
 		remote = false;
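
With PERR_HOTPLUG recorded before remote_partition_disable(), the stored code is what userspace later sees as the parenthesized reason when reading cpuset.cpus.partition. A sketch of that code-to-string lookup; the strings are illustrative and may not match the kernel's exact perr_strings[] wording.

/* Illustrative mapping only; the real table lives in
 * kernel/cgroup/cpuset.c as perr_strings[], and both the enum
 * numbering and the wording here are stand-ins for the sketch. */
#include <stdio.h>

enum prs_errcode { PERR_NONE, PERR_ACCESS, PERR_NOCPUS,
		   PERR_CPUSEMPTY, PERR_HOTPLUG };

static const char * const reason[] = {
	[PERR_ACCESS]	 = "Enable partition not permitted",
	[PERR_NOCPUS]	 = "Parent unable to distribute cpu downstream",
	[PERR_CPUSEMPTY] = "cpuset.cpus and cpuset.cpus.exclusive are empty",
	[PERR_HOTPLUG]	 = "No cpu available due to hotplug",
};

int main(void)
{
	enum prs_errcode prs_err = PERR_HOTPLUG;

	/* e.g. "isolated invalid (No cpu available due to hotplug)" */
	printf("isolated invalid (%s)\n", reason[prs_err]);
	return 0;
}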