Skip to content

Commit 54de442

Browse files
Alex Shi authored and Ingo Molnar committed
sched/topology: Rename SD_SHARE_PKG_RESOURCES to SD_SHARE_LLC
SD_SHARE_PKG_RESOURCES is a bit of a misnomer: its naming suggests that it's sharing all 'package resources' - while in reality it's specifically for sharing the LLC only. Rename it to SD_SHARE_LLC to reduce confusion.

[ mingo: Rewrote the confusing changelog as well. ]

Suggested-by: Valentin Schneider <vschneid@redhat.com>
Signed-off-by: Alex Shi <alexs@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Valentin Schneider <vschneid@redhat.com>
Reviewed-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Reviewed-by: Barry Song <baohua@kernel.org>
Link: https://lore.kernel.org/r/20240210113924.1130448-5-alexs@kernel.org
1 parent fbc4498 commit 54de442

File tree

5 files changed

+23
-23
lines changed

5 files changed

+23
-23
lines changed

arch/powerpc/kernel/smp.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -984,7 +984,7 @@ static bool shared_caches __ro_after_init;
984984
/* cpumask of CPUs with asymmetric SMT dependency */
985985
static int powerpc_smt_flags(void)
986986
{
987-
int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
987+
int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;
988988

989989
if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
990990
printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
@@ -1010,9 +1010,9 @@ static __ro_after_init DEFINE_STATIC_KEY_FALSE(splpar_asym_pack);
10101010
static int powerpc_shared_cache_flags(void)
10111011
{
10121012
if (static_branch_unlikely(&splpar_asym_pack))
1013-
return SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING;
1013+
return SD_SHARE_LLC | SD_ASYM_PACKING;
10141014

1015-
return SD_SHARE_PKG_RESOURCES;
1015+
return SD_SHARE_LLC;
10161016
}
10171017

10181018
static int powerpc_shared_proc_flags(void)

include/linux/sched/sd_flags.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -117,13 +117,13 @@ SD_FLAG(SD_SHARE_CPUCAPACITY, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
117117
SD_FLAG(SD_CLUSTER, SDF_NEEDS_GROUPS)
118118

119119
/*
120-
* Domain members share CPU package resources (i.e. caches)
120+
* Domain members share CPU Last Level Caches
121121
*
122122
* SHARED_CHILD: Set from the base domain up until spanned CPUs no longer share
123123
* the same cache(s).
124124
* NEEDS_GROUPS: Caches are shared between groups.
125125
*/
126-
SD_FLAG(SD_SHARE_PKG_RESOURCES, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
126+
SD_FLAG(SD_SHARE_LLC, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
127127

128128
/*
129129
* Only a single load balancing instance

include/linux/sched/topology.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -38,21 +38,21 @@ extern const struct sd_flag_debug sd_flag_debug[];
3838
#ifdef CONFIG_SCHED_SMT
3939
static inline int cpu_smt_flags(void)
4040
{
41-
return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
41+
return SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;
4242
}
4343
#endif
4444

4545
#ifdef CONFIG_SCHED_CLUSTER
4646
static inline int cpu_cluster_flags(void)
4747
{
48-
return SD_CLUSTER | SD_SHARE_PKG_RESOURCES;
48+
return SD_CLUSTER | SD_SHARE_LLC;
4949
}
5050
#endif
5151

5252
#ifdef CONFIG_SCHED_MC
5353
static inline int cpu_core_flags(void)
5454
{
55-
return SD_SHARE_PKG_RESOURCES;
55+
return SD_SHARE_LLC;
5656
}
5757
#endif
5858

kernel/sched/fair.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10678,7 +10678,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
1067810678
*/
1067910679
if (local->group_type == group_has_spare) {
1068010680
if ((busiest->group_type > group_fully_busy) &&
10681-
!(env->sd->flags & SD_SHARE_PKG_RESOURCES)) {
10681+
!(env->sd->flags & SD_SHARE_LLC)) {
1068210682
/*
1068310683
* If busiest is overloaded, try to fill spare
1068410684
* capacity. This might end up creating spare capacity

kernel/sched/topology.c

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -657,13 +657,13 @@ static void destroy_sched_domains(struct sched_domain *sd)
657657
}
658658

659659
/*
660-
* Keep a special pointer to the highest sched_domain that has
661-
* SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this
662-
* allows us to avoid some pointer chasing select_idle_sibling().
660+
* Keep a special pointer to the highest sched_domain that has SD_SHARE_LLC set
661+
* (Last Level Cache Domain) for this allows us to avoid some pointer chasing
662+
* select_idle_sibling().
663663
*
664-
* Also keep a unique ID per domain (we use the first CPU number in
665-
* the cpumask of the domain), this allows us to quickly tell if
666-
* two CPUs are in the same cache domain, see cpus_share_cache().
664+
* Also keep a unique ID per domain (we use the first CPU number in the cpumask
665+
* of the domain), this allows us to quickly tell if two CPUs are in the same
666+
* cache domain, see cpus_share_cache().
667667
*/
668668
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
669669
DEFINE_PER_CPU(int, sd_llc_size);
@@ -684,7 +684,7 @@ static void update_top_cache_domain(int cpu)
684684
int id = cpu;
685685
int size = 1;
686686

687-
sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
687+
sd = highest_flag_domain(cpu, SD_SHARE_LLC);
688688
if (sd) {
689689
id = cpumask_first(sched_domain_span(sd));
690690
size = cpumask_weight(sched_domain_span(sd));
@@ -1554,7 +1554,7 @@ static struct cpumask ***sched_domains_numa_masks;
15541554
* function. For details, see include/linux/sched/sd_flags.h.
15551555
*
15561556
* SD_SHARE_CPUCAPACITY
1557-
* SD_SHARE_PKG_RESOURCES
1557+
* SD_SHARE_LLC
15581558
* SD_CLUSTER
15591559
* SD_NUMA
15601560
*
@@ -1566,7 +1566,7 @@ static struct cpumask ***sched_domains_numa_masks;
15661566
#define TOPOLOGY_SD_FLAGS \
15671567
(SD_SHARE_CPUCAPACITY | \
15681568
SD_CLUSTER | \
1569-
SD_SHARE_PKG_RESOURCES | \
1569+
SD_SHARE_LLC | \
15701570
SD_NUMA | \
15711571
SD_ASYM_PACKING)
15721572

@@ -1609,7 +1609,7 @@ sd_init(struct sched_domain_topology_level *tl,
16091609
| 0*SD_BALANCE_WAKE
16101610
| 1*SD_WAKE_AFFINE
16111611
| 0*SD_SHARE_CPUCAPACITY
1612-
| 0*SD_SHARE_PKG_RESOURCES
1612+
| 0*SD_SHARE_LLC
16131613
| 0*SD_SERIALIZE
16141614
| 1*SD_PREFER_SIBLING
16151615
| 0*SD_NUMA
@@ -1646,7 +1646,7 @@ sd_init(struct sched_domain_topology_level *tl,
16461646
if (sd->flags & SD_SHARE_CPUCAPACITY) {
16471647
sd->imbalance_pct = 110;
16481648

1649-
} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
1649+
} else if (sd->flags & SD_SHARE_LLC) {
16501650
sd->imbalance_pct = 117;
16511651
sd->cache_nice_tries = 1;
16521652

@@ -1671,7 +1671,7 @@ sd_init(struct sched_domain_topology_level *tl,
16711671
* For all levels sharing cache; connect a sched_domain_shared
16721672
* instance.
16731673
*/
1674-
if (sd->flags & SD_SHARE_PKG_RESOURCES) {
1674+
if (sd->flags & SD_SHARE_LLC) {
16751675
sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
16761676
atomic_inc(&sd->shared->ref);
16771677
atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
@@ -2446,8 +2446,8 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
24462446
for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
24472447
struct sched_domain *child = sd->child;
24482448

2449-
if (!(sd->flags & SD_SHARE_PKG_RESOURCES) && child &&
2450-
(child->flags & SD_SHARE_PKG_RESOURCES)) {
2449+
if (!(sd->flags & SD_SHARE_LLC) && child &&
2450+
(child->flags & SD_SHARE_LLC)) {
24512451
struct sched_domain __rcu *top_p;
24522452
unsigned int nr_llcs;
24532453

0 commit comments

Comments
 (0)