
Commit 7d0ec14

James Morse authored and bp3tk0v committed
x86/resctrl: Add resctrl_arch_ prefix to pseudo lock functions
resctrl's pseudo lock has some copy-to-cache and measurement functions
that are micro-architecture specific. For example, pseudo_lock_fn() is
not at all portable.

Label these 'resctrl_arch_' so they stay under /arch/x86.

To expose these functions to the filesystem code they need an entry in
a header file, and can't be marked static.

Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Shaopeng Tan <tan.shaopeng@jp.fujitsu.com>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
Reviewed-by: Fenghua Yu <fenghuay@nvidia.com>
Reviewed-by: Babu Moger <babu.moger@amd.com>
Tested-by: Carl Worth <carl@os.amperecomputing.com> # arm64
Tested-by: Shaopeng Tan <tan.shaopeng@jp.fujitsu.com>
Tested-by: Peter Newman <peternewman@google.com>
Tested-by: Amit Singh Tomar <amitsinght@marvell.com> # arm64
Tested-by: Shanker Donthineni <sdonthineni@nvidia.com> # arm64
Tested-by: Babu Moger <babu.moger@amd.com>
Link: https://lore.kernel.org/r/20250311183715.16445-24-james.morse@arm.com
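The rename follows resctrl's usual arch/fs split: the micro-architecture
specific bodies stay in arch/x86/kernel/cpu/resctrl/pseudo_lock.c, while
callers elsewhere only need the declarations from <asm/resctrl.h>. A rough
sketch of the calling pattern this enables; the helper below is hypothetical
and not part of this commit, and its kthread_run_on_cpu() call simply mirrors
the one in pseudo_lock.c in the diff below:

#include <linux/kthread.h>	/* kthread_run_on_cpu() */
#include <linux/err.h>		/* IS_ERR(), PTR_ERR() */
#include <asm/resctrl.h>	/* declares resctrl_arch_pseudo_lock_fn() */

/*
 * Hypothetical filesystem-side helper, for illustration only. It depends
 * only on the header declaration added by this commit (struct rdtgroup
 * comes from resctrl's internal headers), so code like this could be
 * built outside arch/x86.
 */
static int start_pseudo_lock_thread(struct rdtgroup *rdtgrp, int cpu)
{
	struct task_struct *thread;

	thread = kthread_run_on_cpu(resctrl_arch_pseudo_lock_fn, rdtgrp,
				    cpu, "pseudo_lock/%u");
	return IS_ERR(thread) ? PTR_ERR(thread) : 0;
}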
Parent: c32a7d7

2 files changed, 24 insertions(+), 17 deletions(-)


arch/x86/include/asm/resctrl.h (5 additions, 0 deletions)

@@ -203,6 +203,11 @@ static inline void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid
 static inline void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid,
					      void *ctx) { };
 
+u64 resctrl_arch_get_prefetch_disable_bits(void);
+int resctrl_arch_pseudo_lock_fn(void *_rdtgrp);
+int resctrl_arch_measure_cycles_lat_fn(void *_plr);
+int resctrl_arch_measure_l2_residency(void *_plr);
+int resctrl_arch_measure_l3_residency(void *_plr);
 void resctrl_cpu_detect(struct cpuinfo_x86 *c);
 
 #else

arch/x86/kernel/cpu/resctrl/pseudo_lock.c (19 additions, 17 deletions)

@@ -61,7 +61,8 @@ static const struct class pseudo_lock_class = {
 };
 
 /**
- * get_prefetch_disable_bits - prefetch disable bits of supported platforms
+ * resctrl_arch_get_prefetch_disable_bits - prefetch disable bits of supported
+ *					    platforms
  * @void: It takes no parameters.
  *
  * Capture the list of platforms that have been validated to support
@@ -75,13 +76,13 @@ static const struct class pseudo_lock_class = {
  * in the SDM.
  *
  * When adding a platform here also add support for its cache events to
- * measure_cycles_perf_fn()
+ * resctrl_arch_measure_l*_residency()
  *
  * Return:
  * If platform is supported, the bits to disable hardware prefetchers, 0
 * if platform is not supported.
  */
-static u64 get_prefetch_disable_bits(void)
+u64 resctrl_arch_get_prefetch_disable_bits(void)
 {
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
 	    boot_cpu_data.x86 != 6)
@@ -408,7 +409,7 @@ static void pseudo_lock_free(struct rdtgroup *rdtgrp)
 }
 
 /**
- * pseudo_lock_fn - Load kernel memory into cache
+ * resctrl_arch_pseudo_lock_fn - Load kernel memory into cache
  * @_rdtgrp: resource group to which pseudo-lock region belongs
  *
  * This is the core pseudo-locking flow.
@@ -426,7 +427,7 @@ static void pseudo_lock_free(struct rdtgroup *rdtgrp)
  *
  * Return: 0. Waiter on waitqueue will be woken on completion.
  */
-static int pseudo_lock_fn(void *_rdtgrp)
+int resctrl_arch_pseudo_lock_fn(void *_rdtgrp)
 {
 	struct rdtgroup *rdtgrp = _rdtgrp;
 	struct pseudo_lock_region *plr = rdtgrp->plr;
@@ -712,7 +713,7 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
 	 * Not knowing the bits to disable prefetching implies that this
 	 * platform does not support Cache Pseudo-Locking.
 	 */
-	prefetch_disable_bits = get_prefetch_disable_bits();
+	prefetch_disable_bits = resctrl_arch_get_prefetch_disable_bits();
 	if (prefetch_disable_bits == 0) {
 		rdt_last_cmd_puts("Pseudo-locking not supported\n");
 		return -EINVAL;
@@ -872,7 +873,8 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d)
 }
 
 /**
- * measure_cycles_lat_fn - Measure cycle latency to read pseudo-locked memory
+ * resctrl_arch_measure_cycles_lat_fn - Measure cycle latency to read
+ *					pseudo-locked memory
  * @_plr: pseudo-lock region to measure
  *
  * There is no deterministic way to test if a memory region is cached. One
@@ -885,7 +887,7 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d)
  *
 * Return: 0. Waiter on waitqueue will be woken on completion.
  */
-static int measure_cycles_lat_fn(void *_plr)
+int resctrl_arch_measure_cycles_lat_fn(void *_plr)
 {
 	struct pseudo_lock_region *plr = _plr;
 	u32 saved_low, saved_high;
@@ -1069,7 +1071,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
 	return 0;
 }
 
-static int measure_l2_residency(void *_plr)
+int resctrl_arch_measure_l2_residency(void *_plr)
 {
 	struct pseudo_lock_region *plr = _plr;
 	struct residency_counts counts = {0};
@@ -1107,7 +1109,7 @@ static int measure_l2_residency(void *_plr)
 	return 0;
 }
 
-static int measure_l3_residency(void *_plr)
+int resctrl_arch_measure_l3_residency(void *_plr)
 {
 	struct pseudo_lock_region *plr = _plr;
 	struct residency_counts counts = {0};
@@ -1205,14 +1207,14 @@ static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
 	plr->cpu = cpu;
 
 	if (sel == 1)
-		thread = kthread_run_on_cpu(measure_cycles_lat_fn, plr,
-					    cpu, "pseudo_lock_measure/%u");
+		thread = kthread_run_on_cpu(resctrl_arch_measure_cycles_lat_fn,
+					    plr, cpu, "pseudo_lock_measure/%u");
 	else if (sel == 2)
-		thread = kthread_run_on_cpu(measure_l2_residency, plr,
-					    cpu, "pseudo_lock_measure/%u");
+		thread = kthread_run_on_cpu(resctrl_arch_measure_l2_residency,
+					    plr, cpu, "pseudo_lock_measure/%u");
 	else if (sel == 3)
-		thread = kthread_run_on_cpu(measure_l3_residency, plr,
-					    cpu, "pseudo_lock_measure/%u");
+		thread = kthread_run_on_cpu(resctrl_arch_measure_l3_residency,
+					    plr, cpu, "pseudo_lock_measure/%u");
 	else
 		goto out;
 
@@ -1307,7 +1309,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
 
 	plr->thread_done = 0;
 
-	thread = kthread_run_on_cpu(pseudo_lock_fn, rdtgrp,
+	thread = kthread_run_on_cpu(resctrl_arch_pseudo_lock_fn, rdtgrp,
				    plr->cpu, "pseudo_lock/%u");
 	if (IS_ERR(thread)) {
 		ret = PTR_ERR(thread);
