
Commit b8c2f66

Merge patch series "RISC-V: ACPI improvements"
Sunil V L <sunilvl@ventanamicro.com> says:

This series is a set of patches that were originally part of the RFC v1 series [1] adding ACPI support to the RISC-V interrupt controllers. Since these patches are independent of the interrupt controllers, they are split into this new series so they can be merged without waiting for the bigger series.

The patches primarily add support for the ECR below [2], which has been approved by the ASWG, and add the following feature:

- Get CBO block sizes from the RHCT on ACPI-based systems.

Additionally, the series contains a patch to improve acpi_os_ioremap().

[1] - https://lore.kernel.org/lkml/20230803175202.3173957-1-sunilvl@ventanamicro.com/
[2] - https://drive.google.com/file/d/1sKbOa8m1UZw1JkquZYe3F1zQBN1xXsaf/view?usp=sharing

* b4-shazam-merge:
  RISC-V: cacheflush: Initialize CBO variables on ACPI systems
  RISC-V: ACPI: RHCT: Add function to get CBO block sizes
  RISC-V: ACPI: Update the return value of acpi_get_rhct()
  RISC-V: ACPI: Enhance acpi_os_ioremap with MMIO remapping

Link: https://lore.kernel.org/r/20231018124007.1306159-1-sunilvl@ventanamicro.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
2 parents 87615e9 + 2960f37 commit b8c2f66

5 files changed: 201 additions, 11 deletions


arch/riscv/Kconfig

Lines changed: 1 addition & 0 deletions

@@ -39,6 +39,7 @@ config RISCV
         select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
         select ARCH_HAS_UBSAN_SANITIZE_ALL
         select ARCH_HAS_VDSO_DATA
+        select ARCH_KEEP_MEMBLOCK if ACPI
         select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
         select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
         select ARCH_STACKWALK

arch/riscv/include/asm/acpi.h

Lines changed: 6 additions & 0 deletions

@@ -66,6 +66,8 @@ int acpi_get_riscv_isa(struct acpi_table_header *table,
                        unsigned int cpu, const char **isa);
 
 static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; }
+void acpi_get_cbo_block_size(struct acpi_table_header *table, u32 *cbom_size,
+                             u32 *cboz_size, u32 *cbop_size);
 #else
 static inline void acpi_init_rintc_map(void) { }
 static inline struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu)
@@ -79,6 +81,10 @@ static inline int acpi_get_riscv_isa(struct acpi_table_header *table,
         return -EINVAL;
 }
 
+static inline void acpi_get_cbo_block_size(struct acpi_table_header *table,
+                                           u32 *cbom_size, u32 *cboz_size,
+                                           u32 *cbop_size) { }
+
 #endif /* CONFIG_ACPI */
 
 #endif /*_ASM_ACPI_H*/

arch/riscv/kernel/acpi.c

Lines changed: 85 additions & 2 deletions

@@ -14,9 +14,10 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/efi.h>
 #include <linux/io.h>
+#include <linux/memblock.h>
 #include <linux/pci.h>
-#include <linux/efi.h>
 
 int acpi_noirq = 1;        /* skip ACPI IRQ initialization */
 int acpi_disabled = 1;
@@ -217,7 +218,89 @@ void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
 
 void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
 {
-        return (void __iomem *)memremap(phys, size, MEMREMAP_WB);
+        efi_memory_desc_t *md, *region = NULL;
+        pgprot_t prot;
+
+        if (WARN_ON_ONCE(!efi_enabled(EFI_MEMMAP)))
+                return NULL;
+
+        for_each_efi_memory_desc(md) {
+                u64 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
+
+                if (phys < md->phys_addr || phys >= end)
+                        continue;
+
+                if (phys + size > end) {
+                        pr_warn(FW_BUG "requested region covers multiple EFI memory regions\n");
+                        return NULL;
+                }
+                region = md;
+                break;
+        }
+
+        /*
+         * It is fine for AML to remap regions that are not represented in the
+         * EFI memory map at all, as it only describes normal memory, and MMIO
+         * regions that require a virtual mapping to make them accessible to
+         * the EFI runtime services.
+         */
+        prot = PAGE_KERNEL_IO;
+        if (region) {
+                switch (region->type) {
+                case EFI_LOADER_CODE:
+                case EFI_LOADER_DATA:
+                case EFI_BOOT_SERVICES_CODE:
+                case EFI_BOOT_SERVICES_DATA:
+                case EFI_CONVENTIONAL_MEMORY:
+                case EFI_PERSISTENT_MEMORY:
+                        if (memblock_is_map_memory(phys) ||
+                            !memblock_is_region_memory(phys, size)) {
+                                pr_warn(FW_BUG "requested region covers kernel memory\n");
+                                return NULL;
+                        }
+
+                        /*
+                         * Mapping kernel memory is permitted if the region in
+                         * question is covered by a single memblock with the
+                         * NOMAP attribute set: this enables the use of ACPI
+                         * table overrides passed via initramfs.
+                         * This particular use case only requires read access.
+                         */
+                        fallthrough;
+
+                case EFI_RUNTIME_SERVICES_CODE:
+                        /*
+                         * This would be unusual, but not problematic per se,
+                         * as long as we take care not to create a writable
+                         * mapping for executable code.
+                         */
+                        prot = PAGE_KERNEL_RO;
+                        break;
+
+                case EFI_ACPI_RECLAIM_MEMORY:
+                        /*
+                         * ACPI reclaim memory is used to pass firmware tables
+                         * and other data that is intended for consumption by
+                         * the OS only, which may decide it wants to reclaim
+                         * that memory and use it for something else. We never
+                         * do that, but we usually add it to the linear map
+                         * anyway, in which case we should use the existing
+                         * mapping.
+                         */
+                        if (memblock_is_map_memory(phys))
+                                return (void __iomem *)__va(phys);
+                        fallthrough;
+
+                default:
+                        if (region->attribute & EFI_MEMORY_WB)
+                                prot = PAGE_KERNEL;
+                        else if ((region->attribute & EFI_MEMORY_WC) ||
+                                 (region->attribute & EFI_MEMORY_WT))
+                                prot = pgprot_writecombine(PAGE_KERNEL);
+                }
+        }
+
+        return ioremap_prot(phys, size, pgprot_val(prot));
 }
 
 #ifdef CONFIG_PCI

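Editor's note on the protection policy: when a matching EFI memory region is found, the default switch case above picks the mapping type from the region's cacheability attributes. A minimal sketch of that mapping, pulled out as a standalone helper (hypothetical, not part of this series), could look like this:

#include <linux/efi.h>
#include <linux/pgtable.h>

/*
 * Hypothetical helper, not part of this diff: choose a page protection
 * from EFI memory attributes the way the default case above does.
 * Write-back memory gets a normal cacheable mapping, write-combine or
 * write-through memory gets a write-combining mapping, and anything
 * else falls back to a device (I/O) mapping.
 */
static pgprot_t efi_attr_to_prot(u64 attribute)
{
        if (attribute & EFI_MEMORY_WB)
                return PAGE_KERNEL;
        if (attribute & (EFI_MEMORY_WC | EFI_MEMORY_WT))
                return pgprot_writecombine(PAGE_KERNEL);
        return PAGE_KERNEL_IO;
}
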
arch/riscv/mm/cacheflush.c

Lines changed: 19 additions & 6 deletions

@@ -3,7 +3,9 @@
  * Copyright (C) 2017 SiFive
  */
 
+#include <linux/acpi.h>
 #include <linux/of.h>
+#include <asm/acpi.h>
 #include <asm/cacheflush.h>
 
 #ifdef CONFIG_SMP
@@ -124,13 +126,24 @@ void __init riscv_init_cbo_blocksizes(void)
         unsigned long cbom_hartid, cboz_hartid;
         u32 cbom_block_size = 0, cboz_block_size = 0;
         struct device_node *node;
+        struct acpi_table_header *rhct;
+        acpi_status status;
+
+        if (acpi_disabled) {
+                for_each_of_cpu_node(node) {
+                        /* set block-size for cbom and/or cboz extension if available */
+                        cbo_get_block_size(node, "riscv,cbom-block-size",
+                                           &cbom_block_size, &cbom_hartid);
+                        cbo_get_block_size(node, "riscv,cboz-block-size",
+                                           &cboz_block_size, &cboz_hartid);
+                }
+        } else {
+                status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
+                if (ACPI_FAILURE(status))
+                        return;
 
-        for_each_of_cpu_node(node) {
-                /* set block-size for cbom and/or cboz extension if available */
-                cbo_get_block_size(node, "riscv,cbom-block-size",
-                                   &cbom_block_size, &cbom_hartid);
-                cbo_get_block_size(node, "riscv,cboz-block-size",
-                                   &cboz_block_size, &cboz_hartid);
+                acpi_get_cbo_block_size(rhct, &cbom_block_size, &cboz_block_size, NULL);
+                acpi_put_table((struct acpi_table_header *)rhct);
         }
 
         if (cbom_block_size)

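Editor's note: the devicetree path above keeps using the existing cbo_get_block_size() helper, whose body lies outside this diff. As a rough, hypothetical sketch of the kind of per-CPU property lookup it performs (an assumption based only on how it is called here, not the kernel's actual implementation), such a helper would read the named property from a CPU node and warn if a later hart reports a different value:

#include <linux/of.h>
#include <linux/printk.h>
#include <asm/processor.h>

/*
 * Hypothetical sketch, not the kernel's cbo_get_block_size(): read a CBO
 * block-size property from one CPU node and record it, warning if another
 * hart reports a value different from the one already seen.
 */
static void cbo_block_size_sketch(struct device_node *node, const char *name,
                                  u32 *block_size, unsigned long *first_hartid)
{
        unsigned long hartid;
        u32 val;

        if (riscv_of_processor_hartid(node, &hartid) < 0)
                return;

        if (of_property_read_u32(node, name, &val))
                return;

        if (!*block_size) {
                *block_size = val;
                *first_hartid = hartid;
        } else if (*block_size != val) {
                pr_warn("%s mismatch: hart %lu differs from hart %lu\n",
                        name, hartid, *first_hartid);
        }
}
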
drivers/acpi/riscv/rhct.c

Lines changed: 90 additions & 3 deletions

@@ -8,8 +8,9 @@
 #define pr_fmt(fmt)     "ACPI: RHCT: " fmt
 
 #include <linux/acpi.h>
+#include <linux/bits.h>
 
-static struct acpi_table_header *acpi_get_rhct(void)
+static struct acpi_table_rhct *acpi_get_rhct(void)
 {
         static struct acpi_table_header *rhct;
         acpi_status status;
@@ -26,7 +27,7 @@ static struct acpi_table_header *acpi_get_rhct(void)
                 }
         }
 
-        return rhct;
+        return (struct acpi_table_rhct *)rhct;
 }
 
 /*
@@ -48,7 +49,7 @@ int acpi_get_riscv_isa(struct acpi_table_header *table, unsigned int cpu, const
         BUG_ON(acpi_disabled);
 
         if (!table) {
-                rhct = (struct acpi_table_rhct *)acpi_get_rhct();
+                rhct = acpi_get_rhct();
                 if (!rhct)
                         return -ENOENT;
         } else {
@@ -81,3 +82,89 @@ int acpi_get_riscv_isa(struct acpi_table_header *table, unsigned int cpu, const
 
         return -1;
 }
+
+static void acpi_parse_hart_info_cmo_node(struct acpi_table_rhct *rhct,
+                                          struct acpi_rhct_hart_info *hart_info,
+                                          u32 *cbom_size, u32 *cboz_size, u32 *cbop_size)
+{
+        u32 size_hartinfo = sizeof(struct acpi_rhct_hart_info);
+        u32 size_hdr = sizeof(struct acpi_rhct_node_header);
+        struct acpi_rhct_node_header *ref_node;
+        struct acpi_rhct_cmo_node *cmo_node;
+        u32 *hart_info_node_offset;
+
+        hart_info_node_offset = ACPI_ADD_PTR(u32, hart_info, size_hartinfo);
+        for (int i = 0; i < hart_info->num_offsets; i++) {
+                ref_node = ACPI_ADD_PTR(struct acpi_rhct_node_header,
+                                        rhct, hart_info_node_offset[i]);
+                if (ref_node->type == ACPI_RHCT_NODE_TYPE_CMO) {
+                        cmo_node = ACPI_ADD_PTR(struct acpi_rhct_cmo_node,
+                                                ref_node, size_hdr);
+                        if (cbom_size && cmo_node->cbom_size <= 30) {
+                                if (!*cbom_size)
+                                        *cbom_size = BIT(cmo_node->cbom_size);
+                                else if (*cbom_size != BIT(cmo_node->cbom_size))
+                                        pr_warn("CBOM size is not the same across harts\n");
+                        }
+
+                        if (cboz_size && cmo_node->cboz_size <= 30) {
+                                if (!*cboz_size)
+                                        *cboz_size = BIT(cmo_node->cboz_size);
+                                else if (*cboz_size != BIT(cmo_node->cboz_size))
+                                        pr_warn("CBOZ size is not the same across harts\n");
+                        }
+
+                        if (cbop_size && cmo_node->cbop_size <= 30) {
+                                if (!*cbop_size)
+                                        *cbop_size = BIT(cmo_node->cbop_size);
+                                else if (*cbop_size != BIT(cmo_node->cbop_size))
+                                        pr_warn("CBOP size is not the same across harts\n");
+                        }
+                }
+        }
+}
+
+/*
+ * During early boot, the caller should call acpi_get_table() and pass its pointer to
+ * these functions (and free up later). At run time, since this table can be used
+ * multiple times, pass NULL so that the table remains in memory.
+ */
+void acpi_get_cbo_block_size(struct acpi_table_header *table, u32 *cbom_size,
+                             u32 *cboz_size, u32 *cbop_size)
+{
+        u32 size_hdr = sizeof(struct acpi_rhct_node_header);
+        struct acpi_rhct_node_header *node, *end;
+        struct acpi_rhct_hart_info *hart_info;
+        struct acpi_table_rhct *rhct;
+
+        if (acpi_disabled)
+                return;
+
+        if (table) {
+                rhct = (struct acpi_table_rhct *)table;
+        } else {
+                rhct = acpi_get_rhct();
+                if (!rhct)
+                        return;
+        }
+
+        if (cbom_size)
+                *cbom_size = 0;
+
+        if (cboz_size)
+                *cboz_size = 0;
+
+        if (cbop_size)
+                *cbop_size = 0;
+
+        end = ACPI_ADD_PTR(struct acpi_rhct_node_header, rhct, rhct->header.length);
+        for (node = ACPI_ADD_PTR(struct acpi_rhct_node_header, rhct, rhct->node_offset);
+             node < end;
+             node = ACPI_ADD_PTR(struct acpi_rhct_node_header, node, node->length)) {
+                if (node->type == ACPI_RHCT_NODE_TYPE_HART_INFO) {
+                        hart_info = ACPI_ADD_PTR(struct acpi_rhct_hart_info, node, size_hdr);
+                        acpi_parse_hart_info_cmo_node(rhct, hart_info, cbom_size,
+                                                      cboz_size, cbop_size);
+                }
+        }
+}

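Editor's note: per the comment above acpi_get_cbo_block_size(), callers after early boot can pass NULL instead of a mapped table so the cached RHCT is reused and never released. A minimal, hypothetical runtime caller would look like this:

#include <linux/acpi.h>
#include <linux/printk.h>
#include <asm/acpi.h>

/*
 * Hypothetical runtime caller: passing NULL makes acpi_get_cbo_block_size()
 * look up the RHCT itself, which keeps the table mapped, so no
 * acpi_put_table() is needed here. Early-boot callers instead map the table
 * with acpi_get_table() and release it afterwards, as cacheflush.c does.
 */
static void report_cbo_sizes_example(void)
{
        u32 cbom = 0, cboz = 0, cbop = 0;

        acpi_get_cbo_block_size(NULL, &cbom, &cboz, &cbop);
        pr_info("CBOM/CBOZ/CBOP block sizes: %u/%u/%u bytes\n", cbom, cboz, cbop);
}
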