
Commit 42be24a

Suzuki K Poulose authored and ctmarinas committed
arm64: Enable memory encrypt for Realms
Use the memory encryption APIs to trigger an RSI call that requests a transition between protected memory and shared memory (or vice versa), and update the kernel's linear map of the modified pages to flip the top bit of the IPA. This requires that block mappings are not used in the direct map for realm guests.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Co-developed-by: Steven Price <steven.price@arm.com>
Signed-off-by: Steven Price <steven.price@arm.com>
Link: https://lore.kernel.org/r/20241017131434.40935-10-steven.price@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
1 parent 0e9cb59 commit 42be24a

File tree

6 files changed, +123 -3 lines changed
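The net effect for code running inside a realm guest is that set_memory_decrypted()/set_memory_encrypted() now perform the share/unshare transition. A minimal caller-side sketch (hypothetical helper, not part of this commit), assuming the pages come from the linear map:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/set_memory.h>

/* Hypothetical helper: allocate pages and make them visible to the host. */
static void *share_buffer_with_host(unsigned int order)
{
        struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
        unsigned long vaddr;

        if (!page)
                return NULL;

        /* set_memory_decrypted() only accepts linear-map addresses */
        vaddr = (unsigned long)page_address(page);

        /*
         * Break the linear mapping, issue the RSI call to make the IPA
         * range shared, then re-map with the top IPA bit set.
         */
        if (set_memory_decrypted(vaddr, 1 << order))
                return NULL;    /* pages are in an unknown state: leak them */

        return (void *)vaddr;
}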

arch/arm64/Kconfig

Lines changed: 3 additions & 0 deletions
@@ -21,6 +21,7 @@ config ARM64
 	select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
 	select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
 	select ARCH_HAS_CACHE_LINE_SIZE
+	select ARCH_HAS_CC_PLATFORM
 	select ARCH_HAS_CURRENT_STACK_POINTER
 	select ARCH_HAS_DEBUG_VIRTUAL
 	select ARCH_HAS_DEBUG_VM_PGTABLE
@@ -44,6 +45,8 @@ config ARM64
 	select ARCH_HAS_SETUP_DMA_OPS
 	select ARCH_HAS_SET_DIRECT_MAP
 	select ARCH_HAS_SET_MEMORY
+	select ARCH_HAS_MEM_ENCRYPT
+	select ARCH_HAS_FORCE_DMA_UNENCRYPTED
 	select ARCH_STACKWALK
 	select ARCH_HAS_STRICT_KERNEL_RWX
 	select ARCH_HAS_STRICT_MODULE_RWX

arch/arm64/include/asm/mem_encrypt.h

Lines changed: 9 additions & 0 deletions
@@ -2,6 +2,8 @@
 #ifndef __ASM_MEM_ENCRYPT_H
 #define __ASM_MEM_ENCRYPT_H
 
+#include <asm/rsi.h>
+
 struct arm64_mem_crypt_ops {
 	int (*encrypt)(unsigned long addr, int numpages);
 	int (*decrypt)(unsigned long addr, int numpages);
@@ -12,4 +14,11 @@ int arm64_mem_crypt_ops_register(const struct arm64_mem_crypt_ops *ops);
 int set_memory_encrypted(unsigned long addr, int numpages);
 int set_memory_decrypted(unsigned long addr, int numpages);
 
+int realm_register_memory_enc_ops(void);
+
+static inline bool force_dma_unencrypted(struct device *dev)
+{
+	return is_realm_world();
+}
+
 #endif	/* __ASM_MEM_ENCRYPT_H */
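The new declarations plug into the arm64_mem_crypt_ops dispatcher that this header already declares via arm64_mem_crypt_ops_register(). The dispatcher itself is outside this diff; a plausible, simplified sketch of how it might forward set_memory_encrypted()/set_memory_decrypted() to the registered ops:

/* Simplified sketch of the dispatch side (not part of this diff) */
static const struct arm64_mem_crypt_ops *crypt_ops;

int arm64_mem_crypt_ops_register(const struct arm64_mem_crypt_ops *ops)
{
        if (WARN_ON(crypt_ops))
                return -EBUSY;

        crypt_ops = ops;
        return 0;
}

int set_memory_encrypted(unsigned long addr, int numpages)
{
        /* No registered ops (not a realm guest): nothing to do */
        if (!crypt_ops)
                return 0;

        return crypt_ops->encrypt(addr, numpages);
}

int set_memory_decrypted(unsigned long addr, int numpages)
{
        if (!crypt_ops)
                return 0;

        return crypt_ops->decrypt(addr, numpages);
}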

arch/arm64/include/asm/pgtable.h

Lines changed: 5 additions & 0 deletions
@@ -684,6 +684,11 @@ static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
 #define pgprot_nx(prot) \
 	__pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)
 
+#define pgprot_decrypted(prot) \
+	__pgprot_modify(prot, PROT_NS_SHARED, PROT_NS_SHARED)
+#define pgprot_encrypted(prot) \
+	__pgprot_modify(prot, PROT_NS_SHARED, 0)
+
 /*
  * Mark the prot value as uncacheable and unbufferable.
  */
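pgprot_decrypted()/pgprot_encrypted() let generic mapping code mark a protection value as host-visible by setting PROT_NS_SHARED (the top IPA bit) or strip it again. A simplified, hypothetical use in the style of the generic DMA mapping code (not part of this diff):

/*
 * Hypothetical example: buffers that a device must see are mapped with
 * the shared (decrypted) attribute; everything else stays protected.
 */
static pgprot_t example_dma_pgprot(struct device *dev, pgprot_t prot)
{
        if (force_dma_unencrypted(dev))
                return pgprot_decrypted(prot);  /* sets PROT_NS_SHARED */

        return pgprot_encrypted(prot);          /* clears PROT_NS_SHARED */
}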

arch/arm64/include/asm/set_memory.h

Lines changed: 3 additions & 0 deletions
@@ -15,4 +15,7 @@ int set_direct_map_invalid_noflush(struct page *page);
 int set_direct_map_default_noflush(struct page *page);
 bool kernel_page_present(struct page *page);
 
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
+
 #endif /* _ASM_ARM64_SET_MEMORY_H */

arch/arm64/kernel/rsi.c

Lines changed: 16 additions & 0 deletions
@@ -7,8 +7,10 @@
 #include <linux/memblock.h>
 #include <linux/psci.h>
 #include <linux/swiotlb.h>
+#include <linux/cc_platform.h>
 
 #include <asm/io.h>
+#include <asm/mem_encrypt.h>
 #include <asm/rsi.h>
 
 static struct realm_config config;
@@ -19,6 +21,17 @@ EXPORT_SYMBOL(prot_ns_shared);
 DEFINE_STATIC_KEY_FALSE_RO(rsi_present);
 EXPORT_SYMBOL(rsi_present);
 
+bool cc_platform_has(enum cc_attr attr)
+{
+	switch (attr) {
+	case CC_ATTR_MEM_ENCRYPT:
+		return is_realm_world();
+	default:
+		return false;
+	}
+}
+EXPORT_SYMBOL_GPL(cc_platform_has);
+
 static bool rsi_version_matches(void)
 {
 	unsigned long ver_lower, ver_higher;
@@ -119,6 +132,9 @@ void __init arm64_rsi_init(void)
 	if (arm64_ioremap_prot_hook_register(realm_ioremap_hook))
 		return;
 
+	if (realm_register_memory_enc_ops())
+		return;
+
 	arm64_rsi_setup_memory();
 
 	static_branch_enable(&rsi_present);
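With cc_platform_has() reporting CC_ATTR_MEM_ENCRYPT inside a realm, generic and driver code can gate the extra share/unshare work on that attribute. A hypothetical snippet (not part of this diff):

#include <linux/cc_platform.h>
#include <linux/set_memory.h>

/*
 * Hypothetical driver snippet: only pay for the share transition when
 * running on a confidential-computing platform (here: a realm guest).
 */
static int example_make_ring_visible_to_host(unsigned long ring, int numpages)
{
        if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
                return 0;       /* memory is already visible to the host */

        return set_memory_decrypted(ring, numpages);
}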

arch/arm64/mm/pageattr.c

Lines changed: 87 additions & 3 deletions
@@ -5,10 +5,12 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/mem_encrypt.h>
 #include <linux/sched.h>
 #include <linux/vmalloc.h>
 
 #include <asm/cacheflush.h>
+#include <asm/pgtable-prot.h>
 #include <asm/set_memory.h>
 #include <asm/tlbflush.h>
 #include <asm/kfence.h>
@@ -23,14 +25,16 @@ bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED
 bool can_set_direct_map(void)
 {
 	/*
-	 * rodata_full and DEBUG_PAGEALLOC require linear map to be
-	 * mapped at page granularity, so that it is possible to
+	 * rodata_full, DEBUG_PAGEALLOC and a Realm guest all require linear
+	 * map to be mapped at page granularity, so that it is possible to
 	 * protect/unprotect single pages.
 	 *
 	 * KFENCE pool requires page-granular mapping if initialized late.
+	 *
+	 * Realms need to make pages shared/protected at page granularity.
 	 */
 	return rodata_full || debug_pagealloc_enabled() ||
-		arm64_kfence_can_set_direct_map();
+		arm64_kfence_can_set_direct_map() || is_realm_world();
 }
 
 static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
@@ -198,6 +202,86 @@ int set_direct_map_default_noflush(struct page *page)
 			    PAGE_SIZE, change_page_range, &data);
 }
 
+static int __set_memory_enc_dec(unsigned long addr,
+				int numpages,
+				bool encrypt)
+{
+	unsigned long set_prot = 0, clear_prot = 0;
+	phys_addr_t start, end;
+	int ret;
+
+	if (!is_realm_world())
+		return 0;
+
+	if (!__is_lm_address(addr))
+		return -EINVAL;
+
+	start = __virt_to_phys(addr);
+	end = start + numpages * PAGE_SIZE;
+
+	if (encrypt)
+		clear_prot = PROT_NS_SHARED;
+	else
+		set_prot = PROT_NS_SHARED;
+
+	/*
+	 * Break the mapping before we make any changes to avoid stale TLB
+	 * entries or Synchronous External Aborts caused by RIPAS_EMPTY
+	 */
+	ret = __change_memory_common(addr, PAGE_SIZE * numpages,
+				     __pgprot(set_prot),
+				     __pgprot(clear_prot | PTE_VALID));
+
+	if (ret)
+		return ret;
+
+	if (encrypt)
+		ret = rsi_set_memory_range_protected(start, end);
+	else
+		ret = rsi_set_memory_range_shared(start, end);
+
+	if (ret)
+		return ret;
+
+	return __change_memory_common(addr, PAGE_SIZE * numpages,
+				      __pgprot(PTE_VALID),
+				      __pgprot(0));
+}
+
+static int realm_set_memory_encrypted(unsigned long addr, int numpages)
+{
+	int ret = __set_memory_enc_dec(addr, numpages, true);
+
+	/*
+	 * If the request to change state fails, then the only sensible course
+	 * of action for the caller is to leak the memory
+	 */
+	WARN(ret, "Failed to encrypt memory, %d pages will be leaked",
+	     numpages);
+
+	return ret;
+}
+
+static int realm_set_memory_decrypted(unsigned long addr, int numpages)
+{
+	int ret = __set_memory_enc_dec(addr, numpages, false);
+
+	WARN(ret, "Failed to decrypt memory, %d pages will be leaked",
+	     numpages);
+
+	return ret;
+}
+
+static const struct arm64_mem_crypt_ops realm_crypt_ops = {
+	.encrypt = realm_set_memory_encrypted,
+	.decrypt = realm_set_memory_decrypted,
+};
+
+int realm_register_memory_enc_ops(void)
+{
+	return arm64_mem_crypt_ops_register(&realm_crypt_ops);
+}
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
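The WARN()s above encode the policy callers are expected to follow: if a transition fails, the pages are left in an indeterminate state and must be leaked rather than freed. A hypothetical caller-side teardown illustrating that policy (not part of this diff):

#include <linux/gfp.h>
#include <linux/set_memory.h>

/* Hypothetical teardown path honouring the leak-on-failure policy. */
static void example_unshare_and_free(unsigned long vaddr, unsigned int order)
{
        /*
         * Move the pages back to protected before freeing. If that fails,
         * leak them: returning host-shared pages to the allocator would
         * let the host tamper with future allocations.
         */
        if (set_memory_encrypted(vaddr, 1 << order))
                return;

        free_pages(vaddr, order);
}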
