Skip to content

Commit 08999b2

Browse files
jarkkojs and hansendc
authored and committed
x86/sgx: Free backing memory after faulting the enclave page
There is a limited amount of SGX memory (EPC) on each system. When that memory is used up, SGX has its own swapping mechanism which is similar in concept but totally separate from the core mm/* code. Instead of swapping to disk, SGX swaps from EPC to normal RAM. That normal RAM comes from a shared memory pseudo-file and can itself be swapped by the core mm code. There is a hierarchy like this: EPC <-> shmem <-> disk After data is swapped back in from shmem to EPC, the shmem backing storage needs to be freed. Currently, the backing shmem is not freed. This effectively wastes the shmem while the enclave is running. The memory is recovered when the enclave is destroyed and the backing storage freed. Sort this out by freeing memory with shmem_truncate_range(), as soon as a page is faulted back to the EPC. In addition, free the memory for PCMD pages as soon as all PCMD's in a page have been marked as unused by zeroing its contents. Cc: stable@vger.kernel.org Fixes: 1728ab5 ("x86/sgx: Add a page reclaimer") Reported-by: Dave Hansen <dave.hansen@linux.intel.com> Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org> Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com> Link: https://lkml.kernel.org/r/20220303223859.273187-1-jarkko@kernel.org
1 parent a365a65 commit 08999b2

File tree

1 file changed

+48
-9
lines changed

1 file changed

+48
-9
lines changed

arch/x86/kernel/cpu/sgx/encl.c

Lines changed: 48 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,30 @@
1212
#include "encls.h"
1313
#include "sgx.h"
1414

15+
/*
16+
* Calculate byte offset of a PCMD struct associated with an enclave page. PCMD's
17+
* follow right after the EPC data in the backing storage. In addition to the
18+
* visible enclave pages, there's one extra page slot for SECS, before PCMD
19+
* structs.
20+
*/
21+
static inline pgoff_t sgx_encl_get_backing_page_pcmd_offset(struct sgx_encl *encl,
22+
unsigned long page_index)
23+
{
24+
pgoff_t epc_end_off = encl->size + sizeof(struct sgx_secs);
25+
26+
return epc_end_off + page_index * sizeof(struct sgx_pcmd);
27+
}
28+
29+
/*
30+
* Free a page from the backing storage in the given page index.
31+
*/
32+
static inline void sgx_encl_truncate_backing_page(struct sgx_encl *encl, unsigned long page_index)
33+
{
34+
struct inode *inode = file_inode(encl->backing);
35+
36+
shmem_truncate_range(inode, PFN_PHYS(page_index), PFN_PHYS(page_index) + PAGE_SIZE - 1);
37+
}
38+
1539
/*
1640
* ELDU: Load an EPC page as unblocked. For more info, see "OS Management of EPC
1741
* Pages" in the SDM.
@@ -22,24 +46,28 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
2246
{
2347
unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
2448
struct sgx_encl *encl = encl_page->encl;
49+
pgoff_t page_index, page_pcmd_off;
2550
struct sgx_pageinfo pginfo;
2651
struct sgx_backing b;
27-
pgoff_t page_index;
52+
bool pcmd_page_empty;
53+
u8 *pcmd_page;
2854
int ret;
2955

3056
if (secs_page)
3157
page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base);
3258
else
3359
page_index = PFN_DOWN(encl->size);
3460

61+
page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
62+
3563
ret = sgx_encl_get_backing(encl, page_index, &b);
3664
if (ret)
3765
return ret;
3866

3967
pginfo.addr = encl_page->desc & PAGE_MASK;
4068
pginfo.contents = (unsigned long)kmap_atomic(b.contents);
41-
pginfo.metadata = (unsigned long)kmap_atomic(b.pcmd) +
42-
b.pcmd_offset;
69+
pcmd_page = kmap_atomic(b.pcmd);
70+
pginfo.metadata = (unsigned long)pcmd_page + b.pcmd_offset;
4371

4472
if (secs_page)
4573
pginfo.secs = (u64)sgx_get_epc_virt_addr(secs_page);
@@ -55,11 +83,24 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
5583
ret = -EFAULT;
5684
}
5785

58-
kunmap_atomic((void *)(unsigned long)(pginfo.metadata - b.pcmd_offset));
86+
memset(pcmd_page + b.pcmd_offset, 0, sizeof(struct sgx_pcmd));
87+
88+
/*
89+
* The area for the PCMD in the page was zeroed above. Check if the
90+
* whole page is now empty meaning that all PCMD's have been zeroed:
91+
*/
92+
pcmd_page_empty = !memchr_inv(pcmd_page, 0, PAGE_SIZE);
93+
94+
kunmap_atomic(pcmd_page);
5995
kunmap_atomic((void *)(unsigned long)pginfo.contents);
6096

6197
sgx_encl_put_backing(&b, false);
6298

99+
sgx_encl_truncate_backing_page(encl, page_index);
100+
101+
if (pcmd_page_empty)
102+
sgx_encl_truncate_backing_page(encl, PFN_DOWN(page_pcmd_off));
103+
63104
return ret;
64105
}
65106

@@ -579,15 +620,15 @@ static struct page *sgx_encl_get_backing_page(struct sgx_encl *encl,
579620
int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
580621
struct sgx_backing *backing)
581622
{
582-
pgoff_t pcmd_index = PFN_DOWN(encl->size) + 1 + (page_index >> 5);
623+
pgoff_t page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
583624
struct page *contents;
584625
struct page *pcmd;
585626

586627
contents = sgx_encl_get_backing_page(encl, page_index);
587628
if (IS_ERR(contents))
588629
return PTR_ERR(contents);
589630

590-
pcmd = sgx_encl_get_backing_page(encl, pcmd_index);
631+
pcmd = sgx_encl_get_backing_page(encl, PFN_DOWN(page_pcmd_off));
591632
if (IS_ERR(pcmd)) {
592633
put_page(contents);
593634
return PTR_ERR(pcmd);
@@ -596,9 +637,7 @@ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
596637
backing->page_index = page_index;
597638
backing->contents = contents;
598639
backing->pcmd = pcmd;
599-
backing->pcmd_offset =
600-
(page_index & (PAGE_SIZE / sizeof(struct sgx_pcmd) - 1)) *
601-
sizeof(struct sgx_pcmd);
640+
backing->pcmd_offset = page_pcmd_off & (PAGE_SIZE - 1);
602641

603642
return 0;
604643
}

0 commit comments

Comments
 (0)