2020-11-12 22:01:16 +00:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0 */
|
|
|
|
#ifndef _X86_SGX_H
|
|
|
|
#define _X86_SGX_H
|
|
|
|
|
|
|
|
#include <linux/bitops.h>
|
|
|
|
#include <linux/err.h>
|
|
|
|
#include <linux/io.h>
|
|
|
|
#include <linux/rwsem.h>
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <asm/asm.h>
|
|
|
|
#include "arch.h"
|
|
|
|
|
|
|
|
#undef pr_fmt
|
|
|
|
#define pr_fmt(fmt) "sgx: " fmt
|
|
|
|
|
|
|
|
#define SGX_MAX_EPC_SECTIONS 8
|
2020-11-12 22:01:24 +00:00
|
|
|
#define SGX_EEXTEND_BLOCK_SIZE 256
|
x86/sgx: Add a page reclaimer
Just like normal RAM, there is a limited amount of enclave memory available
and overcommitting it is a very valuable tool to reduce resource use.
Introduce a simple reclaim mechanism for enclave pages.
In contrast to normal page reclaim, the kernel cannot directly access
enclave memory. To get around this, the SGX architecture provides a set of
functions to help. Among other things, these functions copy enclave memory
to and from normal memory, encrypting it and protecting its integrity in
the process.
Implement a page reclaimer by using these functions. Picks victim pages in
LRU fashion from all the enclaves running in the system. A new kernel
thread (ksgxswapd) reclaims pages in the background based on watermarks,
similar to normal kswapd.
All enclave pages can be reclaimed, architecturally. But, there are some
limits to this, such as the special SECS metadata page which must be
reclaimed last. The page version array (used to mitigate replaying old
reclaimed pages) is also architecturally reclaimable, but not yet
implemented. The end result is that the vast majority of enclave pages are
currently reclaimable.
Co-developed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Jethro Beekman <jethro@fortanix.com>
Link: https://lkml.kernel.org/r/20201112220135.165028-22-jarkko@kernel.org
2020-11-12 22:01:32 +00:00
|
|
|
#define SGX_NR_TO_SCAN 16
|
|
|
|
#define SGX_NR_LOW_PAGES 32
|
|
|
|
#define SGX_NR_HIGH_PAGES 64
|
|
|
|
|
|
|
|
/* Pages, which are being tracked by the page reclaimer. */
|
|
|
|
#define SGX_EPC_PAGE_RECLAIMER_TRACKED BIT(0)
|
2020-11-12 22:01:16 +00:00
|
|
|
|
|
|
|
/*
 * Descriptor for a single EPC (Enclave Page Cache) page.  One of these is
 * allocated per page of every EPC section (see sgx_epc_section.pages).
 */
struct sgx_epc_page {
	unsigned int section;		/* index into sgx_epc_sections[] */
	unsigned int flags;		/* SGX_EPC_PAGE_* flags, e.g. RECLAIMER_TRACKED */
	struct sgx_encl_page *owner;	/* enclave page backed by this EPC page,
					 * presumably NULL while free — TODO confirm */
	struct list_head list;		/* linkage on a section free list or a
					 * reclaimer list — which one depends on state */
};
|
|
|
|
|
|
|
|
/*
 * The firmware can define multiple chunks of EPC to the different areas of the
 * physical memory, e.g. for memory areas of each node. This structure is
 * used to store EPC pages for one EPC section and the virtual memory area
 * where the pages have been mapped.
 *
 * 'lock' must be held before accessing 'page_list' or 'free_cnt'.
 */
struct sgx_epc_section {
	unsigned long phys_addr;	/* physical base address of the section */
	void *virt_addr;		/* kernel mapping of the section */
	struct sgx_epc_page *pages;	/* descriptor array, one entry per EPC page */

	spinlock_t lock;		/* protects 'page_list' and 'free_cnt' */
	struct list_head page_list;	/* free pages available for allocation */
	unsigned long free_cnt;		/* number of pages on 'page_list' */

	/*
	 * Pages which need EREMOVE run on them before they can be
	 * used. Only safe to be accessed in ksgxd and init code.
	 * Not protected by locks.
	 */
	struct list_head init_laundry_list;
};
|
|
|
|
|
|
|
|
extern struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS];
|
|
|
|
|
|
|
|
static inline unsigned long sgx_get_epc_phys_addr(struct sgx_epc_page *page)
|
|
|
|
{
|
|
|
|
struct sgx_epc_section *section = &sgx_epc_sections[page->section];
|
|
|
|
unsigned long index;
|
|
|
|
|
|
|
|
index = ((unsigned long)page - (unsigned long)section->pages) / sizeof(*page);
|
|
|
|
|
|
|
|
return section->phys_addr + index * PAGE_SIZE;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void *sgx_get_epc_virt_addr(struct sgx_epc_page *page)
|
|
|
|
{
|
|
|
|
struct sgx_epc_section *section = &sgx_epc_sections[page->section];
|
|
|
|
unsigned long index;
|
|
|
|
|
|
|
|
index = ((unsigned long)page - (unsigned long)section->pages) / sizeof(*page);
|
|
|
|
|
|
|
|
return section->virt_addr + index * PAGE_SIZE;
|
|
|
|
}
|
|
|
|
|
2020-11-12 22:01:20 +00:00
|
|
|
struct sgx_epc_page *__sgx_alloc_epc_page(void);
|
|
|
|
void sgx_free_epc_page(struct sgx_epc_page *page);
|
|
|
|
|
x86/sgx: Add a page reclaimer
Just like normal RAM, there is a limited amount of enclave memory available
and overcommitting it is a very valuable tool to reduce resource use.
Introduce a simple reclaim mechanism for enclave pages.
In contrast to normal page reclaim, the kernel cannot directly access
enclave memory. To get around this, the SGX architecture provides a set of
functions to help. Among other things, these functions copy enclave memory
to and from normal memory, encrypting it and protecting its integrity in
the process.
Implement a page reclaimer by using these functions. Picks victim pages in
LRU fashion from all the enclaves running in the system. A new kernel
thread (ksgxswapd) reclaims pages in the background based on watermarks,
similar to normal kswapd.
All enclave pages can be reclaimed, architecturally. But, there are some
limits to this, such as the special SECS metadata page which must be
reclaimed last. The page version array (used to mitigate replaying old
reclaimed pages) is also architecturally reclaimable, but not yet
implemented. The end result is that the vast majority of enclave pages are
currently reclaimable.
Co-developed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Jethro Beekman <jethro@fortanix.com>
Link: https://lkml.kernel.org/r/20201112220135.165028-22-jarkko@kernel.org
2020-11-12 22:01:32 +00:00
|
|
|
void sgx_mark_page_reclaimable(struct sgx_epc_page *page);
|
|
|
|
int sgx_unmark_page_reclaimable(struct sgx_epc_page *page);
|
|
|
|
struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim);
|
|
|
|
|
2020-11-12 22:01:16 +00:00
|
|
|
#endif /* _X86_SGX_H */
|