author    Sean Christopherson <sean.j.christopherson@intel.com>  2020-11-13 00:01:16 +0200
committer Borislav Petkov <bp@suse.de>  2020-11-17 14:36:13 +0100
commit    e7e0545299d8cb0fd6fe3ba50401b7f5c3937362 (patch)
tree      0cbb0933ce71e4d525edcd9c6d61e59b871de5c7 /arch/x86/kernel/cpu/sgx/main.c
parent    d205e0f1426e0f99e2b4f387c49f2d8b66e129dd (diff)
x86/sgx: Initialize metadata for Enclave Page Cache (EPC) sections
Although carved out of normal DRAM, enclave memory is marked in the
system memory map as reserved and is not managed by the core mm. There
may be several regions spread across the system. Each contiguous region
is called an Enclave Page Cache (EPC) section. EPC sections are
enumerated via CPUID. Enclave pages can only be accessed when they are
mapped as part of an enclave, by a hardware thread running inside the
enclave.

Parse CPUID data, create metadata for EPC pages and populate a simple
EPC page allocator. Although much smaller, 'struct sgx_epc_page'
metadata is the SGX analog of the core mm 'struct page'.

Similar to how the core mm's page->flags encode zone and NUMA
information, embed the EPC section index in the first eight bits of
sgx_epc_page->desc. This allows a quick reverse lookup from EPC page to
EPC section. Existing client hardware supports only a single section,
while upcoming server hardware will support at most eight sections.
Thus, eight bits should be enough for long term needs.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Co-developed-by: Serge Ayoun <serge.ayoun@intel.com>
Signed-off-by: Serge Ayoun <serge.ayoun@intel.com>
Co-developed-by: Jarkko Sakkinen <jarkko@kernel.org>
Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Jethro Beekman <jethro@fortanix.com>
Link: https://lkml.kernel.org/r/20201112220135.165028-6-jarkko@kernel.org
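For illustration only (not part of the patch): the quick reverse lookup described above could be sketched as below. sgx_epc_page_section() is a hypothetical helper name; sgx_epc_sections[] and the per-page section field are taken from the code that follows, which stores the full section index rather than packing it into desc.

	static inline struct sgx_epc_section *sgx_epc_page_section(struct sgx_epc_page *page)
	{
		/* The section index recorded at setup selects the owning section. */
		return &sgx_epc_sections[page->section];
	}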
Diffstat (limited to 'arch/x86/kernel/cpu/sgx/main.c')
-rw-r--r--  arch/x86/kernel/cpu/sgx/main.c | 190
1 file changed, 190 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
new file mode 100644
index 000000000000..187a237eec38
--- /dev/null
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2016-20 Intel Corporation. */
+
+#include <linux/freezer.h>
+#include <linux/highmem.h>
+#include <linux/kthread.h>
+#include <linux/pagemap.h>
+#include <linux/ratelimit.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include "encls.h"
+
+struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS];
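+/* The number of EPC sections successfully enumerated and initialized. */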
+static int sgx_nr_epc_sections;
+static struct task_struct *ksgxd_tsk;
+
+/*
+ * Reset dirty EPC pages to uninitialized state. The laundry list can be
+ * left with SECS pages whose child pages blocked EREMOVE; those are
+ * retried on a later pass.
+ */
+static void sgx_sanitize_section(struct sgx_epc_section *section)
+{
+	struct sgx_epc_page *page;
+	LIST_HEAD(dirty);
+	int ret;
+
+	while (!list_empty(&section->laundry_list)) {
+		if (kthread_should_stop())
+			return;
+
+		spin_lock(&section->lock);
+
+		page = list_first_entry(&section->laundry_list,
+					struct sgx_epc_page, list);
+
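+		/*
+		 * EREMOVE succeeds only for a page with no children; a SECS
+		 * page whose enclave still has child pages fails here and
+		 * is retried on the next pass.
+		 */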
+		ret = __eremove(sgx_get_epc_virt_addr(page));
+		if (!ret)
+			list_move(&page->list, &section->page_list);
+		else
+			list_move_tail(&page->list, &dirty);
+
+		spin_unlock(&section->lock);
+
+		cond_resched();
+	}
+
+	list_splice(&dirty, &section->laundry_list);
+}
+
+static int ksgxd(void *p)
+{
+	int i;
+
+	set_freezable();
+
+	/*
+	 * Sanitize pages in order to recover from kexec(). The 2nd pass is
+	 * required for SECS pages, whose child pages blocked EREMOVE.
+	 */
+	for (i = 0; i < sgx_nr_epc_sections; i++)
+		sgx_sanitize_section(&sgx_epc_sections[i]);
+
+	for (i = 0; i < sgx_nr_epc_sections; i++) {
+		sgx_sanitize_section(&sgx_epc_sections[i]);
+
+		/* Should never happen. */
+		if (!list_empty(&sgx_epc_sections[i].laundry_list))
+			WARN(1, "EPC section %d has unsanitized pages.\n", i);
+	}
+
+	return 0;
+}
+
+static bool __init sgx_page_reclaimer_init(void)
+{
+	struct task_struct *tsk;
+
+	tsk = kthread_run(ksgxd, NULL, "ksgxd");
+	if (IS_ERR(tsk))
+		return false;
+
+	ksgxd_tsk = tsk;
+
+	return true;
+}
+
+static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
+					 unsigned long index,
+					 struct sgx_epc_section *section)
+{
+	unsigned long nr_pages = size >> PAGE_SHIFT;
+	unsigned long i;
+
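+	/*
+	 * EPC is marked reserved in the memory map, so the kernel does not
+	 * map it by default; create a write-back mapping for it here.
+	 */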
+	section->virt_addr = memremap(phys_addr, size, MEMREMAP_WB);
+	if (!section->virt_addr)
+		return false;
+
+	section->pages = vmalloc(nr_pages * sizeof(struct sgx_epc_page));
+	if (!section->pages) {
+		memunmap(section->virt_addr);
+		return false;
+	}
+
+	section->phys_addr = phys_addr;
+	spin_lock_init(&section->lock);
+	INIT_LIST_HEAD(&section->page_list);
+	INIT_LIST_HEAD(&section->laundry_list);
+
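+	/*
+	 * All pages start out on the laundry list; ksgxd moves sanitized
+	 * pages to the free page_list.
+	 */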
+	for (i = 0; i < nr_pages; i++) {
+		section->pages[i].section = index;
+		list_add_tail(&section->pages[i].list, &section->laundry_list);
+	}
+
+	return true;
+}
+
+/*
+ * A section metric is concatenated from two CPUID registers: bits 12-31
+ * of @low become bits 12-31 of the metric, and bits 0-19 of @high become
+ * bits 32-51 of the metric. For example, low = 0x80000000 and high = 0x1
+ * yield the metric 0x180000000.
+ */
+static inline u64 __init sgx_calc_section_metric(u64 low, u64 high)
+{
+	return (low & GENMASK_ULL(31, 12)) +
+	       ((high & GENMASK_ULL(19, 0)) << 32);
+}
+
+static bool __init sgx_page_cache_init(void)
+{
+	u32 eax, ebx, ecx, edx, type;
+	u64 pa, size;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sgx_epc_sections); i++) {
+		cpuid_count(SGX_CPUID, i + SGX_CPUID_EPC, &eax, &ebx, &ecx, &edx);
+
+		type = eax & SGX_CPUID_EPC_MASK;
+		if (type == SGX_CPUID_EPC_INVALID)
+			break;
+
+		if (type != SGX_CPUID_EPC_SECTION) {
+			pr_err_once("Unknown EPC section type: %u\n", type);
+			break;
+		}
+
+		pa = sgx_calc_section_metric(eax, ebx);
+		size = sgx_calc_section_metric(ecx, edx);
+
+		pr_info("EPC section 0x%llx-0x%llx\n", pa, pa + size - 1);
+
+		if (!sgx_setup_epc_section(pa, size, i, &sgx_epc_sections[i])) {
+			pr_err("No free memory for an EPC section\n");
+			break;
+		}
+
+		sgx_nr_epc_sections++;
+	}
+
+	if (!sgx_nr_epc_sections) {
+		pr_err("There are zero EPC sections.\n");
+		return false;
+	}
+
+	return true;
+}
+
+static int __init sgx_init(void)
+{
+	int i;
+
+	if (!boot_cpu_has(X86_FEATURE_SGX))
+		return -ENODEV;
+
+	if (!sgx_page_cache_init())
+		return -ENOMEM;
+
+	if (!sgx_page_reclaimer_init())
+		goto err_page_cache;
+
+	return 0;
+
+err_page_cache:
+	for (i = 0; i < sgx_nr_epc_sections; i++) {
+		vfree(sgx_epc_sections[i].pages);
+		memunmap(sgx_epc_sections[i].virt_addr);
+	}
+
+	/* device_initcall() expects an int-returning initcall. */
+	return -ENOMEM;
+}
+
+device_initcall(sgx_init);
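
For illustration only (not part of the patch): the CPUID enumeration performed by sgx_page_cache_init() can be sketched as a standalone userspace program. The literal values below are assumptions consistent with the constants used above: leaf 0x12 for SGX_CPUID, sub-leaf base 2 for SGX_CPUID_EPC, the low four bits as SGX_CPUID_EPC_MASK, and type 1 for SGX_CPUID_EPC_SECTION. Output is only meaningful on hardware whose CPUID reports SGX support.

	#include <cpuid.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t eax, ebx, ecx, edx;
		uint64_t pa, size;
		int i;

		for (i = 0; ; i++) {
			/* Leaf 0x12; EPC sub-leaves start at 2 (assumed values). */
			__cpuid_count(0x12, i + 2, eax, ebx, ecx, edx);

			/* Type 0 terminates the list; type 1 is a valid section. */
			if ((eax & 0xf) != 1)
				break;

			/* Same bit concatenation as sgx_calc_section_metric(). */
			pa = (eax & 0xfffff000u) | ((uint64_t)(ebx & 0xfffff) << 32);
			size = (ecx & 0xfffff000u) | ((uint64_t)(edx & 0xfffff) << 32);

			printf("EPC section %d: 0x%llx-0x%llx\n", i,
			       (unsigned long long)pa,
			       (unsigned long long)(pa + size - 1));
		}

		return 0;
	}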