Diffstat (limited to 'arch/arm64/mm/pageattr.c'):
 arch/arm64/mm/pageattr.c | 98 +++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 94 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 0e270a1c51e6..6ae6ae806454 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -5,10 +5,12 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/mem_encrypt.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
+#include <asm/pgtable-prot.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <asm/kfence.h>
@@ -23,14 +25,16 @@ bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED
bool can_set_direct_map(void)
{
/*
- * rodata_full and DEBUG_PAGEALLOC require linear map to be
- * mapped at page granularity, so that it is possible to
+ * rodata_full, DEBUG_PAGEALLOC and a Realm guest all require the linear
+ * map to be mapped at page granularity, so that it is possible to
* protect/unprotect single pages.
*
* KFENCE pool requires page-granular mapping if initialized late.
+ *
+ * Realms need to make pages shared/protected at page granularity.
*/
return rodata_full || debug_pagealloc_enabled() ||
- arm64_kfence_can_set_direct_map();
+ arm64_kfence_can_set_direct_map() || is_realm_world();
}
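
For context, a hedged illustration of how this predicate is consumed: when it returns true, the boot-time linear map is created at page granularity so the helpers below can flip individual PTEs. The call-site pattern in the arm64 linear-map setup code looks roughly like this (a sketch, not part of this patch):

    /* Force page-granular mappings so single PTEs can be changed later;
     * in a Realm guest can_set_direct_map() is now always true. */
    if (can_set_direct_map())
        flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
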
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
@@ -60,7 +64,13 @@ static int __change_memory_common(unsigned long start, unsigned long size,
ret = apply_to_page_range(&init_mm, start, size, change_page_range,
&data);
- flush_tlb_kernel_range(start, start + size);
+ /*
+ * If the memory is being made valid without changing any other bits,
+ * then a TLBI isn't required as a non-valid entry cannot be cached in
+ * the TLB.
+ */
+ if (pgprot_val(set_mask) != PTE_VALID || pgprot_val(clear_mask))
+ flush_tlb_kernel_range(start, start + size);
return ret;
}
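
To make the new condition concrete, the two cases look like this (a sketch using only the helper above):

    /* Invalid -> valid with no other bits changing: the entries cannot
     * have been cached in the TLB while invalid, so the TLBI is skipped. */
    __change_memory_common(addr, size, __pgprot(PTE_VALID), __pgprot(0));

    /* Clearing PTE_VALID breaks the mapping; stale entries may exist,
     * so flush_tlb_kernel_range() still runs. */
    __change_memory_common(addr, size, __pgprot(0), __pgprot(PTE_VALID));
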
@@ -192,6 +202,86 @@ int set_direct_map_default_noflush(struct page *page)
PAGE_SIZE, change_page_range, &data);
}
+static int __set_memory_enc_dec(unsigned long addr,
+ int numpages,
+ bool encrypt)
+{
+ unsigned long set_prot = 0, clear_prot = 0;
+ phys_addr_t start, end;
+ int ret;
+
+ if (!is_realm_world())
+ return 0;
+
+ if (!__is_lm_address(addr))
+ return -EINVAL;
+
+ start = __virt_to_phys(addr);
+ end = start + numpages * PAGE_SIZE;
+
+ if (encrypt)
+ clear_prot = PROT_NS_SHARED;
+ else
+ set_prot = PROT_NS_SHARED;
+
+ /*
+ * Break the mapping before we make any changes to avoid stale TLB
+ * entries or Synchronous External Aborts caused by RIPAS_EMPTY.
+ */
+ ret = __change_memory_common(addr, PAGE_SIZE * numpages,
+ __pgprot(set_prot),
+ __pgprot(clear_prot | PTE_VALID));
+
+ if (ret)
+ return ret;
+
+ if (encrypt)
+ ret = rsi_set_memory_range_protected(start, end);
+ else
+ ret = rsi_set_memory_range_shared(start, end);
+
+ if (ret)
+ return ret;
+
+ return __change_memory_common(addr, PAGE_SIZE * numpages,
+ __pgprot(PTE_VALID),
+ __pgprot(0));
+}
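
Read as a protocol, each transition runs in three phases (a hedged summary; the RIPAS details live in the RSI helpers, not in this file):

    /* 1. Break the mapping: clear PTE_VALID while flipping
     *    PROT_NS_SHARED, so no stale TLB entry survives and nothing
     *    can fault on a page whose RIPAS becomes EMPTY. */
    /* 2. Have the RMM change the RIPAS of the physical range via the
     *    RSI calls above. */
    /* 3. Re-validate the PTE, now pointing at the other IPA alias;
     *    per __change_memory_common() this needs no TLBI. */
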
+
+static int realm_set_memory_encrypted(unsigned long addr, int numpages)
+{
+ int ret = __set_memory_enc_dec(addr, numpages, true);
+
+ /*
+ * If the request to change state fails, then the only sensible course
+ * of action for the caller is to leak the memory.
+ */
+ WARN(ret, "Failed to encrypt memory, %d pages will be leaked",
+ numpages);
+
+ return ret;
+}
+
+static int realm_set_memory_decrypted(unsigned long addr, int numpages)
+{
+ int ret = __set_memory_enc_dec(addr, numpages, false);
+
+ WARN(ret, "Failed to decrypt memory, %d pages will be leaked",
+ numpages);
+
+ return ret;
+}
+
+static const struct arm64_mem_crypt_ops realm_crypt_ops = {
+ .encrypt = realm_set_memory_encrypted,
+ .decrypt = realm_set_memory_decrypted,
+};
+
+int realm_register_memory_enc_ops(void)
+{
+ return arm64_mem_crypt_ops_register(&realm_crypt_ops);
+}
+
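
For context, a hedged sketch of the wiring: the registration call is expected to come from the Realm init path, and drivers then reach these ops through the generic set_memory_encrypted()/set_memory_decrypted() helpers. The call sites below are illustrative assumptions, not part of this file:

    /* Illustrative init-time registration (assumed call site): */
    if (is_realm_world())
        WARN_ON(realm_register_memory_enc_ops());

    /* A driver sharing one page with the host then simply does: */
    ret = set_memory_decrypted((unsigned long)vaddr, 1);
    if (ret)
        return ret;	/* do not free: the page must be leaked, see the WARN above */
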
#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{