Diffstat (limited to 'arch/arm64/kernel/mte.c')
-rw-r--r--	arch/arm64/kernel/mte.c	468
1 file changed, 330 insertions(+), 138 deletions(-)
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 69b3fde8759e..32148bf09c1d 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -4,6 +4,7 @@
*/
#include <linux/bitops.h>
+#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/prctl.h>
@@ -14,6 +15,7 @@
#include <linux/swapops.h>
#include <linux/thread_info.h>
#include <linux/types.h>
+#include <linux/uaccess.h>
#include <linux/uio.h>
#include <asm/barrier.h>
@@ -22,58 +24,49 @@
#include <asm/ptrace.h>
#include <asm/sysreg.h>
-u64 gcr_kernel_excl __ro_after_init;
-
-static bool report_fault_once = true;
+static DEFINE_PER_CPU_READ_MOSTLY(u64, mte_tcf_preferred);
#ifdef CONFIG_KASAN_HW_TAGS
-/* Whether the MTE asynchronous mode is enabled. */
-DEFINE_STATIC_KEY_FALSE(mte_async_mode);
-EXPORT_SYMBOL_GPL(mte_async_mode);
+/*
+ * The asynchronous and asymmetric MTE modes have the same behavior for
+ * store operations. This flag is set when either of these modes is enabled.
+ */
+DEFINE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
+EXPORT_SYMBOL_GPL(mte_async_or_asymm_mode);
#endif
-static void mte_sync_page_tags(struct page *page, pte_t old_pte,
- bool check_swap, bool pte_is_tagged)
+void mte_sync_tags(pte_t pte, unsigned int nr_pages)
{
- if (check_swap && is_swap_pte(old_pte)) {
- swp_entry_t entry = pte_to_swp_entry(old_pte);
-
- if (!non_swap_entry(entry) && mte_restore_tags(entry, page))
- return;
- }
+ struct page *page = pte_page(pte);
+ struct folio *folio = page_folio(page);
+ unsigned long i;
- if (!pte_is_tagged)
- return;
+ if (folio_test_hugetlb(folio)) {
+ unsigned long nr = folio_nr_pages(folio);
- page_kasan_tag_reset(page);
- /*
- * We need smp_wmb() in between setting the flags and clearing the
- * tags because if another thread reads page->flags and builds a
- * tagged address out of it, there is an actual dependency to the
- * memory access, but on the current thread we do not guarantee that
- * the new page->flags are visible before the tags were updated.
- */
- smp_wmb();
- mte_clear_page_tags(page_address(page));
-}
+ /* Hugetlb MTE flags are set for head page only */
+ if (folio_try_hugetlb_mte_tagging(folio)) {
+ for (i = 0; i < nr; i++, page++)
+ mte_clear_page_tags(page_address(page));
+ folio_set_hugetlb_mte_tagged(folio);
+ }
-void mte_sync_tags(pte_t old_pte, pte_t pte)
-{
- struct page *page = pte_page(pte);
- long i, nr_pages = compound_nr(page);
- bool check_swap = nr_pages == 1;
- bool pte_is_tagged = pte_tagged(pte);
+ /* ensure the tags are visible before the PTE is set */
+ smp_wmb();
- /* Early out if there's nothing to do */
- if (!check_swap && !pte_is_tagged)
return;
+ }
/* if PG_mte_tagged is set, tags have already been initialised */
for (i = 0; i < nr_pages; i++, page++) {
- if (!test_and_set_bit(PG_mte_tagged, &page->flags))
- mte_sync_page_tags(page, old_pte, check_swap,
- pte_is_tagged);
+ if (try_page_mte_tagging(page)) {
+ mte_clear_page_tags(page_address(page));
+ set_page_mte_tagged(page);
+ }
}
+
+ /* ensure the tags are visible before the PTE is set */
+ smp_wmb();
}
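
For orientation, a minimal hypothetical sketch (not part of this patch) of the ordering contract the smp_wmb() above establishes. The real caller is the arch PTE setter (__set_ptes() and friends); the invariant is that tag initialisation is ordered before the store that publishes the PTE:

/*
 * Hypothetical caller sketch, illustration only: a CPU that observes
 * the new PTE must also observe the initialised tags, which is what
 * the smp_wmb() in mte_sync_tags() guarantees.
 */
static void example_install_tagged_pte(pte_t *ptep, pte_t pte,
				       unsigned int nr_pages)
{
	if (pte_present(pte) && pte_tagged(pte))
		mte_sync_tags(pte, nr_pages);	/* clear tags, then smp_wmb() */

	WRITE_ONCE(*ptep, pte);			/* PTE becomes visible last */
}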
int memcmp_pages(struct page *page1, struct page *page2)
@@ -91,40 +84,20 @@ int memcmp_pages(struct page *page1, struct page *page2)
/*
* If the page content is identical but at least one of the pages is
* tagged, return non-zero to avoid KSM merging. If only one of the
- * pages is tagged, set_pte_at() may zero or change the tags of the
+ * pages is tagged, __set_ptes() may zero or change the tags of the
* other page via mte_sync_tags().
*/
- if (test_bit(PG_mte_tagged, &page1->flags) ||
- test_bit(PG_mte_tagged, &page2->flags))
+ if (page_mte_tagged(page1) || page_mte_tagged(page2))
return addr1 != addr2;
return ret;
}
-void mte_init_tags(u64 max_tag)
-{
- static bool gcr_kernel_excl_initialized;
-
- if (!gcr_kernel_excl_initialized) {
- /*
- * The format of the tags in KASAN is 0xFF and in MTE is 0xF.
- * This conversion extracts an MTE tag from a KASAN tag.
- */
- u64 incl = GENMASK(FIELD_GET(MTE_TAG_MASK >> MTE_TAG_SHIFT,
- max_tag), 0);
-
- gcr_kernel_excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
- gcr_kernel_excl_initialized = true;
- }
-
- /* Enable the kernel exclude mask for random tags generation. */
- write_sysreg_s(SYS_GCR_EL1_RRND | gcr_kernel_excl, SYS_GCR_EL1);
-}
-
static inline void __mte_enable_kernel(const char *mode, unsigned long tcf)
{
/* Enable MTE Sync Mode for EL1. */
- sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, tcf);
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF_MASK,
+ SYS_FIELD_PREP(SCTLR_EL1, TCF, tcf));
isb();
pr_info_once("MTE: enabled in %s mode at EL1\n", mode);
@@ -137,15 +110,15 @@ void mte_enable_kernel_sync(void)
* Make sure we enter this function when no PE has set
* async mode previously.
*/
- WARN_ONCE(system_uses_mte_async_mode(),
+ WARN_ONCE(system_uses_mte_async_or_asymm_mode(),
"MTE async mode enabled system wide!");
- __mte_enable_kernel("synchronous", SCTLR_ELx_TCF_SYNC);
+ __mte_enable_kernel("synchronous", SCTLR_EL1_TCF_SYNC);
}
void mte_enable_kernel_async(void)
{
- __mte_enable_kernel("asynchronous", SCTLR_ELx_TCF_ASYNC);
+ __mte_enable_kernel("asynchronous", SCTLR_EL1_TCF_ASYNC);
/*
* MTE async mode is set system wide by the first PE that
@@ -155,30 +128,59 @@ void mte_enable_kernel_async(void)
* mode in between sync and async, this strategy needs
* to be reviewed.
*/
- if (!system_uses_mte_async_mode())
- static_branch_enable(&mte_async_mode);
+ if (!system_uses_mte_async_or_asymm_mode())
+ static_branch_enable(&mte_async_or_asymm_mode);
}
-#endif
-void mte_set_report_once(bool state)
+void mte_enable_kernel_asymm(void)
{
- WRITE_ONCE(report_fault_once, state);
+ if (cpus_have_cap(ARM64_MTE_ASYMM)) {
+ __mte_enable_kernel("asymmetric", SCTLR_EL1_TCF_ASYMM);
+
+ /*
+ * MTE asymm mode behaves as async mode for store
+ * operations. The mode is set system wide by the
+ * first PE that executes this function.
+ *
+ * Note: If in future KASAN acquires a runtime switching
+ * mode in between sync and async, this strategy needs
+ * to be reviewed.
+ */
+ if (!system_uses_mte_async_or_asymm_mode())
+ static_branch_enable(&mte_async_or_asymm_mode);
+ } else {
+ /*
+ * If the CPU does not support MTE asymmetric mode, the
+ * kernel falls back on synchronous mode, which is the
+ * default for kasan=on.
+ */
+ mte_enable_kernel_sync();
+ }
}
-bool mte_report_once(void)
+int mte_enable_kernel_store_only(void)
{
- return READ_ONCE(report_fault_once);
+ /*
+ * If the CPU does not support MTE store-only mode,
+ * the kernel checks all operations.
+ */
+ if (!cpus_have_cap(ARM64_MTE_STORE_ONLY))
+ return -EINVAL;
+
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCSO_MASK,
+ SYS_FIELD_PREP(SCTLR_EL1, TCSO, 1));
+ isb();
+
+ pr_info_once("MTE: enabled store only mode at EL1\n");
+
+ return 0;
}
+#endif
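
A hedged sketch of how a caller can consume the new return value (illustrative only; the function name below is not from this patch):

/* Illustrative only: tolerate CPUs without FEAT_MTE_STORE_ONLY. */
static void example_try_store_only(void)
{
	if (mte_enable_kernel_store_only() == -EINVAL)
		pr_info("MTE: store-only unavailable, checking all accesses\n");
}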
#ifdef CONFIG_KASAN_HW_TAGS
void mte_check_tfsr_el1(void)
{
- u64 tfsr_el1;
-
- if (!system_supports_mte())
- return;
-
- tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
+ u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
/*
@@ -193,28 +195,78 @@ void mte_check_tfsr_el1(void)
}
#endif
-static void update_gcr_el1_excl(u64 excl)
+/*
+ * This is where we resolve the system and process MTE mode
+ * configuration into the actual SCTLR_EL1 value that affects
+ * userspace.
+ */
+static void mte_update_sctlr_user(struct task_struct *task)
{
+ /*
+ * This must be called with preemption disabled and can only be called
+ * on the current or next task since the CPU must match where the thread
+ * is going to run. The caller is responsible for calling
+ * update_sctlr_el1() later in the same preemption disabled block.
+ */
+ unsigned long sctlr = task->thread.sctlr_user;
+ unsigned long mte_ctrl = task->thread.mte_ctrl;
+ unsigned long pref, resolved_mte_tcf;
+ pref = __this_cpu_read(mte_tcf_preferred);
+ /*
+ * If there is no overlap between the system preferred and
+ * program requested values go with what was requested.
+ */
+ resolved_mte_tcf = (mte_ctrl & pref) ? pref : mte_ctrl;
+ sctlr &= ~(SCTLR_EL1_TCF0_MASK | SCTLR_EL1_TCSO0_MASK);
/*
- * Note that the mask controlled by the user via prctl() is an
- * include while GCR_EL1 accepts an exclude mask.
- * No need for ISB since this only affects EL0 currently, implicit
- * with ERET.
+ * Pick an actual setting. The order in which we check for
+ * set bits and map into register values determines the
+ * precedence: asymm first, then async, then sync.
*/
- sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
+ if (resolved_mte_tcf & MTE_CTRL_TCF_ASYMM)
+ sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, ASYMM);
+ else if (resolved_mte_tcf & MTE_CTRL_TCF_ASYNC)
+ sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, ASYNC);
+ else if (resolved_mte_tcf & MTE_CTRL_TCF_SYNC)
+ sctlr |= SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF0, SYNC);
+
+ if (mte_ctrl & MTE_CTRL_STORE_ONLY)
+ sctlr |= SYS_FIELD_PREP(SCTLR_EL1, TCSO0, 1);
+
+ task->thread.sctlr_user = sctlr;
}
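
A worked example of the resolution above, with illustrative values:

/*
 * Task asked for both modes via prctl(), so on FEAT_MTE_ASYMM hardware:
 *	mte_ctrl = MTE_CTRL_TCF_SYNC | MTE_CTRL_TCF_ASYNC | MTE_CTRL_TCF_ASYMM
 *	pref	 = MTE_CTRL_TCF_ASYNC		(this CPU's preference)
 *
 *	mte_ctrl & pref != 0	=> resolved_mte_tcf = pref
 *				=> SCTLR_EL1.TCF0 = ASYNC on this CPU
 *
 * On a CPU preferring MTE_CTRL_TCF_ASYMM the same task gets TCF0 = ASYMM.
 * With no overlap at all (task asked for sync only, CPU prefers async),
 * resolved_mte_tcf falls back to mte_ctrl and the chain picks sync.
 */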
-static void set_gcr_el1_excl(u64 excl)
+static void mte_update_gcr_excl(struct task_struct *task)
{
- current->thread.gcr_user_excl = excl;
-
/*
- * SYS_GCR_EL1 will be set to current->thread.gcr_user_excl value
- * by mte_set_user_gcr() in kernel_exit,
+ * SYS_GCR_EL1 will be set to current->thread.mte_ctrl value by
+ * mte_set_user_gcr() in kernel_exit, but only if KASAN is enabled.
*/
+ if (kasan_hw_tags_enabled())
+ return;
+
+ write_sysreg_s(
+ ((task->thread.mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) &
+ SYS_GCR_EL1_EXCL_MASK) | SYS_GCR_EL1_RRND,
+ SYS_GCR_EL1);
}
+#ifdef CONFIG_KASAN_HW_TAGS
+/* Only called from assembly, silence sparse */
+void __init kasan_hw_tags_enable(struct alt_instr *alt, __le32 *origptr,
+ __le32 *updptr, int nr_inst);
+
+void __init kasan_hw_tags_enable(struct alt_instr *alt, __le32 *origptr,
+ __le32 *updptr, int nr_inst)
+{
+ BUG_ON(nr_inst != 1); /* Branch -> NOP */
+
+ if (kasan_hw_tags_enabled())
+ *updptr = cpu_to_le32(aarch64_insn_gen_nop());
+}
+#endif
+
void mte_thread_init_user(void)
{
if (!system_supports_mte())
@@ -224,15 +276,21 @@ void mte_thread_init_user(void)
dsb(ish);
write_sysreg_s(0, SYS_TFSRE0_EL1);
clear_thread_flag(TIF_MTE_ASYNC_FAULT);
- /* disable tag checking */
- set_task_sctlr_el1((current->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK) |
- SCTLR_EL1_TCF0_NONE);
- /* reset tag generation mask */
- set_gcr_el1_excl(SYS_GCR_EL1_EXCL_MASK);
+ /* disable tag checking and reset tag generation mask */
+ set_mte_ctrl(current, 0);
}
void mte_thread_switch(struct task_struct *next)
{
+ if (!system_supports_mte())
+ return;
+
+ mte_update_sctlr_user(next);
+ mte_update_gcr_excl(next);
+
+ /* TCO may not have been disabled on exception entry for the current task. */
+ mte_disable_tco_entry(next);
+
/*
* Check if an async tag exception occurred at EL1.
*
@@ -244,6 +302,49 @@ void mte_thread_switch(struct task_struct *next)
mte_check_tfsr_el1();
}
+void mte_cpu_setup(void)
+{
+ u64 rgsr;
+
+ /*
+ * CnP must be enabled only after the MAIR_EL1 register has been set
+ * up. Inconsistent MAIR_EL1 between CPUs sharing the same TLB may
+ * lead to the wrong memory type being used for a brief window during
+ * CPU power-up.
+ *
+ * CnP is not a boot feature so MTE gets enabled before CnP, but let's
+ * make sure that is the case.
+ */
+ BUG_ON(read_sysreg(ttbr0_el1) & TTBR_CNP_BIT);
+ BUG_ON(read_sysreg(ttbr1_el1) & TTBR_CNP_BIT);
+
+ /* Normal Tagged memory type at the corresponding MAIR index */
+ sysreg_clear_set(mair_el1,
+ MAIR_ATTRIDX(MAIR_ATTR_MASK, MT_NORMAL_TAGGED),
+ MAIR_ATTRIDX(MAIR_ATTR_NORMAL_TAGGED,
+ MT_NORMAL_TAGGED));
+
+ write_sysreg_s(KERNEL_GCR_EL1, SYS_GCR_EL1);
+
+ /*
+ * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
+ * RGSR_EL1.SEED must be non-zero for IRG to produce
+ * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
+ * must initialize it.
+ */
+ rgsr = (read_sysreg(CNTVCT_EL0) & SYS_RGSR_EL1_SEED_MASK) <<
+ SYS_RGSR_EL1_SEED_SHIFT;
+ if (rgsr == 0)
+ rgsr = 1 << SYS_RGSR_EL1_SEED_SHIFT;
+ write_sysreg_s(rgsr, SYS_RGSR_EL1);
+
+ /* clear any pending tag check faults in TFSR*_EL1 */
+ write_sysreg_s(0, SYS_TFSR_EL1);
+ write_sysreg_s(0, SYS_TFSRE0_EL1);
+
+ local_flush_tlb_all();
+}
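
The seeding arithmetic assumes the RGSR_EL1 layout from asm/sysreg.h, i.e. a 16-bit SEED field at bit 8; a quick worked example under that assumption:

/*
 * Assuming SYS_RGSR_EL1_SEED_MASK == 0xffff, SYS_RGSR_EL1_SEED_SHIFT == 8:
 *
 *	CNTVCT_EL0 == 0x12345678  =>  rgsr = 0x5678 << 8
 *	CNTVCT_EL0 == 0xabcd0000  =>  low 16 bits are zero, rgsr = 1 << 8
 */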
+
void mte_suspend_enter(void)
{
if (!system_supports_mte())
@@ -265,38 +366,43 @@ void mte_suspend_exit(void)
if (!system_supports_mte())
return;
- update_gcr_el1_excl(gcr_kernel_excl);
+ mte_cpu_setup();
}
long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
- u64 sctlr = task->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK;
- u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
- SYS_GCR_EL1_EXCL_MASK;
+ u64 mte_ctrl = (~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
+ SYS_GCR_EL1_EXCL_MASK) << MTE_CTRL_GCR_USER_EXCL_SHIFT;
if (!system_supports_mte())
return 0;
- switch (arg & PR_MTE_TCF_MASK) {
- case PR_MTE_TCF_NONE:
- sctlr |= SCTLR_EL1_TCF0_NONE;
- break;
- case PR_MTE_TCF_SYNC:
- sctlr |= SCTLR_EL1_TCF0_SYNC;
- break;
- case PR_MTE_TCF_ASYNC:
- sctlr |= SCTLR_EL1_TCF0_ASYNC;
- break;
- default:
- return -EINVAL;
- }
+ if (arg & PR_MTE_TCF_ASYNC)
+ mte_ctrl |= MTE_CTRL_TCF_ASYNC;
+ if (arg & PR_MTE_TCF_SYNC)
+ mte_ctrl |= MTE_CTRL_TCF_SYNC;
- if (task != current) {
- task->thread.sctlr_user = sctlr;
- task->thread.gcr_user_excl = gcr_excl;
- } else {
- set_task_sctlr_el1(sctlr);
- set_gcr_el1_excl(gcr_excl);
+ /*
+ * If the system supports it and both sync and async modes are
+ * specified then implicitly enable asymmetric mode.
+ * Userspace could see a mix of both sync and async anyway due
+ * to differing or changing defaults on CPUs.
+ */
+ if (cpus_have_cap(ARM64_MTE_ASYMM) &&
+ (arg & PR_MTE_TCF_ASYNC) &&
+ (arg & PR_MTE_TCF_SYNC))
+ mte_ctrl |= MTE_CTRL_TCF_ASYMM;
+
+ if (arg & PR_MTE_STORE_ONLY)
+ mte_ctrl |= MTE_CTRL_STORE_ONLY;
+
+ task->thread.mte_ctrl = mte_ctrl;
+ if (task == current) {
+ preempt_disable();
+ mte_update_sctlr_user(task);
+ mte_update_gcr_excl(task);
+ update_sctlr_el1(task->thread.sctlr_user);
+ preempt_enable();
}
return 0;
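
From userspace the new control bits are driven via prctl(PR_SET_TAGGED_ADDR_CTRL). A minimal sketch, assuming headers that expose the PR_MTE_* constants from linux/prctl.h:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/*
	 * Request both sync and async checking (the kernel may then pick
	 * asymm on FEAT_MTE_ASYMM hardware) and allow random tags 1..15,
	 * excluding tag 0 from generation.
	 */
	unsigned long ctrl = PR_TAGGED_ADDR_ENABLE |
			     PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC |
			     (0xfffeUL << PR_MTE_TAG_SHIFT);

	if (prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0))
		perror("PR_SET_TAGGED_ADDR_CTRL");
	return 0;
}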
@@ -305,24 +411,20 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
long get_mte_ctrl(struct task_struct *task)
{
unsigned long ret;
- u64 incl = ~task->thread.gcr_user_excl & SYS_GCR_EL1_EXCL_MASK;
+ u64 mte_ctrl = task->thread.mte_ctrl;
+ u64 incl = (~mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) &
+ SYS_GCR_EL1_EXCL_MASK;
if (!system_supports_mte())
return 0;
ret = incl << PR_MTE_TAG_SHIFT;
-
- switch (task->thread.sctlr_user & SCTLR_EL1_TCF0_MASK) {
- case SCTLR_EL1_TCF0_NONE:
- ret |= PR_MTE_TCF_NONE;
- break;
- case SCTLR_EL1_TCF0_SYNC:
- ret |= PR_MTE_TCF_SYNC;
- break;
- case SCTLR_EL1_TCF0_ASYNC:
+ if (mte_ctrl & MTE_CTRL_TCF_ASYNC)
ret |= PR_MTE_TCF_ASYNC;
- break;
- }
+ if (mte_ctrl & MTE_CTRL_TCF_SYNC)
+ ret |= PR_MTE_TCF_SYNC;
+ if (mte_ctrl & MTE_CTRL_STORE_ONLY)
+ ret |= PR_MTE_STORE_ONLY;
return ret;
}
@@ -335,10 +437,9 @@ long get_mte_ctrl(struct task_struct *task)
static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
struct iovec *kiov, unsigned int gup_flags)
{
- struct vm_area_struct *vma;
void __user *buf = kiov->iov_base;
size_t len = kiov->iov_len;
- int ret;
+ int err = 0;
int write = gup_flags & FOLL_WRITE;
if (!access_ok(buf, len))
@@ -348,14 +449,17 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
return -EIO;
while (len) {
+ struct vm_area_struct *vma;
unsigned long tags, offset;
void *maddr;
- struct page *page = NULL;
+ struct page *page = get_user_page_vma_remote(mm, addr,
+ gup_flags, &vma);
+ struct folio *folio;
- ret = get_user_pages_remote(mm, addr, 1, gup_flags, &page,
- &vma, NULL);
- if (ret <= 0)
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
break;
+ }
/*
* Only copy tags if the page has been mapped as PROT_MTE
@@ -365,11 +469,17 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
* was never mapped with PROT_MTE.
*/
if (!(vma->vm_flags & VM_MTE)) {
- ret = -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
put_page(page);
break;
}
- WARN_ON_ONCE(!test_bit(PG_mte_tagged, &page->flags));
+
+ folio = page_folio(page);
+ if (folio_test_hugetlb(folio))
+ WARN_ON_ONCE(!folio_test_hugetlb_mte_tagged(folio) &&
+ !is_huge_zero_folio(folio));
+ else
+ WARN_ON_ONCE(!page_mte_tagged(page) && !is_zero_page(page));
/* limit access to the end of the page */
offset = offset_in_page(addr);
@@ -398,7 +508,7 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
kiov->iov_len = buf - kiov->iov_base;
if (!kiov->iov_len) {
/* check for error accessing the tracee's address space */
- if (ret <= 0)
+ if (err)
return -EIO;
else
return -EFAULT;
@@ -461,3 +571,85 @@ int mte_ptrace_copy_tags(struct task_struct *child, long request,
return ret;
}
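
For completeness, a hedged tracer-side sketch of the interface this path backs. PTRACE_PEEKMTETAGS packs one tag byte per 16-byte granule into the iovec; the request constant comes from the arm64 UAPI headers and may not be exposed by older libcs, and 'pid'/'remote_addr' are assumed to name a stopped tracee and a PROT_MTE mapping in it:

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static long example_peek_tags(pid_t pid, void *remote_addr)
{
	char tags[16];			/* one tag byte per granule */
	struct iovec iov = {
		.iov_base = tags,
		.iov_len  = sizeof(tags),
	};

	/* on return iov.iov_len holds the number of tags copied */
	return ptrace(PTRACE_PEEKMTETAGS, pid, remote_addr, &iov);
}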
+
+static ssize_t mte_tcf_preferred_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ switch (per_cpu(mte_tcf_preferred, dev->id)) {
+ case MTE_CTRL_TCF_ASYNC:
+ return sysfs_emit(buf, "async\n");
+ case MTE_CTRL_TCF_SYNC:
+ return sysfs_emit(buf, "sync\n");
+ case MTE_CTRL_TCF_ASYMM:
+ return sysfs_emit(buf, "asymm\n");
+ default:
+ return sysfs_emit(buf, "???\n");
+ }
+}
+
+static ssize_t mte_tcf_preferred_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u64 tcf;
+
+ if (sysfs_streq(buf, "async"))
+ tcf = MTE_CTRL_TCF_ASYNC;
+ else if (sysfs_streq(buf, "sync"))
+ tcf = MTE_CTRL_TCF_SYNC;
+ else if (cpus_have_cap(ARM64_MTE_ASYMM) && sysfs_streq(buf, "asymm"))
+ tcf = MTE_CTRL_TCF_ASYMM;
+ else
+ return -EINVAL;
+
+ device_lock(dev);
+ per_cpu(mte_tcf_preferred, dev->id) = tcf;
+ device_unlock(dev);
+
+ return count;
+}
+static DEVICE_ATTR_RW(mte_tcf_preferred);
+
+static int register_mte_tcf_preferred_sysctl(void)
+{
+ unsigned int cpu;
+
+ if (!system_supports_mte())
+ return 0;
+
+ for_each_possible_cpu(cpu) {
+ per_cpu(mte_tcf_preferred, cpu) = MTE_CTRL_TCF_ASYNC;
+ device_create_file(get_cpu_device(cpu),
+ &dev_attr_mte_tcf_preferred);
+ }
+
+ return 0;
+}
+subsys_initcall(register_mte_tcf_preferred_sysctl);
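
The resulting knob appears per CPU at /sys/devices/system/cpu/cpu<N>/mte_tcf_preferred; writing "async", "sync" or, on FEAT_MTE_ASYMM hardware, "asymm" changes the preference that mte_update_sctlr_user() consults for tasks permitting more than one mode.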
+
+/*
+ * Return 0 on success, the number of bytes not probed otherwise.
+ */
+size_t mte_probe_user_range(const char __user *uaddr, size_t size)
+{
+ const char __user *end = uaddr + size;
+ char val;
+
+ __raw_get_user(val, uaddr, efault);
+
+ uaddr = PTR_ALIGN(uaddr, MTE_GRANULE_SIZE);
+ while (uaddr < end) {
+ /*
+ * A read is sufficient for MTE; the caller should have probed
+ * for PTE write permission if required.
+ */
+ __raw_get_user(val, uaddr, efault);
+ uaddr += MTE_GRANULE_SIZE;
+ }
+ (void)val;
+
+ return 0;
+
+efault:
+ return end - uaddr;
+}
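
To make the return convention concrete, a short worked example (the MTE granule size is 16 bytes):

/*
 * mte_probe_user_range(p, 64) with p granule-aligned and a fault on the
 * granule at p + 32 returns end - uaddr == 32, the bytes left unprobed.
 * A fully readable range returns 0; a fault on the very first byte
 * returns the full size.
 */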