From c14d08c5adb25f397638be3d8dd7f4738fb38272 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Wed, 26 Apr 2023 17:23:19 +0000 Subject: KVM: arm64: Rename free_removed to free_unlinked Normalize on referring to tables outside of an active paging structure as 'unlinked'. A subsequent change to KVM will add support for building page tables that are not part of an active paging structure. The existing 'removed_table' terminology is quite clunky when applied in this context. Signed-off-by: Ricardo Koller Reviewed-by: Oliver Upton Reviewed-by: Shaoqin Huang Reviewed-by: Gavin Shan Link: https://lore.kernel.org/r/20230426172330.1439644-2-ricarkol@google.com Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/nvhe/mem_protect.c | 6 +++--- arch/arm64/kvm/hyp/pgtable.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index 2e9ec4a2a4a3..d35e75b13ffe 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -91,9 +91,9 @@ static void host_s2_put_page(void *addr) hyp_put_page(&host_s2_pool, addr); } -static void host_s2_free_removed_table(void *addr, u32 level) +static void host_s2_free_unlinked_table(void *addr, u32 level) { - kvm_pgtable_stage2_free_removed(&host_mmu.mm_ops, addr, level); + kvm_pgtable_stage2_free_unlinked(&host_mmu.mm_ops, addr, level); } static int prepare_s2_pool(void *pgt_pool_base) @@ -110,7 +110,7 @@ static int prepare_s2_pool(void *pgt_pool_base) host_mmu.mm_ops = (struct kvm_pgtable_mm_ops) { .zalloc_pages_exact = host_s2_zalloc_pages_exact, .zalloc_page = host_s2_zalloc_page, - .free_removed_table = host_s2_free_removed_table, + .free_unlinked_table = host_s2_free_unlinked_table, .phys_to_virt = hyp_phys_to_virt, .virt_to_phys = hyp_virt_to_phys, .page_count = hyp_page_count, diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index 3d61bd3e591d..a3246d6cddec 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -860,7 +860,7 @@ static int stage2_map_walk_table_pre(const struct kvm_pgtable_visit_ctx *ctx, if (ret) return ret; - mm_ops->free_removed_table(childp, ctx->level); + mm_ops->free_unlinked_table(childp, ctx->level); return 0; } @@ -905,7 +905,7 @@ static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx, * The TABLE_PRE callback runs for table entries on the way down, looking * for table entries which we could conceivably replace with a block entry * for this mapping. If it finds one it replaces the entry and calls - * kvm_pgtable_mm_ops::free_removed_table() to tear down the detached table. + * kvm_pgtable_mm_ops::free_unlinked_table() to tear down the detached table. * * Otherwise, the LEAF callback performs the mapping at the existing leaves * instead. 
@@ -1276,7 +1276,7 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt) pgt->pgd = NULL; } -void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level) +void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level) { kvm_pteref_t ptep = (kvm_pteref_t)pgtable; struct kvm_pgtable_walker walker = { -- cgit From 02f10845f435fbda4aa2385d4c3a9730c4a5c75a Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Wed, 26 Apr 2023 17:23:20 +0000 Subject: KVM: arm64: Add KVM_PGTABLE_WALK flags for skipping CMOs and BBM TLBIs Add two flags to kvm_pgtable_visit_ctx, KVM_PGTABLE_WALK_SKIP_BBM_TLBI and KVM_PGTABLE_WALK_SKIP_CMO, to indicate that the walk should not perform TLB invalidations (TLBIs) in break-before-make (BBM) nor cache maintenance operations (CMO). This will be used by a future commit to create unlinked tables not accessible to the HW page-table walker. Signed-off-by: Ricardo Koller Reviewed-by: Shaoqin Huang Reviewed-by: Gavin Shan Link: https://lore.kernel.org/r/20230426172330.1439644-3-ricarkol@google.com Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/pgtable.c | 37 ++++++++++++++++++++++++++----------- 1 file changed, 26 insertions(+), 11 deletions(-) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index a3246d6cddec..633679ee3c49 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -62,6 +62,16 @@ struct kvm_pgtable_walk_data { u64 end; }; +static bool kvm_pgtable_walk_skip_bbm_tlbi(const struct kvm_pgtable_visit_ctx *ctx) +{ + return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_BBM_TLBI); +} + +static bool kvm_pgtable_walk_skip_cmo(const struct kvm_pgtable_visit_ctx *ctx) +{ + return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_CMO); +} + static bool kvm_phys_is_valid(u64 phys) { return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_EL1_PARANGE_MAX)); @@ -741,14 +751,17 @@ static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx, if (!stage2_try_set_pte(ctx, KVM_INVALID_PTE_LOCKED)) return false; - /* - * Perform the appropriate TLB invalidation based on the evicted pte - * value (if any). - */ - if (kvm_pte_table(ctx->old, ctx->level)) - kvm_call_hyp(__kvm_tlb_flush_vmid, mmu); - else if (kvm_pte_valid(ctx->old)) - kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level); + if (!kvm_pgtable_walk_skip_bbm_tlbi(ctx)) { + /* + * Perform the appropriate TLB invalidation based on the + * evicted pte value (if any). 
+ */ + if (kvm_pte_table(ctx->old, ctx->level)) + kvm_call_hyp(__kvm_tlb_flush_vmid, mmu); + else if (kvm_pte_valid(ctx->old)) + kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, + ctx->addr, ctx->level); + } if (stage2_pte_is_counted(ctx->old)) mm_ops->put_page(ctx->ptep); @@ -832,11 +845,13 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx, return -EAGAIN; /* Perform CMOs before installation of the guest stage-2 PTE */ - if (mm_ops->dcache_clean_inval_poc && stage2_pte_cacheable(pgt, new)) + if (!kvm_pgtable_walk_skip_cmo(ctx) && mm_ops->dcache_clean_inval_poc && + stage2_pte_cacheable(pgt, new)) mm_ops->dcache_clean_inval_poc(kvm_pte_follow(new, mm_ops), - granule); + granule); - if (mm_ops->icache_inval_pou && stage2_pte_executable(new)) + if (!kvm_pgtable_walk_skip_cmo(ctx) && mm_ops->icache_inval_pou && + stage2_pte_executable(new)) mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule); stage2_make_pte(ctx, new); -- cgit From e7c05540c694b2f53a4d25e360c39984d521ccb1 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Wed, 26 Apr 2023 17:23:21 +0000 Subject: KVM: arm64: Add helper for creating unlinked stage2 subtrees Add a stage2 helper, kvm_pgtable_stage2_create_unlinked(), for creating unlinked tables (which is the opposite of kvm_pgtable_stage2_free_unlinked()). Creating an unlinked table is useful for splitting level 1 and 2 entries into subtrees of PAGE_SIZE PTEs. For example, a level 1 entry can be split into PAGE_SIZE PTEs by first creating a fully populated tree, and then use it to replace the level 1 entry in a single step. This will be used in a subsequent commit for eager huge-page splitting (a dirty-logging optimization). Signed-off-by: Ricardo Koller Reviewed-by: Shaoqin Huang Reviewed-by: Gavin Shan Link: https://lore.kernel.org/r/20230426172330.1439644-4-ricarkol@google.com Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/pgtable.c | 53 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index 633679ee3c49..56edffc02bc6 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -1222,6 +1222,59 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size) return kvm_pgtable_walk(pgt, addr, size, &walker); } +kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt, + u64 phys, u32 level, + enum kvm_pgtable_prot prot, + void *mc, bool force_pte) +{ + struct stage2_map_data map_data = { + .phys = phys, + .mmu = pgt->mmu, + .memcache = mc, + .force_pte = force_pte, + }; + struct kvm_pgtable_walker walker = { + .cb = stage2_map_walker, + .flags = KVM_PGTABLE_WALK_LEAF | + KVM_PGTABLE_WALK_SKIP_BBM_TLBI | + KVM_PGTABLE_WALK_SKIP_CMO, + .arg = &map_data, + }; + /* + * The input address (.addr) is irrelevant for walking an + * unlinked table. Construct an ambiguous IA range to map + * kvm_granule_size(level) worth of memory. 
+ */ + struct kvm_pgtable_walk_data data = { + .walker = &walker, + .addr = 0, + .end = kvm_granule_size(level), + }; + struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops; + kvm_pte_t *pgtable; + int ret; + + if (!IS_ALIGNED(phys, kvm_granule_size(level))) + return ERR_PTR(-EINVAL); + + ret = stage2_set_prot_attr(pgt, prot, &map_data.attr); + if (ret) + return ERR_PTR(ret); + + pgtable = mm_ops->zalloc_page(mc); + if (!pgtable) + return ERR_PTR(-ENOMEM); + + ret = __kvm_pgtable_walk(&data, mm_ops, (kvm_pteref_t)pgtable, + level + 1); + if (ret) { + kvm_pgtable_stage2_free_unlinked(mm_ops, pgtable, level); + mm_ops->put_page(pgtable); + return ERR_PTR(ret); + } + + return pgtable; +} int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu, struct kvm_pgtable_mm_ops *mm_ops, -- cgit From 8f5a3eb7513fc4deae511ce91af1c2c23874a8a7 Mon Sep 17 00:00:00 2001 From: Ricardo Koller Date: Wed, 26 Apr 2023 17:23:24 +0000 Subject: KVM: arm64: Add kvm_pgtable_stage2_split() Add a new stage2 function, kvm_pgtable_stage2_split(), for splitting a range of huge pages. This will be used for eager-splitting huge pages into PAGE_SIZE pages. The goal is to avoid having to split huge pages on write-protection faults, and instead use this function to do it ahead of time for large ranges (e.g., all guest memory in 1G chunks at a time). Signed-off-by: Ricardo Koller Reviewed-by: Shaoqin Huang Reviewed-by: Gavin Shan Link: https://lore.kernel.org/r/20230426172330.1439644-7-ricarkol@google.com Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/pgtable.c | 103 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 103 insertions(+) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index 56edffc02bc6..8b03cd6fed12 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -1276,6 +1276,109 @@ kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt, return pgtable; } +/* + * Get the number of page-tables needed to replace a block with a + * fully populated tree up to the PTE entries. Note that @level is + * interpreted as in "level @level entry". + */ +static int stage2_block_get_nr_page_tables(u32 level) +{ + switch (level) { + case 1: + return PTRS_PER_PTE + 1; + case 2: + return 1; + case 3: + return 0; + default: + WARN_ON_ONCE(level < KVM_PGTABLE_MIN_BLOCK_LEVEL || + level >= KVM_PGTABLE_MAX_LEVELS); + return -EINVAL; + }; +} + +static int stage2_split_walker(const struct kvm_pgtable_visit_ctx *ctx, + enum kvm_pgtable_walk_flags visit) +{ + struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops; + struct kvm_mmu_memory_cache *mc = ctx->arg; + struct kvm_s2_mmu *mmu; + kvm_pte_t pte = ctx->old, new, *childp; + enum kvm_pgtable_prot prot; + u32 level = ctx->level; + bool force_pte; + int nr_pages; + u64 phys; + + /* No huge-pages exist at the last level */ + if (level == KVM_PGTABLE_MAX_LEVELS - 1) + return 0; + + /* We only split valid block mappings */ + if (!kvm_pte_valid(pte)) + return 0; + + nr_pages = stage2_block_get_nr_page_tables(level); + if (nr_pages < 0) + return nr_pages; + + if (mc->nobjs >= nr_pages) { + /* Build a tree mapped down to the PTE granularity. */ + force_pte = true; + } else { + /* + * Don't force PTEs, so create_unlinked() below does + * not populate the tree up to the PTE level. The + * consequence is that the call will require a single + * page of level 2 entries at level 1, or a single + * page of PTEs at level 2. If we are at level 1, the + * PTEs will be created recursively. 
+ */ + force_pte = false; + nr_pages = 1; + } + + if (mc->nobjs < nr_pages) + return -ENOMEM; + + mmu = container_of(mc, struct kvm_s2_mmu, split_page_cache); + phys = kvm_pte_to_phys(pte); + prot = kvm_pgtable_stage2_pte_prot(pte); + + childp = kvm_pgtable_stage2_create_unlinked(mmu->pgt, phys, + level, prot, mc, force_pte); + if (IS_ERR(childp)) + return PTR_ERR(childp); + + if (!stage2_try_break_pte(ctx, mmu)) { + kvm_pgtable_stage2_free_unlinked(mm_ops, childp, level); + mm_ops->put_page(childp); + return -EAGAIN; + } + + /* + * Note, the contents of the page table are guaranteed to be made + * visible before the new PTE is assigned because stage2_make_pte() + * writes the PTE using smp_store_release(). + */ + new = kvm_init_table_pte(childp, mm_ops); + stage2_make_pte(ctx, new); + dsb(ishst); + return 0; +} + +int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size, + struct kvm_mmu_memory_cache *mc) +{ + struct kvm_pgtable_walker walker = { + .cb = stage2_split_walker, + .flags = KVM_PGTABLE_WALK_LEAF, + .arg = mc, + }; + + return kvm_pgtable_walk(pgt, addr, size, &walker); +} + int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu, struct kvm_pgtable_mm_ops *mm_ops, enum kvm_pgtable_stage2_flags flags, -- cgit From a12ab1378a88b38f3af8b1a899b7d8526324ba6f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 26 Apr 2023 17:23:30 +0000 Subject: KVM: arm64: Use local TLBI on permission relaxation Broadcast TLB invalidations (TLBIs) targeting the Inner Shareable Domain are usually less performant than their non-shareable variant. In particular, we observed some implementations that take milliseconds to complete parallel broadcasted TLBIs. It's safe to use non-shareable TLBIs when relaxing permissions on a PTE in the KVM case. According to the ARM ARM (0487I.a) section D8.13.1 "Using break-before-make when updating translation table entries", permission relaxation does not need break-before-make. Specifically, R_WHZWS states that these are the only changes that require a break-before-make sequence: changes of memory type (Shareability or Cacheability), address changes, or changing the block size.
Signed-off-by: Marc Zyngier Signed-off-by: Ricardo Koller Reviewed-by: Gavin Shan Link: https://lore.kernel.org/r/20230426172330.1439644-13-ricarkol@google.com Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/nvhe/hyp-main.c | 10 ++++++++ arch/arm64/kvm/hyp/nvhe/tlb.c | 52 ++++++++++++++++++++++++++++++++++++++ arch/arm64/kvm/hyp/pgtable.c | 2 +- arch/arm64/kvm/hyp/vhe/tlb.c | 32 +++++++++++++++++++++++ 4 files changed, 95 insertions(+), 1 deletion(-) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index 728e01d4536b..c6bf1e49ca93 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -125,6 +125,15 @@ static void handle___kvm_tlb_flush_vmid_ipa(struct kvm_cpu_context *host_ctxt) __kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level); } +static void handle___kvm_tlb_flush_vmid_ipa_nsh(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1); + DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2); + DECLARE_REG(int, level, host_ctxt, 3); + + __kvm_tlb_flush_vmid_ipa_nsh(kern_hyp_va(mmu), ipa, level); +} + static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt) { DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1); @@ -315,6 +324,7 @@ static const hcall_t host_hcall[] = { HANDLE_FUNC(__kvm_vcpu_run), HANDLE_FUNC(__kvm_flush_vm_context), HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa), + HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa_nsh), HANDLE_FUNC(__kvm_tlb_flush_vmid), HANDLE_FUNC(__kvm_flush_cpu_context), HANDLE_FUNC(__kvm_timer_set_cntvoff), diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c index 978179133f4b..b9991bbd8e3f 100644 --- a/arch/arm64/kvm/hyp/nvhe/tlb.c +++ b/arch/arm64/kvm/hyp/nvhe/tlb.c @@ -130,6 +130,58 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, __tlb_switch_to_host(&cxt); } +void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu, + phys_addr_t ipa, int level) +{ + struct tlb_inv_context cxt; + + /* Switch to requested VMID */ + __tlb_switch_to_guest(mmu, &cxt, true); + + /* + * We could do so much better if we had the VA as well. + * Instead, we invalidate Stage-2 for this IPA, and the + * whole of Stage-1. Weep... + */ + ipa >>= 12; + __tlbi_level(ipas2e1, ipa, level); + + /* + * We have to ensure completion of the invalidation at Stage-2, + * since a table walk on another CPU could refill a TLB with a + * complete (S1 + S2) walk based on the old Stage-2 mapping if + * the Stage-1 invalidation happened first. + */ + dsb(nsh); + __tlbi(vmalle1); + dsb(nsh); + isb(); + + /* + * If the host is running at EL1 and we have a VPIPT I-cache, + * then we must perform I-cache maintenance at EL2 in order for + * it to have an effect on the guest. Since the guest cannot hit + * I-cache lines allocated with a different VMID, we don't need + * to worry about junk out of guest reset (we nuke the I-cache on + * VMID rollover), but we do need to be careful when remapping + * executable pages for the same guest. This can happen when KSM + * takes a CoW fault on an executable page, copies the page into + * a page that was previously mapped in the guest and then needs + * to invalidate the guest view of the I-cache for that page + * from EL1. To solve this, we invalidate the entire I-cache when + * unmapping a page from a guest if we have a VPIPT I-cache but + * the host is running at EL1. As above, we could do better if + * we had the VA. 
+ * + * The moral of this story is: if you have a VPIPT I-cache, then + * you should be running with VHE enabled. + */ + if (icache_is_vpipt()) + icache_inval_all_pou(); + + __tlb_switch_to_host(&cxt); +} + void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu) { struct tlb_inv_context cxt; diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index 8b03cd6fed12..2dd7e4a2da73 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -1189,7 +1189,7 @@ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, KVM_PGTABLE_WALK_HANDLE_FAULT | KVM_PGTABLE_WALK_SHARED); if (!ret) - kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, pgt->mmu, addr, level); + kvm_call_hyp(__kvm_tlb_flush_vmid_ipa_nsh, pgt->mmu, addr, level); return ret; } diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c index 24cef9b87f9e..e69da550cdc5 100644 --- a/arch/arm64/kvm/hyp/vhe/tlb.c +++ b/arch/arm64/kvm/hyp/vhe/tlb.c @@ -111,6 +111,38 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, __tlb_switch_to_host(&cxt); } +void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu, + phys_addr_t ipa, int level) +{ + struct tlb_inv_context cxt; + + dsb(nshst); + + /* Switch to requested VMID */ + __tlb_switch_to_guest(mmu, &cxt); + + /* + * We could do so much better if we had the VA as well. + * Instead, we invalidate Stage-2 for this IPA, and the + * whole of Stage-1. Weep... + */ + ipa >>= 12; + __tlbi_level(ipas2e1, ipa, level); + + /* + * We have to ensure completion of the invalidation at Stage-2, + * since a table walk on another CPU could refill a TLB with a + * complete (S1 + S2) walk based on the old Stage-2 mapping if + * the Stage-1 invalidation happened first. + */ + dsb(nsh); + __tlbi(vmalle1); + dsb(nsh); + isb(); + + __tlb_switch_to_host(&cxt); +} + void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu) { struct tlb_inv_context cxt; -- cgit From b53d4a27234955810362f91be679afc12e4c3237 Mon Sep 17 00:00:00 2001 From: Mostafa Saleh Date: Tue, 30 May 2023 15:08:45 +0000 Subject: KVM: arm64: Use BTI for nvhe MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CONFIG_ARM64_BTI_KERNEL compiles the kernel to support ARMv8.5-BTI. However, the nvhe code doesn't make use of it as it doesn't map any pages with Guarded Page(GP) bit. kvm pgtable code is modified to map executable pages with GP bit if BTI is enabled for the kernel. At hyp init, SCTLR_EL2.BT is set to 1 to match EL1 configuration (SCTLR_EL1.BT1) set in bti_enable(). One difference between kernel and nvhe code, is that the kernel maps .text with GP while nvhe maps all the executable pages, this makes nvhe code need to deal with special initialization code coming from other executable sections (.idmap.text). For this we need to add bti instruction at the beginning of __kvm_handle_stub_hvc as it can be called by __host_hvc through branch instruction(br) and unlike SYM_FUNC_START, SYM_CODE_START doesn’t add bti instruction at the beginning, and it can’t be modified to add it as it is used with vector tables. 
Another solution which is more intrusive is to convert __kvm_handle_stub_hvc to a function and inject “bti jc” instead of “bti c” in SYM_FUNC_START Signed-off-by: Mostafa Saleh Link: https://lore.kernel.org/r/20230530150845.2856828-1-smostafa@google.com Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/nvhe/hyp-init.S | 12 ++++++++++++ arch/arm64/kvm/hyp/pgtable.c | 7 ++++++- 2 files changed, 18 insertions(+), 1 deletion(-) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S index a6d67c2bb5ae..f3ee66aa2f9d 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S @@ -128,6 +128,13 @@ alternative_if ARM64_HAS_ADDRESS_AUTH SCTLR_ELx_ENDA | SCTLR_ELx_ENDB) orr x0, x0, x1 alternative_else_nop_endif + +#ifdef CONFIG_ARM64_BTI_KERNEL +alternative_if ARM64_BTI + orr x0, x0, #SCTLR_EL2_BT +alternative_else_nop_endif +#endif /* CONFIG_ARM64_BTI_KERNEL */ + msr sctlr_el2, x0 isb @@ -196,6 +203,11 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu) SYM_CODE_END(__kvm_hyp_init_cpu) SYM_CODE_START(__kvm_handle_stub_hvc) + /* + * __kvm_handle_stub_hvc called from __host_hvc through branch instruction(br) so + * we need bti j at beginning. + */ + bti j cmp x0, #HVC_SOFT_RESTART b.ne 1f diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index 3d61bd3e591d..a79a45fc4047 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -34,7 +34,7 @@ #define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS 3 #define KVM_PTE_LEAF_ATTR_LO_S2_AF BIT(10) -#define KVM_PTE_LEAF_ATTR_HI GENMASK(63, 51) +#define KVM_PTE_LEAF_ATTR_HI GENMASK(63, 50) #define KVM_PTE_LEAF_ATTR_HI_SW GENMASK(58, 55) @@ -42,6 +42,8 @@ #define KVM_PTE_LEAF_ATTR_HI_S2_XN BIT(54) +#define KVM_PTE_LEAF_ATTR_HI_S1_GP BIT(50) + #define KVM_PTE_LEAF_ATTR_S2_PERMS (KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \ KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \ KVM_PTE_LEAF_ATTR_HI_S2_XN) @@ -371,6 +373,9 @@ static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep) if (device) return -EINVAL; + + if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti()) + attr |= KVM_PTE_LEAF_ATTR_HI_S1_GP; } else { attr |= KVM_PTE_LEAF_ATTR_HI_S1_XN; } -- cgit From 048be5fea43deef7e96c0de5ba05515c5cbe28cb Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 23 May 2023 11:18:18 +0100 Subject: KVM: arm64: Block unsafe FF-A calls from the host When KVM is initialised in protected mode, we must take care to filter certain FFA calls from the host kernel so that the integrity of guest and hypervisor memory is maintained and is not made available to the secure world. As a first step, intercept and block all memory-related FF-A SMC calls from the host to EL3 and don't advertise any FF-A features. This puts the framework in place for handling them properly. 
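For a concrete picture of the interception, consider a hypothetical host-side probe (not taken from this series; it assumes <linux/arm-smccc.h> and <linux/arm_ffa.h>, and the helper name is made up): with the proxy installed and KVM running in protected mode, a filtered call issued by the host is answered at EL2 with an FF-A error rather than reaching the SPMD.

#include <linux/arm-smccc.h>
#include <linux/arm_ffa.h>

/* Sketch only: issue one of the blocked memory-management calls and check
 * that the proxy reports FFA_ERROR / FFA_RET_NOT_SUPPORTED back to us.
 */
static bool ffa_mem_donate_is_blocked(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_FN64_MEM_DONATE, 0, 0, 0, 0, 0, 0, 0, &res);

	return res.a0 == FFA_ERROR && (int)res.a2 == FFA_RET_NOT_SUPPORTED;
}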
Co-developed-by: Andrew Walbran Signed-off-by: Andrew Walbran Signed-off-by: Will Deacon Link: https://lore.kernel.org/r/20230523101828.7328-2-will@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/include/nvhe/ffa.h | 16 +++++ arch/arm64/kvm/hyp/nvhe/Makefile | 2 +- arch/arm64/kvm/hyp/nvhe/ffa.c | 119 ++++++++++++++++++++++++++++++++++ arch/arm64/kvm/hyp/nvhe/hyp-main.c | 3 + 4 files changed, 139 insertions(+), 1 deletion(-) create mode 100644 arch/arm64/kvm/hyp/include/nvhe/ffa.h create mode 100644 arch/arm64/kvm/hyp/nvhe/ffa.c (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/include/nvhe/ffa.h b/arch/arm64/kvm/hyp/include/nvhe/ffa.h new file mode 100644 index 000000000000..fc09ec671e24 --- /dev/null +++ b/arch/arm64/kvm/hyp/include/nvhe/ffa.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2022 - Google LLC + * Author: Andrew Walbran + */ +#ifndef __KVM_HYP_FFA_H +#define __KVM_HYP_FFA_H + +#include + +#define FFA_MIN_FUNC_NUM 0x60 +#define FFA_MAX_FUNC_NUM 0x7F + +bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt); + +#endif /* __KVM_HYP_FFA_H */ diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile index 530347cdebe3..9ddc025e4b86 100644 --- a/arch/arm64/kvm/hyp/nvhe/Makefile +++ b/arch/arm64/kvm/hyp/nvhe/Makefile @@ -22,7 +22,7 @@ lib-objs := $(addprefix ../../../lib/, $(lib-objs)) hyp-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \ hyp-main.o hyp-smp.o psci-relay.o early_alloc.o page_alloc.o \ - cache.o setup.o mm.o mem_protect.o sys_regs.o pkvm.o stacktrace.o + cache.o setup.o mm.o mem_protect.o sys_regs.o pkvm.o stacktrace.o ffa.o hyp-obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \ ../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o hyp-obj-$(CONFIG_DEBUG_LIST) += list_debug.o diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c new file mode 100644 index 000000000000..302806928a64 --- /dev/null +++ b/arch/arm64/kvm/hyp/nvhe/ffa.c @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * FF-A v1.0 proxy to filter out invalid memory-sharing SMC calls issued by + * the host. FF-A is a slightly more palatable abbreviation of "Arm Firmware + * Framework for Arm A-profile", which is specified by Arm in document + * number DEN0077. + * + * Copyright (C) 2022 - Google LLC + * Author: Andrew Walbran + * + * This driver hooks into the SMC trapping logic for the host and intercepts + * all calls falling within the FF-A range. Each call is either: + * + * - Forwarded on unmodified to the SPMD at EL3 + * - Rejected as "unsupported" + * - Accompanied by a host stage-2 page-table check/update and reissued + * + * Consequently, any attempts by the host to make guest memory pages + * accessible to the secure world using FF-A will be detected either here + * (in the case that the memory is already owned by the guest) or during + * donation to the guest (in the case that the memory was previously shared + * with the secure world). + * + * To allow the rolling-back of page-table updates and FF-A calls in the + * event of failure, operations involving the RXTX buffers are locked for + * the duration and are therefore serialised. 
+ */ + +#include +#include +#include +#include + +static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno) +{ + *res = (struct arm_smccc_res) { + .a0 = FFA_ERROR, + .a2 = ffa_errno, + }; +} + +static void ffa_set_retval(struct kvm_cpu_context *ctxt, + struct arm_smccc_res *res) +{ + cpu_reg(ctxt, 0) = res->a0; + cpu_reg(ctxt, 1) = res->a1; + cpu_reg(ctxt, 2) = res->a2; + cpu_reg(ctxt, 3) = res->a3; +} + +static bool is_ffa_call(u64 func_id) +{ + return ARM_SMCCC_IS_FAST_CALL(func_id) && + ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD && + ARM_SMCCC_FUNC_NUM(func_id) >= FFA_MIN_FUNC_NUM && + ARM_SMCCC_FUNC_NUM(func_id) <= FFA_MAX_FUNC_NUM; +} + +/* + * Is a given FFA function supported, either by forwarding on directly + * or by handling at EL2? + */ +static bool ffa_call_supported(u64 func_id) +{ + switch (func_id) { + /* Unsupported memory management calls */ + case FFA_FN64_MEM_RETRIEVE_REQ: + case FFA_MEM_RETRIEVE_RESP: + case FFA_MEM_RELINQUISH: + case FFA_MEM_OP_PAUSE: + case FFA_MEM_OP_RESUME: + case FFA_MEM_FRAG_RX: + case FFA_FN64_MEM_DONATE: + /* Indirect message passing via RX/TX buffers */ + case FFA_MSG_SEND: + case FFA_MSG_POLL: + case FFA_MSG_WAIT: + /* 32-bit variants of 64-bit calls */ + case FFA_MSG_SEND_DIRECT_REQ: + case FFA_MSG_SEND_DIRECT_RESP: + case FFA_RXTX_MAP: + case FFA_MEM_DONATE: + case FFA_MEM_RETRIEVE_REQ: + /* Don't advertise any features just yet */ + case FFA_FEATURES: + return false; + } + + return true; +} + +bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(u64, func_id, host_ctxt, 0); + struct arm_smccc_res res; + + /* + * There's no way we can tell what a non-standard SMC call might + * be up to. Ideally, we would terminate these here and return + * an error to the host, but sadly devices make use of custom + * firmware calls for things like power management, debugging, + * RNG access and crash reporting. + * + * Given that the architecture requires us to trust EL3 anyway, + * we forward unrecognised calls on under the assumption that + * the firmware doesn't expose a mechanism to access arbitrary + * non-secure memory. Short of a per-device table of SMCs, this + * is the best we can do. + */ + if (!is_ffa_call(func_id)) + return false; + + if (ffa_call_supported(func_id)) + return false; /* Pass through */ + + ffa_to_smccc_error(&res, FFA_RET_NOT_SUPPORTED); + ffa_set_retval(host_ctxt, &res); + return true; +} diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index 728e01d4536b..223611e43279 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -13,6 +13,7 @@ #include #include +#include #include #include #include @@ -373,6 +374,8 @@ static void handle_host_smc(struct kvm_cpu_context *host_ctxt) bool handled; handled = kvm_host_psci_handler(host_ctxt); + if (!handled) + handled = kvm_host_ffa_handler(host_ctxt); if (!handled) default_host_smc_handler(host_ctxt); -- cgit From 12bdce4f41197a1a97ba1c711f77d557841e13d9 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 23 May 2023 11:18:19 +0100 Subject: KVM: arm64: Probe FF-A version and host/hyp partition ID during init Probe FF-A during pKVM initialisation so that we can detect any inconsistencies in the version or partition ID early on. 
Signed-off-by: Will Deacon Link: https://lore.kernel.org/r/20230523101828.7328-3-will@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/include/nvhe/ffa.h | 1 + arch/arm64/kvm/hyp/nvhe/ffa.c | 30 ++++++++++++++++++++++++++++++ arch/arm64/kvm/hyp/nvhe/setup.c | 5 +++++ 3 files changed, 36 insertions(+) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/include/nvhe/ffa.h b/arch/arm64/kvm/hyp/include/nvhe/ffa.h index fc09ec671e24..5c9b92430ff3 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/ffa.h +++ b/arch/arm64/kvm/hyp/include/nvhe/ffa.h @@ -11,6 +11,7 @@ #define FFA_MIN_FUNC_NUM 0x60 #define FFA_MAX_FUNC_NUM 0x7F +int hyp_ffa_init(void); bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt); #endif /* __KVM_HYP_FFA_H */ diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c index 302806928a64..abdcaf98d9b0 100644 --- a/arch/arm64/kvm/hyp/nvhe/ffa.c +++ b/arch/arm64/kvm/hyp/nvhe/ffa.c @@ -31,6 +31,12 @@ #include #include +/* + * "ID value 0 must be returned at the Non-secure physical FF-A instance" + * We share this ID with the host. + */ +#define HOST_FFA_ID 0 + static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno) { *res = (struct arm_smccc_res) { @@ -117,3 +123,27 @@ bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt) ffa_set_retval(host_ctxt, &res); return true; } + +int hyp_ffa_init(void) +{ + struct arm_smccc_res res; + + if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_2) + return 0; + + arm_smccc_1_1_smc(FFA_VERSION, FFA_VERSION_1_0, 0, 0, 0, 0, 0, 0, &res); + if (res.a0 == FFA_RET_NOT_SUPPORTED) + return 0; + + if (res.a0 != FFA_VERSION_1_0) + return -EOPNOTSUPP; + + arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res); + if (res.a0 != FFA_SUCCESS) + return -EOPNOTSUPP; + + if (res.a2 != HOST_FFA_ID) + return -EINVAL; + + return 0; +} diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c index 110f04627785..c4ca174a0592 100644 --- a/arch/arm64/kvm/hyp/nvhe/setup.c +++ b/arch/arm64/kvm/hyp/nvhe/setup.c @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -314,6 +315,10 @@ void __noreturn __pkvm_init_finalise(void) if (ret) goto out; + ret = hyp_ffa_init(); + if (ret) + goto out; + pkvm_hyp_vm_table_init(vm_table_base); out: /* -- cgit From bc3888a0f4e979ecf9dd8c33a84b8da8cc130790 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 23 May 2023 11:18:20 +0100 Subject: KVM: arm64: Allocate pages for hypervisor FF-A mailboxes The FF-A proxy code needs to allocate its own buffer pair for communication with EL3 and for forwarding calls from the host at EL1. Reserve a couple of pages for this purpose and use them to initialise the hypervisor's FF-A buffer structure. 
Co-developed-by: Andrew Walbran Signed-off-by: Andrew Walbran Signed-off-by: Will Deacon Link: https://lore.kernel.org/r/20230523101828.7328-4-will@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/include/nvhe/ffa.h | 2 +- arch/arm64/kvm/hyp/nvhe/ffa.c | 24 +++++++++++++++++++++++- arch/arm64/kvm/hyp/nvhe/setup.c | 8 +++++++- 3 files changed, 31 insertions(+), 3 deletions(-) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/include/nvhe/ffa.h b/arch/arm64/kvm/hyp/include/nvhe/ffa.h index 5c9b92430ff3..1becb10ecd80 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/ffa.h +++ b/arch/arm64/kvm/hyp/include/nvhe/ffa.h @@ -11,7 +11,7 @@ #define FFA_MIN_FUNC_NUM 0x60 #define FFA_MAX_FUNC_NUM 0x7F -int hyp_ffa_init(void); +int hyp_ffa_init(void *pages); bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt); #endif /* __KVM_HYP_FFA_H */ diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c index abdcaf98d9b0..c85e5d46a90d 100644 --- a/arch/arm64/kvm/hyp/nvhe/ffa.c +++ b/arch/arm64/kvm/hyp/nvhe/ffa.c @@ -28,8 +28,11 @@ #include #include +#include + #include #include +#include /* * "ID value 0 must be returned at the Non-secure physical FF-A instance" @@ -37,6 +40,19 @@ */ #define HOST_FFA_ID 0 +struct kvm_ffa_buffers { + hyp_spinlock_t lock; + void *tx; + void *rx; +}; + +/* + * Note that we don't currently lock these buffers explicitly, instead + * relying on the locking of the host FFA buffers as we only have one + * client. + */ +static struct kvm_ffa_buffers hyp_buffers; + static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno) { *res = (struct arm_smccc_res) { @@ -124,7 +140,7 @@ bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt) return true; } -int hyp_ffa_init(void) +int hyp_ffa_init(void *pages) { struct arm_smccc_res res; @@ -145,5 +161,11 @@ int hyp_ffa_init(void) if (res.a2 != HOST_FFA_ID) return -EINVAL; + hyp_buffers = (struct kvm_ffa_buffers) { + .lock = __HYP_SPIN_LOCK_UNLOCKED, + .tx = pages, + .rx = pages + (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE), + }; + return 0; } diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c index c4ca174a0592..bb98630dfeaf 100644 --- a/arch/arm64/kvm/hyp/nvhe/setup.c +++ b/arch/arm64/kvm/hyp/nvhe/setup.c @@ -29,6 +29,7 @@ static void *vmemmap_base; static void *vm_table_base; static void *hyp_pgt_base; static void *host_s2_pgt_base; +static void *ffa_proxy_pages; static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops; static struct hyp_pool hpool; @@ -58,6 +59,11 @@ static int divide_memory_pool(void *virt, unsigned long size) if (!host_s2_pgt_base) return -ENOMEM; + nr_pages = hyp_ffa_proxy_pages(); + ffa_proxy_pages = hyp_early_alloc_contig(nr_pages); + if (!ffa_proxy_pages) + return -ENOMEM; + return 0; } @@ -315,7 +321,7 @@ void __noreturn __pkvm_init_finalise(void) if (ret) goto out; - ret = hyp_ffa_init(); + ret = hyp_ffa_init(ffa_proxy_pages); if (ret) goto out; -- cgit From 9d0c6a9af9e38efa675e565bd181794deca1188a Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 23 May 2023 11:18:21 +0100 Subject: KVM: arm64: Handle FFA_RXTX_MAP and FFA_RXTX_UNMAP calls from the host Handle FFA_RXTX_MAP and FFA_RXTX_UNMAP calls from the host by sharing the host's mailbox memory with the hypervisor and establishing a separate pair of mailboxes between the hypervisor and the SPMD at EL3. 
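To illustrate the register convention that the proxy decodes with DECLARE_REG() in the hunks below, a hypothetical host-side mapping call might look like the sketch that follows (it assumes <linux/arm-smccc.h>, <linux/arm_ffa.h> and <linux/errno.h>, page-aligned physically contiguous buffers, a single-page mailbox, and a made-up helper name): the TX and RX buffer physical addresses travel in x1/x2 and the buffer size, in FFA_PAGE_SIZE units, in x3.

#include <linux/arm-smccc.h>
#include <linux/arm_ffa.h>
#include <linux/errno.h>

/* Sketch only: register a single-page TX/RX mailbox pair with the FF-A
 * layer, which in protected mode is the EL2 proxy added here.
 */
static int ffa_host_map_mailboxes(phys_addr_t tx_pa, phys_addr_t rx_pa)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_FN64_RXTX_MAP, tx_pa, rx_pa,
			  PAGE_SIZE / FFA_PAGE_SIZE, 0, 0, 0, 0, &res);

	return res.a0 == FFA_SUCCESS ? 0 : -EINVAL;
}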
Co-developed-by: Andrew Walbran Signed-off-by: Andrew Walbran Signed-off-by: Will Deacon Link: https://lore.kernel.org/r/20230523101828.7328-5-will@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/nvhe/ffa.c | 188 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 188 insertions(+) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c index c85e5d46a90d..0c020cd9f722 100644 --- a/arch/arm64/kvm/hyp/nvhe/ffa.c +++ b/arch/arm64/kvm/hyp/nvhe/ffa.c @@ -31,6 +31,8 @@ #include #include +#include +#include #include #include @@ -52,6 +54,7 @@ struct kvm_ffa_buffers { * client. */ static struct kvm_ffa_buffers hyp_buffers; +static struct kvm_ffa_buffers host_buffers; static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno) { @@ -61,6 +64,15 @@ static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno) }; } +static void ffa_to_smccc_res(struct arm_smccc_res *res, int ret) +{ + if (ret == FFA_RET_SUCCESS) { + *res = (struct arm_smccc_res) { .a0 = FFA_SUCCESS }; + } else { + ffa_to_smccc_error(res, ret); + } +} + static void ffa_set_retval(struct kvm_cpu_context *ctxt, struct arm_smccc_res *res) { @@ -78,6 +90,144 @@ static bool is_ffa_call(u64 func_id) ARM_SMCCC_FUNC_NUM(func_id) <= FFA_MAX_FUNC_NUM; } +static int ffa_map_hyp_buffers(u64 ffa_page_count) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(FFA_FN64_RXTX_MAP, + hyp_virt_to_phys(hyp_buffers.tx), + hyp_virt_to_phys(hyp_buffers.rx), + ffa_page_count, + 0, 0, 0, 0, + &res); + + return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2; +} + +static int ffa_unmap_hyp_buffers(void) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(FFA_RXTX_UNMAP, + HOST_FFA_ID, + 0, 0, 0, 0, 0, 0, + &res); + + return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2; +} + +static void do_ffa_rxtx_map(struct arm_smccc_res *res, + struct kvm_cpu_context *ctxt) +{ + DECLARE_REG(phys_addr_t, tx, ctxt, 1); + DECLARE_REG(phys_addr_t, rx, ctxt, 2); + DECLARE_REG(u32, npages, ctxt, 3); + int ret = 0; + void *rx_virt, *tx_virt; + + if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE) { + ret = FFA_RET_INVALID_PARAMETERS; + goto out; + } + + if (!PAGE_ALIGNED(tx) || !PAGE_ALIGNED(rx)) { + ret = FFA_RET_INVALID_PARAMETERS; + goto out; + } + + hyp_spin_lock(&host_buffers.lock); + if (host_buffers.tx) { + ret = FFA_RET_DENIED; + goto out_unlock; + } + + /* + * Map our hypervisor buffers into the SPMD before mapping and + * pinning the host buffers in our own address space. 
+ */ + ret = ffa_map_hyp_buffers(npages); + if (ret) + goto out_unlock; + + ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(tx)); + if (ret) { + ret = FFA_RET_INVALID_PARAMETERS; + goto err_unmap; + } + + ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(rx)); + if (ret) { + ret = FFA_RET_INVALID_PARAMETERS; + goto err_unshare_tx; + } + + tx_virt = hyp_phys_to_virt(tx); + ret = hyp_pin_shared_mem(tx_virt, tx_virt + 1); + if (ret) { + ret = FFA_RET_INVALID_PARAMETERS; + goto err_unshare_rx; + } + + rx_virt = hyp_phys_to_virt(rx); + ret = hyp_pin_shared_mem(rx_virt, rx_virt + 1); + if (ret) { + ret = FFA_RET_INVALID_PARAMETERS; + goto err_unpin_tx; + } + + host_buffers.tx = tx_virt; + host_buffers.rx = rx_virt; + +out_unlock: + hyp_spin_unlock(&host_buffers.lock); +out: + ffa_to_smccc_res(res, ret); + return; + +err_unpin_tx: + hyp_unpin_shared_mem(tx_virt, tx_virt + 1); +err_unshare_rx: + __pkvm_host_unshare_hyp(hyp_phys_to_pfn(rx)); +err_unshare_tx: + __pkvm_host_unshare_hyp(hyp_phys_to_pfn(tx)); +err_unmap: + ffa_unmap_hyp_buffers(); + goto out_unlock; +} + +static void do_ffa_rxtx_unmap(struct arm_smccc_res *res, + struct kvm_cpu_context *ctxt) +{ + DECLARE_REG(u32, id, ctxt, 1); + int ret = 0; + + if (id != HOST_FFA_ID) { + ret = FFA_RET_INVALID_PARAMETERS; + goto out; + } + + hyp_spin_lock(&host_buffers.lock); + if (!host_buffers.tx) { + ret = FFA_RET_INVALID_PARAMETERS; + goto out_unlock; + } + + hyp_unpin_shared_mem(host_buffers.tx, host_buffers.tx + 1); + WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.tx))); + host_buffers.tx = NULL; + + hyp_unpin_shared_mem(host_buffers.rx, host_buffers.rx + 1); + WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.rx))); + host_buffers.rx = NULL; + + ffa_unmap_hyp_buffers(); + +out_unlock: + hyp_spin_unlock(&host_buffers.lock); +out: + ffa_to_smccc_res(res, ret); +} + /* * Is a given FFA function supported, either by forwarding on directly * or by handling at EL2? 
@@ -132,10 +282,21 @@ bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt) if (!is_ffa_call(func_id)) return false; + switch (func_id) { + /* Memory management */ + case FFA_FN64_RXTX_MAP: + do_ffa_rxtx_map(&res, host_ctxt); + goto out_handled; + case FFA_RXTX_UNMAP: + do_ffa_rxtx_unmap(&res, host_ctxt); + goto out_handled; + } + if (ffa_call_supported(func_id)) return false; /* Pass through */ ffa_to_smccc_error(&res, FFA_RET_NOT_SUPPORTED); +out_handled: ffa_set_retval(host_ctxt, &res); return true; } @@ -143,6 +304,7 @@ bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt) int hyp_ffa_init(void *pages) { struct arm_smccc_res res; + size_t min_rxtx_sz; if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_2) return 0; @@ -161,11 +323,37 @@ int hyp_ffa_init(void *pages) if (res.a2 != HOST_FFA_ID) return -EINVAL; + arm_smccc_1_1_smc(FFA_FEATURES, FFA_FN64_RXTX_MAP, + 0, 0, 0, 0, 0, 0, &res); + if (res.a0 != FFA_SUCCESS) + return -EOPNOTSUPP; + + switch (res.a2) { + case FFA_FEAT_RXTX_MIN_SZ_4K: + min_rxtx_sz = SZ_4K; + break; + case FFA_FEAT_RXTX_MIN_SZ_16K: + min_rxtx_sz = SZ_16K; + break; + case FFA_FEAT_RXTX_MIN_SZ_64K: + min_rxtx_sz = SZ_64K; + break; + default: + return -EINVAL; + } + + if (min_rxtx_sz > PAGE_SIZE) + return -EOPNOTSUPP; + hyp_buffers = (struct kvm_ffa_buffers) { .lock = __HYP_SPIN_LOCK_UNLOCKED, .tx = pages, .rx = pages + (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE), }; + host_buffers = (struct kvm_ffa_buffers) { + .lock = __HYP_SPIN_LOCK_UNLOCKED, + }; + return 0; } -- cgit From f9112eade788439d721ca3032369fb4bf4c7e222 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 23 May 2023 11:18:22 +0100 Subject: KVM: arm64: Add FF-A helpers to share/unshare memory with secure world Extend pKVM's memory protection code so that we can update the host's stage-2 page-table to track pages shared with secure world by the host using FF-A and prevent those pages from being mapped into a guest. 
Co-developed-by: Andrew Walbran Signed-off-by: Andrew Walbran Signed-off-by: Will Deacon Link: https://lore.kernel.org/r/20230523101828.7328-6-will@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/include/nvhe/mem_protect.h | 3 ++ arch/arm64/kvm/hyp/nvhe/mem_protect.c | 68 +++++++++++++++++++++++++++ 2 files changed, 71 insertions(+) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h index b7bdbe63deed..0972faccc2af 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h +++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h @@ -57,6 +57,7 @@ extern struct host_mmu host_mmu; enum pkvm_component_id { PKVM_ID_HOST, PKVM_ID_HYP, + PKVM_ID_FFA, }; extern unsigned long hyp_nr_cpus; @@ -66,6 +67,8 @@ int __pkvm_host_share_hyp(u64 pfn); int __pkvm_host_unshare_hyp(u64 pfn); int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages); int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages); +int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages); +int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages); bool addr_is_memory(phys_addr_t phys); int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot); diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index 2e9ec4a2a4a3..e327e94d0e40 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -842,6 +842,13 @@ static int check_share(struct pkvm_mem_share *share) case PKVM_ID_HYP: ret = hyp_ack_share(completer_addr, tx, share->completer_prot); break; + case PKVM_ID_FFA: + /* + * We only check the host; the secure side will check the other + * end when we forward the FFA call. + */ + ret = 0; + break; default: ret = -EINVAL; } @@ -870,6 +877,13 @@ static int __do_share(struct pkvm_mem_share *share) case PKVM_ID_HYP: ret = hyp_complete_share(completer_addr, tx, share->completer_prot); break; + case PKVM_ID_FFA: + /* + * We're not responsible for any secure page-tables, so there's + * nothing to do here. 
+ */ + ret = 0; + break; default: ret = -EINVAL; } @@ -918,6 +932,10 @@ static int check_unshare(struct pkvm_mem_share *share) case PKVM_ID_HYP: ret = hyp_ack_unshare(completer_addr, tx); break; + case PKVM_ID_FFA: + /* See check_share() */ + ret = 0; + break; default: ret = -EINVAL; } @@ -946,6 +964,10 @@ static int __do_unshare(struct pkvm_mem_share *share) case PKVM_ID_HYP: ret = hyp_complete_unshare(completer_addr, tx); break; + case PKVM_ID_FFA: + /* See __do_share() */ + ret = 0; + break; default: ret = -EINVAL; } @@ -1235,3 +1257,49 @@ void hyp_unpin_shared_mem(void *from, void *to) hyp_unlock_component(); host_unlock_component(); } + +int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages) +{ + int ret; + struct pkvm_mem_share share = { + .tx = { + .nr_pages = nr_pages, + .initiator = { + .id = PKVM_ID_HOST, + .addr = hyp_pfn_to_phys(pfn), + }, + .completer = { + .id = PKVM_ID_FFA, + }, + }, + }; + + host_lock_component(); + ret = do_share(&share); + host_unlock_component(); + + return ret; +} + +int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages) +{ + int ret; + struct pkvm_mem_share share = { + .tx = { + .nr_pages = nr_pages, + .initiator = { + .id = PKVM_ID_HOST, + .addr = hyp_pfn_to_phys(pfn), + }, + .completer = { + .id = PKVM_ID_FFA, + }, + }, + }; + + host_lock_component(); + ret = do_unshare(&share); + host_unlock_component(); + + return ret; +} -- cgit From 43609000177625b1c93c55a16a07aee5a4258260 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 23 May 2023 11:18:23 +0100 Subject: KVM: arm64: Handle FFA_MEM_SHARE calls from the host Intercept FFA_MEM_SHARE/FFA_FN64_MEM_SHARE calls from the host and transition the host stage-2 page-table entries from the OWNED state to the SHARED_OWNED state prior to forwarding the call onto EL3. Co-developed-by: Andrew Walbran Signed-off-by: Andrew Walbran Signed-off-by: Will Deacon Link: https://lore.kernel.org/r/20230523101828.7328-7-will@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/nvhe/ffa.c | 155 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 155 insertions(+) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c index 0c020cd9f722..83c41c0c441d 100644 --- a/arch/arm64/kvm/hyp/nvhe/ffa.c +++ b/arch/arm64/kvm/hyp/nvhe/ffa.c @@ -116,6 +116,14 @@ static int ffa_unmap_hyp_buffers(void) return res.a0 == FFA_SUCCESS ? 
FFA_RET_SUCCESS : res.a2; } +static void ffa_mem_share(struct arm_smccc_res *res, u32 len, u32 fraglen) +{ + arm_smccc_1_1_smc(FFA_FN64_MEM_SHARE, + len, fraglen, + 0, 0, 0, 0, 0, + res); +} + static void do_ffa_rxtx_map(struct arm_smccc_res *res, struct kvm_cpu_context *ctxt) { @@ -228,6 +236,149 @@ out: ffa_to_smccc_res(res, ret); } +static u32 __ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges, + u32 nranges) +{ + u32 i; + + for (i = 0; i < nranges; ++i) { + struct ffa_mem_region_addr_range *range = &ranges[i]; + u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE; + u64 pfn = hyp_phys_to_pfn(range->address); + + if (!PAGE_ALIGNED(sz)) + break; + + if (__pkvm_host_share_ffa(pfn, sz / PAGE_SIZE)) + break; + } + + return i; +} + +static u32 __ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges, + u32 nranges) +{ + u32 i; + + for (i = 0; i < nranges; ++i) { + struct ffa_mem_region_addr_range *range = &ranges[i]; + u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE; + u64 pfn = hyp_phys_to_pfn(range->address); + + if (!PAGE_ALIGNED(sz)) + break; + + if (__pkvm_host_unshare_ffa(pfn, sz / PAGE_SIZE)) + break; + } + + return i; +} + +static int ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges, + u32 nranges) +{ + u32 nshared = __ffa_host_share_ranges(ranges, nranges); + int ret = 0; + + if (nshared != nranges) { + WARN_ON(__ffa_host_unshare_ranges(ranges, nshared) != nshared); + ret = FFA_RET_DENIED; + } + + return ret; +} + +static int ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges, + u32 nranges) +{ + u32 nunshared = __ffa_host_unshare_ranges(ranges, nranges); + int ret = 0; + + if (nunshared != nranges) { + WARN_ON(__ffa_host_share_ranges(ranges, nunshared) != nunshared); + ret = FFA_RET_DENIED; + } + + return ret; +} + +static void do_ffa_mem_share(struct arm_smccc_res *res, + struct kvm_cpu_context *ctxt) +{ + DECLARE_REG(u32, len, ctxt, 1); + DECLARE_REG(u32, fraglen, ctxt, 2); + DECLARE_REG(u64, addr_mbz, ctxt, 3); + DECLARE_REG(u32, npages_mbz, ctxt, 4); + struct ffa_composite_mem_region *reg; + struct ffa_mem_region *buf; + int ret = 0; + u32 offset; + + if (addr_mbz || npages_mbz || fraglen > len || + fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) { + ret = FFA_RET_INVALID_PARAMETERS; + goto out; + } + + if (fraglen < len) { + ret = FFA_RET_ABORTED; + goto out; + } + + if (fraglen < sizeof(struct ffa_mem_region) + + sizeof(struct ffa_mem_region_attributes)) { + ret = FFA_RET_INVALID_PARAMETERS; + goto out; + } + + hyp_spin_lock(&host_buffers.lock); + if (!host_buffers.tx) { + ret = FFA_RET_INVALID_PARAMETERS; + goto out_unlock; + } + + buf = hyp_buffers.tx; + memcpy(buf, host_buffers.tx, fraglen); + + offset = buf->ep_mem_access[0].composite_off; + if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) { + ret = FFA_RET_INVALID_PARAMETERS; + goto out_unlock; + } + + if (fraglen < offset + sizeof(struct ffa_composite_mem_region)) { + ret = FFA_RET_INVALID_PARAMETERS; + goto out_unlock; + } + + reg = (void *)buf + offset; + if (fraglen < offset + sizeof(struct ffa_composite_mem_region) + + reg->addr_range_cnt * + sizeof(struct ffa_mem_region_addr_range)) { + ret = FFA_RET_INVALID_PARAMETERS; + goto out_unlock; + } + + ret = ffa_host_share_ranges(reg->constituents, reg->addr_range_cnt); + if (ret) + goto out_unlock; + + ffa_mem_share(res, len, fraglen); + if (res->a0 != FFA_SUCCESS) { + WARN_ON(ffa_host_unshare_ranges(reg->constituents, + reg->addr_range_cnt)); + } + +out_unlock: + hyp_spin_unlock(&host_buffers.lock); +out: + if 
(ret) + ffa_to_smccc_res(res, ret); + return; +} + /* * Is a given FFA function supported, either by forwarding on directly * or by handling at EL2? @@ -290,6 +441,10 @@ bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt) case FFA_RXTX_UNMAP: do_ffa_rxtx_unmap(&res, host_ctxt); goto out_handled; + case FFA_MEM_SHARE: + case FFA_FN64_MEM_SHARE: + do_ffa_mem_share(&res, host_ctxt); + goto out_handled; } if (ffa_call_supported(func_id)) -- cgit From 0e3bcb49c13567adb821c4251dcfd04ad7e1179f Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 23 May 2023 11:18:24 +0100 Subject: KVM: arm64: Handle FFA_MEM_RECLAIM calls from the host Intecept FFA_MEM_RECLAIM calls from the host and transition the host stage-2 page-table entries from the SHARED_OWNED state back to the OWNED state once EL3 has confirmed that the secure mapping has been reclaimed. Signed-off-by: Will Deacon Link: https://lore.kernel.org/r/20230523101828.7328-8-will@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/nvhe/ffa.c | 79 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c index 83c41c0c441d..550936f4b5da 100644 --- a/arch/arm64/kvm/hyp/nvhe/ffa.c +++ b/arch/arm64/kvm/hyp/nvhe/ffa.c @@ -124,6 +124,23 @@ static void ffa_mem_share(struct arm_smccc_res *res, u32 len, u32 fraglen) res); } +static void ffa_mem_reclaim(struct arm_smccc_res *res, u32 handle_lo, + u32 handle_hi, u32 flags) +{ + arm_smccc_1_1_smc(FFA_MEM_RECLAIM, + handle_lo, handle_hi, flags, + 0, 0, 0, 0, + res); +} + +static void ffa_retrieve_req(struct arm_smccc_res *res, u32 len) +{ + arm_smccc_1_1_smc(FFA_FN64_MEM_RETRIEVE_REQ, + len, len, + 0, 0, 0, 0, 0, + res); +} + static void do_ffa_rxtx_map(struct arm_smccc_res *res, struct kvm_cpu_context *ctxt) { @@ -379,6 +396,65 @@ out: return; } +static void do_ffa_mem_reclaim(struct arm_smccc_res *res, + struct kvm_cpu_context *ctxt) +{ + DECLARE_REG(u32, handle_lo, ctxt, 1); + DECLARE_REG(u32, handle_hi, ctxt, 2); + DECLARE_REG(u32, flags, ctxt, 3); + struct ffa_composite_mem_region *reg; + struct ffa_mem_region *buf; + int ret = 0; + u32 offset; + u64 handle; + + handle = PACK_HANDLE(handle_lo, handle_hi); + + hyp_spin_lock(&host_buffers.lock); + + buf = hyp_buffers.tx; + *buf = (struct ffa_mem_region) { + .sender_id = HOST_FFA_ID, + .handle = handle, + }; + + ffa_retrieve_req(res, sizeof(*buf)); + buf = hyp_buffers.rx; + if (res->a0 != FFA_MEM_RETRIEVE_RESP) + goto out_unlock; + + /* Check for fragmentation */ + if (res->a1 != res->a2) { + ret = FFA_RET_ABORTED; + goto out_unlock; + } + + offset = buf->ep_mem_access[0].composite_off; + /* + * We can trust the SPMD to get this right, but let's at least + * check that we end up with something that doesn't look _completely_ + * bogus. + */ + if (WARN_ON(offset > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) { + ret = FFA_RET_ABORTED; + goto out_unlock; + } + + reg = (void *)buf + offset; + ffa_mem_reclaim(res, handle_lo, handle_hi, flags); + if (res->a0 != FFA_SUCCESS) + goto out_unlock; + + /* If the SPMD was happy, then we should be too. */ + WARN_ON(ffa_host_unshare_ranges(reg->constituents, + reg->addr_range_cnt)); +out_unlock: + hyp_spin_unlock(&host_buffers.lock); + + if (ret) + ffa_to_smccc_res(res, ret); +} + /* * Is a given FFA function supported, either by forwarding on directly * or by handling at EL2? 
@@ -445,6 +521,9 @@ bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt) case FFA_FN64_MEM_SHARE: do_ffa_mem_share(&res, host_ctxt); goto out_handled; + case FFA_MEM_RECLAIM: + do_ffa_mem_reclaim(&res, host_ctxt); + goto out_handled; } if (ffa_call_supported(func_id)) -- cgit From 634d90cf0ac6562f121a4acd4caec36d695d6aa2 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 23 May 2023 11:18:25 +0100 Subject: KVM: arm64: Handle FFA_MEM_LEND calls from the host Handle FFA_MEM_LEND calls from the host by treating them identically to FFA_MEM_SHARE calls for the purposes of the host stage-2 page-table, but forwarding on the original request to EL3. Signed-off-by: Will Deacon Link: https://lore.kernel.org/r/20230523101828.7328-9-will@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/nvhe/ffa.c | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c index 550936f4b5da..d90ab1c9ec72 100644 --- a/arch/arm64/kvm/hyp/nvhe/ffa.c +++ b/arch/arm64/kvm/hyp/nvhe/ffa.c @@ -116,10 +116,10 @@ static int ffa_unmap_hyp_buffers(void) return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2; } -static void ffa_mem_share(struct arm_smccc_res *res, u32 len, u32 fraglen) +static void ffa_mem_xfer(struct arm_smccc_res *res, u64 func_id, u32 len, + u32 fraglen) { - arm_smccc_1_1_smc(FFA_FN64_MEM_SHARE, - len, fraglen, + arm_smccc_1_1_smc(func_id, len, fraglen, 0, 0, 0, 0, 0, res); } @@ -321,8 +321,9 @@ static int ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges, return ret; } -static void do_ffa_mem_share(struct arm_smccc_res *res, - struct kvm_cpu_context *ctxt) +static __always_inline void do_ffa_mem_xfer(const u64 func_id, + struct arm_smccc_res *res, + struct kvm_cpu_context *ctxt) { DECLARE_REG(u32, len, ctxt, 1); DECLARE_REG(u32, fraglen, ctxt, 2); @@ -333,6 +334,9 @@ static void do_ffa_mem_share(struct arm_smccc_res *res, int ret = 0; u32 offset; + BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE && + func_id != FFA_FN64_MEM_LEND); + if (addr_mbz || npages_mbz || fraglen > len || fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) { ret = FFA_RET_INVALID_PARAMETERS; @@ -382,7 +386,7 @@ static void do_ffa_mem_share(struct arm_smccc_res *res, if (ret) goto out_unlock; - ffa_mem_share(res, len, fraglen); + ffa_mem_xfer(res, func_id, len, fraglen); if (res->a0 != FFA_SUCCESS) { WARN_ON(ffa_host_unshare_ranges(reg->constituents, reg->addr_range_cnt)); @@ -519,11 +523,15 @@ bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt) goto out_handled; case FFA_MEM_SHARE: case FFA_FN64_MEM_SHARE: - do_ffa_mem_share(&res, host_ctxt); + do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt); goto out_handled; case FFA_MEM_RECLAIM: do_ffa_mem_reclaim(&res, host_ctxt); goto out_handled; + case FFA_MEM_LEND: + case FFA_FN64_MEM_LEND: + do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt); + goto out_handled; } if (ffa_call_supported(func_id)) -- cgit From 20936cd11479709ccd5de2dc77ac8063cd9bfad8 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Tue, 23 May 2023 11:18:26 +0100 Subject: KVM: arm64: Handle FFA_FEATURES call from the host Filter out advertising unsupported features, and only advertise features and properties that are supported by the hypervisor proxy. 
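As an example of the resulting behaviour, a hypothetical host-side query (assuming <linux/arm-smccc.h> and <linux/arm_ffa.h>; the helper name is made up) now sees FFA_FEATURES succeed, with no extra properties, only for interfaces the relayer handles, such as FFA_FN64_MEM_LEND, while filtered interfaces report FFA_RET_NOT_SUPPORTED.

#include <linux/arm-smccc.h>
#include <linux/arm_ffa.h>

/* Sketch only: ask the FF-A layer (the EL2 proxy in protected mode)
 * whether MEM_LEND is implemented.
 */
static bool ffa_host_has_mem_lend(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_FEATURES, FFA_FN64_MEM_LEND, 0, 0, 0, 0, 0, 0, &res);

	return res.a0 == FFA_SUCCESS;
}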
Signed-off-by: Fuad Tabba Signed-off-by: Will Deacon Link: https://lore.kernel.org/r/20230523101828.7328-10-will@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/nvhe/ffa.c | 45 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 41 insertions(+), 4 deletions(-) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c index d90ab1c9ec72..926314a9b9d7 100644 --- a/arch/arm64/kvm/hyp/nvhe/ffa.c +++ b/arch/arm64/kvm/hyp/nvhe/ffa.c @@ -64,15 +64,21 @@ static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno) }; } -static void ffa_to_smccc_res(struct arm_smccc_res *res, int ret) +static void ffa_to_smccc_res_prop(struct arm_smccc_res *res, int ret, u64 prop) { if (ret == FFA_RET_SUCCESS) { - *res = (struct arm_smccc_res) { .a0 = FFA_SUCCESS }; + *res = (struct arm_smccc_res) { .a0 = FFA_SUCCESS, + .a2 = prop }; } else { ffa_to_smccc_error(res, ret); } } +static void ffa_to_smccc_res(struct arm_smccc_res *res, int ret) +{ + ffa_to_smccc_res_prop(res, ret, 0); +} + static void ffa_set_retval(struct kvm_cpu_context *ctxt, struct arm_smccc_res *res) { @@ -484,14 +490,41 @@ static bool ffa_call_supported(u64 func_id) case FFA_RXTX_MAP: case FFA_MEM_DONATE: case FFA_MEM_RETRIEVE_REQ: - /* Don't advertise any features just yet */ - case FFA_FEATURES: return false; } return true; } +static bool do_ffa_features(struct arm_smccc_res *res, + struct kvm_cpu_context *ctxt) +{ + DECLARE_REG(u32, id, ctxt, 1); + u64 prop = 0; + int ret = 0; + + if (!ffa_call_supported(id)) { + ret = FFA_RET_NOT_SUPPORTED; + goto out_handled; + } + + switch (id) { + case FFA_MEM_SHARE: + case FFA_FN64_MEM_SHARE: + case FFA_MEM_LEND: + case FFA_FN64_MEM_LEND: + ret = FFA_RET_SUCCESS; + prop = 0; /* No support for dynamic buffers */ + goto out_handled; + default: + return false; + } + +out_handled: + ffa_to_smccc_res_prop(res, ret, prop); + return true; +} + bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt) { DECLARE_REG(u64, func_id, host_ctxt, 0); @@ -514,6 +547,10 @@ bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt) return false; switch (func_id) { + case FFA_FEATURES: + if (!do_ffa_features(&res, host_ctxt)) + return false; + goto out_handled; /* Memory management */ case FFA_FN64_RXTX_MAP: do_ffa_rxtx_map(&res, host_ctxt); -- cgit From 0a9f15fd56742b785ba4d06e64976f1f700b807c Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Tue, 23 May 2023 11:18:27 +0100 Subject: KVM: arm64: pkvm: Add support for fragmented FF-A descriptors FF-A memory descriptors may need to be sent in fragments when they don't fit in the mailboxes. Doing so involves using the FRAG_TX and FRAG_RX primitives defined in the FF-A protocol. Add support in the pKVM FF-A relayer for fragmented descriptors by monitoring outgoing FRAG_TX transactions and by buffering large descriptors on the reclaim path. 
Co-developed-by: Andrew Walbran Signed-off-by: Andrew Walbran Signed-off-by: Quentin Perret Signed-off-by: Will Deacon Link: https://lore.kernel.org/r/20230523101828.7328-11-will@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/nvhe/ffa.c | 170 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 147 insertions(+), 23 deletions(-) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c index 926314a9b9d7..58dcd92bf346 100644 --- a/arch/arm64/kvm/hyp/nvhe/ffa.c +++ b/arch/arm64/kvm/hyp/nvhe/ffa.c @@ -42,6 +42,18 @@ */ #define HOST_FFA_ID 0 +/* + * A buffer to hold the maximum descriptor size we can see from the host, + * which is required when the SPMD returns a fragmented FFA_MEM_RETRIEVE_RESP + * when resolving the handle on the reclaim path. + */ +struct kvm_ffa_descriptor_buffer { + void *buf; + size_t len; +}; + +static struct kvm_ffa_descriptor_buffer ffa_desc_buf; + struct kvm_ffa_buffers { hyp_spinlock_t lock; void *tx; @@ -122,6 +134,24 @@ static int ffa_unmap_hyp_buffers(void) return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2; } +static void ffa_mem_frag_tx(struct arm_smccc_res *res, u32 handle_lo, + u32 handle_hi, u32 fraglen, u32 endpoint_id) +{ + arm_smccc_1_1_smc(FFA_MEM_FRAG_TX, + handle_lo, handle_hi, fraglen, endpoint_id, + 0, 0, 0, + res); +} + +static void ffa_mem_frag_rx(struct arm_smccc_res *res, u32 handle_lo, + u32 handle_hi, u32 fragoff) +{ + arm_smccc_1_1_smc(FFA_MEM_FRAG_RX, + handle_lo, handle_hi, fragoff, HOST_FFA_ID, + 0, 0, 0, + res); +} + static void ffa_mem_xfer(struct arm_smccc_res *res, u64 func_id, u32 len, u32 fraglen) { @@ -327,6 +357,64 @@ static int ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges, return ret; } +static void do_ffa_mem_frag_tx(struct arm_smccc_res *res, + struct kvm_cpu_context *ctxt) +{ + DECLARE_REG(u32, handle_lo, ctxt, 1); + DECLARE_REG(u32, handle_hi, ctxt, 2); + DECLARE_REG(u32, fraglen, ctxt, 3); + DECLARE_REG(u32, endpoint_id, ctxt, 4); + struct ffa_mem_region_addr_range *buf; + int ret = FFA_RET_INVALID_PARAMETERS; + u32 nr_ranges; + + if (fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) + goto out; + + if (fraglen % sizeof(*buf)) + goto out; + + hyp_spin_lock(&host_buffers.lock); + if (!host_buffers.tx) + goto out_unlock; + + buf = hyp_buffers.tx; + memcpy(buf, host_buffers.tx, fraglen); + nr_ranges = fraglen / sizeof(*buf); + + ret = ffa_host_share_ranges(buf, nr_ranges); + if (ret) { + /* + * We're effectively aborting the transaction, so we need + * to restore the global state back to what it was prior to + * transmission of the first fragment. + */ + ffa_mem_reclaim(res, handle_lo, handle_hi, 0); + WARN_ON(res->a0 != FFA_SUCCESS); + goto out_unlock; + } + + ffa_mem_frag_tx(res, handle_lo, handle_hi, fraglen, endpoint_id); + if (res->a0 != FFA_SUCCESS && res->a0 != FFA_MEM_FRAG_RX) + WARN_ON(ffa_host_unshare_ranges(buf, nr_ranges)); + +out_unlock: + hyp_spin_unlock(&host_buffers.lock); +out: + if (ret) + ffa_to_smccc_res(res, ret); + + /* + * If for any reason this did not succeed, we're in trouble as we have + * now lost the content of the previous fragments and we can't rollback + * the host stage-2 changes. The pages previously marked as shared will + * remain stuck in that state forever, hence preventing the host from + * sharing/donating them again and may possibly lead to subsequent + * failures, but this will not compromise confidentiality. 
+ */ + return; +} + static __always_inline void do_ffa_mem_xfer(const u64 func_id, struct arm_smccc_res *res, struct kvm_cpu_context *ctxt) @@ -337,8 +425,8 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id, DECLARE_REG(u32, npages_mbz, ctxt, 4); struct ffa_composite_mem_region *reg; struct ffa_mem_region *buf; + u32 offset, nr_ranges; int ret = 0; - u32 offset; BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE && func_id != FFA_FN64_MEM_LEND); @@ -349,11 +437,6 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id, goto out; } - if (fraglen < len) { - ret = FFA_RET_ABORTED; - goto out; - } - if (fraglen < sizeof(struct ffa_mem_region) + sizeof(struct ffa_mem_region_attributes)) { ret = FFA_RET_INVALID_PARAMETERS; @@ -381,21 +464,26 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id, } reg = (void *)buf + offset; - if (fraglen < offset + sizeof(struct ffa_composite_mem_region) + - reg->addr_range_cnt * - sizeof(struct ffa_mem_region_addr_range)) { + nr_ranges = ((void *)buf + fraglen) - (void *)reg->constituents; + if (nr_ranges % sizeof(reg->constituents[0])) { ret = FFA_RET_INVALID_PARAMETERS; goto out_unlock; } - ret = ffa_host_share_ranges(reg->constituents, reg->addr_range_cnt); + nr_ranges /= sizeof(reg->constituents[0]); + ret = ffa_host_share_ranges(reg->constituents, nr_ranges); if (ret) goto out_unlock; ffa_mem_xfer(res, func_id, len, fraglen); - if (res->a0 != FFA_SUCCESS) { - WARN_ON(ffa_host_unshare_ranges(reg->constituents, - reg->addr_range_cnt)); + if (fraglen != len) { + if (res->a0 != FFA_MEM_FRAG_RX) + goto err_unshare; + + if (res->a3 != fraglen) + goto err_unshare; + } else if (res->a0 != FFA_SUCCESS) { + goto err_unshare; } out_unlock: @@ -404,6 +492,10 @@ out: if (ret) ffa_to_smccc_res(res, ret); return; + +err_unshare: + WARN_ON(ffa_host_unshare_ranges(reg->constituents, nr_ranges)); + goto out_unlock; } static void do_ffa_mem_reclaim(struct arm_smccc_res *res, @@ -413,9 +505,9 @@ static void do_ffa_mem_reclaim(struct arm_smccc_res *res, DECLARE_REG(u32, handle_hi, ctxt, 2); DECLARE_REG(u32, flags, ctxt, 3); struct ffa_composite_mem_region *reg; + u32 offset, len, fraglen, fragoff; struct ffa_mem_region *buf; int ret = 0; - u32 offset; u64 handle; handle = PACK_HANDLE(handle_lo, handle_hi); @@ -433,11 +525,8 @@ static void do_ffa_mem_reclaim(struct arm_smccc_res *res, if (res->a0 != FFA_MEM_RETRIEVE_RESP) goto out_unlock; - /* Check for fragmentation */ - if (res->a1 != res->a2) { - ret = FFA_RET_ABORTED; - goto out_unlock; - } + len = res->a1; + fraglen = res->a2; offset = buf->ep_mem_access[0].composite_off; /* @@ -445,16 +534,36 @@ static void do_ffa_mem_reclaim(struct arm_smccc_res *res, * check that we end up with something that doesn't look _completely_ * bogus. 
*/ - if (WARN_ON(offset > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) { + if (WARN_ON(offset > len || + fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) { ret = FFA_RET_ABORTED; goto out_unlock; } - reg = (void *)buf + offset; + if (len > ffa_desc_buf.len) { + ret = FFA_RET_NO_MEMORY; + goto out_unlock; + } + + buf = ffa_desc_buf.buf; + memcpy(buf, hyp_buffers.rx, fraglen); + + for (fragoff = fraglen; fragoff < len; fragoff += fraglen) { + ffa_mem_frag_rx(res, handle_lo, handle_hi, fragoff); + if (res->a0 != FFA_MEM_FRAG_TX) { + ret = FFA_RET_INVALID_PARAMETERS; + goto out_unlock; + } + + fraglen = res->a3; + memcpy((void *)buf + fragoff, hyp_buffers.rx, fraglen); + } + ffa_mem_reclaim(res, handle_lo, handle_hi, flags); if (res->a0 != FFA_SUCCESS) goto out_unlock; + reg = (void *)buf + offset; /* If the SPMD was happy, then we should be too. */ WARN_ON(ffa_host_unshare_ranges(reg->constituents, reg->addr_range_cnt)); @@ -569,6 +678,9 @@ bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt) case FFA_FN64_MEM_LEND: do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt); goto out_handled; + case FFA_MEM_FRAG_TX: + do_ffa_mem_frag_tx(&res, host_ctxt); + goto out_handled; } if (ffa_call_supported(func_id)) @@ -584,6 +696,7 @@ int hyp_ffa_init(void *pages) { struct arm_smccc_res res; size_t min_rxtx_sz; + void *tx, *rx; if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_2) return 0; @@ -624,10 +737,21 @@ int hyp_ffa_init(void *pages) if (min_rxtx_sz > PAGE_SIZE) return -EOPNOTSUPP; + tx = pages; + pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE; + rx = pages; + pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE; + + ffa_desc_buf = (struct kvm_ffa_descriptor_buffer) { + .buf = pages, + .len = PAGE_SIZE * + (hyp_ffa_proxy_pages() - (2 * KVM_FFA_MBOX_NR_PAGES)), + }; + hyp_buffers = (struct kvm_ffa_buffers) { .lock = __HYP_SPIN_LOCK_UNLOCKED, - .tx = pages, - .rx = pages + (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE), + .tx = tx, + .rx = rx, }; host_buffers = (struct kvm_ffa_buffers) { -- cgit From 9e7462bbe00d8431694720804a50b8f48d8ed0b0 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 9 Jun 2023 17:21:49 +0100 Subject: arm64: Allow EL1 physical timer access when running VHE To initialise the timer access from EL2 when HCR_EL2.E2H is set, we must make sure that the CNTHCTL_EL2 format used is appropriate. This amounts to shifting the timer/counter enable bits by 10 to the left. Signed-off-by: Marc Zyngier Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20230609162200.2024064-7-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/nvhe/hyp-init.S | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S index a6d67c2bb5ae..f9ee10e29497 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S @@ -95,6 +95,15 @@ SYM_CODE_START_LOCAL(___kvm_hyp_init) ldr x1, [x0, #NVHE_INIT_HCR_EL2] msr hcr_el2, x1 + mov x2, #HCR_E2H + and x2, x1, x2 + cbz x2, 1f + + mrs x1, cnthctl_el2 + and x1, x1, #~(BIT(0) | BIT(1)) + orr x1, x1, #(BIT(10) | BIT(11)) + msr cnthctl_el2, x1 +1: ldr x1, [x0, #NVHE_INIT_VTTBR] msr vttbr_el2, x1 -- cgit From 6537565fd9b7f169cda025482f6de2646cf7b60a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 9 Jun 2023 17:21:55 +0100 Subject: KVM: arm64: Adjust EL2 stage-1 leaf AP bits when ARM64_KVM_HVHE is set EL2 stage-1 page-table format is subtly (and annoyingly) different when HCR_EL2.E2H is set.
Take the ARM64_KVM_HVHE configuration into account when setting the AP bits. Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230609162200.2024064-13-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/pgtable.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index 5282cb9ca4cf..21d0d0b1cf15 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -21,8 +21,10 @@ #define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX GENMASK(4, 2) #define KVM_PTE_LEAF_ATTR_LO_S1_AP GENMASK(7, 6) -#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO 3 -#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW 1 +#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO \ + ({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 2 : 3; }) +#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW \ + ({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 0 : 1; }) #define KVM_PTE_LEAF_ATTR_LO_S1_SH GENMASK(9, 8) #define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS 3 #define KVM_PTE_LEAF_ATTR_LO_S1_AF BIT(10) -- cgit From 75c76ab5a641b64e872b6e136b5edf8a72009cc6 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 9 Jun 2023 17:21:56 +0100 Subject: KVM: arm64: Rework CPTR_EL2 programming for HVHE configuration Just like we repainted the early arm64 code, we need to update the CPTR_EL2 accesses that are taking place in the nVHE code when hVHE is used, making them look as if they were CPACR_EL1 accesses. Just like the VHE code. Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230609162200.2024064-14-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/include/hyp/switch.h | 2 +- arch/arm64/kvm/hyp/nvhe/hyp-main.c | 6 +++++- arch/arm64/kvm/hyp/nvhe/pkvm.c | 24 ++++++++++++++++++------ arch/arm64/kvm/hyp/nvhe/switch.c | 28 ++++++++++++++++------------ arch/arm64/kvm/hyp/vhe/switch.c | 2 +- 5 files changed, 41 insertions(+), 21 deletions(-) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index e78a08a72a3c..b5f53d3fc14c 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -192,7 +192,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) /* Valid trap. Switch the context: */ /* First disable enough traps to allow us to update the registers */ - if (has_vhe()) { + if (has_vhe() || has_hvhe()) { reg = CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN; if (sve_guest) reg |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN; diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index 728e01d4536b..ce602f9e93eb 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -392,7 +392,11 @@ void handle_trap(struct kvm_cpu_context *host_ctxt) handle_host_smc(host_ctxt); break; case ESR_ELx_EC_SVE: - sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0); + if (has_hvhe()) + sysreg_clear_set(cpacr_el1, 0, (CPACR_EL1_ZEN_EL1EN | + CPACR_EL1_ZEN_EL0EN)); + else + sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0); isb(); sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2); break; diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index a06ece14a6d8..5d5ee735a7d9 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -27,6 +27,7 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu) u64 hcr_set = HCR_RW; u64 hcr_clear = 0; u64 cptr_set = 0; + u64 cptr_clear = 0; /* Protected KVM does not support AArch32 guests. 
*/ BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), @@ -57,12 +58,17 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu) } /* Trap SVE */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) - cptr_set |= CPTR_EL2_TZ; + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) { + if (has_hvhe()) + cptr_clear |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN; + else + cptr_set |= CPTR_EL2_TZ; + } vcpu->arch.hcr_el2 |= hcr_set; vcpu->arch.hcr_el2 &= ~hcr_clear; vcpu->arch.cptr_el2 |= cptr_set; + vcpu->arch.cptr_el2 &= ~cptr_clear; } /* @@ -120,8 +126,12 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu) mdcr_set |= MDCR_EL2_TTRF; /* Trap Trace */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) - cptr_set |= CPTR_EL2_TTA; + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) { + if (has_hvhe()) + cptr_set |= CPACR_EL1_TTA; + else + cptr_set |= CPTR_EL2_TTA; + } vcpu->arch.mdcr_el2 |= mdcr_set; vcpu->arch.mdcr_el2 &= ~mdcr_clear; @@ -176,8 +186,10 @@ static void pvm_init_trap_regs(struct kvm_vcpu *vcpu) /* Clear res0 and set res1 bits to trap potential new features. */ vcpu->arch.hcr_el2 &= ~(HCR_RES0); vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0); - vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1; - vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0); + if (!has_hvhe()) { + vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1; + vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0); + } } /* diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 71fa16a0dc77..5fa0b1c9ee8d 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -44,13 +44,24 @@ static void __activate_traps(struct kvm_vcpu *vcpu) __activate_traps_common(vcpu); val = vcpu->arch.cptr_el2; - val |= CPTR_EL2_TTA | CPTR_EL2_TAM; + val |= CPTR_EL2_TAM; /* Same bit irrespective of E2H */ + val |= has_hvhe() ? 
CPACR_EL1_TTA : CPTR_EL2_TTA; + if (cpus_have_final_cap(ARM64_SME)) { + if (has_hvhe()) + val &= ~(CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN); + else + val |= CPTR_EL2_TSM; + } + if (!guest_owns_fp_regs(vcpu)) { - val |= CPTR_EL2_TFP | CPTR_EL2_TZ; + if (has_hvhe()) + val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN | + CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN); + else + val |= CPTR_EL2_TFP | CPTR_EL2_TZ; + __activate_traps_fpsimd32(vcpu); } - if (cpus_have_final_cap(ARM64_SME)) - val |= CPTR_EL2_TSM; write_sysreg(val, cptr_el2); write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2); @@ -73,7 +84,6 @@ static void __activate_traps(struct kvm_vcpu *vcpu) static void __deactivate_traps(struct kvm_vcpu *vcpu) { extern char __kvm_hyp_host_vector[]; - u64 cptr; ___deactivate_traps(vcpu); @@ -98,13 +108,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu) write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2); - cptr = CPTR_EL2_DEFAULT; - if (vcpu_has_sve(vcpu) && (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)) - cptr |= CPTR_EL2_TZ; - if (cpus_have_final_cap(ARM64_SME)) - cptr &= ~CPTR_EL2_TSM; - - write_sysreg(cptr, cptr_el2); + kvm_reset_cptr_el2(vcpu); write_sysreg(__kvm_hyp_host_vector, vbar_el2); } diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index 3d868e84c7a0..034777a23b74 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -84,7 +84,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu) */ asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT)); - write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1); + kvm_reset_cptr_el2(vcpu); if (!arm64_kernel_unmapped_at_el0()) host_vectors = __this_cpu_read(this_cpu_vector); -- cgit From aca18585db4fd0ed0bd7420eddcdc39a535194fe Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 9 Jun 2023 17:21:57 +0100 Subject: KVM: arm64: Program the timer traps with VHE layout in hVHE mode Just like the rest of the timer code, we need to shift the enable bits around when HCR_EL2.E2H is set, which is the case in hVHE mode. Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230609162200.2024064-15-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/nvhe/timer-sr.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/nvhe/timer-sr.c b/arch/arm64/kvm/hyp/nvhe/timer-sr.c index b185ac0dbd47..3aaab20ae5b4 100644 --- a/arch/arm64/kvm/hyp/nvhe/timer-sr.c +++ b/arch/arm64/kvm/hyp/nvhe/timer-sr.c @@ -17,21 +17,24 @@ void __kvm_timer_set_cntvoff(u64 cntvoff) } /* - * Should only be called on non-VHE systems. + * Should only be called on non-VHE or hVHE setups. * VHE systems use EL2 timers and configure EL1 timers in kvm_timer_init_vhe(). */ void __timer_disable_traps(struct kvm_vcpu *vcpu) { - u64 val; + u64 val, shift = 0; + + if (has_hvhe()) + shift = 10; /* Allow physical timer/counter access for the host */ val = read_sysreg(cnthctl_el2); - val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN; + val |= (CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN) << shift; write_sysreg(val, cnthctl_el2); } /* - * Should only be called on non-VHE systems. + * Should only be called on non-VHE or hVHE setups. * VHE systems use EL2 timers and configure EL1 timers in kvm_timer_init_vhe(). 
*/ void __timer_enable_traps(struct kvm_vcpu *vcpu) @@ -50,5 +53,10 @@ void __timer_enable_traps(struct kvm_vcpu *vcpu) else clr |= CNTHCTL_EL1PCTEN; + if (has_hvhe()) { + clr <<= 10; + set <<= 10; + } + sysreg_clear_set(cnthctl_el2, clr, set); } -- cgit From 38cba55008e5fab9181302ea5daf79e2070c9998 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 9 Jun 2023 17:21:58 +0100 Subject: KVM: arm64: Force HCR_E2H in guest context when ARM64_KVM_HVHE is set Also make sure HCR_EL2.E2H is set when switching HCR_EL2 in guest context. Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230609162200.2024064-16-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/nvhe/pkvm.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index 5d5ee735a7d9..8033ef353a5d 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -44,6 +44,9 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu) BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD), PVM_ID_AA64PFR0_ALLOW)); + if (has_hvhe()) + hcr_set |= HCR_E2H; + /* Trap RAS unless all current versions are supported */ if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) < ID_AA64PFR0_EL1_RAS_V1P1) { -- cgit From 8c15c2a0281087d19f62d7c2b5ab1f9e961b8d97 Mon Sep 17 00:00:00 2001 From: Mostafa Saleh Date: Wed, 14 Jun 2023 12:25:59 +0000 Subject: KVM: arm64: Use different pointer authentication keys for pKVM When the use of pointer authentication is enabled in the kernel it applies to both the kernel itself as well as KVM's nVHE hypervisor. The same keys are used for both the kernel and the nVHE hypervisor, which is less than desirable for pKVM as the host is not trusted at runtime. Naturally, the fix is to use a different set of keys for the hypervisor when running in protected mode. Have the host generate a new set of keys for the hypervisor before deprivileging the kernel. While there might be other sources of random directly available at EL2, this keeps the implementation simple, and the host is trusted anyways until it is deprivileged. Since the host and hypervisor no longer share a set of pointer authentication keys, start context switching them on the host entry/exit path exactly as we do for guest entry/exit. There is no need to handle CPU migration as the nVHE code is not migratable in the first place. Signed-off-by: Mostafa Saleh Link: https://lore.kernel.org/r/20230614122600.2098901-1-smostafa@google.com Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/nvhe/host.S | 36 +++++++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S index b6c0188c4b35..c87c63133e10 100644 --- a/arch/arm64/kvm/hyp/nvhe/host.S +++ b/arch/arm64/kvm/hyp/nvhe/host.S @@ -10,6 +10,7 @@ #include #include #include +#include .text @@ -37,10 +38,43 @@ SYM_FUNC_START(__host_exit) /* Save the host context pointer in x29 across the function call */ mov x29, x0 + +#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL +alternative_if_not ARM64_HAS_ADDRESS_AUTH +b __skip_pauth_save +alternative_else_nop_endif + +alternative_if ARM64_KVM_PROTECTED_MODE + /* Save kernel ptrauth keys. */ + add x18, x29, #CPU_APIAKEYLO_EL1 + ptrauth_save_state x18, x19, x20 + + /* Use hyp keys. 
*/ + adr_this_cpu x18, kvm_hyp_ctxt, x19 + add x18, x18, #CPU_APIAKEYLO_EL1 + ptrauth_restore_state x18, x19, x20 + isb +alternative_else_nop_endif +__skip_pauth_save: +#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */ + bl handle_trap - /* Restore host regs x0-x17 */ __host_enter_restore_full: + /* Restore kernel keys. */ +#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL +alternative_if_not ARM64_HAS_ADDRESS_AUTH +b __skip_pauth_restore +alternative_else_nop_endif + +alternative_if ARM64_KVM_PROTECTED_MODE + add x18, x29, #CPU_APIAKEYLO_EL1 + ptrauth_restore_state x18, x19, x20 +alternative_else_nop_endif +__skip_pauth_restore: +#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */ + + /* Restore host regs x0-x17 */ ldp x0, x1, [x29, #CPU_XREG_OFFSET(0)] ldp x2, x3, [x29, #CPU_XREG_OFFSET(2)] ldp x4, x5, [x29, #CPU_XREG_OFFSET(4)] -- cgit From 1700f89cb99aae19e4f8ec38b1b59f3b7ae71b91 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 14 Jun 2023 16:51:29 +0100 Subject: KVM: arm64: Fix hVHE init on CPUs where HCR_EL2.E2H is not RES1 On CPUs where E2H is RES1, we very quickly set the scene for running EL2 with a VHE configuration, as we do not have any other choice. However, CPUs that conform to the current writing of the architecture start with E2H=0, and only later upgrade with E2H=1. This is all good, but nothing there is actually reconfiguring EL2 to be able to correctly run the kernel at EL1. Huhuh... The "obvious" solution is not to just reinitialise the timer controls like we do, but to really intitialise *everything* unconditionally. This requires a bit of surgery, and is a good opportunity to remove the macro that messes with SPSR_EL2 in init_el2_state. With that, hVHE now works correctly on my trusted A55 machine! Reported-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230614155129.2697388-1-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/nvhe/hyp-init.S | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S index f9ee10e29497..74ee77d9cfd0 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S @@ -83,9 +83,6 @@ SYM_CODE_END(__kvm_hyp_init) * x0: struct kvm_nvhe_init_params PA */ SYM_CODE_START_LOCAL(___kvm_hyp_init) - ldr x1, [x0, #NVHE_INIT_TPIDR_EL2] - msr tpidr_el2, x1 - ldr x1, [x0, #NVHE_INIT_STACK_HYP_VA] mov sp, x1 @@ -99,11 +96,18 @@ SYM_CODE_START_LOCAL(___kvm_hyp_init) and x2, x1, x2 cbz x2, 1f - mrs x1, cnthctl_el2 - and x1, x1, #~(BIT(0) | BIT(1)) - orr x1, x1, #(BIT(10) | BIT(11)) - msr cnthctl_el2, x1 + // hVHE: Replay the EL2 setup to account for the E2H bit + // TPIDR_EL2 is used to preserve x0 across the macro maze... + isb + msr tpidr_el2, x0 + init_el2_state + finalise_el2_state + mrs x0, tpidr_el2 + 1: + ldr x1, [x0, #NVHE_INIT_TPIDR_EL2] + msr tpidr_el2, x1 + ldr x1, [x0, #NVHE_INIT_VTTBR] msr vttbr_el2, x1 @@ -193,6 +197,7 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu) /* Initialize EL2 CPU state to sane values. */ init_el2_state // Clobbers x0..x2 finalise_el2_state + __init_el2_nvhe_prepare_eret /* Enable MMU, set vectors and stack. */ mov x0, x28 -- cgit From 6df696cd9bc1ceed0e92e36908f88bbd16d18255 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Fri, 9 Jun 2023 22:01:02 +0000 Subject: arm64: errata: Mitigate Ampere1 erratum AC03_CPU_38 at stage-2 AmpereOne has an erratum in its implementation of FEAT_HAFDBS that required disabling the feature on the design. 
This was done by reporting the feature as not implemented in the ID register, although the corresponding control bits were not actually RES0. This does not align well with the requirements of the architecture, which mandates these bits be RES0 if HAFDBS isn't implemented. The kernel's use of stage-1 is unaffected, as the HA and HD bits are only set if HAFDBS is detected in the ID register. KVM, on the other hand, relies on the RES0 behavior at stage-2 to use the same value for VTCR_EL2 on any cpu in the system. Mitigate the non-RES0 behavior by leaving VTCR_EL2.HA clear on affected systems. Cc: stable@vger.kernel.org Cc: D Scott Phillips Cc: Darren Hart Acked-by: D Scott Phillips Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20230609220104.1836988-2-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/pgtable.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index 5282cb9ca4cf..32d92ce4bae5 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -611,10 +611,18 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift) #ifdef CONFIG_ARM64_HW_AFDBM /* * Enable the Hardware Access Flag management, unconditionally - * on all CPUs. The features is RES0 on CPUs without the support - * and must be ignored by the CPUs. + * on all CPUs. In systems that have asymmetric support for the feature + * this allows KVM to leverage hardware support on the subset of cores + * that implement the feature. + * + * The architecture requires VTCR_EL2.HA to be RES0 (thus ignored by + * hardware) on implementations that do not advertise support for the + * feature. As such, setting HA unconditionally is safe, unless you + * happen to be running on a design that has unadvertised support for + * HAFDBS. Here be dragons. */ - vtcr |= VTCR_EL2_HA; + if (!cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38)) + vtcr |= VTCR_EL2_HA; #endif /* CONFIG_ARM64_HW_AFDBM */ /* Set the vmid bits */ -- cgit From ce4a36225753a1a5f3641bff47ecd32fb394dd22 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Fri, 9 Jun 2023 22:01:03 +0000 Subject: KVM: arm64: Refactor HFGxTR configuration into separate helpers A subsequent change will need to flip more trap bits in HFGWTR_EL2. Make room for this by factoring out the programming of the HFGxTR registers into helpers and using locals to build the set/clear masks. 
Link: https://lore.kernel.org/r/20230609220104.1836988-3-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/include/hyp/switch.h | 60 +++++++++++++++++++++++---------- 1 file changed, 42 insertions(+), 18 deletions(-) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index e78a08a72a3c..901e47a49070 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -70,6 +70,44 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu) } } +static inline bool __hfgxtr_traps_required(void) +{ + if (cpus_have_final_cap(ARM64_SME)) + return true; + + return false; +} + +static inline void __activate_traps_hfgxtr(void) +{ + u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp; + + if (cpus_have_final_cap(ARM64_SME)) { + tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK; + + r_clr |= tmp; + w_clr |= tmp; + } + + sysreg_clear_set_s(SYS_HFGRTR_EL2, r_clr, r_set); + sysreg_clear_set_s(SYS_HFGWTR_EL2, w_clr, w_set); +} + +static inline void __deactivate_traps_hfgxtr(void) +{ + u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp; + + if (cpus_have_final_cap(ARM64_SME)) { + tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK; + + r_set |= tmp; + w_set |= tmp; + } + + sysreg_clear_set_s(SYS_HFGRTR_EL2, r_clr, r_set); + sysreg_clear_set_s(SYS_HFGWTR_EL2, w_clr, w_set); +} + static inline void __activate_traps_common(struct kvm_vcpu *vcpu) { /* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */ @@ -89,16 +127,8 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu) vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2); write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); - if (cpus_have_final_cap(ARM64_SME)) { - sysreg_clear_set_s(SYS_HFGRTR_EL2, - HFGxTR_EL2_nSMPRI_EL1_MASK | - HFGxTR_EL2_nTPIDR2_EL0_MASK, - 0); - sysreg_clear_set_s(SYS_HFGWTR_EL2, - HFGxTR_EL2_nSMPRI_EL1_MASK | - HFGxTR_EL2_nTPIDR2_EL0_MASK, - 0); - } + if (__hfgxtr_traps_required()) + __activate_traps_hfgxtr(); } static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu) @@ -109,14 +139,8 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu) if (kvm_arm_support_pmu_v3()) write_sysreg(0, pmuserenr_el0); - if (cpus_have_final_cap(ARM64_SME)) { - sysreg_clear_set_s(SYS_HFGRTR_EL2, 0, - HFGxTR_EL2_nSMPRI_EL1_MASK | - HFGxTR_EL2_nTPIDR2_EL0_MASK); - sysreg_clear_set_s(SYS_HFGWTR_EL2, 0, - HFGxTR_EL2_nSMPRI_EL1_MASK | - HFGxTR_EL2_nTPIDR2_EL0_MASK); - } + if (__hfgxtr_traps_required()) + __deactivate_traps_hfgxtr(); } static inline void ___activate_traps(struct kvm_vcpu *vcpu) -- cgit From 082fdfd13841fa4e38a8b073561d182e195d528c Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Fri, 9 Jun 2023 22:01:04 +0000 Subject: KVM: arm64: Prevent guests from enabling HA/HD on Ampere1 An erratum in the HAFDBS implementation in AmpereOne was addressed by clearing the feature in the ID register, with the expectation that software would not attempt to use the corresponding controls in TCR_EL1. The architecture, on the other hand, takes a much more pedantic stance on the subject, requiring the TCR bits behave as RES0. Take an extremely conservative stance on the issue and leverage the precise write trap afforded by FGT. Handle guest writes by clearing HA and HD before writing the intended value to the EL1 register alias. 
Link: https://lore.kernel.org/r/20230609220104.1836988-4-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/include/hyp/switch.h | 39 +++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) (limited to 'arch/arm64/kvm/hyp') diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 901e47a49070..04cc34dd4275 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -75,6 +75,9 @@ static inline bool __hfgxtr_traps_required(void) if (cpus_have_final_cap(ARM64_SME)) return true; + if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38)) + return true; + return false; } @@ -89,6 +92,12 @@ static inline void __activate_traps_hfgxtr(void) w_clr |= tmp; } + /* + * Trap guest writes to TCR_EL1 to prevent it from enabling HA or HD. + */ + if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38)) + w_set |= HFGxTR_EL2_TCR_EL1_MASK; + sysreg_clear_set_s(SYS_HFGRTR_EL2, r_clr, r_set); sysreg_clear_set_s(SYS_HFGWTR_EL2, w_clr, w_set); } @@ -104,6 +113,9 @@ static inline void __deactivate_traps_hfgxtr(void) w_set |= tmp; } + if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38)) + w_clr |= HFGxTR_EL2_TCR_EL1_MASK; + sysreg_clear_set_s(SYS_HFGRTR_EL2, r_clr, r_set); sysreg_clear_set_s(SYS_HFGWTR_EL2, w_clr, w_set); } @@ -408,12 +420,39 @@ static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu) return true; } +static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu) +{ + u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu)); + int rt = kvm_vcpu_sys_get_rt(vcpu); + u64 val = vcpu_get_reg(vcpu, rt); + + if (sysreg != SYS_TCR_EL1) + return false; + + /* + * Affected parts do not advertise support for hardware Access Flag / + * Dirty state management in ID_AA64MMFR1_EL1.HAFDBS, but the underlying + * control bits are still functional. The architecture requires these be + * RES0 on systems that do not implement FEAT_HAFDBS. + * + * Uphold the requirements of the architecture by masking guest writes + * to TCR_EL1.{HA,HD} here. + */ + val &= ~(TCR_HD | TCR_HA); + write_sysreg_el1(val, SYS_TCR); + return true; +} + static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code) { if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) && handle_tx2_tvm(vcpu)) return true; + if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) && + handle_ampere1_tcr(vcpu)) + return true; + if (static_branch_unlikely(&vgic_v3_cpuif_trap) && __vgic_v3_perform_cpuif_access(vcpu) == 1) return true; -- cgit
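
As an illustration of the E2H-dependent register layout that several of the commits above rely on: when HCR_EL2.E2H is set (VHE or hVHE), CNTHCTL_EL2 takes the VHE layout, so the EL1 physical counter/timer enable bits sit at bits [11:10] instead of bits [1:0], which is why hyp-init.S and timer-sr.c shift their masks left by 10. The following standalone C sketch merely restates that convention; the CNTHCTL_* values mirror the kernel macros used in the diffs, and the helper name is invented for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's CNTHCTL_EL1PCTEN/CNTHCTL_EL1PCEN bit positions
 * for the non-E2H layout (illustrative definitions, not kernel code). */
#define CNTHCTL_EL1PCTEN	(1ULL << 0)	/* EL1 physical counter access */
#define CNTHCTL_EL1PCEN		(1ULL << 1)	/* EL1 physical timer access */

/* Return the CNTHCTL_EL2 value with EL1 physical counter/timer access
 * enabled, placing the enable bits according to the E2H layout. */
static uint64_t cnthctl_allow_el1_phys_timer(uint64_t cnthctl, bool e2h)
{
	uint64_t mask = CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;

	if (e2h)
		mask <<= 10;	/* E2H layout: same controls at bits [11:10] */

	return cnthctl | mask;
}

int main(void)
{
	printf("nVHE layout: %#llx, E2H/hVHE layout: %#llx\n",
	       (unsigned long long)cnthctl_allow_el1_phys_timer(0, false),
	       (unsigned long long)cnthctl_allow_el1_phys_timer(0, true));
	return 0;
}

The CPTR_EL2 rework in the hVHE series follows the same pattern: with E2H set, EL2 consumes the CPACR_EL1 bit layout, so the nVHE code selects its trap masks based on has_hvhe().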
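
Similarly, the FF-A memory-management commits pass the 64-bit memory transaction handle through two 32-bit SMCCC registers (handle_lo in w1, handle_hi in w2) and rebuild it with PACK_HANDLE() in do_ffa_mem_reclaim(). That macro is defined elsewhere in ffa.c and is not visible in these diffs, so the sketch below only assumes the usual low-word/high-word split; the helper names are invented for the example.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed convention: low half of the handle in the first register,
 * high half in the second, matching how do_ffa_mem_reclaim() consumes
 * handle_lo (w1) and handle_hi (w2). */
static uint64_t pack_ffa_handle(uint32_t handle_lo, uint32_t handle_hi)
{
	return ((uint64_t)handle_hi << 32) | handle_lo;
}

static void unpack_ffa_handle(uint64_t handle, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)handle;
	*hi = (uint32_t)(handle >> 32);
}

int main(void)
{
	uint32_t lo, hi;
	uint64_t h = pack_ffa_handle(0x89abcdef, 0x01234567);

	unpack_ffa_handle(h, &lo, &hi);
	printf("handle=%#" PRIx64 " lo=%#x hi=%#x\n", h, lo, hi);
	return 0;
}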