From 90f6e150e44a0dc3883110eeb3ab35d1be42b6bb Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Thu, 16 Mar 2017 18:20:49 +0000
Subject: arm/arm64: KVM: Take mmap_sem in stage2_unmap_vm

We don't hold the mmap_sem while searching for the VMAs when we try to
unmap each memslot for a VM. Fix this properly to avoid unexpected
results.

Fixes: commit 957db105c997 ("arm/arm64: KVM: Introduce stage2_unmap_vm")
Cc: stable@vger.kernel.org # v3.19+
Reviewed-by: Christoffer Dall
Signed-off-by: Suzuki K Poulose
Signed-off-by: Marc Zyngier
---
 arch/arm/kvm/mmu.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/arm')

diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 962616fd4ddd..f2e2e0c6d6fd 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -803,6 +803,7 @@ void stage2_unmap_vm(struct kvm *kvm)
 	int idx;

 	idx = srcu_read_lock(&kvm->srcu);
+	down_read(&current->mm->mmap_sem);
 	spin_lock(&kvm->mmu_lock);

 	slots = kvm_memslots(kvm);
@@ -810,6 +811,7 @@ void stage2_unmap_vm(struct kvm *kvm)
 		stage2_unmap_memslot(kvm, memslot);

 	spin_unlock(&kvm->mmu_lock);
+	up_read(&current->mm->mmap_sem);
 	srcu_read_unlock(&kvm->srcu, idx);
 }
-- cgit

From 72f310481a08db821b614e7b5d00febcc9064b36 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Thu, 16 Mar 2017 18:20:50 +0000
Subject: arm/arm64: KVM: Take mmap_sem in kvm_arch_prepare_memory_region

We don't hold the mmap_sem while searching for VMAs (via find_vma) in
kvm_arch_prepare_memory_region, which can end up in unexpected
failures.

Fixes: commit 8eef91239e57 ("arm/arm64: KVM: map MMIO regions at creation time")
Cc: Ard Biesheuvel
Cc: Eric Auger
Cc: stable@vger.kernel.org # v3.18+
Reviewed-by: Christoffer Dall
[ Handle dirty page logging failure case ]
Signed-off-by: Suzuki K Poulose
Signed-off-by: Marc Zyngier
---
 arch/arm/kvm/mmu.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index f2e2e0c6d6fd..13b9c1fa8961 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1803,6 +1803,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	    (KVM_PHYS_SIZE >> PAGE_SHIFT))
 		return -EFAULT;

+	down_read(&current->mm->mmap_sem);
 	/*
 	 * A memory region could potentially cover multiple VMAs, and any holes
 	 * between them, so iterate over all of them to find out if we can map
@@ -1846,8 +1847,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 			pa += vm_start - vma->vm_start;

 			/* IO region dirty page logging not allowed */
-			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
-				return -EINVAL;
+			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+				ret = -EINVAL;
+				goto out;
+			}

 			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
 						    vm_end - vm_start,
@@ -1859,7 +1862,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	} while (hva < reg_end);

 	if (change == KVM_MR_FLAGS_ONLY)
-		return ret;
+		goto out;

 	spin_lock(&kvm->mmu_lock);
 	if (ret)
@@ -1867,6 +1870,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	else
 		stage2_flush_memslot(kvm, memslot);
 	spin_unlock(&kvm->mmu_lock);
+out:
+	up_read(&current->mm->mmap_sem);
 	return ret;
 }
-- cgit
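Both fixes above enforce the same rule: find_vma() and any VMA pointers derived from it are only stable while mm->mmap_sem is held for reading, and every exit path out of the locked region must drop the lock again (which is what the goto out rework in the second patch is about). A minimal sketch of the pattern, assuming a kernel of this vintage (mmap_sem has since been renamed mmap_lock); the helper name is made up for illustration:

  #include <linux/mm.h>
  #include <linux/sched.h>

  /* Hypothetical helper: visit every VMA overlapping [start, end). */
  static int visit_user_range(unsigned long start, unsigned long end)
  {
          struct mm_struct *mm = current->mm;
          struct vm_area_struct *vma;
          int ret = 0;

          down_read(&mm->mmap_sem);       /* VMA list is stable from here */
          for (vma = find_vma(mm, start); vma && vma->vm_start < end;
               vma = vma->vm_next) {
                  /* ... use the VMA; on error set ret and break ... */
          }
          up_read(&mm->mmap_sem);         /* single exit drops the lock */
          return ret;
  }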
From f3fbd7ec62dec1528fb8044034e2885f2b257941 Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Tue, 14 Feb 2017 00:03:38 +0900
Subject: arm: kprobes: Allow to handle reentered kprobe on single-stepping

This is the arm port of commit 6a5022a56ac3 ("kprobes/x86: Allow to
handle reentered kprobe on single-stepping").

Since FIQ handlers can interrupt single-stepping (or the preparation of
single-stepping, do_debug etc.), we have to consider that a kprobe may
be hit in an NMI handler. Even in that case, the kprobe is allowed to
be reentered, just like kprobes hit in kprobe handlers
(KPROBE_HIT_ACTIVE or KPROBE_HIT_SSDONE).

The real issue arises when a kprobe is hit while another reentered
kprobe is being processed (KPROBE_REENTER), because we have already
consumed the save area for the previous kprobe.

Signed-off-by: Masami Hiramatsu
Signed-off-by: Jon Medhurst
---
 arch/arm/probes/kprobes/core.c | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'arch/arm')

diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index b6dc9d838a9a..35148b46c4f5 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -271,6 +271,7 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
 			switch (kcb->kprobe_status) {
 			case KPROBE_HIT_ACTIVE:
 			case KPROBE_HIT_SSDONE:
+			case KPROBE_HIT_SS:
 				/* A pre- or post-handler probe got us here. */
 				kprobes_inc_nmissed_count(p);
 				save_previous_kprobe(kcb);
@@ -279,6 +280,11 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
 				singlestep(p, regs, kcb);
 				restore_previous_kprobe(kcb);
 				break;
+			case KPROBE_REENTER:
+				/* A nested probe was hit in FIQ, it is a BUG */
+				pr_warn("Unrecoverable kprobe detected at %p.\n",
+					p->addr);
+				/* fall through */
 			default:
 				/* impossible cases */
 				BUG();
-- cgit

From 91fc862c613ab7a0ef6b0b7755c33619127f4e5a Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Tue, 14 Feb 2017 00:04:48 +0900
Subject: arm: kprobes: Skip single-stepping in recursing path if possible

Kprobes/arm skips single-stepping (and indeed handling the event at
all) if the conditional instruction must not be executed. This patch
also applies that rule when we hit a recursing kprobe, so that the
kprobe does not bump its nmissed count in that case.

Signed-off-by: Masami Hiramatsu
Signed-off-by: Jon Medhurst
---
 arch/arm/probes/kprobes/core.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index 35148b46c4f5..269f66e66ff5 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -266,7 +266,15 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
 #endif

 	if (p) {
-		if (cur) {
+		if (!p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
+			/*
+			 * Probe hit but conditional execution check failed,
+			 * so just skip the instruction and continue as if
+			 * nothing had happened.
+			 * In this case, we can skip recursing check too.
+			 */
+			singlestep_skip(p, regs);
+		} else if (cur) {
 			/* Kprobe is pending, so we're recursing. */
 			switch (kcb->kprobe_status) {
 			case KPROBE_HIT_ACTIVE:
@@ -289,7 +297,7 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
 				/* impossible cases */
 				BUG();
 			}
-		} else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
+		} else {
 			/* Probe hit and conditional execution check ok. */
 			set_current_kprobe(p);
 			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
@@ -310,13 +318,6 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
 				}
 				reset_current_kprobe();
 			}
-		} else {
-			/*
-			 * Probe hit but conditional execution check failed,
-			 * so just skip the instruction and continue as if
-			 * nothing had happened.
-			 */
-			singlestep_skip(p, regs);
 		}
 	} else if (cur) {
 		/* We probably hit a jprobe.  Call its break handler. */
-- cgit
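The dispatch order the two kprobes patches above leave behind is easy to misread, so here is a toy user-space model of the resulting state machine. Everything in it is invented for illustration; it compiles and runs on its own, but it is not kernel code:

  #include <stdio.h>

  enum status { NONE, ACTIVE, REENTER };  /* like kcb->kprobe_status */

  static enum status st = NONE;

  static void probe_hit(int cond_ok)
  {
          if (!cond_ok) {         /* conditional insn would not execute: */
                  puts("skip insn, no reentrancy bookkeeping");
                  return;         /* mirrors the singlestep_skip() path */
          }
          switch (st) {
          case NONE:              /* normal hit */
                  st = ACTIVE;
                  puts("run handlers, single-step");
                  st = NONE;
                  break;
          case ACTIVE:            /* reentered, e.g. from an FIQ handler */
                  st = REENTER;   /* the single save area is now in use */
                  puts("save previous kprobe, single-step, restore");
                  st = ACTIVE;
                  break;
          case REENTER:           /* save area already consumed */
                  puts("BUG: unrecoverable nested kprobe");
                  break;
          }
  }

  int main(void)
  {
          probe_hit(0);           /* condition false: skipped outright */
          probe_hit(1);           /* normal hit */
          return 0;
  }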
From 06553175f585b52509c7df37d6f4a50aacb7b211 Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Tue, 14 Feb 2017 00:05:59 +0900
Subject: arm: kprobes: Fix the return address of multiple kretprobes

This is the arm port of commit 737480a0d525 ("kprobes/x86: Fix the
return address of multiple kretprobes").

Fix the return address of subsequent kretprobes when multiple
kretprobes are set on the same function. For example:

 # cd /sys/kernel/debug/tracing
 # echo "r:event1 sys_symlink" > kprobe_events
 # echo "r:event2 sys_symlink" >> kprobe_events
 # echo 1 > events/kprobes/enable
 # ln -s /tmp/foo /tmp/bar

(without this patch)

 # cat trace | grep -v ^#
 ln-82 [000] dn.2 68.446525: event1: (kretprobe_trampoline+0x0/0x18 <- SyS_symlink)
 ln-82 [000] dn.2 68.447831: event2: (ret_fast_syscall+0x0/0x1c <- SyS_symlink)

(with this patch)

 # cat trace | grep -v ^#
 ln-81 [000] dn.1 39.463469: event1: (ret_fast_syscall+0x0/0x1c <- SyS_symlink)
 ln-81 [000] dn.1 39.464701: event2: (ret_fast_syscall+0x0/0x1c <- SyS_symlink)

Signed-off-by: Masami Hiramatsu
Cc: KUMANO Syuhei
Signed-off-by: Jon Medhurst
---
 arch/arm/probes/kprobes/core.c | 24 ++++++++++++++++++++++--
 1 file changed, 22 insertions(+), 2 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index 269f66e66ff5..ad1f4e6a9e33 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -441,6 +441,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+	kprobe_opcode_t *correct_ret_addr = NULL;

 	INIT_HLIST_HEAD(&empty_rp);
 	kretprobe_hash_lock(current, &head, &flags);
@@ -463,14 +464,34 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 			/* another task is sharing our hash bucket */
 			continue;

+		orig_ret_address = (unsigned long)ri->ret_addr;
+
+		if (orig_ret_address != trampoline_address)
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+	}
+
+	kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+	correct_ret_addr = ri->ret_addr;
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
 		if (ri->rp && ri->rp->handler) {
 			__this_cpu_write(current_kprobe, &ri->rp->kp);
 			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+			ri->ret_addr = correct_ret_addr;
 			ri->rp->handler(ri, regs);
 			__this_cpu_write(current_kprobe, NULL);
 		}

-		orig_ret_address = (unsigned long)ri->ret_addr;
 		recycle_rp_inst(ri, &empty_rp);

 		if (orig_ret_address != trampoline_address)
@@ -482,7 +503,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 			break;
 	}

-	kretprobe_assert(ri, orig_ret_address, trampoline_address);
 	kretprobe_hash_unlock(current, &flags);

 	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
-- cgit

From 974310d047f3c7788a51d10c8d255eebdb1fa857 Mon Sep 17 00:00:00 2001
From: Jon Medhurst
Date: Thu, 2 Mar 2017 13:04:09 +0000
Subject: arm: kprobes: Align stack to 8-bytes in test code

kprobes test cases need to have a stack that is aligned to an 8-byte
boundary, because they call other functions (and the ARM ABI mandates
that alignment) and because test cases include 64-bit accesses to the
stack. Unfortunately, GCC doesn't ensure this alignment for inline
assembler, and for the code in question it seems to always misalign
the stack by pushing just the LR register onto it. We therefore need
to explicitly perform stack alignment at the start of each test case.

Without this fix, some test cases will generate alignment faults on
systems where alignment is enforced. Even if the kernel is configured
to handle these faults in software, triggering them is ugly. It also
exposes limitations in the fault handling code, which doesn't cope
with writes to the stack. E.g. when handling this instruction

   strd r6, [sp, #-64]!

the fault handling code will write to a stack location below the SP
value at the point the fault occurred, which coincides with where the
exception handler has pushed the saved register context. This results
in corruption of those registers.

Signed-off-by: Jon Medhurst
---
 arch/arm/probes/kprobes/test-core.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c
index c893726aa52d..1c98a87786ca 100644
--- a/arch/arm/probes/kprobes/test-core.c
+++ b/arch/arm/probes/kprobes/test-core.c
@@ -977,7 +977,10 @@ static void coverage_end(void)
 void __naked __kprobes_test_case_start(void)
 {
 	__asm__ __volatile__ (
-	"stmdb	sp!, {r4-r11}	\n\t"
+	"mov	r2, sp		\n\t"
+	"bic	r3, r2, #7	\n\t"
+	"mov	sp, r3		\n\t"
+	"stmdb	sp!, {r2-r11}	\n\t"
 	"sub	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
 	"bic	r0, lr, #1  @ r0 = inline data		\n\t"
 	"mov	r1, sp		\n\t"
@@ -997,7 +1000,8 @@ void __naked __kprobes_test_case_end_32(void)
 	"movne	pc, r0		\n\t"
 	"mov	r0, r4		\n\t"
 	"add	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
-	"ldmia	sp!, {r4-r11}	\n\t"
+	"ldmia	sp!, {r2-r11}	\n\t"
+	"mov	sp, r2		\n\t"
 	"mov	pc, r0		\n\t"
 	);
 }
@@ -1013,7 +1017,8 @@ void __naked __kprobes_test_case_end_16(void)
 	"bxne	r0		\n\t"
 	"mov	r0, r4		\n\t"
 	"add	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
-	"ldmia	sp!, {r4-r11}	\n\t"
+	"ldmia	sp!, {r2-r11}	\n\t"
+	"mov	sp, r2		\n\t"
 	"bx	r0		\n\t"
 	);
 }
-- cgit
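The prologue/epilogue trick in the patch above is compact enough to miss: the original SP is copied to r2, SP is rounded down to a multiple of 8 with bic, and r2 is pushed as part of the register list, so the matching ldmia recovers the exact pre-alignment SP whether or not the entry SP was aligned. The same idiom in isolation; this is an illustrative sketch, and SP may only be juggled like this inside a __naked function, as in the patch:

  #include <linux/compiler.h>

  /* Demo only: align SP to 8 bytes, do aligned-stack work, restore SP. */
  void __naked aligned_stack_demo(void)
  {
          __asm__ __volatile__ (
          "mov    r2, sp          \n\t"  /* r2 = original (maybe odd) sp */
          "bic    r3, r2, #7      \n\t"  /* r3 = sp & ~7: round down to 8 */
          "mov    sp, r3          \n\t"  /* switch to the aligned stack */
          "stmdb  sp!, {r2-r11}   \n\t"  /* push r2: old sp rides along */
          /* ... work needing 8-byte alignment (calls, strd/ldrd) ... */
          "ldmia  sp!, {r2-r11}   \n\t"  /* pop: r2 = original sp again */
          "mov    sp, r2          \n\t"  /* restore the exact entry sp */
          "bx     lr              \n\t"
          );
  }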
From 916a008b4b8ecc02fbd035cfb133773dba1ff3d7 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Wed, 29 Mar 2017 17:12:47 +0100
Subject: ARM: dma-mapping: disallow dma_get_sgtable() for non-kernel managed memory

dma_get_sgtable() tries to create a scatterlist table containing valid
struct page pointers for the coherent memory allocation passed in to
it.

However, memory can be declared via dma_declare_coherent_memory(), or
via other reservation schemes, which means that coherent memory is not
guaranteed to be backed by struct pages. In such cases, the resulting
scatterlist table contains pointers to invalid pages, which causes a
kernel oops later.

This patch adds detection of such memory, and refuses to create a
scatterlist table for it.

Reported-by: Shuah Khan
Signed-off-by: Russell King
---
 arch/arm/mm/dma-mapping.c | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

(limited to 'arch/arm')

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 63eabb06f9f1..475811f5383a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -935,13 +935,31 @@ static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_add
 	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
 }

+/*
+ * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
+ * that the intention is to allow exporting memory allocated via the
+ * coherent DMA APIs through the dma_buf API, which only accepts a
+ * scattertable.  This presents a couple of problems:
+ * 1. Not all memory allocated via the coherent DMA APIs is backed by
+ *    a struct page
+ * 2. Passing coherent DMA memory into the streaming APIs is not allowed
+ *    as we will try to flush the memory through a different alias to that
+ *    actually being used (and the flushes are redundant.)
+ */
 int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 		 void *cpu_addr, dma_addr_t handle, size_t size,
 		 unsigned long attrs)
 {
-	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+	unsigned long pfn = dma_to_pfn(dev, handle);
+	struct page *page;
 	int ret;

+	/* If the PFN is not valid, we do not have a struct page */
+	if (!pfn_valid(pfn))
+		return -ENXIO;
+
+	page = pfn_to_page(pfn);
+
 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
 	if (unlikely(ret))
 		return ret;
-- cgit
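The invariant behind this fix is worth keeping in mind: pfn_to_page() is only meaningful for PFNs that pfn_valid() reports as covered by the kernel's memmap, and coherent memory set up via dma_declare_coherent_memory() or similar reservations can fail that test. A minimal sketch of the guard; the helper name is hypothetical:

  #include <linux/err.h>
  #include <linux/mm.h>

  /*
   * Hypothetical helper: PFN -> struct page, refusing memory that has
   * no memmap entry (and therefore no struct page) behind it.
   */
  static struct page *pfn_to_page_checked(unsigned long pfn)
  {
          if (!pfn_valid(pfn))
                  return ERR_PTR(-ENXIO); /* same errno as the patch */
          return pfn_to_page(pfn);
  }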
From 3cc070c1c81948b33ebe2ea68cd39307ce2b312d Mon Sep 17 00:00:00 2001
From: afzal mohammed
Date: Thu, 23 Mar 2017 13:49:32 +0100
Subject: ARM: 8665/1: nommu: access ID_PFR1 only if CPUID scheme

Greg reported a boot failure when trying to boot a no-MMU kernel on
ARM926EJ. He root-caused it to the ID_PFR1 access introduced by the
commit mentioned in the Fixes tag below.

Not all CP15 processors have processor feature registers; only
architectures defined by the CPUID scheme have them. Hence, check for
the CPUID scheme before accessing the processor feature register
ID_PFR1.

Fixes: f8300a0b5de0 ("ARM: 8647/2: nommu: dynamic exception base address setting")
Reported-by: Greg Ungerer
Signed-off-by: afzal mohammed
Tested-by: Greg Ungerer
Signed-off-by: Russell King
---
 arch/arm/mm/nommu.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'arch/arm')

diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 3b5c7aaf9c76..33a45bd96860 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -303,7 +303,10 @@ static inline void set_vbar(unsigned long val)
  */
 static inline bool security_extensions_enabled(void)
 {
-	return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
+	/* Check CPUID Identification Scheme before ID_PFR1 read */
+	if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
+		return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
+	return 0;
 }

 static unsigned long __init setup_vectors_base(void)
-- cgit
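The magic constant above decodes as follows: bits [19:16] of the Main ID Register (MIDR) hold the architecture field, and the value 0xF there means "architecture defined by the CPUID scheme", which is what guarantees that ID_PFR1 exists. A sketch with the masks named; the macro and helper names are illustrative, not kernel API:

  #include <linux/types.h>
  #include <asm/cputype.h>                /* read_cpuid_id() on ARM */

  #define MIDR_ARCH_MASK  0x000f0000      /* MIDR[19:16]: architecture */
  #define MIDR_ARCH_CPUID 0x000f0000      /* 0xF = CPUID ident. scheme */

  /* True iff the CPU uses the CPUID scheme, so ID_PFR1 is present. */
  static inline bool cpu_has_cpuid_scheme(void)
  {
          return (read_cpuid_id() & MIDR_ARCH_MASK) == MIDR_ARCH_CPUID;
  }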
From 8b3405e345b5a098101b0c31b264c812bba045d9 Mon Sep 17 00:00:00 2001
From: Suzuki K Poulose
Date: Mon, 3 Apr 2017 15:12:43 +0100
Subject: kvm: arm/arm64: Fix locking for kvm_free_stage2_pgd

In kvm_free_stage2_pgd() we don't hold the kvm->mmu_lock while calling
unmap_stage2_range() on the entire memory range for the guest. This
could cause problems with other callers (e.g., munmap on a memslot)
trying to unmap a range.

And since we have to unmap the entire guest memory range while holding
a spinlock, make sure we yield the lock if necessary, after we unmap
each PUD range.

Fixes: commit d5d8184d35c9 ("KVM: ARM: Memory virtualization setup")
Cc: stable@vger.kernel.org # v3.10+
Cc: Paolo Bonzini
Cc: Marc Zyngier
Cc: Christoffer Dall
Cc: Mark Rutland
Signed-off-by: Suzuki K Poulose
[ Avoid vCPU starvation and lockup detector warnings ]
Signed-off-by: Marc Zyngier
Signed-off-by: Suzuki K Poulose
Signed-off-by: Christoffer Dall
---
 arch/arm/kvm/mmu.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

(limited to 'arch/arm')

diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 13b9c1fa8961..582a972371cf 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -292,11 +292,18 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 	phys_addr_t addr = start, end = start + size;
 	phys_addr_t next;

+	assert_spin_locked(&kvm->mmu_lock);
 	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
 	do {
 		next = stage2_pgd_addr_end(addr, end);
 		if (!stage2_pgd_none(*pgd))
 			unmap_stage2_puds(kvm, pgd, addr, next);
+		/*
+		 * If the range is too large, release the kvm->mmu_lock
+		 * to prevent starvation and lockup detector warnings.
+		 */
+		if (next != end)
+			cond_resched_lock(&kvm->mmu_lock);
 	} while (pgd++, addr = next, addr != end);
 }

@@ -831,7 +838,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
 	if (kvm->arch.pgd == NULL)
 		return;

+	spin_lock(&kvm->mmu_lock);
 	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+	spin_unlock(&kvm->mmu_lock);
+
 	/* Free the HW pgd, one page at a time */
 	free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
 	kvm->arch.pgd = NULL;
-- cgit

From 5b0d2cc2805897c14257f6dbb949639c499c3c25 Mon Sep 17 00:00:00 2001
From: Christoffer Dall
Date: Sat, 18 Mar 2017 13:56:56 +0100
Subject: KVM: arm64: Ensure LRs are clear when they should be

We currently have some code to clear the list registers on GICv3, but
we never call this code, because the caller got nuked when removing
the old vgic. We also used to have a similar GICv2 part, but that got
lost in the process too.

Let's reintroduce the logic for GICv2 and call it when we initialize
the use of the hypervisor on the CPU, for example when first loading
KVM or when exiting a low power state.

Reviewed-by: Marc Zyngier
Signed-off-by: Christoffer Dall
Signed-off-by: Marc Zyngier
---
 arch/arm/kvm/arm.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch/arm')

diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 96dba7cd8be7..314eb6abe1ff 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1124,6 +1124,9 @@ static void cpu_hyp_reinit(void)
 		if (__hyp_get_vectors() == hyp_default_vectors)
 			cpu_init_hyp_mode(NULL);
 	}
+
+	if (vgic_present)
+		kvm_vgic_init_cpu_hardware();
 }

 static void cpu_hyp_reset(void)
-- cgit
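One idiom from this series deserves a closing note: the cond_resched_lock() pattern in the kvm_free_stage2_pgd fix applies to any loop that holds a spinlock for a potentially unbounded number of iterations. A generic sketch, assuming kernel context; the function is illustrative, not taken from the patches above:

  #include <linux/sched.h>
  #include <linux/spinlock.h>

  /*
   * Illustrative teardown loop: holds 'lock' across many chunks but
   * offers to drop it between chunks, so other lockers are not starved
   * and the soft-lockup detector stays quiet.
   */
  static void teardown_chunks(spinlock_t *lock, unsigned long nr)
  {
          unsigned long i;

          spin_lock(lock);
          for (i = 0; i < nr; i++) {
                  /* ... tear down chunk i under the lock ... */
                  if (i + 1 < nr)         /* like 'next != end' above */
                          cond_resched_lock(lock);
          }
          spin_unlock(lock);
  }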