Diffstat (limited to 'arch/parisc/mm/fault.c')
| -rw-r--r-- | arch/parisc/mm/fault.c | 297 |
1 file changed, 212 insertions, 85 deletions
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 5b101f6a5607..f1785640b049 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -17,9 +17,13 @@
 #include <linux/interrupt.h>
 #include <linux/extable.h>
 #include <linux/uaccess.h>
+#include <linux/hugetlb.h>
+#include <linux/perf_event.h>
 
 #include <asm/traps.h>
 
+#define DEBUG_NATLB 0
+
 /* Various important other fields */
 #define bit22set(x)		(x & 0x00000200)
 #define bits23_25set(x)		(x & 0x000001c0)
@@ -34,7 +38,7 @@ int show_unhandled_signals = 1;
 
 /*
  * parisc_acctyp(unsigned int inst) --
  *	Given a PA-RISC memory access instruction, determine if the
- *	the instruction would perform a memory read or memory write
+ *	instruction would perform a memory read or memory write
  *	operation.
  *
@@ -46,7 +50,7 @@ int show_unhandled_signals = 1;
  *	VM_WRITE if write operation
  *	VM_EXEC  if execute operation
  */
-static unsigned long
+unsigned long
 parisc_acctyp(unsigned long code, unsigned int inst)
 {
 	if (code == 6 || code == 16)
@@ -65,6 +69,7 @@ parisc_acctyp(unsigned long code, unsigned int inst)
 	case 0x30000000: /* coproc2 */
 		if (bit22set(inst))
 			return VM_WRITE;
+		fallthrough;
 
 	case 0x0: /* indexed/memory management */
 		if (bit22set(inst)) {
@@ -145,11 +150,16 @@ int fixup_exception(struct pt_regs *regs)
 		 * Fix up get_user() and put_user().
 		 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
 		 * bit in the relative address of the fixup routine to indicate
-		 * that %r8 should be loaded with -EFAULT to report a userspace
+		 * that the register encoded in the "or %r0,%r0,register"
+		 * opcode should be loaded with -EFAULT to report a userspace
 		 * access error.
 		 */
 		if (fix->fixup & 1) {
-			regs->gr[8] = -EFAULT;
+			int fault_error_reg = fix->err_opcode & 0x1f;
+			if (!WARN_ON(!fault_error_reg))
+				regs->gr[fault_error_reg] = -EFAULT;
+			pr_debug("Unalignment fixup of register %d at %pS\n",
+				fault_error_reg, (void*)regs->iaoq[0]);
 
 			/* zero target register for get_user() */
 			if (parisc_acctyp(0, regs->iir) == VM_READ) {
@@ -187,31 +197,31 @@ int fixup_exception(struct pt_regs *regs)
  * For implementation see handle_interruption() in traps.c
  */
 static const char * const trap_description[] = {
-	[1] "High-priority machine check (HPMC)",
-	[2] "Power failure interrupt",
-	[3] "Recovery counter trap",
-	[5] "Low-priority machine check",
-	[6] "Instruction TLB miss fault",
-	[7] "Instruction access rights / protection trap",
-	[8] "Illegal instruction trap",
-	[9] "Break instruction trap",
-	[10] "Privileged operation trap",
-	[11] "Privileged register trap",
-	[12] "Overflow trap",
-	[13] "Conditional trap",
-	[14] "FP Assist Exception trap",
-	[15] "Data TLB miss fault",
-	[16] "Non-access ITLB miss fault",
-	[17] "Non-access DTLB miss fault",
-	[18] "Data memory protection/unaligned access trap",
-	[19] "Data memory break trap",
-	[20] "TLB dirty bit trap",
-	[21] "Page reference trap",
-	[22] "Assist emulation trap",
-	[25] "Taken branch trap",
-	[26] "Data memory access rights trap",
-	[27] "Data memory protection ID trap",
-	[28] "Unaligned data reference trap",
+	[1] = "High-priority machine check (HPMC)",
+	[2] = "Power failure interrupt",
+	[3] = "Recovery counter trap",
+	[5] = "Low-priority machine check",
+	[6] = "Instruction TLB miss fault",
+	[7] = "Instruction access rights / protection trap",
+	[8] = "Illegal instruction trap",
+	[9] = "Break instruction trap",
+	[10] = "Privileged operation trap",
+	[11] = "Privileged register trap",
+	[12] = "Overflow trap",
+	[13] = "Conditional trap",
+	[14] = "FP Assist Exception trap",
+	[15] = "Data TLB miss fault",
+	[16] = "Non-access ITLB miss fault",
+	[17] = "Non-access DTLB miss fault",
+	[18] = "Data memory protection/unaligned access trap",
+	[19] = "Data memory break trap",
+	[20] = "TLB dirty bit trap",
+	[21] = "Page reference trap",
+	[22] = "Assist emulation trap",
+	[25] = "Taken branch trap",
+	[26] = "Data memory access rights trap",
+	[27] = "Data memory protection ID trap",
+	[28] = "Unaligned data reference trap",
 };
 
 const char *trap_name(unsigned long code)
@@ -261,36 +271,41 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	unsigned long acc_type;
-	int fault;
+	vm_fault_t fault = 0;
 	unsigned int flags;
-
-	if (faulthandler_disabled())
-		goto no_context;
+	char *msg;
 
 	tsk = current;
 	mm = tsk->mm;
-	if (!mm)
+	if (!mm) {
+		msg = "Page fault: no context";
 		goto no_context;
+	}
 
-	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+	flags = FAULT_FLAG_DEFAULT;
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
 
 	acc_type = parisc_acctyp(code, regs->iir);
 	if (acc_type & VM_WRITE)
 		flags |= FAULT_FLAG_WRITE;
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 	vma = find_vma_prev(mm, address, &prev_vma);
-	if (!vma || address < vma->vm_start)
-		goto check_expansion;
+	if (!vma || address < vma->vm_start) {
+		if (!prev_vma || !(prev_vma->vm_flags & VM_GROWSUP))
+			goto bad_area;
+		vma = expand_stack(mm, address);
+		if (!vma)
+			goto bad_area_nosemaphore;
+	}
+
 /*
  * Ok, we have a good vm_area for this memory access. We still need to
  * check the access permissions.
  */
 
-good_area:
-
 	if ((vma->vm_flags & acc_type) != acc_type)
 		goto bad_area;
 
@@ -300,9 +315,18 @@ good_area:
 	 * fault.
 	 */
 
-	fault = handle_mm_fault(vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs)) {
+			msg = "Page fault: fault signal on kernel memory";
+			goto no_context;
+		}
+		return;
+	}
+
+	/* The fault is fully completed (including releasing mmap lock) */
+	if (fault & VM_FAULT_COMPLETED)
 		return;
 
 	if (unlikely(fault & VM_FAULT_ERROR)) {
@@ -315,82 +339,95 @@ good_area:
 			goto out_of_memory;
 		else if (fault & VM_FAULT_SIGSEGV)
 			goto bad_area;
-		else if (fault & VM_FAULT_SIGBUS)
+		else if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
+				  VM_FAULT_HWPOISON_LARGE))
 			goto bad_area;
 		BUG();
 	}
-	if (flags & FAULT_FLAG_ALLOW_RETRY) {
-		if (fault & VM_FAULT_MAJOR)
-			current->maj_flt++;
-		else
-			current->min_flt++;
-		if (fault & VM_FAULT_RETRY) {
-			flags &= ~FAULT_FLAG_ALLOW_RETRY;
-
-			/*
-			 * No need to up_read(&mm->mmap_sem) as we would
-			 * have already released it in __lock_page_or_retry
-			 * in mm/filemap.c.
-			 */
-
-			goto retry;
-		}
+	if (fault & VM_FAULT_RETRY) {
+		/*
+		 * No need to mmap_read_unlock(mm) as we would
+		 * have already released it in __lock_page_or_retry
+		 * in mm/filemap.c.
+		 */
+		flags |= FAULT_FLAG_TRIED;
+		goto retry;
 	}
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	return;
 
-check_expansion:
-	vma = prev_vma;
-	if (vma && (expand_stack(vma, address) == 0))
-		goto good_area;
-
 /*
  * Something tried to access memory that isn't in our memory map..
  */
 bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
-	if (user_mode(regs)) {
-		struct siginfo si;
+bad_area_nosemaphore:
+	if (!user_mode(regs) && fixup_exception(regs)) {
+		return;
+	}
 
-		show_signal_msg(regs, code, address, tsk, vma);
+	if (user_mode(regs)) {
+		int signo, si_code;
 
 		switch (code) {
 		case 15:	/* Data TLB miss fault/Data page fault */
 			/* send SIGSEGV when outside of vma */
 			if (!vma ||
 			    address < vma->vm_start || address >= vma->vm_end) {
-				si.si_signo = SIGSEGV;
-				si.si_code = SEGV_MAPERR;
+				signo = SIGSEGV;
+				si_code = SEGV_MAPERR;
 				break;
 			}
 
 			/* send SIGSEGV for wrong permissions */
 			if ((vma->vm_flags & acc_type) != acc_type) {
-				si.si_signo = SIGSEGV;
-				si.si_code = SEGV_ACCERR;
+				signo = SIGSEGV;
+				si_code = SEGV_ACCERR;
 				break;
 			}
 
 			/* probably address is outside of mapped file */
-			/* fall through */
+			fallthrough;
 		case 17:	/* NA data TLB miss / page fault */
 		case 18:	/* Unaligned access - PCXS only */
-			si.si_signo = SIGBUS;
-			si.si_code = (code == 18) ? BUS_ADRALN : BUS_ADRERR;
+			signo = SIGBUS;
+			si_code = (code == 18) ? BUS_ADRALN : BUS_ADRERR;
 			break;
 		case 16:	/* Non-access instruction TLB miss fault */
 		case 26:	/* PCXL: Data memory access rights trap */
 		default:
-			si.si_signo = SIGSEGV;
-			si.si_code = (code == 26) ? SEGV_ACCERR : SEGV_MAPERR;
+			signo = SIGSEGV;
+			si_code = (code == 26) ? SEGV_ACCERR : SEGV_MAPERR;
 			break;
 		}
-		si.si_errno = 0;
-		si.si_addr = (void __user *) address;
-		force_sig_info(si.si_signo, &si, current);
+#ifdef CONFIG_MEMORY_FAILURE
+		if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
+			unsigned int lsb = 0;
+			printk(KERN_ERR
+	"MCE: Killing %s:%d due to hardware memory corruption fault at %08lx\n",
+			tsk->comm, tsk->pid, address);
+			/*
+			 * Either small page or large page may be poisoned.
+			 * In other words, VM_FAULT_HWPOISON_LARGE and
+			 * VM_FAULT_HWPOISON are mutually exclusive.
			 */
+			if (fault & VM_FAULT_HWPOISON_LARGE)
+				lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
+			else if (fault & VM_FAULT_HWPOISON)
+				lsb = PAGE_SHIFT;
+
+			force_sig_mceerr(BUS_MCEERR_AR, (void __user *) address,
+					 lsb);
+			return;
+		}
+#endif
+		show_signal_msg(regs, code, address, tsk, vma);
+
+		force_sig_fault(signo, si_code, (void __user *) address);
 		return;
 	}
+	msg = "Page fault: bad address";
 
 no_context:
 
@@ -398,11 +435,101 @@ no_context:
 		return;
 	}
 
-	parisc_terminate("Bad Address (null pointer deref?)", regs, code, address);
+	parisc_terminate(msg, regs, code, address);
 
-  out_of_memory:
-	up_read(&mm->mmap_sem);
-	if (!user_mode(regs))
+out_of_memory:
+	mmap_read_unlock(mm);
+	if (!user_mode(regs)) {
+		msg = "Page fault: out of memory";
 		goto no_context;
+	}
 	pagefault_out_of_memory();
 }
+
+/* Handle non-access data TLB miss faults.
+ *
+ * For probe instructions, accesses to userspace are considered allowed
+ * if they lie in a valid VMA and the access type matches. We are not
+ * allowed to handle MM faults here so there may be situations where an
+ * actual access would fail even though a probe was successful.
+ */
+int
+handle_nadtlb_fault(struct pt_regs *regs)
+{
+	unsigned long insn = regs->iir;
+	int breg, treg, xreg, val = 0;
+	struct vm_area_struct *vma;
+	struct task_struct *tsk;
+	struct mm_struct *mm;
+	unsigned long address;
+	unsigned long acc_type;
+
+	switch (insn & 0x380) {
+	case 0x280:
+		/* FDC instruction */
+		fallthrough;
+	case 0x380:
+		/* PDC and FIC instructions */
+		if (DEBUG_NATLB && printk_ratelimit()) {
+			pr_warn("WARNING: nullifying cache flush/purge instruction\n");
+			show_regs(regs);
+		}
+		if (insn & 0x20) {
+			/* Base modification */
+			breg = (insn >> 21) & 0x1f;
+			xreg = (insn >> 16) & 0x1f;
+			if (breg && xreg)
+				regs->gr[breg] += regs->gr[xreg];
+		}
+		regs->gr[0] |= PSW_N;
+		return 1;
+
+	case 0x180:
+		/* PROBE instruction */
+		treg = insn & 0x1f;
+		if (regs->isr) {
+			tsk = current;
+			mm = tsk->mm;
+			if (mm) {
+				/* Search for VMA */
+				address = regs->ior;
+				mmap_read_lock(mm);
+				vma = vma_lookup(mm, address);
+				mmap_read_unlock(mm);
+
+				/*
+				 * Check if access to the VMA is okay.
+				 * We don't allow for stack expansion.
+				 */
+				acc_type = (insn & 0x40) ? VM_WRITE : VM_READ;
+				if (vma
+				    && (vma->vm_flags & acc_type) == acc_type)
+					val = 1;
+			}
+		}
+		if (treg)
+			regs->gr[treg] = val;
+		regs->gr[0] |= PSW_N;
+		return 1;
+
+	case 0x300:
+		/* LPA instruction */
+		if (insn & 0x20) {
+			/* Base modification */
+			breg = (insn >> 21) & 0x1f;
+			xreg = (insn >> 16) & 0x1f;
+			if (breg && xreg)
+				regs->gr[breg] += regs->gr[xreg];
+		}
+		treg = insn & 0x1f;
+		if (treg)
+			regs->gr[treg] = 0;
+		regs->gr[0] |= PSW_N;
+		return 1;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
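
The fixup_exception() hunk above stops hard-coding %r8: the exception table entry now carries a dummy "or %r0,%r0,register" opcode, and the handler masks its low five bits (fix->err_opcode & 0x1f) to find the register that should receive -EFAULT. The stand-alone C sketch below only replays that masking step for illustration; it is plain user-space code, not kernel code, and the sample opcode value is invented rather than taken from a real exception table.

/*
 * Minimal user-space sketch of the err_opcode masking done by the
 * patched fixup_exception().  The opcode value is hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t err_opcode = 0x0800025c;	/* made-up "or %r0,%r0,reg" word */
	unsigned int fault_error_reg = err_opcode & 0x1f;	/* low 5 bits = register number */

	if (fault_error_reg)
		printf("would load -EFAULT into gr[%u]\n", fault_error_reg);
	else
		printf("register 0 encoded - fixup entry is malformed\n");
	return 0;
}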
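
The trap_description[] hunk converts the old GNU-style "[n] "text"" initializers to standard C99 designated initializers ("[n] = "text""); the array stays sparse (indices 0, 4, 23 and 24 are never initialized), and unnamed slots default to NULL. Below is a small user-space illustration of the same pattern with a bounds-checked lookup; the strings are placeholders, and how the kernel's trap_name() (only its signature is visible in this diff) actually guards the array is an assumption here.

/* Sparse C99 designated-initializer array plus a guarded lookup. */
#include <stdio.h>

static const char * const desc[] = {
	[1] = "first",
	[3] = "third",		/* index 2 is intentionally left NULL */
};

static const char *name(unsigned long code)
{
	const char *t = NULL;

	if (code < sizeof(desc) / sizeof(desc[0]))
		t = desc[code];
	return t ? t : "reserved";
}

int main(void)
{
	printf("%s %s %s\n", name(1), name(2), name(99));	/* first reserved reserved */
	return 0;
}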
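
The new handle_nadtlb_fault() dispatches on bit fields of the faulting instruction word: insn & 0x380 selects the instruction class, (insn >> 21) & 0x1f and (insn >> 16) & 0x1f pick the base and index registers used for base modification, insn & 0x1f is the target register, insn & 0x40 distinguishes a write probe from a read probe, and insn & 0x20 flags base modification. The user-space sketch below just replays those masks on a sample word; the sample value is fabricated for illustration and is not claimed to be a valid PA-RISC encoding.

/* User-space sketch of the field extraction used by handle_nadtlb_fault(). */
#include <stdio.h>
#include <stdint.h>

static void decode(uint32_t insn)
{
	unsigned int insn_class = insn & 0x380;		/* 0x180 PROBE, 0x280 FDC, 0x300 LPA, 0x380 PDC/FIC */
	unsigned int breg = (insn >> 21) & 0x1f;	/* base register */
	unsigned int xreg = (insn >> 16) & 0x1f;	/* index register */
	unsigned int treg = insn & 0x1f;		/* target register */

	printf("class=0x%03x breg=%u xreg=%u treg=%u base-modify=%s probe-type=%s\n",
	       insn_class, breg, xreg, treg,
	       (insn & 0x20) ? "yes" : "no",
	       (insn & 0x40) ? "write" : "read");
}

int main(void)
{
	decode(0x00a701c3);	/* hypothetical word that lands in the PROBE case */
	return 0;
}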
