Diffstat (limited to 'arch/m68k/kernel/sys_m68k.c')
 arch/m68k/kernel/sys_m68k.c | 55 ++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 36 insertions(+), 19 deletions(-)
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 3a480b3df0d6..1af5e6082467 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * linux/arch/m68k/kernel/sys_m68k.c
  *
@@ -22,10 +23,11 @@
 #include <linux/ipc.h>
 
 #include <asm/setup.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <asm/cachectl.h>
 #include <asm/traps.h>
 #include <asm/page.h>
+#include <asm/syscalls.h>
 #include <asm/unistd.h>
 #include <asm/cacheflush.h>
 
@@ -33,8 +35,7 @@
 #include <asm/tlb.h>
 
-asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
-                             unsigned long error_code);
+#include "../mm/fault.h"
 
 asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
         unsigned long prot, unsigned long flags,
         unsigned long fd, unsigned long pgoff)
@@ -45,7 +46,7 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
          * so we need to shift the argument down by 1; m68k mmap64(3)
          * (in libc) expects the last argument of mmap2 in 4Kb units.
          */
-        return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
+        return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
 }
 
 /* Convert virtual (user) address VADDR to physical address PADDR */
@@ -376,7 +377,6 @@ cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
 asmlinkage int
 sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
 {
-        struct vm_area_struct *vma;
         int ret = -EINVAL;
 
         if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
@@ -388,18 +388,23 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
                 ret = -EPERM;
                 if (!capable(CAP_SYS_ADMIN))
                         goto out;
+
+                mmap_read_lock(current->mm);
         } else {
+                struct vm_area_struct *vma;
+
+                /* Check for overflow.  */
+                if (addr + len < addr)
+                        goto out;
+
                 /*
                  * Verify that the specified address region actually belongs
                  * to this process.
                  */
-                vma = find_vma (current->mm, addr);
-                ret = -EINVAL;
-                /* Check for overflow.  */
-                if (addr + len < addr)
-                        goto out;
-                if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
-                        goto out;
+                mmap_read_lock(current->mm);
+                vma = vma_lookup(current->mm, addr);
+                if (!vma || addr + len > vma->vm_end)
+                        goto out_unlock;
         }
 
         if (CPU_IS_020_OR_030) {
@@ -429,7 +434,7 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
                         __asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
                 }
                 ret = 0;
-                goto out;
+                goto out_unlock;
         } else {
                 /*
                  * 040 or 060: don't blindly trust 'scope', someone could
@@ -446,6 +451,8 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
                         ret = cache_flush_060 (addr, scope, cache, len);
                 }
         }
+out_unlock:
+        mmap_read_unlock(current->mm);
 out:
         return ret;
 }
@@ -460,19 +467,29 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
         for (;;) {
                 struct mm_struct *mm = current->mm;
                 pgd_t *pgd;
+                p4d_t *p4d;
+                pud_t *pud;
                 pmd_t *pmd;
                 pte_t *pte;
                 spinlock_t *ptl;
                 unsigned long mem_value;
 
-                down_read(&mm->mmap_sem);
+                mmap_read_lock(mm);
                 pgd = pgd_offset(mm, (unsigned long)mem);
                 if (!pgd_present(*pgd))
                         goto bad_access;
-                pmd = pmd_offset(pgd, (unsigned long)mem);
+                p4d = p4d_offset(pgd, (unsigned long)mem);
+                if (!p4d_present(*p4d))
+                        goto bad_access;
+                pud = pud_offset(p4d, (unsigned long)mem);
+                if (!pud_present(*pud))
+                        goto bad_access;
+                pmd = pmd_offset(pud, (unsigned long)mem);
                 if (!pmd_present(*pmd))
                         goto bad_access;
                 pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
+                if (!pte)
+                        goto bad_access;
                 if (!pte_present(*pte) || !pte_dirty(*pte)
                     || !pte_write(*pte)) {
                         pte_unmap_unlock(pte, ptl);
@@ -488,11 +505,11 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
                 __put_user(newval, mem);
 
                 pte_unmap_unlock(pte, ptl);
-                up_read(&mm->mmap_sem);
+                mmap_read_unlock(mm);
                 return mem_value;
 
               bad_access:
-                up_read(&mm->mmap_sem);
+                mmap_read_unlock(mm);
                 /* This is not necessarily a bad access, we can get here if
                    a memory we're trying to write to should be copied-on-write.
                    Make the kernel do the necessary page stuff, then re-iterate.
@@ -532,13 +549,13 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
         struct mm_struct *mm = current->mm;
         unsigned long mem_value;
 
-        down_read(&mm->mmap_sem);
+        mmap_read_lock(mm);
 
         mem_value = *mem;
         if (mem_value == oldval)
                 *mem = newval;
 
-        up_read(&mm->mmap_sem);
+        mmap_read_unlock(mm);
         return mem_value;
 }
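Two API notes on the conversions above. mmap_read_lock()/mmap_read_unlock() are the wrappers that replaced open-coded down_read()/up_read() once mm->mmap_sem was renamed to mm->mmap_lock (v5.8), and the new p4d/pud steps in sys_atomic_cmpxchg_32 are the generic five-level page-table walk; on m68k the extra levels are folded, so p4d_offset()/pud_offset() are effectively pass-throughs. Below is a minimal kernel-context sketch of that walk pattern, assuming a recent kernel; the helper name walk_to_pte() is hypothetical and not part of this commit:

#include <linux/mm.h>
#include <linux/pgtable.h>

/*
 * Walk mm's page tables down to the PTE mapping addr, taking the PTE
 * spinlock on success.  Returns NULL if any level is absent; the
 * caller releases with pte_unmap_unlock(pte, *ptlp) when done.
 */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr,
                          spinlock_t **ptlp)
{
        pgd_t *pgd = pgd_offset(mm, addr);
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        if (!pgd_present(*pgd))
                return NULL;
        p4d = p4d_offset(pgd, addr);
        if (!p4d_present(*p4d))
                return NULL;
        pud = pud_offset(p4d, addr);
        if (!pud_present(*pud))
                return NULL;
        pmd = pmd_offset(pud, addr);
        if (!pmd_present(*pmd))
                return NULL;
        /* Can itself return NULL: pte_offset_map_lock() is allowed to fail. */
        return pte_offset_map_lock(mm, pmd, addr, ptlp);
}

As in the hunk above, the caller is expected to hold mmap_read_lock(mm) across the walk, and a NULL result is handled by faulting the page in and retrying rather than failing outright.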

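For reference, a hedged userspace sketch of the syscall being patched: m68k exposes cacheflush(2) via __NR_cacheflush, with scope and cache selectors from <asm/cachectl.h>. Per the new checks above, FLUSH_SCOPE_ALL requires CAP_SYS_ADMIN, while line/page scope only requires that [addr, addr+len) not overflow and lie inside one of the caller's own VMAs; vma_lookup() only returns a VMA that actually contains addr, which is why the old addr < vma->vm_start test could be dropped. This sketch assumes an m68k target and exercises, rather than reproduces, the kernel code:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>      /* __NR_cacheflush (m68k) */
#include <asm/cachectl.h>     /* FLUSH_SCOPE_*, FLUSH_CACHE_* (m68k) */

int main(void)
{
        static char buf[4096];

        /* Flush both caches for one page of our own mapping; no
         * capability is needed for FLUSH_SCOPE_PAGE, only a valid VMA. */
        if (syscall(__NR_cacheflush, (unsigned long)buf,
                    FLUSH_SCOPE_PAGE, FLUSH_CACHE_BOTH, sizeof(buf)) != 0) {
                perror("cacheflush");
                return 1;
        }

        puts("cacheflush: ok");
        return 0;
}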