Diffstat (limited to 'arch/arm/lib/uaccess_with_memcpy.c')
-rw-r--r-- | arch/arm/lib/uaccess_with_memcpy.c | 46
1 file changed, 22 insertions, 24 deletions
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index c9450982a155..c0ac7796d775 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -24,6 +24,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
 {
 	unsigned long addr = (unsigned long)_addr;
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pmd_t *pmd;
 	pte_t *pte;
 	pud_t *pud;
@@ -33,7 +34,11 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
 	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
 		return 0;
 
-	pud = pud_offset(pgd, addr);
+	p4d = p4d_offset(pgd, addr);
+	if (unlikely(p4d_none(*p4d) || p4d_bad(*p4d)))
+		return 0;
+
+	pud = pud_offset(p4d, addr);
 	if (unlikely(pud_none(*pud) || pud_bad(*pud)))
 		return 0;
 
@@ -51,10 +56,10 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
 	 * to see that it's still huge and whether or not we will
 	 * need to fault on write.
 	 */
-	if (unlikely(pmd_thp_or_huge(*pmd))) {
+	if (unlikely(pmd_leaf(*pmd))) {
 		ptl = &current->mm->page_table_lock;
 		spin_lock(ptl);
-		if (unlikely(!pmd_thp_or_huge(*pmd)
+		if (unlikely(!pmd_leaf(*pmd)
 			|| pmd_hugewillfault(*pmd))) {
 			spin_unlock(ptl);
 			return 0;
@@ -69,6 +74,9 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
 		return 0;
 
 	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
+	if (unlikely(!pte))
+		return 0;
+
 	if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
 	    !pte_write(*pte) || !pte_dirty(*pte))) {
 		pte_unmap_unlock(pte, ptl);
@@ -87,16 +95,11 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 	unsigned long ua_flags;
 	int atomic;
 
-	if (uaccess_kernel()) {
-		memcpy((void *)to, from, n);
-		return 0;
-	}
-
 	/* the mmap semaphore is taken only if not in an atomic context */
 	atomic = faulthandler_disabled();
 
 	if (!atomic)
-		down_read(&current->mm->mmap_sem);
+		mmap_read_lock(current->mm);
 	while (n) {
 		pte_t *pte;
 		spinlock_t *ptl;
@@ -104,11 +107,11 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 
 		while (!pin_page_for_write(to, &pte, &ptl)) {
 			if (!atomic)
-				up_read(&current->mm->mmap_sem);
+				mmap_read_unlock(current->mm);
 			if (__put_user(0, (char __user *)to))
 				goto out;
 			if (!atomic)
-				down_read(&current->mm->mmap_sem);
+				mmap_read_lock(current->mm);
 		}
 
 		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
@@ -116,7 +119,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 			tocopy = n;
 
 		ua_flags = uaccess_save_and_enable();
-		memcpy((void *)to, from, tocopy);
+		__memcpy((void *)to, from, tocopy);
 		uaccess_restore(ua_flags);
 		to += tocopy;
 		from += tocopy;
@@ -128,7 +131,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 			spin_unlock(ptl);
 	}
 	if (!atomic)
-		up_read(&current->mm->mmap_sem);
+		mmap_read_unlock(current->mm);
 
 out:
 	return n;
@@ -160,22 +163,17 @@ __clear_user_memset(void __user *addr, unsigned long n)
 {
 	unsigned long ua_flags;
 
-	if (uaccess_kernel()) {
-		memset((void *)addr, 0, n);
-		return 0;
-	}
-
-	down_read(&current->mm->mmap_sem);
+	mmap_read_lock(current->mm);
 	while (n) {
 		pte_t *pte;
 		spinlock_t *ptl;
 		int tocopy;
 
 		while (!pin_page_for_write(addr, &pte, &ptl)) {
-			up_read(&current->mm->mmap_sem);
+			mmap_read_unlock(current->mm);
 			if (__put_user(0, (char __user *)addr))
 				goto out;
-			down_read(&current->mm->mmap_sem);
+			mmap_read_lock(current->mm);
 		}
 
 		tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
@@ -183,7 +181,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
 			tocopy = n;
 
 		ua_flags = uaccess_save_and_enable();
-		memset((void *)addr, 0, tocopy);
+		__memset((void *)addr, 0, tocopy);
 		uaccess_restore(ua_flags);
 		addr += tocopy;
 		n -= tocopy;
@@ -193,7 +191,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
 		else
 			spin_unlock(ptl);
 	}
-	up_read(&current->mm->mmap_sem);
+	mmap_read_unlock(current->mm);
 
 out:
 	return n;
@@ -242,7 +240,7 @@ static int __init test_size_treshold(void)
 	if (!dst_page)
 		goto no_dst;
 	kernel_ptr = page_address(src_page);
-	user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
+	user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__PAGE_COPY));
 	if (!user_ptr)
 		goto no_vmap;
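
For orientation, below is a minimal sketch (not code from this diff) of the page-table walk pattern that the updated pin_page_for_write() follows: the p4d_offset() step now sits between pgd and pud, and the result of pte_offset_map_lock() is checked for NULL, mirroring the hunks above. The helper name walk_to_pte() and its exact shape are illustrative assumptions; only the pgd/p4d/pud/pmd/pte accessors are the real kernel API.

/*
 * Illustrative sketch only: a condensed five-level walk to a locked pte,
 * in the style the patched pin_page_for_write() uses.  walk_to_pte() is a
 * made-up name for this example, not a function in the patch.
 */
#include <linux/mm.h>
#include <linux/pgtable.h>

static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr,
			  spinlock_t **ptlp)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;

	/* p4d level added by the conversion to the 5-level walk API */
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d) || p4d_bad(*p4d))
		return NULL;

	/* pud is now derived from p4d rather than directly from pgd */
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;

	/*
	 * pte_offset_map_lock() can fail and return NULL on current
	 * kernels, which is why the diff adds the !pte check.
	 */
	return pte_offset_map_lock(mm, pmd, addr, ptlp);
}

The same ordering appears in the first two hunks of the diff; the remaining hunks are API conversions (mmap_sem read lock to mmap_read_lock()/mmap_read_unlock(), pmd_thp_or_huge() to pmd_leaf(), and raw __memcpy()/__memset() inside the uaccess_save_and_enable() window).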