author    Russell King <rmk@dyn-67.arm.linux.org.uk>    2008-10-31 16:32:19 +0000
committer Russell King <rmk+kernel@arm.linux.org.uk>    2008-11-27 23:53:48 +0000
commit    303c6443659bc1dc911356f5de149f48ff1d97b8 (patch)
tree      75da0aef28ec8e843cdeb24c96349bdf812e2740 /arch/arm/mm/copypage-v6.c
parent    063b0a4207e43acbeff3d4b09f43e750e0212b48 (diff)
[ARM] clearpage: provide our own clear_user_highpage()

For similar reasons as copy_user_page(), we want to avoid the additional
kmap_atomic if it's unnecessary.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
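For context, the generic fallback in include/linux/highmem.h at the time looked roughly like the sketch below (paraphrased from memory, not part of this diff). It always pays for a kmap_atomic()/kunmap_atomic() pair, which is the extra mapping an architecture-specific clear_user_highpage() can skip when the kernel's existing mapping of the page is usable.

/* Sketch of the generic helper circa 2.6.28 (paraphrased, not part of
 * this diff): every caller goes through a kmap_atomic()/kunmap_atomic()
 * pair even when the kernel already has a usable mapping of the page. */
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page, KM_USER0);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr, KM_USER0);
}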
Diffstat (limited to 'arch/arm/mm/copypage-v6.c')
-rw-r--r--  arch/arm/mm/copypage-v6.c  23
1 file changed, 9 insertions(+), 14 deletions(-)
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 2ea75d0f5048..4127a7bddfe5 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -49,9 +49,11 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
  * Clear the user page. No aliasing to deal with so we can just
  * attack the kernel's existing mapping of this page.
  */
-static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
+static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
 {
+	void *kaddr = kmap_atomic(page, KM_USER0);
 	clear_page(kaddr);
+	kunmap_atomic(kaddr, KM_USER0);
 }
 
 /*
@@ -107,20 +109,13 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
  * so remap the kernel page into the same cache colour as the user
  * page.
  */
-static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
+static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
 {
 	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long to = to_address + (offset << PAGE_SHIFT);
 
-	/*
-	 * Discard data in the kernel mapping for the new page
-	 * FIXME: needs this MCRR to be supported.
-	 */
-	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
-	   :
-	   : "r" (kaddr),
-	     "r" ((unsigned long)kaddr + PAGE_SIZE - L1_CACHE_BYTES)
-	   : "cc");
+	/* FIXME: not highmem safe */
+	discard_old_kernel_data(page_address(page));
 
 	/*
 	 * Now clear the page using the same cache colour as
@@ -128,7 +123,7 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
 	 */
 	spin_lock(&v6_lock);
 
-	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL), 0);
+	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
 	flush_tlb_kernel_page(to);
 	clear_page((void *)to);
@@ -136,14 +131,14 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
 }
 
 struct cpu_user_fns v6_user_fns __initdata = {
-	.cpu_clear_user_page	= v6_clear_user_page_nonaliasing,
+	.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
 	.cpu_copy_user_highpage	= v6_copy_user_highpage_nonaliasing,
 };
 
 static int __init v6_userpage_init(void)
 {
 	if (cache_is_vipt_aliasing()) {
-		cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing;
+		cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
 		cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
 	}
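For readers following the registration above: on MULTI_USER ARM kernels the generic clear_user_highpage() call reaches these functions through an indirection declared in arch/arm/include/asm/page.h, roughly as in the sketch below (reconstructed from the same patch series, not part of this diff; field and macro names may differ slightly).

/* Rough sketch of the ARM user-page indirection after this series
 * (reconstructed, not part of this diff). */
struct cpu_user_fns {
	void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
	void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
			unsigned long vaddr);
};

#ifdef MULTI_USER
extern struct cpu_user_fns cpu_user;
#define __cpu_clear_user_highpage	cpu_user.cpu_clear_user_highpage
#define __cpu_copy_user_highpage	cpu_user.cpu_copy_user_highpage
#endif

/* The generic clear_user_highpage() then resolves to: */
#define clear_user_highpage(page, vaddr)	__cpu_clear_user_highpage(page, vaddr)

v6_userpage_init() swaps the aliasing variants into cpu_user at boot when the cache is VIPT aliasing, so the aliasing/non-aliasing choice is made once per machine rather than on every call.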