author     Nicolas Pitre <nicolas.pitre@linaro.org>    2018-11-07 17:49:00 +0100
committer  Russell King <rmk+kernel@armlinux.org.uk>   2018-11-08 10:57:09 +0000
commit     b99afae1390140f5b0039e6b37a7380de31ae874 (patch)
tree       eec57bfb462e01483c0dc6a7848e24b8ac6334c7 /arch/arm/mm/copypage-xsc3.c
parent     31d0b9f9982f8e3a489e83419461d35ab003160a (diff)
ARM: 8805/2: remove unneeded naked function usage
The naked attribute is known to confuse some old gcc versions when function arguments aren't explicitly listed as inline assembly operands, despite what the gcc documentation says. That resulted in commit 9a40ac86152c ("ARM: 6164/1: Add kto and kfrom to input operands list.").

Yet that commit has problems of its own: its assembly operand constraints are completely wrong. If the generated code has been OK since then, it is due to luck rather than correctness. So this patch also provides proper assembly operand constraints, and removes two instances of redundant register usage in the implementation while at it.

Inspection of the generated code with this patch doesn't show any obvious quality degradation either, so not relying on __naked at all makes the code less fragile and avoids some issues with clang.

The only remaining __naked instances (excluding the kprobes test cases) are exynos_pm_power_up_setup(), tc2_pm_power_up_setup() and cci_enable_port_for_self(). In the first two cases, only the function address is used by the compiler, with no chance of it being inlined by mistake; the third is called from assembly code only. And the fact that no stack is available when the corresponding code executes does warrant the __naked usage in those cases.

Signed-off-by: Nicolas Pitre <nico@linaro.org>
Reviewed-by: Stefan Agner <stefan@agner.ch>
Tested-by: Stefan Agner <stefan@agner.ch>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
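[Editor's note] To make the constraint problem concrete, here is a minimal sketch (not part of the patch; the copy_words helpers are hypothetical and an ARM target is assumed) contrasting the broken pattern, in which the asm modifies registers declared as read-only "r" inputs, with the read-write early-clobber "+&r" operands this patch adopts:

    /*
     * Broken (hypothetical): the asm post-increments its pointer and
     * counter registers, but they are declared as plain "r" inputs,
     * so the compiler may assume the registers still hold their
     * original values after the asm executes.
     */
    static void copy_words_broken(void *dst, const void *src, int words)
    {
            asm volatile (
            "1:     ldr     r2, [%1], #4    \n"
            "       str     r2, [%0], #4    \n"
            "       subs    %2, %2, #1      \n"
            "       bgt     1b              \n"
            : /* no outputs declared: the compiler is misinformed */
            : "r" (dst), "r" (src), "r" (words)
            : "r2", "cc", "memory");
    }

    /*
     * Correct form: every operand the asm modifies is a read-write
     * ("+") early-clobber ("&") output, so the compiler neither
     * reuses those registers for other values nor assumes their
     * contents survive the asm.
     */
    static void copy_words(void *dst, const void *src, int words)
    {
            asm volatile (
            "1:     ldr     r2, [%1], #4    \n"
            "       str     r2, [%0], #4    \n"
            "       subs    %2, %2, #1      \n"
            "       bgt     1b              \n"
            : "+&r" (dst), "+&r" (src), "+&r" (words)
            : /* no separate inputs */
            : "r2", "cc", "memory");
    }

In the broken variant the compiler is free to assume dst, src and words sit unchanged in their registers after the asm, which is exactly the kind of luck-dependent code generation the commit message describes.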
Diffstat (limited to 'arch/arm/mm/copypage-xsc3.c')
-rw-r--r--  arch/arm/mm/copypage-xsc3.c | 71
1 file changed, 31 insertions(+), 40 deletions(-)
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index 03a2042aced5..55cbc3a89d85 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -21,53 +21,46 @@
/*
* XSC3 optimised copy_user_highpage
- * r0 = destination
- * r1 = source
*
* The source page may have some clean entries in the cache already, but we
* can safely ignore them - break_cow() will flush them out of the cache
* if we eventually end up using our copied page.
*
*/
-static void __naked
-xsc3_mc_copy_user_page(void *kto, const void *kfrom)
+static void xsc3_mc_copy_user_page(void *kto, const void *kfrom)
{
- asm("\
- stmfd sp!, {r4, r5, lr} \n\
- mov lr, %2 \n\
- \n\
- pld [r1, #0] \n\
- pld [r1, #32] \n\
-1: pld [r1, #64] \n\
- pld [r1, #96] \n\
+ int tmp;
+
+ asm volatile ("\
+ pld [%1, #0] \n\
+ pld [%1, #32] \n\
+1: pld [%1, #64] \n\
+ pld [%1, #96] \n\
\n\
-2: ldrd r2, [r1], #8 \n\
- mov ip, r0 \n\
- ldrd r4, [r1], #8 \n\
- mcr p15, 0, ip, c7, c6, 1 @ invalidate\n\
- strd r2, [r0], #8 \n\
- ldrd r2, [r1], #8 \n\
- strd r4, [r0], #8 \n\
- ldrd r4, [r1], #8 \n\
- strd r2, [r0], #8 \n\
- strd r4, [r0], #8 \n\
- ldrd r2, [r1], #8 \n\
- mov ip, r0 \n\
- ldrd r4, [r1], #8 \n\
- mcr p15, 0, ip, c7, c6, 1 @ invalidate\n\
- strd r2, [r0], #8 \n\
- ldrd r2, [r1], #8 \n\
- subs lr, lr, #1 \n\
- strd r4, [r0], #8 \n\
- ldrd r4, [r1], #8 \n\
- strd r2, [r0], #8 \n\
- strd r4, [r0], #8 \n\
+2: ldrd r2, [%1], #8 \n\
+ ldrd r4, [%1], #8 \n\
+ mcr p15, 0, %0, c7, c6, 1 @ invalidate\n\
+ strd r2, [%0], #8 \n\
+ ldrd r2, [%1], #8 \n\
+ strd r4, [%0], #8 \n\
+ ldrd r4, [%1], #8 \n\
+ strd r2, [%0], #8 \n\
+ strd r4, [%0], #8 \n\
+ ldrd r2, [%1], #8 \n\
+ ldrd r4, [%1], #8 \n\
+ mcr p15, 0, %0, c7, c6, 1 @ invalidate\n\
+ strd r2, [%0], #8 \n\
+ ldrd r2, [%1], #8 \n\
+ subs %2, %2, #1 \n\
+ strd r4, [%0], #8 \n\
+ ldrd r4, [%1], #8 \n\
+ strd r2, [%0], #8 \n\
+ strd r4, [%0], #8 \n\
bgt 1b \n\
- beq 2b \n\
- \n\
- ldmfd sp!, {r4, r5, pc}"
- :
- : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64 - 1));
+ beq 2b "
+ : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
+ : "2" (PAGE_SIZE / 64 - 1)
+ : "r2", "r3", "r4", "r5");
}
void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
@@ -85,8 +78,6 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
/*
* XScale optimised clear_user_page
- * r0 = destination
- * r1 = virtual user address of ultimate destination page
*/
void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
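[Editor's note] On the "2" (PAGE_SIZE / 64 - 1) input operand above: a digit used as a constraint is a matching constraint, placing the input in the same register as the correspondingly numbered output, here the "=&r" (tmp) loop counter. A minimal sketch of the same idiom (the count_down helper is hypothetical, ARM target assumed):

    static int count_down(int start)
    {
            int tmp;

            /* "0" places the input in the register of output
             * operand 0 (tmp), so tmp enters the asm holding
             * the value of start. */
            asm volatile (
            "1:     subs    %0, %0, #1      \n"
            "       bgt     1b              \n"
            : "=&r" (tmp)
            : "0" (start)
            : "cc");

            return tmp;     /* 0 for any start >= 1 */
    }

This is why the patch needs no explicit initialisation of tmp in C: the matching constraint seeds the counter register directly from the PAGE_SIZE expression.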