Diffstat (limited to 'arch/powerpc/lib/memcpy_power7.S')
| -rw-r--r-- | arch/powerpc/lib/memcpy_power7.S | 308 |
1 file changed, 147 insertions, 161 deletions
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index 0663630baf3b..b7c5e7fca8b9 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -1,17 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
  * Copyright (C) IBM Corporation, 2012
  *
@@ -19,21 +7,30 @@
  */
 #include <asm/ppc_asm.h>
 
+#ifndef SELFTEST_CASE
+/* 0 == don't use VMX, 1 == use VMX */
+#define SELFTEST_CASE	0
+#endif
+
+#ifdef __BIG_ENDIAN__
+#define LVS(VRT,RA,RB)		lvsl	VRT,RA,RB
+#define VPERM(VRT,VRA,VRB,VRC)	vperm	VRT,VRA,VRB,VRC
+#else
+#define LVS(VRT,RA,RB)		lvsr	VRT,RA,RB
+#define VPERM(VRT,VRA,VRB,VRC)	vperm	VRT,VRB,VRA,VRC
+#endif
+
 _GLOBAL(memcpy_power7)
-#ifdef CONFIG_ALTIVEC
 	cmpldi	r5,16
 	cmpldi	cr1,r5,4096
-
-	std	r3,48(r1)
-
+	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
 	blt	.Lshort_copy
-	bgt	cr1,.Lvmx_copy
-#else
-	cmpldi	r5,16
-
-	std	r3,48(r1)
-
-	blt	.Lshort_copy
+
+#ifdef CONFIG_ALTIVEC
+test_feature = SELFTEST_CASE
+BEGIN_FTR_SECTION
+	bgt	cr1, .Lvmx_copy
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif
 
 .Lnonvmx_copy:
@@ -207,26 +204,26 @@ _GLOBAL(memcpy_power7)
 	lbz	r0,0(r4)
 	stb	r0,0(r3)
 
-15:	ld	r3,48(r1)
+15:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
 	blr
 
 .Lunwind_stack_nonvmx_copy:
 	addi	r1,r1,STACKFRAMESIZE
 	b	.Lnonvmx_copy
 
-#ifdef CONFIG_ALTIVEC
 .Lvmx_copy:
+#ifdef CONFIG_ALTIVEC
 	mflr	r0
-	std	r4,56(r1)
-	std	r5,64(r1)
+	std	r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
+	std	r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
 	std	r0,16(r1)
 	stdu	r1,-STACKFRAMESIZE(r1)
-	bl	.enter_vmx_copy
+	bl	CFUNC(enter_vmx_ops)
 	cmpwi	cr1,r3,0
 	ld	r0,STACKFRAMESIZE+16(r1)
-	ld	r3,STACKFRAMESIZE+48(r1)
-	ld	r4,STACKFRAMESIZE+56(r1)
-	ld	r5,STACKFRAMESIZE+64(r1)
+	ld	r3,STK_REG(R31)(r1)
+	ld	r4,STK_REG(R30)(r1)
+	ld	r5,STK_REG(R29)(r1)
 	mtlr	r0
 
 	/*
@@ -247,18 +244,7 @@ _GLOBAL(memcpy_power7)
 	or	r7,r7,r0
 	ori	r10,r7,1	/* stream=1 */
 
-	lis	r8,0x8000	/* GO=1 */
-	clrldi	r8,r8,32
-
-.machine push
-.machine "power4"
-	dcbt	r0,r6,0b01000
-	dcbt	r0,r7,0b01010
-	dcbtst	r0,r9,0b01000
-	dcbtst	r0,r10,0b01010
-	eieio
-	dcbt	r0,r8,0b01010	/* GO */
-.machine pop
+	DCBT_SETUP_STREAMS(r6, r7, r9, r10, r8)
 
 	beq	cr1,.Lunwind_stack_nonvmx_copy
 
@@ -312,29 +298,29 @@ _GLOBAL(memcpy_power7)
 	li	r11,48
 
 	bf	cr7*4+3,5f
-	lvx	vr1,r0,r4
+	lvx	v1,0,r4
 	addi	r4,r4,16
-	stvx	vr1,r0,r3
+	stvx	v1,0,r3
 	addi	r3,r3,16
 
 5:	bf	cr7*4+2,6f
-	lvx	vr1,r0,r4
-	lvx	vr0,r4,r9
+	lvx	v1,0,r4
+	lvx	v0,r4,r9
 	addi	r4,r4,32
-	stvx	vr1,r0,r3
-	stvx	vr0,r3,r9
+	stvx	v1,0,r3
+	stvx	v0,r3,r9
 	addi	r3,r3,32
 
 6:	bf	cr7*4+1,7f
-	lvx	vr3,r0,r4
-	lvx	vr2,r4,r9
-	lvx	vr1,r4,r10
-	lvx	vr0,r4,r11
+	lvx	v3,0,r4
+	lvx	v2,r4,r9
+	lvx	v1,r4,r10
+	lvx	v0,r4,r11
 	addi	r4,r4,64
-	stvx	vr3,r0,r3
-	stvx	vr2,r3,r9
-	stvx	vr1,r3,r10
-	stvx	vr0,r3,r11
+	stvx	v3,0,r3
+	stvx	v2,r3,r9
+	stvx	v1,r3,r10
+	stvx	v0,r3,r11
 	addi	r3,r3,64
 
 7:	sub	r5,r5,r6
@@ -357,23 +343,23 @@ _GLOBAL(memcpy_power7)
 	 */
 	.align	5
 8:
-	lvx	vr7,r0,r4
-	lvx	vr6,r4,r9
-	lvx	vr5,r4,r10
-	lvx	vr4,r4,r11
-	lvx	vr3,r4,r12
-	lvx	vr2,r4,r14
-	lvx	vr1,r4,r15
-	lvx	vr0,r4,r16
+	lvx	v7,0,r4
+	lvx	v6,r4,r9
+	lvx	v5,r4,r10
+	lvx	v4,r4,r11
+	lvx	v3,r4,r12
+	lvx	v2,r4,r14
+	lvx	v1,r4,r15
+	lvx	v0,r4,r16
 	addi	r4,r4,128
-	stvx	vr7,r0,r3
-	stvx	vr6,r3,r9
-	stvx	vr5,r3,r10
-	stvx	vr4,r3,r11
-	stvx	vr3,r3,r12
-	stvx	vr2,r3,r14
-	stvx	vr1,r3,r15
-	stvx	vr0,r3,r16
+	stvx	v7,0,r3
+	stvx	v6,r3,r9
+	stvx	v5,r3,r10
+	stvx	v4,r3,r11
+	stvx	v3,r3,r12
+	stvx	v2,r3,r14
+	stvx	v1,r3,r15
+	stvx	v0,r3,r16
 	addi	r3,r3,128
 	bdnz	8b
 
@@ -387,29 +373,29 @@ _GLOBAL(memcpy_power7)
 	mtocrf	0x01,r6
 
 	bf	cr7*4+1,9f
-	lvx	vr3,r0,r4
-	lvx	vr2,r4,r9
-	lvx	vr1,r4,r10
-	lvx	vr0,r4,r11
+	lvx	v3,0,r4
+	lvx	v2,r4,r9
+	lvx	v1,r4,r10
+	lvx	v0,r4,r11
 	addi	r4,r4,64
-	stvx	vr3,r0,r3
-	stvx	vr2,r3,r9
-	stvx	vr1,r3,r10
-	stvx	vr0,r3,r11
+	stvx	v3,0,r3
+	stvx	v2,r3,r9
+	stvx	v1,r3,r10
+	stvx	v0,r3,r11
 	addi	r3,r3,64
 
 9:	bf	cr7*4+2,10f
-	lvx	vr1,r0,r4
-	lvx	vr0,r4,r9
+	lvx	v1,0,r4
+	lvx	v0,r4,r9
 	addi	r4,r4,32
-	stvx	vr1,r0,r3
-	stvx	vr0,r3,r9
+	stvx	v1,0,r3
+	stvx	v0,r3,r9
 	addi	r3,r3,32
 
 10:	bf	cr7*4+3,11f
-	lvx	vr1,r0,r4
+	lvx	v1,0,r4
 	addi	r4,r4,16
-	stvx	vr1,r0,r3
+	stvx	v1,0,r3
 	addi	r3,r3,16
 
 	/* Up to 15B to go */
@@ -438,8 +424,8 @@ _GLOBAL(memcpy_power7)
 	stb	r0,0(r3)
 
 15:	addi	r1,r1,STACKFRAMESIZE
-	ld	r3,48(r1)
-	b	.exit_vmx_copy		/* tail call optimise */
+	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
+	b	CFUNC(exit_vmx_ops)	/* tail call optimise */
 
 .Lvmx_unaligned_copy:
 	/* Get the destination 16B aligned */
@@ -485,42 +471,42 @@ _GLOBAL(memcpy_power7)
 	li	r10,32
 	li	r11,48
 
-	lvsl	vr16,0,r4	/* Setup permute control vector */
-	lvx	vr0,0,r4
+	LVS(v16,0,r4)		/* Setup permute control vector */
+	lvx	v0,0,r4
 	addi	r4,r4,16
 
 	bf	cr7*4+3,5f
-	lvx	vr1,r0,r4
-	vperm	vr8,vr0,vr1,vr16
+	lvx	v1,0,r4
+	VPERM(v8,v0,v1,v16)
 	addi	r4,r4,16
-	stvx	vr8,r0,r3
+	stvx	v8,0,r3
 	addi	r3,r3,16
-	vor	vr0,vr1,vr1
+	vor	v0,v1,v1
 
 5:	bf	cr7*4+2,6f
-	lvx	vr1,r0,r4
-	vperm	vr8,vr0,vr1,vr16
-	lvx	vr0,r4,r9
-	vperm	vr9,vr1,vr0,vr16
+	lvx	v1,0,r4
+	VPERM(v8,v0,v1,v16)
+	lvx	v0,r4,r9
+	VPERM(v9,v1,v0,v16)
 	addi	r4,r4,32
-	stvx	vr8,r0,r3
-	stvx	vr9,r3,r9
+	stvx	v8,0,r3
+	stvx	v9,r3,r9
 	addi	r3,r3,32
 
 6:	bf	cr7*4+1,7f
-	lvx	vr3,r0,r4
-	vperm	vr8,vr0,vr3,vr16
-	lvx	vr2,r4,r9
-	vperm	vr9,vr3,vr2,vr16
-	lvx	vr1,r4,r10
-	vperm	vr10,vr2,vr1,vr16
-	lvx	vr0,r4,r11
-	vperm	vr11,vr1,vr0,vr16
+	lvx	v3,0,r4
+	VPERM(v8,v0,v3,v16)
+	lvx	v2,r4,r9
+	VPERM(v9,v3,v2,v16)
+	lvx	v1,r4,r10
+	VPERM(v10,v2,v1,v16)
+	lvx	v0,r4,r11
+	VPERM(v11,v1,v0,v16)
 	addi	r4,r4,64
-	stvx	vr8,r0,r3
-	stvx	vr9,r3,r9
-	stvx	vr10,r3,r10
-	stvx	vr11,r3,r11
+	stvx	v8,0,r3
+	stvx	v9,r3,r9
+	stvx	v10,r3,r10
+	stvx	v11,r3,r11
 	addi	r3,r3,64
 
 7:	sub	r5,r5,r6
@@ -543,31 +529,31 @@ _GLOBAL(memcpy_power7)
 	 */
	.align	5
 8:
-	lvx	vr7,r0,r4
-	vperm	vr8,vr0,vr7,vr16
-	lvx	vr6,r4,r9
-	vperm	vr9,vr7,vr6,vr16
-	lvx	vr5,r4,r10
-	vperm	vr10,vr6,vr5,vr16
-	lvx	vr4,r4,r11
-	vperm	vr11,vr5,vr4,vr16
-	lvx	vr3,r4,r12
-	vperm	vr12,vr4,vr3,vr16
-	lvx	vr2,r4,r14
-	vperm	vr13,vr3,vr2,vr16
-	lvx	vr1,r4,r15
-	vperm	vr14,vr2,vr1,vr16
-	lvx	vr0,r4,r16
-	vperm	vr15,vr1,vr0,vr16
+	lvx	v7,0,r4
+	VPERM(v8,v0,v7,v16)
+	lvx	v6,r4,r9
+	VPERM(v9,v7,v6,v16)
+	lvx	v5,r4,r10
+	VPERM(v10,v6,v5,v16)
+	lvx	v4,r4,r11
+	VPERM(v11,v5,v4,v16)
+	lvx	v3,r4,r12
+	VPERM(v12,v4,v3,v16)
+	lvx	v2,r4,r14
+	VPERM(v13,v3,v2,v16)
+	lvx	v1,r4,r15
+	VPERM(v14,v2,v1,v16)
+	lvx	v0,r4,r16
+	VPERM(v15,v1,v0,v16)
 	addi	r4,r4,128
-	stvx	vr8,r0,r3
-	stvx	vr9,r3,r9
-	stvx	vr10,r3,r10
-	stvx	vr11,r3,r11
-	stvx	vr12,r3,r12
-	stvx	vr13,r3,r14
-	stvx	vr14,r3,r15
-	stvx	vr15,r3,r16
+	stvx	v8,0,r3
+	stvx	v9,r3,r9
+	stvx	v10,r3,r10
+	stvx	v11,r3,r11
+	stvx	v12,r3,r12
+	stvx	v13,r3,r14
+	stvx	v14,r3,r15
+	stvx	v15,r3,r16
 	addi	r3,r3,128
 	bdnz	8b
 
@@ -581,36 +567,36 @@ _GLOBAL(memcpy_power7)
 	mtocrf	0x01,r6
 
 	bf	cr7*4+1,9f
-	lvx	vr3,r0,r4
-	vperm	vr8,vr0,vr3,vr16
-	lvx	vr2,r4,r9
-	vperm	vr9,vr3,vr2,vr16
-	lvx	vr1,r4,r10
-	vperm	vr10,vr2,vr1,vr16
-	lvx	vr0,r4,r11
-	vperm	vr11,vr1,vr0,vr16
+	lvx	v3,0,r4
+	VPERM(v8,v0,v3,v16)
+	lvx	v2,r4,r9
+	VPERM(v9,v3,v2,v16)
+	lvx	v1,r4,r10
+	VPERM(v10,v2,v1,v16)
+	lvx	v0,r4,r11
+	VPERM(v11,v1,v0,v16)
 	addi	r4,r4,64
-	stvx	vr8,r0,r3
-	stvx	vr9,r3,r9
-	stvx	vr10,r3,r10
-	stvx	vr11,r3,r11
+	stvx	v8,0,r3
+	stvx	v9,r3,r9
+	stvx	v10,r3,r10
+	stvx	v11,r3,r11
 	addi	r3,r3,64
 
 9:	bf	cr7*4+2,10f
-	lvx	vr1,r0,r4
-	vperm	vr8,vr0,vr1,vr16
-	lvx	vr0,r4,r9
-	vperm	vr9,vr1,vr0,vr16
+	lvx	v1,0,r4
+	VPERM(v8,v0,v1,v16)
+	lvx	v0,r4,r9
+	VPERM(v9,v1,v0,v16)
 	addi	r4,r4,32
-	stvx	vr8,r0,r3
-	stvx	vr9,r3,r9
+	stvx	v8,0,r3
+	stvx	v9,r3,r9
 	addi	r3,r3,32
 
 10:	bf	cr7*4+3,11f
-	lvx	vr1,r0,r4
-	vperm	vr8,vr0,vr1,vr16
+	lvx	v1,0,r4
+	VPERM(v8,v0,v1,v16)
 	addi	r4,r4,16
-	stvx	vr8,r0,r3
+	stvx	v8,0,r3
 	addi	r3,r3,16
 
 	/* Up to 15B to go */
@@ -642,6 +628,6 @@ _GLOBAL(memcpy_power7)
 	stb	r0,0(r3)
 
 15:	addi	r1,r1,STACKFRAMESIZE
-	ld	r3,48(r1)
-	b	.exit_vmx_copy		/* tail call optimise */
-#endif /* CONFiG_ALTIVEC */
+	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
+	b	CFUNC(exit_vmx_ops)	/* tail call optimise */
+#endif /* CONFIG_ALTIVEC */
