From e89d6cc51034998607502cd3899173bfa7189571 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 24 May 2021 09:29:44 +0100
Subject: arm64: assembler: replace `kaddr` with `addr`

The `__dcache_op_workaround_clean_cache` and `dcache_by_line_op` macros
are only expected to be used on kernel memory, without a user fault
fixup, and so we named their address variables `kaddr` to make this
clear.

Subsequent patches will modify these to also work on user memory with
an (optional) user fault fixup, where `kaddr` won't make as much sense.
To aid the legibility of patches, this patch (only) replaces `kaddr`
with `addr` as a preparatory step.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland
Signed-off-by: Fuad Tabba
Cc: Ard Biesheuvel
Cc: Catalin Marinas
Cc: Fuad Tabba
Cc: Will Deacon
Reviewed-by: Ard Biesheuvel
Link: https://lore.kernel.org/r/20210524083001.2586635-2-tabba@google.com
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/assembler.h | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 8418c1bd8f04..6a0fbc599196 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -377,47 +377,47 @@ alternative_cb_end
 
 /*
  * Macro to perform a data cache maintenance for the interval
- * [kaddr, kaddr + size)
+ * [addr, addr + size)
  *
  *	op:		operation passed to dc instruction
  *	domain:		domain used in dsb instruciton
- *	kaddr:		starting virtual address of the region
+ *	addr:		starting virtual address of the region
  *	size:		size of the region
- *	Corrupts:	kaddr, size, tmp1, tmp2
+ *	Corrupts:	addr, size, tmp1, tmp2
  */
-	.macro __dcache_op_workaround_clean_cache, op, kaddr
+	.macro __dcache_op_workaround_clean_cache, op, addr
 alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
-	dc	\op, \kaddr
+	dc	\op, \addr
 alternative_else
-	dc	civac, \kaddr
+	dc	civac, \addr
 alternative_endif
 	.endm
 
-	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
+	.macro dcache_by_line_op op, domain, addr, size, tmp1, tmp2
 	dcache_line_size \tmp1, \tmp2
-	add	\size, \kaddr, \size
+	add	\size, \addr, \size
 	sub	\tmp2, \tmp1, #1
-	bic	\kaddr, \kaddr, \tmp2
+	bic	\addr, \addr, \tmp2
 9998:
 	.ifc	\op, cvau
-	__dcache_op_workaround_clean_cache \op, \kaddr
+	__dcache_op_workaround_clean_cache \op, \addr
 	.else
 	.ifc	\op, cvac
-	__dcache_op_workaround_clean_cache \op, \kaddr
+	__dcache_op_workaround_clean_cache \op, \addr
 	.else
 	.ifc	\op, cvap
-	sys	3, c7, c12, 1, \kaddr	// dc cvap
+	sys	3, c7, c12, 1, \addr	// dc cvap
 	.else
 	.ifc	\op, cvadp
-	sys	3, c7, c13, 1, \kaddr	// dc cvadp
+	sys	3, c7, c13, 1, \addr	// dc cvadp
 	.else
-	dc	\op, \kaddr
+	dc	\op, \addr
 	.endif
 	.endif
 	.endif
 	.endif
-	add	\kaddr, \kaddr, \tmp1
-	cmp	\kaddr, \size
+	add	\addr, \addr, \tmp1
+	cmp	\addr, \size
 	b.lo	9998b
 	dsb	\domain
 	.endm
--
cgit

From d11b187760f52480dd83bda0429ee3c94e542b1d Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 24 May 2021 09:29:45 +0100
Subject: arm64: assembler: add conditional cache fixups

It would be helpful if we could use both `dcache_by_line_op` and
`invalidate_icache_by_line` for user memory without accidentally fixing
up unexpected faults when performing maintenance on kernel addresses.

Let's make this possible by having both macros take an optional fixup
label, and only generating an extable entry if a label is provided.

At the same time, let's clean up the labels used so that they are
globally unique, using \@ as we do for other macros.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland
Signed-off-by: Fuad Tabba
Cc: Ard Biesheuvel
Cc: Catalin Marinas
Cc: Fuad Tabba
Cc: Will Deacon
Reviewed-by: Ard Biesheuvel
Link: https://lore.kernel.org/r/20210524083001.2586635-3-tabba@google.com
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/assembler.h | 39 ++++++++++++++++++++++++++++-----------
 1 file changed, 28 insertions(+), 11 deletions(-)

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 6a0fbc599196..0a276b46ef50 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -130,15 +130,27 @@ alternative_endif
 	.endm
 
 /*
- * Emit an entry into the exception table
+ * Create an exception table entry for `insn`, which will branch to `fixup`
+ * when an unhandled fault is taken.
  */
-	.macro		_asm_extable, from, to
+	.macro		_asm_extable, insn, fixup
 	.pushsection	__ex_table, "a"
 	.align		3
-	.long		(\from - .), (\to - .)
+	.long		(\insn - .), (\fixup - .)
 	.popsection
 	.endm
 
+/*
+ * Create an exception table entry for `insn` if `fixup` is provided. Otherwise
+ * do nothing.
+ */
+	.macro		_cond_extable, insn, fixup
+	.ifnc		\fixup,
+	_asm_extable	\insn, \fixup
+	.endif
+	.endm
+
+
 #define USER(l, x...)				\
 9999:	x;					\
 	_asm_extable	9999b, l
@@ -383,6 +395,7 @@ alternative_cb_end
  *	domain:		domain used in dsb instruciton
  *	addr:		starting virtual address of the region
  *	size:		size of the region
+ *	fixup:		optional label to branch to on user fault
  *	Corrupts:	addr, size, tmp1, tmp2
  */
 	.macro __dcache_op_workaround_clean_cache, op, addr
@@ -393,12 +406,12 @@ alternative_else
 alternative_endif
 	.endm
 
-	.macro dcache_by_line_op op, domain, addr, size, tmp1, tmp2
+	.macro dcache_by_line_op op, domain, addr, size, tmp1, tmp2, fixup
 	dcache_line_size \tmp1, \tmp2
 	add	\size, \addr, \size
 	sub	\tmp2, \tmp1, #1
 	bic	\addr, \addr, \tmp2
-9998:
+.Ldcache_op\@:
 	.ifc	\op, cvau
 	__dcache_op_workaround_clean_cache \op, \addr
 	.else
@@ -418,8 +431,10 @@ alternative_endif
 	.endif
 	add	\addr, \addr, \tmp1
 	cmp	\addr, \size
-	b.lo	9998b
+	b.lo	.Ldcache_op\@
 	dsb	\domain
+
+	_cond_extable .Ldcache_op\@, \fixup
 	.endm
 
 /*
@@ -427,20 +442,22 @@ alternative_endif
  * [start, end)
  *
  *	start, end:	virtual addresses describing the region
- *	label:		A label to branch to on user fault.
+ *	fixup:		optional label to branch to on user fault
  *	Corrupts:	tmp1, tmp2
  */
-	.macro invalidate_icache_by_line start, end, tmp1, tmp2, label
+	.macro invalidate_icache_by_line start, end, tmp1, tmp2, fixup
 	icache_line_size \tmp1, \tmp2
 	sub	\tmp2, \tmp1, #1
 	bic	\tmp2, \start, \tmp2
-9997:
-USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
+.Licache_op\@:
+	ic	ivau, \tmp2			// invalidate I line PoU
 	add	\tmp2, \tmp2, \tmp1
 	cmp	\tmp2, \end
-	b.lo	9997b
+	b.lo	.Licache_op\@
 	dsb	ish
 	isb
+
+	_cond_extable .Licache_op\@, \fixup
 	.endm
 
 /*
--
cgit

From 06b7a568ca5e9cb79a0cc4737f498ea90d8fa89d Mon Sep 17 00:00:00 2001
From: Fuad Tabba
Date: Mon, 24 May 2021 09:29:51 +0100
Subject: arm64: Move documentation of dcache_by_line_op

The comment describing the macro dcache_by_line_op is placed right
before the previous macro of the one it describes, which is a bit
confusing. Move it to the macro it describes (dcache_by_line_op).

No functional change intended.

Acked-by: Mark Rutland
Signed-off-by: Fuad Tabba
Reviewed-by: Ard Biesheuvel
Link: https://lore.kernel.org/r/20210524083001.2586635-9-tabba@google.com
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/assembler.h | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 0a276b46ef50..ced791124b28 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -387,6 +387,14 @@ alternative_cb_end
 	bfi	\tcr, \tmp0, \pos, #3
 	.endm
 
+	.macro __dcache_op_workaround_clean_cache, op, addr
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
+	dc	\op, \addr
+alternative_else
+	dc	civac, \addr
+alternative_endif
+	.endm
+
 /*
  * Macro to perform a data cache maintenance for the interval
  * [addr, addr + size)
@@ -398,14 +406,6 @@ alternative_cb_end
  *	fixup:		optional label to branch to on user fault
  *	Corrupts:	addr, size, tmp1, tmp2
  */
-	.macro __dcache_op_workaround_clean_cache, op, addr
-alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
-	dc	\op, \addr
-alternative_else
-	dc	civac, \addr
-alternative_endif
-	.endm
-
 	.macro dcache_by_line_op op, domain, addr, size, tmp1, tmp2, fixup
 	dcache_line_size \tmp1, \tmp2
 	add	\size, \addr, \size
--
cgit

From 163d3f80695e31068c7d32244c9e6d406d5c5c00 Mon Sep 17 00:00:00 2001
From: Fuad Tabba
Date: Mon, 24 May 2021 09:29:54 +0100
Subject: arm64: dcache_by_line_op to take end parameter instead of size

To be consistent with other functions with similar names and
functionality in cacheflush.h, cache.S, and cachetlb.rst, change
dcache_by_line_op to specify the range in terms of start and end, as
opposed to start and size.

No functional change intended.

Reported-by: Will Deacon
Acked-by: Mark Rutland
Signed-off-by: Fuad Tabba
Reviewed-by: Ard Biesheuvel
Link: https://lore.kernel.org/r/20210524083001.2586635-12-tabba@google.com
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/assembler.h | 27 +++++++++++++--------------
 1 file changed, 13 insertions(+), 14 deletions(-)

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index ced791124b28..c4cecf85dccf 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -397,40 +397,39 @@ alternative_endif
 
 /*
  * Macro to perform a data cache maintenance for the interval
- * [addr, addr + size)
+ * [start, end)
  *
  *	op:		operation passed to dc instruction
  *	domain:		domain used in dsb instruciton
- *	addr:		starting virtual address of the region
- *	size:		size of the region
+ *	start:		starting virtual address of the region
+ *	end:		end virtual address of the region
  *	fixup:		optional label to branch to on user fault
- *	Corrupts:	addr, size, tmp1, tmp2
+ *	Corrupts:	start, end, tmp1, tmp2
  */
-	.macro dcache_by_line_op op, domain, addr, size, tmp1, tmp2, fixup
+	.macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
 	dcache_line_size \tmp1, \tmp2
-	add	\size, \addr, \size
 	sub	\tmp2, \tmp1, #1
-	bic	\addr, \addr, \tmp2
+	bic	\start, \start, \tmp2
 .Ldcache_op\@:
 	.ifc	\op, cvau
-	__dcache_op_workaround_clean_cache \op, \addr
+	__dcache_op_workaround_clean_cache \op, \start
 	.else
 	.ifc	\op, cvac
-	__dcache_op_workaround_clean_cache \op, \addr
+	__dcache_op_workaround_clean_cache \op, \start
 	.else
 	.ifc	\op, cvap
-	sys	3, c7, c12, 1, \addr	// dc cvap
+	sys	3, c7, c12, 1, \start	// dc cvap
 	.else
 	.ifc	\op, cvadp
-	sys	3, c7, c13, 1, \addr	// dc cvadp
+	sys	3, c7, c13, 1, \start	// dc cvadp
 	.else
-	dc	\op, \addr
+	dc	\op, \start
 	.endif
 	.endif
 	.endif
 	.endif
-	add	\addr, \addr, \tmp1
-	cmp	\addr, \size
+	add	\start, \start, \tmp1
+	cmp	\start, \end
 	b.lo	.Ldcache_op\@
 	dsb	\domain
--
cgit
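
After this series, dcache_by_line_op takes (op, domain, start, end, tmp1, tmp2,
fixup) and invalidate_icache_by_line takes (start, end, tmp1, tmp2, fixup), with
the fixup label optional in both. The routine below is an illustrative sketch
only, not code from these patches: the function name, register allocation and
return convention are assumptions made for the example, it presumes the usual
kernel assembly includes (linux/linkage.h, linux/errno.h, asm/assembler.h), and
it omits the uaccess enabling a real caller operating on user addresses would
need. It only shows how passing the optional fixup argument turns a user fault
taken during maintenance into an error return via _cond_extable.

/*
 * Illustrative sketch only (not part of the patches above).
 *
 *	x0 - start of user range
 *	x1 - end of user range (exclusive)
 * Returns 0 on success, -EFAULT if a user fault is taken during maintenance.
 */
SYM_FUNC_START(example_user_cache_flush)
	mov	x2, x0			// keep copies: dcache_by_line_op corrupts
	mov	x3, x1			// its start/end arguments
	dcache_by_line_op cvau, ish, x0, x1, x4, x5, 2f	// clean D-side to PoU
	invalidate_icache_by_line x2, x3, x4, x5, 2f	// invalidate I-side to PoU
	mov	x0, #0			// success
1:	ret
2:	mov	x0, #-EFAULT		// reached via the _cond_extable fixup
	b	1b
SYM_FUNC_END(example_user_cache_flush)

Because no extable entry is emitted when the fixup argument is left empty,
existing callers that operate purely on kernel addresses keep their
fault-is-fatal behaviour unchanged.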