author | Soby Mathew <soby.mathew@arm.com> | 2016-05-05 14:10:46 +0100
committer | Soby Mathew <soby.mathew@arm.com> | 2016-08-10 12:35:46 +0100
commit | e33b78a658bd54a815c780e17c2d0073db6f59db (patch)
tree | 17ede96d62e85ca511b0291b6a913a2d486dd91c /lib
parent | 66be868e9acc7b34852f755934664b191e9fae13 (diff)
AArch32: Add support in TF libraries
This patch adds AArch32 support to cpu ops, context management,
per-cpu data and spinlock libraries. The `entrypoint_info`
structure is modified to add support for AArch32 register
arguments. The CPU operations for the AEM generic CPU in AArch32
mode are also added.
Change-Id: I1e52e79f498661d8f31f1e7b3a29e222bc7a4483
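
As an illustration of the new AArch32 entry point handling, the sketch below shows how a caller might fill in an `entry_point_info` and hand it to the context management code added in this patch. It is a minimal, hypothetical sketch: the `cm_*` calls and the `pc`/`spsr`/`args` fields match the code in `lib/el3_runtime/aarch32/context_mgmt.c` below, while the image address, SPSR helper macros and argument values are assumptions for illustration only.

```
/*
 * Hypothetical caller sketch (not part of this patch). Only the cm_*
 * functions and the pc/spsr/args fields are taken from the code below;
 * the helper macros and values are assumed for illustration.
 */
#include <stdint.h>
#include <arch.h>
#include <bl_common.h>
#include <context_mgmt.h>

static void prepare_ns_entry(uintptr_t ns_image_base)
{
	entry_point_info_t ep;

	/* Mark the entry point as non-secure */
	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, 0);
	SET_SECURITY_STATE(ep.h.attr, NON_SECURE);

	ep.pc = ns_image_base;
	/* Assumed SPSR encoding: enter HYP mode, A32 state, little-endian */
	ep.spsr = SPSR_MODE32(MODE32_hyp, SPSR_T_ARM, SPSR_E_LITTLE,
			      DISABLE_ALL_EXCEPTIONS);

	/* r0-r3 on entry; copied into the context from ep.args */
	ep.args.arg0 = 0;
	ep.args.arg1 = 0;

	cm_init_my_context(&ep);		/* fill SCR/SPSR/LR/r0-r3 */
	cm_prepare_el3_exit(NON_SECURE);	/* set up HYP/PL1 sysregs */
}
```

With this sequence, `cm_prepare_el3_exit()` either initialises HSCTLR (when SCR.HCE requests entry to HYP mode) or safely parks the virtualisation extensions before the exception return into the non-secure world, as implemented in the diff below.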
Diffstat (limited to 'lib')
-rw-r--r-- | lib/cpus/aarch32/aem_generic.S | 68
-rw-r--r-- | lib/cpus/aarch32/cpu_helpers.S | 177
-rw-r--r-- | lib/el3_runtime/aarch32/context_mgmt.c | 235
-rw-r--r-- | lib/el3_runtime/aarch32/cpu_data.S | 63
-rw-r--r-- | lib/locks/exclusive/aarch32/spinlock.S | 55
5 files changed, 598 insertions, 0 deletions
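
The library also exposes a by-index variant, so that one CPU (typically the PSCI service, as the comments in the diff note) can prepare the context of another CPU before powering it on. A hedged sketch follows; everything except `cm_init_context_by_index()` is an assumption made for illustration.

```
/*
 * Hypothetical PSCI-style caller sketch: prepare the non-secure context
 * of a target CPU identified by MPIDR. The helper name and error policy
 * are assumptions; only cm_init_context_by_index() comes from this patch.
 */
#include <assert.h>
#include <context_mgmt.h>
#include <platform.h>

static void setup_target_cpu_context(u_register_t target_mpidr,
				     const entry_point_info_t *ns_ep)
{
	int idx = plat_core_pos_by_mpidr(target_mpidr);

	assert(idx >= 0);

	/* Build SCR/SPSR/LR/r0-r3 in the target CPU's NS context */
	cm_init_context_by_index((unsigned int)idx, ns_ep);

	/*
	 * The target CPU picks this context up on its warm-boot path and
	 * calls cm_prepare_el3_exit() for itself before leaving EL3.
	 */
}
```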
diff --git a/lib/cpus/aarch32/aem_generic.S b/lib/cpus/aarch32/aem_generic.S new file mode 100644 index 00000000..10ea4e47 --- /dev/null +++ b/lib/cpus/aarch32/aem_generic.S @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include <aem_generic.h> +#include <arch.h> +#include <asm_macros.S> +#include <assert_macros.S> +#include <cpu_macros.S> + +func aem_generic_core_pwr_dwn + /* Assert if cache is enabled */ +#if ASM_ASSERTION + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + /* --------------------------------------------- + * Flush L1 cache to PoU. + * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + b dcsw_op_louis +endfunc aem_generic_core_pwr_dwn + + +func aem_generic_cluster_pwr_dwn + /* Assert if cache is enabled */ +#if ASM_ASSERTION + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + /* --------------------------------------------- + * Flush L1 and L2 caches to PoC. + * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + b dcsw_op_all +endfunc aem_generic_cluster_pwr_dwn + +/* cpu_ops for Base AEM FVP */ +declare_cpu_ops aem_generic, BASE_AEM_MIDR, 1 diff --git a/lib/cpus/aarch32/cpu_helpers.S b/lib/cpus/aarch32/cpu_helpers.S new file mode 100644 index 00000000..927a6f50 --- /dev/null +++ b/lib/cpus/aarch32/cpu_helpers.S @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <arch.h> +#include <asm_macros.S> +#include <assert_macros.S> +#include <cpu_data.h> +#include <cpu_macros.S> + + /* + * The reset handler common to all platforms. After a matching + * cpu_ops structure entry is found, the correponding reset_handler + * in the cpu_ops is invoked. The reset handler is invoked very early + * in the boot sequence and it is assumed that we can clobber r0 - r10 + * without the need to follow AAPCS. + * Clobbers: r0 - r10 + */ + .globl reset_handler +func reset_handler + mov r10, lr + + /* The plat_reset_handler can clobber r0 - r9 */ + bl plat_reset_handler + + /* Get the matching cpu_ops pointer (clobbers: r0 - r5) */ + bl get_cpu_ops_ptr + +#if ASM_ASSERTION + cmp r0, #0 + ASM_ASSERT(ne) +#endif + + /* Get the cpu_ops reset handler */ + ldr r1, [r0, #CPU_RESET_FUNC] + cmp r1, #0 + mov lr, r10 + bxne r1 + bx lr +endfunc reset_handler + + /* + * The prepare core power down function for all platforms. After + * the cpu_ops pointer is retrieved from cpu_data, the corresponding + * pwr_dwn_core in the cpu_ops is invoked. Follows AAPCS. + */ + .globl prepare_core_pwr_dwn +func prepare_core_pwr_dwn + push {lr} + bl _cpu_data + pop {lr} + + ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR] +#if ASM_ASSERTION + cmp r1, #0 + ASM_ASSERT(ne) +#endif + + /* Get the cpu_ops core_pwr_dwn handler */ + ldr r0, [r1, #CPU_PWR_DWN_CORE] + bx r0 +endfunc prepare_core_pwr_dwn + + /* + * The prepare cluster power down function for all platforms. After + * the cpu_ops pointer is retrieved from cpu_data, the corresponding + * pwr_dwn_cluster in the cpu_ops is invoked. Follows AAPCS. + */ + .globl prepare_cluster_pwr_dwn +func prepare_cluster_pwr_dwn + push {lr} + bl _cpu_data + pop {lr} + + ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR] +#if ASM_ASSERTION + cmp r1, #0 + ASM_ASSERT(ne) +#endif + + /* Get the cpu_ops cluster_pwr_dwn handler */ + ldr r0, [r1, #CPU_PWR_DWN_CLUSTER] + bx r0 +endfunc prepare_cluster_pwr_dwn + + /* + * Initializes the cpu_ops_ptr if not already initialized + * in cpu_data. This must only be called after the data cache + * is enabled. AAPCS is followed. + */ + .globl init_cpu_ops +func init_cpu_ops + push {r4 - r6, lr} + bl _cpu_data + mov r6, r0 + ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR] + cmp r1, #0 + bne 1f + bl get_cpu_ops_ptr +#if ASM_ASSERTION + cmp r0, #0 + ASM_ASSERT(ne) +#endif + str r0, [r6, #CPU_DATA_CPU_OPS_PTR]! +1: + pop {r4 - r6, pc} +endfunc init_cpu_ops + + /* + * The below function returns the cpu_ops structure matching the + * midr of the core. 
It reads the MIDR and finds the matching + * entry in cpu_ops entries. Only the implementation and part number + * are used to match the entries. + * Return : + * r0 - The matching cpu_ops pointer on Success + * r0 - 0 on failure. + * Clobbers: r0 - r5 + */ + .globl get_cpu_ops_ptr +func get_cpu_ops_ptr + /* Get the cpu_ops start and end locations */ + ldr r4, =(__CPU_OPS_START__ + CPU_MIDR) + ldr r5, =(__CPU_OPS_END__ + CPU_MIDR) + + /* Initialize the return parameter */ + mov r0, #0 + + /* Read the MIDR_EL1 */ + ldcopr r2, MIDR + ldr r3, =CPU_IMPL_PN_MASK + + /* Retain only the implementation and part number using mask */ + and r2, r2, r3 +1: + /* Check if we have reached end of list */ + cmp r4, r5 + bge error_exit + + /* load the midr from the cpu_ops */ + ldr r1, [r4], #CPU_OPS_SIZE + and r1, r1, r3 + + /* Check if midr matches to midr of this core */ + cmp r1, r2 + bne 1b + + /* Subtract the increment and offset to get the cpu-ops pointer */ + sub r0, r4, #(CPU_OPS_SIZE + CPU_MIDR) +error_exit: + bx lr +endfunc get_cpu_ops_ptr diff --git a/lib/el3_runtime/aarch32/context_mgmt.c b/lib/el3_runtime/aarch32/context_mgmt.c new file mode 100644 index 00000000..6915ded7 --- /dev/null +++ b/lib/el3_runtime/aarch32/context_mgmt.c @@ -0,0 +1,235 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <arch.h> +#include <arch_helpers.h> +#include <assert.h> +#include <bl_common.h> +#include <context.h> +#include <context_mgmt.h> +#include <platform.h> +#include <platform_def.h> +#include <smcc_helpers.h> +#include <string.h> + +/******************************************************************************* + * Context management library initialisation routine. This library is used by + * runtime services to share pointers to 'cpu_context' structures for the secure + * and non-secure states. Management of the structures and their associated + * memory is not done by the context management library e.g. 
the PSCI service + * manages the cpu context used for entry from and exit to the non-secure state. + * The Secure payload manages the context(s) corresponding to the secure state. + * It also uses this library to get access to the non-secure + * state cpu context pointers. + ******************************************************************************/ +void cm_init(void) +{ + /* + * The context management library has only global data to initialize, but + * that will be done when the BSS is zeroed out + */ +} + +/******************************************************************************* + * The following function initializes the cpu_context 'ctx' for + * first use, and sets the initial entrypoint state as specified by the + * entry_point_info structure. + * + * The security state to initialize is determined by the SECURE attribute + * of the entry_point_info. The function returns a pointer to the initialized + * context and sets this as the next context to return to. + * + * The EE and ST attributes are used to configure the endianness and secure + * timer availability for the new execution context. + * + * To prepare the register state for entry call cm_prepare_el3_exit() and + * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to + * cm_e1_sysreg_context_restore(). + ******************************************************************************/ +static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep) +{ + unsigned int security_state; + uint32_t scr, sctlr; + regs_t *reg_ctx; + + assert(ctx); + + security_state = GET_SECURITY_STATE(ep->h.attr); + + /* Clear any residual register values from the context */ + memset(ctx, 0, sizeof(*ctx)); + + /* + * Base the context SCR on the current value, adjust for entry point + * specific requirements + */ + scr = read_scr(); + scr &= ~(SCR_NS_BIT | SCR_HCE_BIT); + + if (security_state != SECURE) + scr |= SCR_NS_BIT; + + /* + * Set up SCTLR for the Non Secure context. + * EE bit is taken from the entrypoint attributes + * M, C and I bits must be zero (as required by PSCI specification) + * + * The target exception level is based on the spsr mode requested. + * If execution is requested to hyp mode, HVC is enabled + * via SCR.HCE. + * + * Always compute the SCTLR_EL1 value and save in the cpu_context + * - the HYP registers are set up by cm_preapre_ns_entry() as they + * are not part of the stored cpu_context + * + * TODO: In debug builds the spsr should be validated and checked + * against the CPU support, security state, endianness and pc + */ + if (security_state != SECURE) { + sctlr = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0; + sctlr |= SCTLR_RES1; + write_ctx_reg(reg_ctx, CTX_NS_SCTLR, sctlr); + } + + if (GET_M32(ep->spsr) == MODE32_hyp) + scr |= SCR_HCE_BIT; + + reg_ctx = get_regs_ctx(ctx); + + write_ctx_reg(reg_ctx, CTX_SCR, scr); + write_ctx_reg(reg_ctx, CTX_LR, ep->pc); + write_ctx_reg(reg_ctx, CTX_SPSR, ep->spsr); + + /* + * Store the r0-r3 value from the entrypoint into the context + * Use memcpy as we are in control of the layout of the structures + */ + memcpy((void *)reg_ctx, (void *)&ep->args, sizeof(aapcs32_params_t)); +} + +/******************************************************************************* + * The following function initializes the cpu_context for a CPU specified by + * its `cpu_idx` for first use, and sets the initial entrypoint state as + * specified by the entry_point_info structure. 
+ ******************************************************************************/ +void cm_init_context_by_index(unsigned int cpu_idx, + const entry_point_info_t *ep) +{ + cpu_context_t *ctx; + ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr)); + cm_init_context_common(ctx, ep); +} + +/******************************************************************************* + * The following function initializes the cpu_context for the current CPU + * for first use, and sets the initial entrypoint state as specified by the + * entry_point_info structure. + ******************************************************************************/ +void cm_init_my_context(const entry_point_info_t *ep) +{ + cpu_context_t *ctx; + ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr)); + cm_init_context_common(ctx, ep); +} + +/******************************************************************************* + * Prepare the CPU system registers for first entry into secure or normal world + * + * If execution is requested to hyp mode, HSCTLR is initialized + * If execution is requested to non-secure PL1, and the CPU supports + * HYP mode then HYP mode is disabled by configuring all necessary HYP mode + * registers. + ******************************************************************************/ +void cm_prepare_el3_exit(uint32_t security_state) +{ + uint32_t sctlr, scr, hcptr; + cpu_context_t *ctx = cm_get_context(security_state); + + assert(ctx); + + if (security_state == NON_SECURE) { + scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR); + if (scr & SCR_HCE_BIT) { + /* Use SCTLR value to initialize HSCTLR */ + sctlr = read_ctx_reg(get_regs_ctx(ctx), + CTX_NS_SCTLR); + sctlr |= HSCTLR_RES1; + /* Temporarily set the NS bit to access HSCTLR */ + write_scr(read_scr() | SCR_NS_BIT); + /* + * Make sure the write to SCR is complete so that + * we can access HSCTLR + */ + isb(); + write_hsctlr(sctlr); + isb(); + + write_scr(read_scr() & ~SCR_NS_BIT); + isb(); + } else if (read_id_pfr1() & + (ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) { + /* Set the NS bit to access HCR, HCPTR, CNTHCTL, VPIDR, VMPIDR */ + write_scr(read_scr() | SCR_NS_BIT); + isb(); + + /* PL2 present but unused, need to disable safely */ + write_hcr(0); + + /* HSCTLR : can be ignored when bypassing */ + + /* HCPTR : disable all traps TCPAC, TTA, TCP */ + hcptr = read_hcptr(); + hcptr &= ~(TCPAC_BIT | TTA_BIT | TCP11_BIT | TCP10_BIT); + write_hcptr(hcptr); + + /* Enable EL1 access to timer */ + write_cnthctl(PL1PCEN_BIT | PL1PCTEN_BIT); + + /* Reset CNTVOFF_EL2 */ + write64_cntvoff(0); + + /* Set VPIDR, VMPIDR to match MIDR, MPIDR */ + write_vpidr(read_midr()); + write_vmpidr(read_mpidr()); + + /* + * Reset VTTBR. + * Needed because cache maintenance operations depend on + * the VMID even when non-secure EL1&0 stage 2 address + * translation are disabled. + */ + write64_vttbr(0); + isb(); + + write_scr(read_scr() & ~SCR_NS_BIT); + isb(); + } + } +} diff --git a/lib/el3_runtime/aarch32/cpu_data.S b/lib/el3_runtime/aarch32/cpu_data.S new file mode 100644 index 00000000..b97911fb --- /dev/null +++ b/lib/el3_runtime/aarch32/cpu_data.S @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. 
+ * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <asm_macros.S> +#include <cpu_data.h> + + .globl _cpu_data + .globl _cpu_data_by_index + +/* ----------------------------------------------------------------- + * cpu_data_t *_cpu_data(void) + * + * Return the cpu_data structure for the current CPU. + * ----------------------------------------------------------------- + */ +func _cpu_data + push {lr} + bl plat_my_core_pos + pop {lr} + b _cpu_data_by_index +endfunc _cpu_data + +/* ----------------------------------------------------------------- + * cpu_data_t *_cpu_data_by_index(uint32_t cpu_index) + * + * Return the cpu_data structure for the CPU with given linear index + * + * This can be called without a valid stack. + * clobbers: r0, r1 + * ----------------------------------------------------------------- + */ +func _cpu_data_by_index + ldr r1, =percpu_data + add r0, r1, r0, LSL #CPU_DATA_LOG2SIZE + bx lr +endfunc _cpu_data_by_index diff --git a/lib/locks/exclusive/aarch32/spinlock.S b/lib/locks/exclusive/aarch32/spinlock.S new file mode 100644 index 00000000..f3a2bc36 --- /dev/null +++ b/lib/locks/exclusive/aarch32/spinlock.S @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <asm_macros.S> + + .globl spin_lock + .globl spin_unlock + + +func spin_lock + mov r2, #1 +1: + ldrex r1, [r0] + cmp r1, #0 + wfene + strexeq r1, r2, [r0] + cmpeq r1, #0 + bne 1b + dmb + bx lr +endfunc spin_lock + + +func spin_unlock + mov r1, #0 + stl r1, [r0] + bx lr +endfunc spin_unlock |
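
Finally, a hedged sketch of how the new AArch32 `spin_lock`/`spin_unlock` primitives are consumed from C through the existing spinlock API; the lock variable and the critical-section contents are placeholders.

```
/*
 * Hypothetical usage sketch for the AArch32 spin_lock/spin_unlock added
 * above. The lock and the work done inside the critical section are
 * placeholders; the API itself is the existing spinlock.h interface.
 */
#include <spinlock.h>

static spinlock_t state_lock;
static unsigned int shared_counter;

void bump_shared_counter(void)
{
	spin_lock(&state_lock);		/* ldrex/strex loop, then dmb */
	shared_counter++;
	spin_unlock(&state_lock);	/* stl store-release of 0 */
}
```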