author     Soby Mathew <soby.mathew@arm.com>   2016-05-05 14:11:23 +0100
committer  Soby Mathew <soby.mathew@arm.com>   2016-08-10 14:43:48 +0100
commit     727e5238fa3e9220d6a2718fab3b1df22af1dc61 (patch)
tree       ac9260c77ac755b99c1727c836e1e314f8ce0505 /lib
parent     e33b78a658bd54a815c780e17c2d0073db6f59db (diff)
AArch32: Add support to PSCI lib
This patch adds AArch32 support to the PSCI library, as follows:

* The `psci_helpers.S` is implemented for AArch32.
* An AArch32 version of the internal helper function `psci_get_ns_ep_info()` is defined.
* The PSCI library is responsible for the non-secure context initialization. Hence a library interface `psci_prepare_next_non_secure_ctx()` is introduced to enable EL3 runtime firmware to initialize the non-secure context without invoking context management library APIs.

Change-Id: I25595b0cc2dbfdf39dbf7c589b875cba33317b9d
Diffstat (limited to 'lib')
-rw-r--r--  lib/psci/aarch32/psci_helpers.S   180
-rw-r--r--  lib/psci/psci_common.c             48
-rw-r--r--  lib/psci/psci_lib.mk               15
-rw-r--r--  lib/psci/psci_setup.c              12
4 files changed, 249 insertions, 6 deletions
diff --git a/lib/psci/aarch32/psci_helpers.S b/lib/psci/aarch32/psci_helpers.S
new file mode 100644
index 00000000..36d5d7d9
--- /dev/null
+++ b/lib/psci/aarch32/psci_helpers.S
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm_macros.S>
+#include <platform_def.h>
+#include <psci.h>
+
+ .globl psci_do_pwrdown_cache_maintenance
+ .globl psci_do_pwrup_cache_maintenance
+ .globl psci_power_down_wfi
+
+/* -----------------------------------------------------------------------
+ * void psci_do_pwrdown_cache_maintenance(unsigned int power_level);
+ *
+ * This function performs cache maintenance for the specified power
+ * level. The levels of cache affected are determined by the power
+ * level passed as the argument, i.e. level 0 results in a flush of
+ * the L1 cache only, while both the L1 and L2 caches are flushed
+ * for any higher power level.
+ *
+ * Additionally, this function ensures that stack memory is correctly
+ * flushed out to avoid coherency issues due to a change in its memory
+ * attributes after the data cache is disabled.
+ * -----------------------------------------------------------------------
+ */
+func psci_do_pwrdown_cache_maintenance
+ push {r4, lr}
+
+ /* ----------------------------------------------
+ * Turn OFF cache and do stack maintenance
+ * prior to cpu operations. This sequence is
+ * different from AArch64 because in AArch32 the
+ * assembler routines for cpu operations utilize
+ * the stack whereas in AArch64 they don't.
+ * ----------------------------------------------
+ */
+ mov r4, r0
+ bl do_stack_maintenance
+
+ /* ---------------------------------------------
+ * Determine how many levels of cache will be
+ * subject to cache maintenance. Power level
+ * 0 implies that only the cpu is being powered
+ * down. Only the L1 data cache needs to be
+ * flushed to the PoU in this case. For a higher
+ * power level we are assuming that a flush
+ * of L1 data and L2 unified cache is enough.
+ * This information should be provided by the
+ * platform.
+ * ---------------------------------------------
+ */
+ cmp r4, #PSCI_CPU_PWR_LVL
+ pop {r4, lr}
+
+ beq prepare_core_pwr_dwn
+ b prepare_cluster_pwr_dwn
+endfunc psci_do_pwrdown_cache_maintenance
+
+
+/* -----------------------------------------------------------------------
+ * void psci_do_pwrup_cache_maintenance(void);
+ *
+ * This function performs cache maintenance after this cpu is powered up.
+ * Currently, this involves managing the used stack memory before turning
+ * on the data cache.
+ * -----------------------------------------------------------------------
+ */
+func psci_do_pwrup_cache_maintenance
+ push {lr}
+
+ /* ---------------------------------------------
+ * Ensure any in-flight stack writes have made it
+ * to main memory.
+ * ---------------------------------------------
+ */
+ dmb st
+
+ /* ---------------------------------------------
+ * Calculate and store the size of the used
+ * stack memory in r1. Calculate and store the
+ * stack base address in r0.
+ * ---------------------------------------------
+ */
+ bl plat_get_my_stack
+ mov r1, sp
+ sub r1, r0, r1
+ mov r0, sp
+ bl inv_dcache_range
+
+ /* ---------------------------------------------
+ * Enable the data cache.
+ * ---------------------------------------------
+ */
+ ldcopr r0, SCTLR
+ orr r0, r0, #SCTLR_C_BIT
+ stcopr r0, SCTLR
+ isb
+
+ pop {pc}
+endfunc psci_do_pwrup_cache_maintenance
+
+ /* ---------------------------------------------
+ * void do_stack_maintenance(void)
+ * Do stack maintenance by flushing the used
+ * stack to the main memory and invalidating the
+ * remainder.
+ * ---------------------------------------------
+ */
+func do_stack_maintenance
+ push {r4, lr}
+ bl plat_get_my_stack
+
+ /* Turn off the D-cache */
+ ldcopr r1, SCTLR
+ bic r1, #SCTLR_C_BIT
+ stcopr r1, SCTLR
+ isb
+
+ /* ---------------------------------------------
+ * Calculate and store the size of the used
+ * stack memory in r1.
+ * ---------------------------------------------
+ */
+ mov r4, r0
+ mov r1, sp
+ sub r1, r0, r1
+ mov r0, sp
+ bl flush_dcache_range
+
+ /* ---------------------------------------------
+ * Calculate and store the size of the unused
+ * stack memory in r1. Calculate and store the
+ * stack base address in r0.
+ * ---------------------------------------------
+ */
+ sub r0, r4, #PLATFORM_STACK_SIZE
+ sub r1, sp, r0
+ bl inv_dcache_range
+
+ pop {r4, pc}
+endfunc do_stack_maintenance
+
+/* -----------------------------------------------------------------------
+ * This function is called to indicate to the power controller that it
+ * is safe to power down this cpu. The function is not expected to exit
+ * the wfi; the cpu will be released from reset upon the next power up.
+ * -----------------------------------------------------------------------
+ */
+func psci_power_down_wfi
+ dsb sy // ensure write buffer empty
+ wfi
+ bl plat_panic_handler
+endfunc psci_power_down_wfi
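
For reference, the C-visible interface backed by these AArch32 helpers is the same as on AArch64. A rough sketch of the prototypes (the authoritative declarations live in the PSCI headers, which this patch does not touch) is:

    /* Sketch of the C prototypes implemented by the assembly above. */
    void psci_do_pwrdown_cache_maintenance(unsigned int power_level);
    void psci_do_pwrup_cache_maintenance(void);
    void psci_power_down_wfi(void);    /* not expected to return */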
diff --git a/lib/psci/psci_common.c b/lib/psci/psci_common.c
index e87e8c05..68cdd6eb 100644
--- a/lib/psci/psci_common.c
+++ b/lib/psci/psci_common.c
@@ -592,6 +592,53 @@ int psci_validate_mpidr(u_register_t mpidr)
* This function determines the full entrypoint information for the requested
* PSCI entrypoint on power on/resume and returns it.
******************************************************************************/
+#ifdef AARCH32
+static int psci_get_ns_ep_info(entry_point_info_t *ep,
+ uintptr_t entrypoint,
+ u_register_t context_id)
+{
+ u_register_t ep_attr;
+ unsigned int aif, ee, mode;
+ u_register_t scr = read_scr();
+ u_register_t ns_sctlr, sctlr;
+
+ /* Switch to non secure state */
+ write_scr(scr | SCR_NS_BIT);
+ isb();
+ ns_sctlr = read_sctlr();
+
+ sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;
+
+ /* Return to original state */
+ write_scr(scr);
+ isb();
+ ee = 0;
+
+ ep_attr = NON_SECURE | EP_ST_DISABLE;
+ if (sctlr & SCTLR_EE_BIT) {
+ ep_attr |= EP_EE_BIG;
+ ee = 1;
+ }
+ SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);
+
+ ep->pc = entrypoint;
+ memset(&ep->args, 0, sizeof(ep->args));
+ ep->args.arg0 = context_id;
+
+ mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;
+
+ /*
+ * TODO: If HYP mode is not implemented, choose the asynchronous
+ * exception bits according to the values of the SCR.{AW, FW} bits.
+ */
+ aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;
+
+ ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);
+
+ return PSCI_E_SUCCESS;
+}
+
+#else
static int psci_get_ns_ep_info(entry_point_info_t *ep,
uintptr_t entrypoint,
u_register_t context_id)
@@ -646,6 +693,7 @@ static int psci_get_ns_ep_info(entry_point_info_t *ep,
return PSCI_E_SUCCESS;
}
+#endif
/*******************************************************************************
* This function validates the entrypoint with the platform layer if the
diff --git a/lib/psci/psci_lib.mk b/lib/psci/psci_lib.mk
index 3a5833ba..8daa8318 100644
--- a/lib/psci/psci_lib.mk
+++ b/lib/psci/psci_lib.mk
@@ -29,11 +29,10 @@
#
PSCI_LIB_SOURCES := lib/el3_runtime/cpu_data_array.c \
- lib/el3_runtime/aarch64/context.S \
- lib/el3_runtime/aarch64/cpu_data.S \
- lib/el3_runtime/aarch64/context_mgmt.c \
- lib/cpus/aarch64/cpu_helpers.S \
- lib/locks/exclusive/aarch64/spinlock.S \
+ lib/el3_runtime/${ARCH}/cpu_data.S \
+ lib/el3_runtime/${ARCH}/context_mgmt.c \
+ lib/cpus/${ARCH}/cpu_helpers.S \
+ lib/locks/exclusive/${ARCH}/spinlock.S \
lib/psci/psci_off.c \
lib/psci/psci_on.c \
lib/psci/psci_suspend.c \
@@ -41,7 +40,11 @@ PSCI_LIB_SOURCES := lib/el3_runtime/cpu_data_array.c \
lib/psci/psci_main.c \
lib/psci/psci_setup.c \
lib/psci/psci_system_off.c \
- lib/psci/aarch64/psci_helpers.S
+ lib/psci/${ARCH}/psci_helpers.S
+
+ifeq (${ARCH}, aarch64)
+PSCI_LIB_SOURCES += lib/el3_runtime/aarch64/context.S
+endif
ifeq (${USE_COHERENT_MEM}, 1)
PSCI_LIB_SOURCES += lib/locks/bakery/bakery_lock_coherent.c
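
As a usage note, a consumer makefile is expected to set ARCH before including psci_lib.mk so the ${ARCH} paths above resolve to either aarch32 or aarch64. A minimal, hypothetical fragment (the BL32_SOURCES consumer variable is illustrative and not part of this patch):

    # Hypothetical consumer fragment: select AArch32 and pull in the
    # PSCI library sources, which now resolve via ${ARCH}.
    ARCH := aarch32
    include lib/psci/psci_lib.mk
    BL32_SOURCES += ${PSCI_LIB_SOURCES}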
diff --git a/lib/psci/psci_setup.c b/lib/psci/psci_setup.c
index d35e0001..20d06352 100644
--- a/lib/psci/psci_setup.c
+++ b/lib/psci/psci_setup.c
@@ -278,3 +278,15 @@ void psci_arch_setup(void)
/* Initialize the cpu_ops pointer. */
init_cpu_ops();
}
+
+/******************************************************************************
+ * PSCI Library interface to initialize the cpu context for the next non
+ * secure image during cold boot. The relevant registers in the cpu context
+ * need to be retrieved and programmed on return from this interface.
+ *****************************************************************************/
+void psci_prepare_next_non_secure_ctx(entry_point_info_t *next_image_info)
+{
+ assert(GET_SECURITY_STATE(next_image_info->h.attr) == NON_SECURE);
+ cm_init_my_context(next_image_info);
+ cm_prepare_el3_exit(NON_SECURE);
+}
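
To illustrate the intended call flow, the following is a minimal, hypothetical sketch of how an AArch32 EL3 runtime firmware might use this interface during cold boot; the wrapper function, its argument source, and the final exception-return step are assumptions outside this patch:

    /* Headers are indicative; the exact ones depend on the firmware tree. */
    #include <assert.h>
    #include <psci.h>

    /* Hypothetical cold boot hook in EL3 runtime firmware (sketch only). */
    static void prepare_ns_world_entry(entry_point_info_t *bl33_ep_info)
    {
        /* The entry point must describe a non-secure image. */
        assert(bl33_ep_info != NULL);

        /*
         * Let the PSCI library populate and initialize the non-secure
         * cpu context; no direct context management calls are needed.
         */
        psci_prepare_next_non_secure_ctx(bl33_ep_info);

        /*
         * On return, the relevant registers from the initialized context
         * still need to be retrieved and programmed before the exception
         * return to the non-secure world (firmware-specific step).
         */
    }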