Diffstat (limited to 'lib')
-rw-r--r--  lib/el3_runtime/aarch64/context.S       405
-rw-r--r--  lib/el3_runtime/aarch64/context_mgmt.c  383
-rw-r--r--  lib/el3_runtime/aarch64/cpu_data.S       69
-rw-r--r--  lib/el3_runtime/cpu_data_array.c         36
-rw-r--r--  lib/psci/aarch64/psci_entry.S           111
-rw-r--r--  lib/psci/aarch64/psci_helpers.S         154
-rw-r--r--  lib/psci/psci_common.c                  928
-rw-r--r--  lib/psci/psci_lib.mk                     55
-rw-r--r--  lib/psci/psci_main.c                    440
-rw-r--r--  lib/psci/psci_off.c                     169
-rw-r--r--  lib/psci/psci_on.c                      212
-rw-r--r--  lib/psci/psci_private.h                 254
-rw-r--r--  lib/psci/psci_setup.c                   261
-rw-r--r--  lib/psci/psci_stat.c                    309
-rw-r--r--  lib/psci/psci_suspend.c                 284
-rw-r--r--  lib/psci/psci_system_off.c               70
16 files changed, 4140 insertions, 0 deletions
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
new file mode 100644
index 00000000..7982e50b
--- /dev/null
+++ b/lib/el3_runtime/aarch64/context.S
@@ -0,0 +1,405 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+
+ .global el1_sysregs_context_save
+ .global el1_sysregs_context_restore
+#if CTX_INCLUDE_FPREGS
+ .global fpregs_context_save
+ .global fpregs_context_restore
+#endif
+ .global save_gp_registers
+ .global restore_gp_registers_eret
+ .global restore_gp_registers_callee_eret
+ .global el3_exit
+
+/* -----------------------------------------------------
+ * The following function strictly follows the AArch64
+ * PCS to use x9-x17 (temporary caller-saved registers)
+ * to save EL1 system register context. It assumes that
+ * 'x0' is pointing to a 'el1_sys_regs' structure where
+ * the register context will be saved.
+ * -----------------------------------------------------
+ */
+func el1_sysregs_context_save
+
+ mrs x9, spsr_el1
+ mrs x10, elr_el1
+ stp x9, x10, [x0, #CTX_SPSR_EL1]
+
+ mrs x15, sctlr_el1
+ mrs x16, actlr_el1
+ stp x15, x16, [x0, #CTX_SCTLR_EL1]
+
+ mrs x17, cpacr_el1
+ mrs x9, csselr_el1
+ stp x17, x9, [x0, #CTX_CPACR_EL1]
+
+ mrs x10, sp_el1
+ mrs x11, esr_el1
+ stp x10, x11, [x0, #CTX_SP_EL1]
+
+ mrs x12, ttbr0_el1
+ mrs x13, ttbr1_el1
+ stp x12, x13, [x0, #CTX_TTBR0_EL1]
+
+ mrs x14, mair_el1
+ mrs x15, amair_el1
+ stp x14, x15, [x0, #CTX_MAIR_EL1]
+
+ mrs x16, tcr_el1
+ mrs x17, tpidr_el1
+ stp x16, x17, [x0, #CTX_TCR_EL1]
+
+ mrs x9, tpidr_el0
+ mrs x10, tpidrro_el0
+ stp x9, x10, [x0, #CTX_TPIDR_EL0]
+
+ mrs x13, par_el1
+ mrs x14, far_el1
+ stp x13, x14, [x0, #CTX_PAR_EL1]
+
+ mrs x15, afsr0_el1
+ mrs x16, afsr1_el1
+ stp x15, x16, [x0, #CTX_AFSR0_EL1]
+
+ mrs x17, contextidr_el1
+ mrs x9, vbar_el1
+ stp x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
+
+ /* Save AArch32 system registers if the build has instructed so */
+#if CTX_INCLUDE_AARCH32_REGS
+ mrs x11, spsr_abt
+ mrs x12, spsr_und
+ stp x11, x12, [x0, #CTX_SPSR_ABT]
+
+ mrs x13, spsr_irq
+ mrs x14, spsr_fiq
+ stp x13, x14, [x0, #CTX_SPSR_IRQ]
+
+ mrs x15, dacr32_el2
+ mrs x16, ifsr32_el2
+ stp x15, x16, [x0, #CTX_DACR32_EL2]
+
+ mrs x17, fpexc32_el2
+ str x17, [x0, #CTX_FP_FPEXC32_EL2]
+#endif
+
+ /* Save NS timer registers if the build has instructed so */
+#if NS_TIMER_SWITCH
+ mrs x10, cntp_ctl_el0
+ mrs x11, cntp_cval_el0
+ stp x10, x11, [x0, #CTX_CNTP_CTL_EL0]
+
+ mrs x12, cntv_ctl_el0
+ mrs x13, cntv_cval_el0
+ stp x12, x13, [x0, #CTX_CNTV_CTL_EL0]
+
+ mrs x14, cntkctl_el1
+ str x14, [x0, #CTX_CNTKCTL_EL1]
+#endif
+
+ ret
+endfunc el1_sysregs_context_save
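+
+/* -----------------------------------------------------
+ * Illustrative note (a sketch, not part of this patch):
+ * C code sees the save/restore pair above and below
+ * through prototypes of the form
+ *
+ *   void el1_sysregs_context_save(el1_sys_regs_t *regs);
+ *   void el1_sysregs_context_restore(el1_sys_regs_t *regs);
+ *
+ * and normally reaches them via the context management
+ * wrappers cm_el1_sysregs_context_save/restore(), which
+ * pass the 'el1_sys_regs' slice of the per-CPU
+ * cpu_context.
+ * -----------------------------------------------------
+ */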
+
+/* -----------------------------------------------------
+ * The following function strictly follows the AArch64
+ * PCS to use x9-x17 (temporary caller-saved registers)
+ * to restore EL1 system register context. It assumes
+ * that 'x0' is pointing to a 'el1_sys_regs' structure
+ * from where the register context will be restored
+ * -----------------------------------------------------
+ */
+func el1_sysregs_context_restore
+
+ ldp x9, x10, [x0, #CTX_SPSR_EL1]
+ msr spsr_el1, x9
+ msr elr_el1, x10
+
+ ldp x15, x16, [x0, #CTX_SCTLR_EL1]
+ msr sctlr_el1, x15
+ msr actlr_el1, x16
+
+ ldp x17, x9, [x0, #CTX_CPACR_EL1]
+ msr cpacr_el1, x17
+ msr csselr_el1, x9
+
+ ldp x10, x11, [x0, #CTX_SP_EL1]
+ msr sp_el1, x10
+ msr esr_el1, x11
+
+ ldp x12, x13, [x0, #CTX_TTBR0_EL1]
+ msr ttbr0_el1, x12
+ msr ttbr1_el1, x13
+
+ ldp x14, x15, [x0, #CTX_MAIR_EL1]
+ msr mair_el1, x14
+ msr amair_el1, x15
+
+ ldp x16, x17, [x0, #CTX_TCR_EL1]
+ msr tcr_el1, x16
+ msr tpidr_el1, x17
+
+ ldp x9, x10, [x0, #CTX_TPIDR_EL0]
+ msr tpidr_el0, x9
+ msr tpidrro_el0, x10
+
+ ldp x13, x14, [x0, #CTX_PAR_EL1]
+ msr par_el1, x13
+ msr far_el1, x14
+
+ ldp x15, x16, [x0, #CTX_AFSR0_EL1]
+ msr afsr0_el1, x15
+ msr afsr1_el1, x16
+
+ ldp x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
+ msr contextidr_el1, x17
+ msr vbar_el1, x9
+
+ /* Restore AArch32 system registers if the build has instructed so */
+#if CTX_INCLUDE_AARCH32_REGS
+ ldp x11, x12, [x0, #CTX_SPSR_ABT]
+ msr spsr_abt, x11
+ msr spsr_und, x12
+
+ ldp x13, x14, [x0, #CTX_SPSR_IRQ]
+ msr spsr_irq, x13
+ msr spsr_fiq, x14
+
+ ldp x15, x16, [x0, #CTX_DACR32_EL2]
+ msr dacr32_el2, x15
+ msr ifsr32_el2, x16
+
+ ldr x17, [x0, #CTX_FP_FPEXC32_EL2]
+ msr fpexc32_el2, x17
+#endif
+ /* Restore NS timer registers if the build has instructed so */
+#if NS_TIMER_SWITCH
+ ldp x10, x11, [x0, #CTX_CNTP_CTL_EL0]
+ msr cntp_ctl_el0, x10
+ msr cntp_cval_el0, x11
+
+ ldp x12, x13, [x0, #CTX_CNTV_CTL_EL0]
+ msr cntv_ctl_el0, x12
+ msr cntv_cval_el0, x13
+
+ ldr x14, [x0, #CTX_CNTKCTL_EL1]
+ msr cntkctl_el1, x14
+#endif
+
+	/* No explicit ISB required here as ERET covers it */
+ ret
+endfunc el1_sysregs_context_restore
+
+/* -----------------------------------------------------
+ * The following function follows the aapcs_64 strictly
+ * to use x9-x17 (temporary caller-saved registers
+ * according to AArch64 PCS) to save floating point
+ * register context. It assumes that 'x0' is pointing to
+ * a 'fp_regs' structure where the register context will
+ * be saved.
+ *
+ * Access to VFP registers will trap if CPTR_EL3.TFP is
+ * set. However, Trusted Firmware does not currently use
+ * the VFP registers nor set this trap, so the bit is
+ * assumed to be cleared.
+ *
+ * TODO: Revisit when VFP is used in secure world
+ * -----------------------------------------------------
+ */
+#if CTX_INCLUDE_FPREGS
+func fpregs_context_save
+ stp q0, q1, [x0, #CTX_FP_Q0]
+ stp q2, q3, [x0, #CTX_FP_Q2]
+ stp q4, q5, [x0, #CTX_FP_Q4]
+ stp q6, q7, [x0, #CTX_FP_Q6]
+ stp q8, q9, [x0, #CTX_FP_Q8]
+ stp q10, q11, [x0, #CTX_FP_Q10]
+ stp q12, q13, [x0, #CTX_FP_Q12]
+ stp q14, q15, [x0, #CTX_FP_Q14]
+ stp q16, q17, [x0, #CTX_FP_Q16]
+ stp q18, q19, [x0, #CTX_FP_Q18]
+ stp q20, q21, [x0, #CTX_FP_Q20]
+ stp q22, q23, [x0, #CTX_FP_Q22]
+ stp q24, q25, [x0, #CTX_FP_Q24]
+ stp q26, q27, [x0, #CTX_FP_Q26]
+ stp q28, q29, [x0, #CTX_FP_Q28]
+ stp q30, q31, [x0, #CTX_FP_Q30]
+
+ mrs x9, fpsr
+ str x9, [x0, #CTX_FP_FPSR]
+
+ mrs x10, fpcr
+ str x10, [x0, #CTX_FP_FPCR]
+
+ ret
+endfunc fpregs_context_save
+
+/* -----------------------------------------------------
+ * The following function follows the aapcs_64 strictly
+ * to use x9-x17 (temporary caller-saved registers
+ * according to AArch64 PCS) to restore floating point
+ * register context. It assumes that 'x0' is pointing to
+ * a 'fp_regs' structure from where the register context
+ * will be restored.
+ *
+ * Access to VFP registers will trap if CPTR_EL3.TFP is
+ * set. However, Trusted Firmware does not currently use
+ * the VFP registers nor set this trap, so the bit is
+ * assumed to be cleared.
+ *
+ * TODO: Revisit when VFP is used in secure world
+ * -----------------------------------------------------
+ */
+func fpregs_context_restore
+ ldp q0, q1, [x0, #CTX_FP_Q0]
+ ldp q2, q3, [x0, #CTX_FP_Q2]
+ ldp q4, q5, [x0, #CTX_FP_Q4]
+ ldp q6, q7, [x0, #CTX_FP_Q6]
+ ldp q8, q9, [x0, #CTX_FP_Q8]
+ ldp q10, q11, [x0, #CTX_FP_Q10]
+ ldp q12, q13, [x0, #CTX_FP_Q12]
+ ldp q14, q15, [x0, #CTX_FP_Q14]
+ ldp q16, q17, [x0, #CTX_FP_Q16]
+ ldp q18, q19, [x0, #CTX_FP_Q18]
+ ldp q20, q21, [x0, #CTX_FP_Q20]
+ ldp q22, q23, [x0, #CTX_FP_Q22]
+ ldp q24, q25, [x0, #CTX_FP_Q24]
+ ldp q26, q27, [x0, #CTX_FP_Q26]
+ ldp q28, q29, [x0, #CTX_FP_Q28]
+ ldp q30, q31, [x0, #CTX_FP_Q30]
+
+ ldr x9, [x0, #CTX_FP_FPSR]
+ msr fpsr, x9
+
+ ldr x10, [x0, #CTX_FP_FPCR]
+ msr fpcr, x10
+
+	/*
+	 * No explicit ISB is required here, as the ERET
+	 * used to switch to secure EL1 or the non-secure
+	 * world covers it
+	 */
+
+ ret
+endfunc fpregs_context_restore
+#endif /* CTX_INCLUDE_FPREGS */
+
+/* -----------------------------------------------------
+ * The following functions are used to save and restore
+ * all the general purpose registers. Ideally we would
+ * only save and restore the callee saved registers when
+ * a world switch occurs but that type of implementation
+ * is more complex. So currently we will always save and
+ * restore these registers on entry and exit of EL3.
+ * These are not macros to ensure their invocation fits
+ * within the 32 instructions per exception vector.
+ * clobbers: x18
+ * -----------------------------------------------------
+ */
+func save_gp_registers
+ stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+ stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+ stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+ stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+ stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+ stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+ stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+ stp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+ stp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+ stp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+ stp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+ stp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+ stp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+ mrs x18, sp_el0
+ str x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
+ ret
+endfunc save_gp_registers
+
+func restore_gp_registers_eret
+ ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+ ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+ b restore_gp_registers_callee_eret
+endfunc restore_gp_registers_eret
+
+func restore_gp_registers_callee_eret
+ ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+ ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+ ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+ ldp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+ ldp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+ ldp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+ ldp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+ ldp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+ ldp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+ ldp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+ ldp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+ ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
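+	/*
+	 * CTX_GPREG_LR and CTX_GPREG_SP_EL0 are adjacent in the gp_regs
+	 * context (see context.h), so this single ldp loads the saved LR
+	 * into x30 and the saved SP_EL0 value into x17.
+	 */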
+ ldp x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ msr sp_el0, x17
+ ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+ eret
+endfunc restore_gp_registers_callee_eret
+
+ /* -----------------------------------------------------
+ * This routine assumes that the SP_EL3 is pointing to
+ * a valid context structure from where the gp regs and
+ * other special registers can be retrieved.
+ * -----------------------------------------------------
+ */
+func el3_exit
+ /* -----------------------------------------------------
+ * Save the current SP_EL0 i.e. the EL3 runtime stack
+ * which will be used for handling the next SMC. Then
+ * switch to SP_EL3
+ * -----------------------------------------------------
+ */
+ mov x17, sp
+ msr spsel, #1
+ str x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
+
+ /* -----------------------------------------------------
+ * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
+ * -----------------------------------------------------
+ */
+ ldr x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
+ ldp x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+ msr scr_el3, x18
+ msr spsr_el3, x16
+ msr elr_el3, x17
+
+ /* Restore saved general purpose registers and return */
+ b restore_gp_registers_eret
+endfunc el3_exit
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
new file mode 100644
index 00000000..4527aa34
--- /dev/null
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -0,0 +1,383 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <interrupt_mgmt.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <smcc_helpers.h>
+#include <string.h>
+
+
+/*******************************************************************************
+ * Context management library initialisation routine. This library is used by
+ * runtime services to share pointers to 'cpu_context' structures for the secure
+ * and non-secure states. Management of the structures and their associated
+ * memory is not done by the context management library e.g. the PSCI service
+ * manages the cpu context used for entry from and exit to the non-secure state.
+ * The Secure payload dispatcher service manages the context(s) corresponding to
+ * the secure state. It also uses this library to get access to the non-secure
+ * state cpu context pointers.
+ * Lastly, this library provides the API to make SP_EL3 point to the cpu context
+ * which will be used for programming an entry into a lower EL. The same context
+ * will be used to save state upon exception entry from that EL.
+ ******************************************************************************/
+void cm_init(void)
+{
+	/*
+	 * The context management library has only global data to initialize,
+	 * but that will be done when the BSS is zeroed out.
+	 */
+}
+
+/*******************************************************************************
+ * The following function initializes the cpu_context 'ctx' for
+ * first use, and sets the initial entrypoint state as specified by the
+ * entry_point_info structure.
+ *
+ * The security state to initialize is determined by the SECURE attribute
+ * of the entry_point_info. The function returns a pointer to the initialized
+ * context and sets this as the next context to return to.
+ *
+ * The EE and ST attributes are used to configure the endianness and secure
+ * timer availability for the new execution context.
+ *
+ * To prepare the register state for entry call cm_prepare_el3_exit() and
+ * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
+ * cm_el1_sysregs_context_restore().
+ ******************************************************************************/
+static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
+{
+ unsigned int security_state;
+ uint32_t scr_el3;
+ el3_state_t *state;
+ gp_regs_t *gp_regs;
+ unsigned long sctlr_elx;
+
+ assert(ctx);
+
+ security_state = GET_SECURITY_STATE(ep->h.attr);
+
+ /* Clear any residual register values from the context */
+ memset(ctx, 0, sizeof(*ctx));
+
+ /*
+ * Base the context SCR on the current value, adjust for entry point
+ * specific requirements and set trap bits from the IMF
+ * TODO: provide the base/global SCR bits using another mechanism?
+ */
+ scr_el3 = read_scr();
+ scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT |
+ SCR_ST_BIT | SCR_HCE_BIT);
+
+ if (security_state != SECURE)
+ scr_el3 |= SCR_NS_BIT;
+
+ if (GET_RW(ep->spsr) == MODE_RW_64)
+ scr_el3 |= SCR_RW_BIT;
+
+ if (EP_GET_ST(ep->h.attr))
+ scr_el3 |= SCR_ST_BIT;
+
+#ifndef HANDLE_EA_EL3_FIRST
+	/* Explicitly stop trapping aborts from lower exception levels. */
+ scr_el3 &= ~SCR_EA_BIT;
+#endif
+
+#if IMAGE_BL31
+ /*
+ * IRQ/FIQ bits only need setting if interrupt routing
+ * model has been set up for BL31.
+ */
+ scr_el3 |= get_scr_el3_from_routing_model(security_state);
+#endif
+
+ /*
+ * Set up SCTLR_ELx for the target exception level:
+ * EE bit is taken from the entrypoint attributes
+ * M, C and I bits must be zero (as required by PSCI specification)
+ *
+ * The target exception level is based on the spsr mode requested.
+ * If execution is requested to EL2 or hyp mode, HVC is enabled
+ * via SCR_EL3.HCE.
+ *
+	 * Always compute the SCTLR_EL1 value and save it in the cpu_context
+	 * - the EL2 registers are set up by cm_prepare_el3_exit() as they
+	 * are not part of the stored cpu_context.
+	 *
+	 * TODO: In debug builds the spsr should be validated and checked
+	 * against the CPU support, security state, endianness and pc
+ */
+ sctlr_elx = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
+ if (GET_RW(ep->spsr) == MODE_RW_64)
+ sctlr_elx |= SCTLR_EL1_RES1;
+ else
+ sctlr_elx |= SCTLR_AARCH32_EL1_RES1;
+ write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);
+
+ if ((GET_RW(ep->spsr) == MODE_RW_64
+ && GET_EL(ep->spsr) == MODE_EL2)
+ || (GET_RW(ep->spsr) != MODE_RW_64
+ && GET_M32(ep->spsr) == MODE32_hyp)) {
+ scr_el3 |= SCR_HCE_BIT;
+ }
+
+	/* Populate EL3 state so that we have the right context before doing ERET */
+ state = get_el3state_ctx(ctx);
+ write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
+ write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
+ write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr);
+
+	/*
+	 * Store the X0-X7 values from the entry point into the context.
+	 * Use memcpy as we are in control of the layout of the structures.
+	 */
+ gp_regs = get_gpregs_ctx(ctx);
+ memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
+}
+
+/*******************************************************************************
+ * The following function initializes the cpu_context for a CPU specified by
+ * its `cpu_idx` for first use, and sets the initial entrypoint state as
+ * specified by the entry_point_info structure.
+ ******************************************************************************/
+void cm_init_context_by_index(unsigned int cpu_idx,
+ const entry_point_info_t *ep)
+{
+ cpu_context_t *ctx;
+ ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
+ cm_init_context_common(ctx, ep);
+}
+
+/*******************************************************************************
+ * The following function initializes the cpu_context for the current CPU
+ * for first use, and sets the initial entrypoint state as specified by the
+ * entry_point_info structure.
+ ******************************************************************************/
+void cm_init_my_context(const entry_point_info_t *ep)
+{
+ cpu_context_t *ctx;
+ ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
+ cm_init_context_common(ctx, ep);
+}
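+
+/*
+ * Illustrative usage sketch (not part of this patch): a runtime service
+ * such as PSCI would typically populate an entry_point_info_t and then
+ * initialise and prepare the context for the current CPU before exiting
+ * EL3. 'ns_entrypoint' is a hypothetical name used only for the example.
+ *
+ *	entry_point_info_t ep;
+ *
+ *	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, NON_SECURE);
+ *	ep.pc = ns_entrypoint;
+ *	ep.spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+ *	cm_init_my_context(&ep);
+ *	cm_prepare_el3_exit(NON_SECURE);
+ */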
+
+/*******************************************************************************
+ * Prepare the CPU system registers for first entry into secure or normal world
+ *
+ * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized
+ * If execution is requested to non-secure EL1 or svc mode, and the CPU supports
+ * EL2 then EL2 is disabled by configuring all necessary EL2 registers.
+ * For all entries, the EL1 registers are initialized from the cpu_context
+ ******************************************************************************/
+void cm_prepare_el3_exit(uint32_t security_state)
+{
+ uint32_t sctlr_elx, scr_el3, cptr_el2;
+ cpu_context_t *ctx = cm_get_context(security_state);
+
+ assert(ctx);
+
+ if (security_state == NON_SECURE) {
+ scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);
+ if (scr_el3 & SCR_HCE_BIT) {
+ /* Use SCTLR_EL1.EE value to initialise sctlr_el2 */
+ sctlr_elx = read_ctx_reg(get_sysregs_ctx(ctx),
+ CTX_SCTLR_EL1);
+ sctlr_elx &= ~SCTLR_EE_BIT;
+ sctlr_elx |= SCTLR_EL2_RES1;
+ write_sctlr_el2(sctlr_elx);
+ } else if (read_id_aa64pfr0_el1() &
+ (ID_AA64PFR0_ELX_MASK << ID_AA64PFR0_EL2_SHIFT)) {
+ /* EL2 present but unused, need to disable safely */
+
+ /* HCR_EL2 = 0, except RW bit set to match SCR_EL3 */
+ write_hcr_el2((scr_el3 & SCR_RW_BIT) ? HCR_RW_BIT : 0);
+
+ /* SCTLR_EL2 : can be ignored when bypassing */
+
+ /* CPTR_EL2 : disable all traps TCPAC, TTA, TFP */
+ cptr_el2 = read_cptr_el2();
+ cptr_el2 &= ~(TCPAC_BIT | TTA_BIT | TFP_BIT);
+ write_cptr_el2(cptr_el2);
+
+ /* Enable EL1 access to timer */
+ write_cnthctl_el2(EL1PCEN_BIT | EL1PCTEN_BIT);
+
+ /* Reset CNTVOFF_EL2 */
+ write_cntvoff_el2(0);
+
+ /* Set VPIDR, VMPIDR to match MIDR, MPIDR */
+ write_vpidr_el2(read_midr_el1());
+ write_vmpidr_el2(read_mpidr_el1());
+
+ /*
+ * Reset VTTBR_EL2.
+ * Needed because cache maintenance operations depend on
+ * the VMID even when non-secure EL1&0 stage 2 address
+ * translation are disabled.
+ */
+ write_vttbr_el2(0);
+ }
+ }
+
+ el1_sysregs_context_restore(get_sysregs_ctx(ctx));
+
+ cm_set_next_context(ctx);
+}
+
+/*******************************************************************************
+ * The next four functions are used by runtime services to save and restore
+ * EL1 context on the 'cpu_context' structure for the specified security
+ * state.
+ ******************************************************************************/
+void cm_el1_sysregs_context_save(uint32_t security_state)
+{
+ cpu_context_t *ctx;
+
+ ctx = cm_get_context(security_state);
+ assert(ctx);
+
+ el1_sysregs_context_save(get_sysregs_ctx(ctx));
+}
+
+void cm_el1_sysregs_context_restore(uint32_t security_state)
+{
+ cpu_context_t *ctx;
+
+ ctx = cm_get_context(security_state);
+ assert(ctx);
+
+ el1_sysregs_context_restore(get_sysregs_ctx(ctx));
+}
+
+/*******************************************************************************
+ * This function populates ELR_EL3 member of 'cpu_context' pertaining to the
+ * given security state with the given entrypoint
+ ******************************************************************************/
+void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint)
+{
+ cpu_context_t *ctx;
+ el3_state_t *state;
+
+ ctx = cm_get_context(security_state);
+ assert(ctx);
+
+ /* Populate EL3 state so that ERET jumps to the correct entry */
+ state = get_el3state_ctx(ctx);
+ write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
+}
+
+/*******************************************************************************
+ * This function populates ELR_EL3 and SPSR_EL3 members of 'cpu_context'
+ * pertaining to the given security state
+ ******************************************************************************/
+void cm_set_elr_spsr_el3(uint32_t security_state,
+ uintptr_t entrypoint, uint32_t spsr)
+{
+ cpu_context_t *ctx;
+ el3_state_t *state;
+
+ ctx = cm_get_context(security_state);
+ assert(ctx);
+
+ /* Populate EL3 state so that ERET jumps to the correct entry */
+ state = get_el3state_ctx(ctx);
+ write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
+ write_ctx_reg(state, CTX_SPSR_EL3, spsr);
+}
+
+/*******************************************************************************
+ * This function updates a single bit in the SCR_EL3 member of the 'cpu_context'
+ * pertaining to the given security state using the value and bit position
+ * specified in the parameters. It preserves all other bits.
+ ******************************************************************************/
+void cm_write_scr_el3_bit(uint32_t security_state,
+ uint32_t bit_pos,
+ uint32_t value)
+{
+ cpu_context_t *ctx;
+ el3_state_t *state;
+ uint32_t scr_el3;
+
+ ctx = cm_get_context(security_state);
+ assert(ctx);
+
+ /* Ensure that the bit position is a valid one */
+ assert((1 << bit_pos) & SCR_VALID_BIT_MASK);
+
+ /* Ensure that the 'value' is only a bit wide */
+ assert(value <= 1);
+
+ /*
+ * Get the SCR_EL3 value from the cpu context, clear the desired bit
+ * and set it to its new value.
+ */
+ state = get_el3state_ctx(ctx);
+ scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
+ scr_el3 &= ~(1 << bit_pos);
+ scr_el3 |= value << bit_pos;
+ write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
+}
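+
+/*
+ * Example (a sketch): the interrupt management framework uses this
+ * helper to route an interrupt type to EL3 for a given security state,
+ * e.g. setting the SCR_EL3.IRQ bit (bit position 1) for the non-secure
+ * state:
+ *
+ *	cm_write_scr_el3_bit(NON_SECURE, 1, 1);
+ */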
+
+/*******************************************************************************
+ * This function retrieves SCR_EL3 member of 'cpu_context' pertaining to the
+ * given security state.
+ ******************************************************************************/
+uint32_t cm_get_scr_el3(uint32_t security_state)
+{
+ cpu_context_t *ctx;
+ el3_state_t *state;
+
+ ctx = cm_get_context(security_state);
+ assert(ctx);
+
+	/* Retrieve SCR_EL3 from the EL3 state of the context */
+ state = get_el3state_ctx(ctx);
+ return read_ctx_reg(state, CTX_SCR_EL3);
+}
+
+/*******************************************************************************
+ * This function is used to program the context that's used for exception
+ * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
+ * the required security state
+ ******************************************************************************/
+void cm_set_next_eret_context(uint32_t security_state)
+{
+ cpu_context_t *ctx;
+
+ ctx = cm_get_context(security_state);
+ assert(ctx);
+
+ cm_set_next_context(ctx);
+}
diff --git a/lib/el3_runtime/aarch64/cpu_data.S b/lib/el3_runtime/aarch64/cpu_data.S
new file mode 100644
index 00000000..2cc07ba5
--- /dev/null
+++ b/lib/el3_runtime/aarch64/cpu_data.S
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm_macros.S>
+#include <cpu_data.h>
+
+.globl init_cpu_data_ptr
+.globl _cpu_data_by_index
+
+/* -----------------------------------------------------------------
+ * void init_cpu_data_ptr(void)
+ *
+ * Initialise the TPIDR_EL3 register to refer to the cpu_data_t
+ * for the calling CPU. This must be called before the cpu_data
+ * accessors (e.g. get_cpu_data()) are used.
+ *
+ * This can be called without a valid stack. It assumes that
+ * plat_my_core_pos() does not clobber register x10.
+ * clobbers: x0, x1, x10
+ * -----------------------------------------------------------------
+ */
+func init_cpu_data_ptr
+ mov x10, x30
+ bl plat_my_core_pos
+ bl _cpu_data_by_index
+ msr tpidr_el3, x0
+ ret x10
+endfunc init_cpu_data_ptr
+
+/* -----------------------------------------------------------------
+ * cpu_data_t *_cpu_data_by_index(uint32_t cpu_index)
+ *
+ * Return the cpu_data structure for the CPU with given linear index
+ *
+ * This can be called without a valid stack.
+ * clobbers: x0, x1
+ * -----------------------------------------------------------------
+ */
+func _cpu_data_by_index
+ adr x1, percpu_data
+ add x0, x1, x0, LSL #CPU_DATA_LOG2SIZE
+ ret
+endfunc _cpu_data_by_index
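+
+/* -----------------------------------------------------------------
+ * Illustrative note: once init_cpu_data_ptr() has run on a CPU, C
+ * code can reach its cpu_data_t through the accessors in cpu_data.h,
+ * e.g. get_cpu_data(psci_svc_cpu_data), which dereference the
+ * pointer cached in TPIDR_EL3.
+ * -----------------------------------------------------------------
+ */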
diff --git a/lib/el3_runtime/cpu_data_array.c b/lib/el3_runtime/cpu_data_array.c
new file mode 100644
index 00000000..eba21a50
--- /dev/null
+++ b/lib/el3_runtime/cpu_data_array.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <cassert.h>
+#include <cpu_data.h>
+#include <platform_def.h>
+
+/* The cpu_data_t array for all CPUs, indexed by the platform core position */
+cpu_data_t percpu_data[PLATFORM_CORE_COUNT];
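+
+/*
+ * For reference (a sketch): the assembler helper _cpu_data_by_index()
+ * in cpu_data.S is equivalent to the C expression below, given that
+ * sizeof(cpu_data_t) == (1 << CPU_DATA_LOG2SIZE):
+ *
+ *	cpu_data_t *_cpu_data_by_index(uint32_t cpu_index)
+ *	{
+ *		return &percpu_data[cpu_index];
+ *	}
+ */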
diff --git a/lib/psci/aarch64/psci_entry.S b/lib/psci/aarch64/psci_entry.S
new file mode 100644
index 00000000..646ebcf8
--- /dev/null
+++ b/lib/psci/aarch64/psci_entry.S
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <el3_common_macros.S>
+#include <psci.h>
+#include <xlat_tables.h>
+
+ .globl psci_entrypoint
+ .globl psci_power_down_wfi
+
+ /* --------------------------------------------------------------------
+ * This CPU has been physically powered up. It is either resuming from
+ * suspend or has simply been turned on. In both cases, call the power
+ * on finisher.
+ * --------------------------------------------------------------------
+ */
+func psci_entrypoint
+ /*
+ * On the warm boot path, most of the EL3 initialisations performed by
+ * 'el3_entrypoint_common' must be skipped:
+ *
+ * - Only when the platform bypasses the BL1/BL31 entrypoint by
+ * programming the reset address do we need to set the CPU endianness.
+	 * In other cases, we assume this has been taken care of by the
+	 * entrypoint code.
+ *
+ * - No need to determine the type of boot, we know it is a warm boot.
+ *
+ * - Do not try to distinguish between primary and secondary CPUs, this
+ * notion only exists for a cold boot.
+ *
+ * - No need to initialise the memory or the C runtime environment,
+ * it has been done once and for all on the cold boot path.
+ */
+ el3_entrypoint_common \
+ _set_endian=PROGRAMMABLE_RESET_ADDRESS \
+ _warm_boot_mailbox=0 \
+ _secondary_cold_boot=0 \
+ _init_memory=0 \
+ _init_c_runtime=0 \
+ _exception_vectors=runtime_exceptions
+
+ /* --------------------------------------------
+ * Enable the MMU with the DCache disabled. It
+ * is safe to use stacks allocated in normal
+ * memory as a result. All memory accesses are
+ * marked nGnRnE when the MMU is disabled. So
+ * all the stack writes will make it to memory.
+ * All memory accesses are marked Non-cacheable
+ * when the MMU is enabled but D$ is disabled.
+ * So used stack memory is guaranteed to be
+	 * visible immediately after the MMU is enabled.
+ * Enabling the DCache at the same time as the
+ * MMU can lead to speculatively fetched and
+ * possibly stale stack memory being read from
+ * other caches. This can lead to coherency
+ * issues.
+ * --------------------------------------------
+ */
+ mov x0, #DISABLE_DCACHE
+ bl bl31_plat_enable_mmu
+
+ bl psci_power_up_finish
+
+ b el3_exit
+endfunc psci_entrypoint
+
+ /* --------------------------------------------
+ * This function is called to indicate to the
+ * power controller that it is safe to power
+ * down this cpu. It should not exit the wfi
+	 * and will be released from reset upon power
+	 * up. The call to plat_panic_handler below
+	 * catches erroneous exits from wfi.
+ * --------------------------------------------
+ */
+func psci_power_down_wfi
+ dsb sy // ensure write buffer empty
+ wfi
+ bl plat_panic_handler
+endfunc psci_power_down_wfi
+
diff --git a/lib/psci/aarch64/psci_helpers.S b/lib/psci/aarch64/psci_helpers.S
new file mode 100644
index 00000000..87144dd9
--- /dev/null
+++ b/lib/psci/aarch64/psci_helpers.S
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <platform_def.h>
+#include <psci.h>
+
+ .globl psci_do_pwrdown_cache_maintenance
+ .globl psci_do_pwrup_cache_maintenance
+
+/* -----------------------------------------------------------------------
+ * void psci_do_pwrdown_cache_maintenance(unsigned int power_level);
+ *
+ * This function performs cache maintenance for the specified power
+ * level. The levels of cache affected are determined by the power
+ * level passed as the argument, i.e. power level 0 results in a
+ * flush of the L1 cache, while both the L1 and L2 caches are
+ * flushed for a higher power level.
+ *
+ * Additionally, this function also ensures that stack memory is correctly
+ * flushed out to avoid coherency issues due to a change in its memory
+ * attributes after the data cache is disabled.
+ * -----------------------------------------------------------------------
+ */
+func psci_do_pwrdown_cache_maintenance
+ stp x29, x30, [sp,#-16]!
+ stp x19, x20, [sp,#-16]!
+
+ /* ---------------------------------------------
+	 * Determine how many levels of cache will be
+ * subject to cache maintenance. Power level
+ * 0 implies that only the cpu is being powered
+ * down. Only the L1 data cache needs to be
+ * flushed to the PoU in this case. For a higher
+ * power level we are assuming that a flush
+ * of L1 data and L2 unified cache is enough.
+ * This information should be provided by the
+ * platform.
+ * ---------------------------------------------
+ */
+ cmp w0, #PSCI_CPU_PWR_LVL
+ b.eq do_core_pwr_dwn
+ bl prepare_cluster_pwr_dwn
+ b do_stack_maintenance
+
+do_core_pwr_dwn:
+ bl prepare_core_pwr_dwn
+
+ /* ---------------------------------------------
+ * Do stack maintenance by flushing the used
+ * stack to the main memory and invalidating the
+ * remainder.
+ * ---------------------------------------------
+ */
+do_stack_maintenance:
+ bl plat_get_my_stack
+
+ /* ---------------------------------------------
+ * Calculate and store the size of the used
+ * stack memory in x1.
+ * ---------------------------------------------
+ */
+ mov x19, x0
+ mov x1, sp
+ sub x1, x0, x1
+ mov x0, sp
+ bl flush_dcache_range
+
+ /* ---------------------------------------------
+ * Calculate and store the size of the unused
+ * stack memory in x1. Calculate and store the
+ * stack base address in x0.
+ * ---------------------------------------------
+ */
+ sub x0, x19, #PLATFORM_STACK_SIZE
+ sub x1, sp, x0
+ bl inv_dcache_range
+
+ ldp x19, x20, [sp], #16
+ ldp x29, x30, [sp], #16
+ ret
+endfunc psci_do_pwrdown_cache_maintenance
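+
+/* -----------------------------------------------------------------------
+ * For reference, the stack maintenance above in C-like pseudocode (a
+ * sketch; plat_get_my_stack() returns the top of the stack):
+ *
+ *	top  = plat_get_my_stack();
+ *	flush_dcache_range(sp, top - sp);	// used portion
+ *	base = top - PLATFORM_STACK_SIZE;
+ *	inv_dcache_range(base, sp - base);	// unused portion
+ * -----------------------------------------------------------------------
+ */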
+
+
+/* -----------------------------------------------------------------------
+ * void psci_do_pwrup_cache_maintenance(void);
+ *
+ * This function performs cache maintenance after this cpu is powered up.
+ * Currently, this involves managing the used stack memory before turning
+ * on the data cache.
+ * -----------------------------------------------------------------------
+ */
+func psci_do_pwrup_cache_maintenance
+ stp x29, x30, [sp,#-16]!
+
+ /* ---------------------------------------------
+ * Ensure any inflight stack writes have made it
+ * to main memory.
+ * ---------------------------------------------
+ */
+ dmb st
+
+ /* ---------------------------------------------
+ * Calculate and store the size of the used
+ * stack memory in x1. Calculate and store the
+ * stack base address in x0.
+ * ---------------------------------------------
+ */
+ bl plat_get_my_stack
+ mov x1, sp
+ sub x1, x0, x1
+ mov x0, sp
+ bl inv_dcache_range
+
+ /* ---------------------------------------------
+ * Enable the data cache.
+ * ---------------------------------------------
+ */
+ mrs x0, sctlr_el3
+ orr x0, x0, #SCTLR_C_BIT
+ msr sctlr_el3, x0
+ isb
+
+ ldp x29, x30, [sp], #16
+ ret
+endfunc psci_do_pwrup_cache_maintenance
diff --git a/lib/psci/psci_common.c b/lib/psci/psci_common.c
new file mode 100644
index 00000000..2a0afb4c
--- /dev/null
+++ b/lib/psci/psci_common.c
@@ -0,0 +1,928 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <platform.h>
+#include <string.h>
+#include "psci_private.h"
+
+/*
+ * SPD power management operations, expected to be supplied by the registered
+ * SPD on successful SP initialization
+ */
+const spd_pm_ops_t *psci_spd_pm;
+
+/*
+ * PSCI requested local power state map. This array is used to store the local
+ * power states requested by a CPU for power levels from level 1 to
+ * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power
+ * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a
+ * CPU are the same.
+ *
+ * During state coordination, the platform is passed an array containing the
+ * local states requested for a particular non cpu power domain by each cpu
+ * within the domain.
+ *
+ * TODO: Dense packing of the requested states will cause cache thrashing
+ * when multiple power domains write to it. If we allocate the requested
+ * states at each power level in a cache-line aligned per-domain memory,
+ * the cache thrashing can be avoided.
+ */
+static plat_local_state_t
+ psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT];
+
+
+/*******************************************************************************
+ * Arrays that hold the platform's power domain tree information for state
+ * management of power domains.
+ * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain
+ * which is an ancestor of a CPU power domain.
+ * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain
+ ******************************************************************************/
+non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]
+#if USE_COHERENT_MEM
+__section("tzfw_coherent_mem")
+#endif
+;
+
+DEFINE_BAKERY_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
+
+cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * Pointer to functions exported by the platform to complete power mgmt. ops
+ ******************************************************************************/
+const plat_psci_ops_t *psci_plat_pm_ops;
+
+/******************************************************************************
+ * Check that the maximum power level supported by the platform makes sense
+ *****************************************************************************/
+CASSERT(PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL && \
+ PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL, \
+ assert_platform_max_pwrlvl_check);
+
+/*
+ * The plat_local_state used by the platform is one of these types: RUN,
+ * RETENTION and OFF. The platform can define further sub-states for each type
+ * apart from RUN. This categorization is done to verify the sanity of the
+ * psci_power_state passed by the platform and to print debug information. The
+ * categorization is done on the basis of the following conditions:
+ *
+ * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN.
+ *
+ * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is
+ * STATE_TYPE_RETN.
+ *
+ * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is
+ * STATE_TYPE_OFF.
+ */
+typedef enum plat_local_state_type {
+ STATE_TYPE_RUN = 0,
+ STATE_TYPE_RETN,
+ STATE_TYPE_OFF
+} plat_local_state_type_t;
+
+/* The macro used to categorize plat_local_state. */
+#define find_local_state_type(plat_local_state) \
+		((plat_local_state) ? (((plat_local_state) > PLAT_MAX_RET_STATE) \
+ ? STATE_TYPE_OFF : STATE_TYPE_RETN) \
+ : STATE_TYPE_RUN)
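+
+/*
+ * For example, assuming PLAT_MAX_RET_STATE == 1 and PLAT_MAX_OFF_STATE == 2
+ * (the values used by the ARM standard platforms):
+ *
+ *	find_local_state_type(0) == STATE_TYPE_RUN
+ *	find_local_state_type(1) == STATE_TYPE_RETN
+ *	find_local_state_type(2) == STATE_TYPE_OFF
+ */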
+
+/******************************************************************************
+ * Check that the maximum retention level supported by the platform is less
+ * than the maximum off level.
+ *****************************************************************************/
+CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE, \
+ assert_platform_max_off_and_retn_state_check);
+
+/******************************************************************************
+ * This function ensures that the power state parameter in a CPU_SUSPEND request
+ * is valid. If so, it returns the requested states for each power level.
+ *****************************************************************************/
+int psci_validate_power_state(unsigned int power_state,
+ psci_power_state_t *state_info)
+{
+ /* Check SBZ bits in power state are zero */
+ if (psci_check_power_state(power_state))
+ return PSCI_E_INVALID_PARAMS;
+
+ assert(psci_plat_pm_ops->validate_power_state);
+
+ /* Validate the power_state using platform pm_ops */
+ return psci_plat_pm_ops->validate_power_state(power_state, state_info);
+}
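+
+/*
+ * For reference, in the original power_state format defined by the PSCI
+ * specification the StateID occupies bits[15:0], the StateType bit[16]
+ * and the power level bits[25:24]; psci_check_power_state() flags any
+ * set bits outside these fields.
+ */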
+
+/******************************************************************************
+ * This function retrieves the `psci_power_state_t` for system suspend from
+ * the platform.
+ *****************************************************************************/
+void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
+{
+ /*
+ * Assert that the required pm_ops hook is implemented to ensure that
+ * the capability detected during psci_setup() is valid.
+ */
+ assert(psci_plat_pm_ops->get_sys_suspend_power_state);
+
+ /*
+ * Query the platform for the power_state required for system suspend
+ */
+ psci_plat_pm_ops->get_sys_suspend_power_state(state_info);
+}
+
+/*******************************************************************************
+ * This function verifies that all the other cores in the system have been
+ * turned OFF and the current CPU is the last running CPU in the system.
+ * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false)
+ * otherwise.
+ ******************************************************************************/
+unsigned int psci_is_last_on_cpu(void)
+{
+ unsigned int cpu_idx, my_idx = plat_my_core_pos();
+
+ for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
+ if (cpu_idx == my_idx) {
+ assert(psci_get_aff_info_state() == AFF_STATE_ON);
+ continue;
+ }
+
+ if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF)
+ return 0;
+ }
+
+ return 1;
+}
+
+/*******************************************************************************
+ * Routine to return the maximum power level to traverse to after a cpu has
+ * been physically powered up. It is expected to be called immediately after
+ * reset from assembler code.
+ ******************************************************************************/
+static unsigned int get_power_on_target_pwrlvl(void)
+{
+ unsigned int pwrlvl;
+
+ /*
+ * Assume that this cpu was suspended and retrieve its target power
+ * level. If it is invalid then it could only have been turned off
+ * earlier. PLAT_MAX_PWR_LVL will be the highest power level a
+ * cpu can be turned off to.
+ */
+ pwrlvl = psci_get_suspend_pwrlvl();
+ if (pwrlvl == PSCI_INVALID_PWR_LVL)
+ pwrlvl = PLAT_MAX_PWR_LVL;
+ return pwrlvl;
+}
+
+/******************************************************************************
+ * Helper function to update the requested local power state array. This array
+ * does not store the requested state for the CPU power level. Hence an
+ * assertion is added to prevent us from accessing the wrong index.
+ *****************************************************************************/
+static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
+ unsigned int cpu_idx,
+ plat_local_state_t req_pwr_state)
+{
+ assert(pwrlvl > PSCI_CPU_PWR_LVL);
+ psci_req_local_pwr_states[pwrlvl - 1][cpu_idx] = req_pwr_state;
+}
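+
+/*
+ * For example, a state requested by cpu 0 for power level 1 is stored at
+ * psci_req_local_pwr_states[0][0], since level 0 entries are not kept in
+ * this array.
+ */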
+
+/******************************************************************************
+ * This function initializes the psci_req_local_pwr_states.
+ *****************************************************************************/
+void psci_init_req_local_pwr_states(void)
+{
+ /* Initialize the requested state of all non CPU power domains as OFF */
+ memset(&psci_req_local_pwr_states, PLAT_MAX_OFF_STATE,
+ sizeof(psci_req_local_pwr_states));
+}
+
+/******************************************************************************
+ * Helper function to return a reference to an array containing the local power
+ * states requested by each cpu for a power domain at 'pwrlvl'. The size of the
+ * array will be the number of cpu power domains of which this power domain is
+ * an ancestor. These requested states will be used to determine a suitable
+ * target state for this power domain during psci state coordination. An
+ * assertion is added to prevent us from accessing the CPU power level.
+ *****************************************************************************/
+static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
+ unsigned int cpu_idx)
+{
+ assert(pwrlvl > PSCI_CPU_PWR_LVL);
+
+ return &psci_req_local_pwr_states[pwrlvl - 1][cpu_idx];
+}
+
+/******************************************************************************
+ * Helper function to return the current local power state of each power domain
+ * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
+ * function will be called after a cpu is powered on to find the local state
+ * each power domain has emerged from.
+ *****************************************************************************/
+static void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
+ psci_power_state_t *target_state)
+{
+ unsigned int parent_idx, lvl;
+ plat_local_state_t *pd_state = target_state->pwr_domain_state;
+
+ pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
+ parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
+
+ /* Copy the local power state from node to state_info */
+ for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+#if !USE_COHERENT_MEM
+ /*
+ * If using normal memory for psci_non_cpu_pd_nodes, we need
+ * to flush before reading the local power state as another
+ * cpu in the same power domain could have updated it and this
+ * code runs before caches are enabled.
+ */
+ flush_dcache_range(
+ (uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
+ sizeof(psci_non_cpu_pd_nodes[parent_idx]));
+#endif
+ pd_state[lvl] = psci_non_cpu_pd_nodes[parent_idx].local_state;
+ parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+ }
+
+	/* Set the higher levels to RUN */
+ for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
+ target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
+}
+
+/******************************************************************************
+ * Helper function to set the target local power state that each power domain
+ * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
+ * enter. This function will be called after coordination of requested power
+ * states has been done for each power level.
+ *****************************************************************************/
+static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
+ const psci_power_state_t *target_state)
+{
+ unsigned int parent_idx, lvl;
+ const plat_local_state_t *pd_state = target_state->pwr_domain_state;
+
+ psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);
+
+ /*
+ * Need to flush as local_state will be accessed with Data Cache
+ * disabled during power on
+ */
+ flush_cpu_data(psci_svc_cpu_data.local_state);
+
+ parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
+
+ /* Copy the local_state from state_info */
+ for (lvl = 1; lvl <= end_pwrlvl; lvl++) {
+ psci_non_cpu_pd_nodes[parent_idx].local_state = pd_state[lvl];
+#if !USE_COHERENT_MEM
+ flush_dcache_range(
+ (uintptr_t)&psci_non_cpu_pd_nodes[parent_idx],
+ sizeof(psci_non_cpu_pd_nodes[parent_idx]));
+#endif
+ parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+ }
+}
+
+
+/*******************************************************************************
+ * PSCI helper function to get the parent nodes corresponding to a cpu_index.
+ ******************************************************************************/
+void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
+ unsigned int end_lvl,
+ unsigned int node_index[])
+{
+ unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
+ int i;
+
+ for (i = PSCI_CPU_PWR_LVL + 1; i <= end_lvl; i++) {
+ *node_index++ = parent_node;
+ parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
+ }
+}
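+
+/*
+ * Illustrative example (editorial): on a hypothetical two-level topology
+ * (CPU -> cluster -> system, i.e. end_lvl == 2),
+ *
+ *	unsigned int nodes[2];
+ *	psci_get_parent_pwr_domain_nodes(cpu_idx, 2, nodes);
+ *
+ * leaves the index of the CPU's cluster node in nodes[0] and the index of
+ * the system node in nodes[1]; in general nodes[lvl - 1] is the ancestor
+ * at power level 'lvl'.
+ */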
+
+/******************************************************************************
+ * This function is invoked post CPU power up and initialization. It sets the
+ * affinity info state, target power state and requested power state for the
+ * current CPU and all its ancestor power domains to RUN.
+ *****************************************************************************/
+void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
+{
+ unsigned int parent_idx, cpu_idx = plat_my_core_pos(), lvl;
+ parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
+
+ /* Reset the local_state to RUN for the non cpu power domains. */
+ for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+ psci_non_cpu_pd_nodes[parent_idx].local_state =
+ PSCI_LOCAL_STATE_RUN;
+#if !USE_COHERENT_MEM
+ flush_dcache_range(
+ (uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
+ sizeof(psci_non_cpu_pd_nodes[parent_idx]));
+#endif
+ psci_set_req_local_pwr_state(lvl,
+ cpu_idx,
+ PSCI_LOCAL_STATE_RUN);
+ parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+ }
+
+ /* Set the affinity info state to ON */
+ psci_set_aff_info_state(AFF_STATE_ON);
+
+ psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
+ flush_cpu_data(psci_svc_cpu_data);
+}
+
+/******************************************************************************
+ * This function is passed the local power states requested for each power
+ * domain (state_info) between the current CPU domain and its ancestors until
+ * the target power level (end_pwrlvl). It updates the array of requested power
+ * states with this information.
+ *
+ * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
+ * retrieves the states requested by all the cpus of which the power domain at
+ * that level is an ancestor. It passes this information to the platform to
+ * coordinate and return the target power state. If the target state for a level
+ * is RUN then subsequent levels are not considered. At the CPU level, state
+ * coordination is not required. Hence, the requested and the target states are
+ * the same.
+ *
+ * The 'state_info' is updated with the target state for each level between the
+ * CPU and the 'end_pwrlvl' and returned to the caller.
+ *
+ * This function will only be invoked with data cache enabled and while
+ * powering down a core.
+ *****************************************************************************/
+void psci_do_state_coordination(unsigned int end_pwrlvl,
+ psci_power_state_t *state_info)
+{
+ unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
+ unsigned int start_idx, ncpus;
+ plat_local_state_t target_state, *req_states;
+
+ assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
+ parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
+
+ /*
+ * For level 0, the requested state will be equivalent to the
+ * target state.
+ */
+ for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+
+ /* First update the requested power state */
+ psci_set_req_local_pwr_state(lvl, cpu_idx,
+ state_info->pwr_domain_state[lvl]);
+
+ /* Get the requested power states for this power level */
+ start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
+ req_states = psci_get_req_local_pwr_states(lvl, start_idx);
+
+ /*
+ * Let the platform coordinate amongst the requested states at
+ * this power level and return the target local power state.
+ */
+ ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
+ target_state = plat_get_target_pwr_state(lvl,
+ req_states,
+ ncpus);
+
+ state_info->pwr_domain_state[lvl] = target_state;
+
+ /* Break early if the negotiated target power state is RUN */
+ if (is_local_state_run(state_info->pwr_domain_state[lvl]))
+ break;
+
+ parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+ }
+
+ /*
+ * This is for cases when we break out of the above loop early because
+ * the target power state is RUN at a power level < end_pwrlvl.
+ * We update the requested power state from state_info and then
+ * set the target state as RUN.
+ */
+ for (lvl = lvl + 1; lvl <= end_pwrlvl; lvl++) {
+ psci_set_req_local_pwr_state(lvl, cpu_idx,
+ state_info->pwr_domain_state[lvl]);
+ state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
+ }
+
+ /* Update the target state in the power domain nodes */
+ psci_set_target_local_pwr_states(end_pwrlvl, state_info);
+}
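+
+/*
+ * Worked example (editorial sketch): consider two CPUs sharing a single
+ * level 1 (cluster) power domain, with end_pwrlvl == 1. If CPU0 requests
+ * OFF at levels 0 and 1 while CPU1 is still running, the requested states
+ * seen at level 1 are {OFF, RUN}. A typical implementation of
+ * plat_get_target_pwr_state() coordinates these to the shallowest state,
+ * RUN, so the loop above breaks early and only CPU0's own level 0 domain
+ * is actually powered down.
+ */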
+
+/******************************************************************************
+ * This function validates a suspend request by making sure that if a standby
+ * state is requested then no power level is turned off and the highest power
+ * level is placed in a standby/retention state.
+ *
+ * It also ensures that the state that level X will enter is not shallower
+ * than the state that level X + 1 will enter.
+ *
+ * This validation will be enabled only for DEBUG builds as the platform is
+ * expected to perform these validations as well.
+ *****************************************************************************/
+int psci_validate_suspend_req(const psci_power_state_t *state_info,
+ unsigned int is_power_down_state)
+{
+ unsigned int max_off_lvl, target_lvl, max_retn_lvl;
+ plat_local_state_t state;
+ plat_local_state_type_t req_state_type, deepest_state_type;
+ int i;
+
+ /* Find the target suspend power level */
+ target_lvl = psci_find_target_suspend_lvl(state_info);
+ if (target_lvl == PSCI_INVALID_PWR_LVL)
+ return PSCI_E_INVALID_PARAMS;
+
+ /* All power domain levels are in a RUN state to begin with */
+ deepest_state_type = STATE_TYPE_RUN;
+
+ for (i = target_lvl; i >= PSCI_CPU_PWR_LVL; i--) {
+ state = state_info->pwr_domain_state[i];
+ req_state_type = find_local_state_type(state);
+
+ /*
+ * While traversing from the highest power level to the lowest,
+ * the state requested for lower levels has to be the same or
+ * deeper i.e. equal to or greater than the state at the higher
+ * levels. If this condition is true, then the requested state
+ * becomes the deepest state encountered so far.
+ */
+ if (req_state_type < deepest_state_type)
+ return PSCI_E_INVALID_PARAMS;
+ deepest_state_type = req_state_type;
+ }
+
+ /* Find the highest off power level */
+ max_off_lvl = psci_find_max_off_lvl(state_info);
+
+ /* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
+ max_retn_lvl = PSCI_INVALID_PWR_LVL;
+ if (target_lvl != max_off_lvl)
+ max_retn_lvl = target_lvl;
+
+ /*
+ * If this is not a request for a power down state then max off level
+ * has to be invalid and max retention level has to be a valid power
+ * level.
+ */
+ if (!is_power_down_state && (max_off_lvl != PSCI_INVALID_PWR_LVL ||
+ max_retn_lvl == PSCI_INVALID_PWR_LVL))
+ return PSCI_E_INVALID_PARAMS;
+
+ return PSCI_E_SUCCESS;
+}
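+
+/*
+ * Example (editorial): with PLAT_MAX_PWR_LVL == 1, a request of
+ * {level0 = RETENTION, level1 = OFF} is rejected by the loop above: the
+ * CPU cannot merely retain state while the cluster containing it is
+ * powered off. {level0 = OFF, level1 = RETENTION} passes that check, as
+ * each level enters a state at least as deep as its ancestor's. The final
+ * check additionally rejects any OFF state when a standby (non power down)
+ * suspend was requested.
+ */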
+
+/******************************************************************************
+ * This function finds the highest power level which will be powered down
+ * amongst all the power levels specified in the 'state_info' structure
+ *****************************************************************************/
+unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
+{
+ int i;
+
+ for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
+ if (is_local_state_off(state_info->pwr_domain_state[i]))
+ return i;
+ }
+
+ return PSCI_INVALID_PWR_LVL;
+}
+
+/******************************************************************************
+ * This function finds the level of the highest power domain which will be
+ * placed in a low power state during a suspend operation.
+ *****************************************************************************/
+unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
+{
+ int i;
+
+ for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
+ if (!is_local_state_run(state_info->pwr_domain_state[i]))
+ return i;
+ }
+
+ return PSCI_INVALID_PWR_LVL;
+}
+
+/*******************************************************************************
+ * This function is passed a cpu_index and the highest level in the topology
+ * tree that the operation should be applied to. It picks up locks in order of
+ * increasing power domain level in the range specified.
+ ******************************************************************************/
+void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
+ unsigned int cpu_idx)
+{
+ unsigned int parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
+ unsigned int level;
+
+ /* No locking required for level 0. Hence start locking from level 1 */
+ for (level = PSCI_CPU_PWR_LVL + 1; level <= end_pwrlvl; level++) {
+ psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
+ parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+ }
+}
+
+/*******************************************************************************
+ * This function is passed a cpu_index and the highest level in the topology
+ * tree that the operation should be applied to. It releases the locks in order
+ * of decreasing power domain level in the range specified.
+ ******************************************************************************/
+void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
+ unsigned int cpu_idx)
+{
+ unsigned int parent_idx, parent_nodes[PLAT_MAX_PWR_LVL] = {0};
+ int level;
+
+ /* Get the parent nodes */
+ psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);
+
+ /* Unlock top down. No unlocking required for level 0. */
+ for (level = end_pwrlvl; level >= PSCI_CPU_PWR_LVL + 1; level--) {
+ parent_idx = parent_nodes[level - 1];
+ psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
+ }
+}
+
+/*******************************************************************************
+ * Simple routine to determine whether an mpidr is valid.
+ ******************************************************************************/
+int psci_validate_mpidr(u_register_t mpidr)
+{
+ if (plat_core_pos_by_mpidr(mpidr) < 0)
+ return PSCI_E_INVALID_PARAMS;
+
+ return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * This function determines the full entrypoint information for the requested
+ * PSCI entrypoint on power on/resume and returns it.
+ ******************************************************************************/
+static int psci_get_ns_ep_info(entry_point_info_t *ep,
+ uintptr_t entrypoint,
+ u_register_t context_id)
+{
+ u_register_t ep_attr, sctlr;
+ unsigned int daif, ee, mode;
+ u_register_t ns_scr_el3 = read_scr_el3();
+ u_register_t ns_sctlr_el1 = read_sctlr_el1();
+
+ sctlr = ns_scr_el3 & SCR_HCE_BIT ? read_sctlr_el2() : ns_sctlr_el1;
+ ee = 0;
+
+ ep_attr = NON_SECURE | EP_ST_DISABLE;
+ if (sctlr & SCTLR_EE_BIT) {
+ ep_attr |= EP_EE_BIG;
+ ee = 1;
+ }
+ SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);
+
+ ep->pc = entrypoint;
+ memset(&ep->args, 0, sizeof(ep->args));
+ ep->args.arg0 = context_id;
+
+ /*
+ * Figure out whether the cpu enters the non-secure address space
+ * in aarch32 or aarch64
+ */
+ if (ns_scr_el3 & SCR_RW_BIT) {
+
+ /*
+ * Check whether a Thumb entry point has been provided for an
+ * aarch64 EL
+ */
+ if (entrypoint & 0x1)
+ return PSCI_E_INVALID_ADDRESS;
+
+ mode = ns_scr_el3 & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1;
+
+ ep->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+ } else {
+
+ mode = ns_scr_el3 & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;
+
+ /*
+ * TODO: Choose async. exception bits if HYP mode is not
+ * implemented according to the values of SCR.{AW, FW} bits
+ */
+ daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;
+
+ ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif);
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * This function validates the entrypoint with the platform layer if the
+ * appropriate pm_ops hook is exported by the platform and returns the
+ * 'entry_point_info'.
+ ******************************************************************************/
+int psci_validate_entry_point(entry_point_info_t *ep,
+ uintptr_t entrypoint,
+ u_register_t context_id)
+{
+ int rc;
+
+ /* Validate the entrypoint using platform psci_ops */
+ if (psci_plat_pm_ops->validate_ns_entrypoint) {
+ rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
+ if (rc != PSCI_E_SUCCESS)
+ return PSCI_E_INVALID_ADDRESS;
+ }
+
+ /*
+ * Verify and derive the re-entry information for
+ * the non-secure world from the non-secure state from
+ * where this call originated.
+ */
+ rc = psci_get_ns_ep_info(ep, entrypoint, context_id);
+ return rc;
+}
+
+/*******************************************************************************
+ * Generic handler which is called when a cpu is physically powered on. It
+ * traverses the node information and finds the highest power level powered
+ * off and performs generic, architectural, platform setup and state management
+ * to power on that power level and power levels below it.
+ * e.g. For a cpu that's been powered on, it will call the platform specific
+ * code to enable the gic cpu interface and for a cluster it will enable
+ * coherency at the interconnect level in addition to the gic cpu interface.
+ ******************************************************************************/
+void psci_power_up_finish(void)
+{
+ unsigned int end_pwrlvl, cpu_idx = plat_my_core_pos();
+ psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
+
+ /*
+ * Verify that we have been explicitly turned ON or resumed from
+ * suspend.
+ */
+ if (psci_get_aff_info_state() == AFF_STATE_OFF) {
+ ERROR("Unexpected affinity info state");
+ panic();
+ }
+
+ /*
+ * Get the maximum power domain level to traverse to after this cpu
+ * has been physically powered up.
+ */
+ end_pwrlvl = get_power_on_target_pwrlvl();
+
+ /*
+ * This function acquires the lock corresponding to each power level so
+ * that by the time all locks are taken, the system topology is snapshot
+ * and state management can be done safely.
+ */
+ psci_acquire_pwr_domain_locks(end_pwrlvl,
+ cpu_idx);
+
+#if ENABLE_PSCI_STAT
+ /*
+ * Capture power up time-stamp.
+ * No cache maintenance is required as caches are off
+ * and writes are direct to the main memory.
+ */
+ PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_EXIT_LOW_PWR,
+ PMF_NO_CACHE_MAINT);
+#endif
+
+ psci_get_target_local_pwr_states(end_pwrlvl, &state_info);
+
+ /*
+ * This CPU could be resuming from suspend or it could have just been
+ * turned on. To distinguish between these 2 cases, we examine the
+ * affinity state of the CPU:
+ * - If the affinity state is ON_PENDING then it has just been
+ * turned on.
+ * - Else it is resuming from suspend.
+ *
+ * Depending on the type of warm reset identified, choose the right set
+ * of power management handler and perform the generic, architecture
+ * and platform specific handling.
+ */
+ if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING)
+ psci_cpu_on_finish(cpu_idx, &state_info);
+ else
+ psci_cpu_suspend_finish(cpu_idx, &state_info);
+
+ /*
+ * Set the requested and target state of this CPU and all the higher
+ * power domains which are ancestors of this CPU to run.
+ */
+ psci_set_pwr_domains_to_run(end_pwrlvl);
+
+#if ENABLE_PSCI_STAT
+ /*
+ * Update PSCI stats.
+ * Caches are off when writing stats data on the power down path.
+ * Since caches are now enabled, it's necessary to do cache
+ * maintenance before reading that same data.
+ */
+ psci_stats_update_pwr_up(end_pwrlvl, &state_info, PMF_CACHE_MAINT);
+#endif
+
+ /*
+ * This loop releases the lock corresponding to each power level
+ * in the reverse order to which they were acquired.
+ */
+ psci_release_pwr_domain_locks(end_pwrlvl,
+ cpu_idx);
+}
+
+/*******************************************************************************
+ * This function initializes the set of hooks that PSCI invokes as part of power
+ * management operation. The power management hooks are expected to be provided
+ * by the SPD, after it finishes all its initialization.
+ ******************************************************************************/
+void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
+{
+ assert(pm);
+ psci_spd_pm = pm;
+
+ if (pm->svc_migrate)
+ psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);
+
+ if (pm->svc_migrate_info)
+ psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
+ | define_psci_cap(PSCI_MIG_INFO_TYPE);
+}
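+
+/*
+ * Usage sketch (editorial; the handler names below are hypothetical): a
+ * dispatcher would typically register its hooks once its own
+ * initialization completes, e.g.
+ *
+ *	static const spd_pm_ops_t my_spd_pm = {
+ *		.svc_on = my_spd_cpu_on_handler,
+ *		.svc_off = my_spd_cpu_off_handler,
+ *		.svc_migrate_info = my_spd_migrate_info,
+ *	};
+ *	psci_register_spd_pm_hook(&my_spd_pm);
+ *
+ * The migrate related PSCI capabilities are advertised only if the
+ * corresponding hooks are non-NULL.
+ */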
+
+/*******************************************************************************
+ * This function invokes the migrate info hook in the spd_pm_ops. It performs
+ * the necessary return value validation. If the Secure Payload is UP and
+ * migrate capable, it returns the mpidr of the CPU on which the Secure payload
+ * is resident through the mpidr parameter. Else the value of the parameter on
+ * return is undefined.
+ ******************************************************************************/
+int psci_spd_migrate_info(u_register_t *mpidr)
+{
+ int rc;
+
+ if (!psci_spd_pm || !psci_spd_pm->svc_migrate_info)
+ return PSCI_E_NOT_SUPPORTED;
+
+ rc = psci_spd_pm->svc_migrate_info(mpidr);
+
+ assert(rc == PSCI_TOS_UP_MIG_CAP || rc == PSCI_TOS_NOT_UP_MIG_CAP ||
+ rc == PSCI_TOS_NOT_PRESENT_MP || rc == PSCI_E_NOT_SUPPORTED);
+
+ return rc;
+}
+
+/*******************************************************************************
+ * This function prints the state of all power domains present in the
+ * system
+ ******************************************************************************/
+void psci_print_power_domain_map(void)
+{
+#if LOG_LEVEL >= LOG_LEVEL_INFO
+ unsigned int idx;
+ plat_local_state_t state;
+ plat_local_state_type_t state_type;
+
+ /* This array maps to the PSCI_STATE_X definitions in psci.h */
+ static const char * const psci_state_type_str[] = {
+ "ON",
+ "RETENTION",
+ "OFF",
+ };
+
+ INFO("PSCI Power Domain Map:\n");
+ for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - PLATFORM_CORE_COUNT);
+ idx++) {
+ state_type = find_local_state_type(
+ psci_non_cpu_pd_nodes[idx].local_state);
+ INFO(" Domain Node : Level %u, parent_node %d,"
+ " State %s (0x%x)\n",
+ psci_non_cpu_pd_nodes[idx].level,
+ psci_non_cpu_pd_nodes[idx].parent_node,
+ psci_state_type_str[state_type],
+ psci_non_cpu_pd_nodes[idx].local_state);
+ }
+
+ for (idx = 0; idx < PLATFORM_CORE_COUNT; idx++) {
+ state = psci_get_cpu_local_state_by_idx(idx);
+ state_type = find_local_state_type(state);
+ INFO(" CPU Node : MPID 0x%llx, parent_node %d,"
+ " State %s (0x%x)\n",
+ (unsigned long long)psci_cpu_pd_nodes[idx].mpidr,
+ psci_cpu_pd_nodes[idx].parent_node,
+ psci_state_type_str[state_type],
+ psci_get_cpu_local_state_by_idx(idx));
+ }
+#endif
+}
+
+#if ENABLE_PLAT_COMPAT
+/*******************************************************************************
+ * PSCI Compatibility helper function to return the 'power_state' parameter of
+ * the PSCI CPU SUSPEND request for the current CPU. Returns PSCI_INVALID_DATA
+ * if not invoked within CPU_SUSPEND for the current CPU.
+ ******************************************************************************/
+int psci_get_suspend_powerstate(void)
+{
+ /* Sanity check to verify that CPU is within CPU_SUSPEND */
+ if (psci_get_aff_info_state() == AFF_STATE_ON &&
+ !is_local_state_run(psci_get_cpu_local_state()))
+ return psci_power_state_compat[plat_my_core_pos()];
+
+ return PSCI_INVALID_DATA;
+}
+
+/*******************************************************************************
+ * PSCI Compatibility helper function to return the state id of the current
+ * cpu encoded in the 'power_state' parameter. Returns PSCI_INVALID_DATA
+ * if not invoked within CPU_SUSPEND for the current CPU.
+ ******************************************************************************/
+int psci_get_suspend_stateid(void)
+{
+ unsigned int power_state;
+ power_state = psci_get_suspend_powerstate();
+ if (power_state != PSCI_INVALID_DATA)
+ return psci_get_pstate_id(power_state);
+
+ return PSCI_INVALID_DATA;
+}
+
+/*******************************************************************************
+ * PSCI Compatibility helper function to return the state id encoded in the
+ * 'power_state' parameter of the CPU specified by 'mpidr'. Returns
+ * PSCI_INVALID_DATA if the CPU is not in CPU_SUSPEND.
+ ******************************************************************************/
+int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr)
+{
+ int cpu_idx = plat_core_pos_by_mpidr(mpidr);
+
+ if (cpu_idx < 0)
+ return PSCI_INVALID_DATA;
+
+ /* Sanity check to verify that the CPU is in CPU_SUSPEND */
+ if (psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_ON &&
+ !is_local_state_run(psci_get_cpu_local_state_by_idx(cpu_idx)))
+ return psci_get_pstate_id(psci_power_state_compat[cpu_idx]);
+
+ return PSCI_INVALID_DATA;
+}
+
+/*******************************************************************************
+ * This function returns highest affinity level which is in OFF
+ * state. The affinity instance with which the level is associated is
+ * determined by the caller.
+ ******************************************************************************/
+unsigned int psci_get_max_phys_off_afflvl(void)
+{
+ psci_power_state_t state_info;
+
+ memset(&state_info, 0, sizeof(state_info));
+ psci_get_target_local_pwr_states(PLAT_MAX_PWR_LVL, &state_info);
+
+ return psci_find_target_suspend_lvl(&state_info);
+}
+
+/*******************************************************************************
+ * PSCI Compatibility helper function to return target affinity level requested
+ * for the CPU_SUSPEND. This function assumes affinity levels correspond to
+ * power domain levels on the platform.
+ ******************************************************************************/
+int psci_get_suspend_afflvl(void)
+{
+ return psci_get_suspend_pwrlvl();
+}
+
+#endif
diff --git a/lib/psci/psci_lib.mk b/lib/psci/psci_lib.mk
new file mode 100644
index 00000000..93c78329
--- /dev/null
+++ b/lib/psci/psci_lib.mk
@@ -0,0 +1,55 @@
+#
+# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# Neither the name of ARM nor the names of its contributors may be used
+# to endorse or promote products derived from this software without specific
+# prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+
+PSCI_LIB_SOURCES := lib/el3_runtime/cpu_data_array.c \
+ lib/el3_runtime/aarch64/context.S \
+ lib/el3_runtime/aarch64/cpu_data.S \
+ lib/el3_runtime/aarch64/context_mgmt.c \
+ lib/cpus/aarch64/cpu_helpers.S \
+ lib/locks/exclusive/spinlock.S \
+ lib/psci/psci_off.c \
+ lib/psci/psci_on.c \
+ lib/psci/psci_suspend.c \
+ lib/psci/psci_common.c \
+ lib/psci/psci_main.c \
+ lib/psci/psci_setup.c \
+ lib/psci/psci_system_off.c \
+ lib/psci/aarch64/psci_entry.S \
+ lib/psci/aarch64/psci_helpers.S
+
+ifeq (${USE_COHERENT_MEM}, 1)
+PSCI_LIB_SOURCES += lib/locks/bakery/bakery_lock_coherent.c
+else
+PSCI_LIB_SOURCES += lib/locks/bakery/bakery_lock_normal.c
+endif
+
+ifeq (${ENABLE_PSCI_STAT}, 1)
+PSCI_LIB_SOURCES += lib/psci/psci_stat.c
+endif
diff --git a/lib/psci/psci_main.c b/lib/psci/psci_main.c
new file mode 100644
index 00000000..04ef10e7
--- /dev/null
+++ b/lib/psci/psci_main.c
@@ -0,0 +1,440 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <platform.h>
+#include <runtime_svc.h>
+#include <std_svc.h>
+#include <string.h>
+#include "psci_private.h"
+
+/*******************************************************************************
+ * PSCI frontend api for servicing SMCs. Described in the PSCI spec.
+ ******************************************************************************/
+int psci_cpu_on(u_register_t target_cpu,
+ uintptr_t entrypoint,
+ u_register_t context_id)
+
+{
+ int rc;
+ entry_point_info_t ep;
+
+ /* Determine if the cpu exists or not */
+ rc = psci_validate_mpidr(target_cpu);
+ if (rc != PSCI_E_SUCCESS)
+ return PSCI_E_INVALID_PARAMS;
+
+ /* Validate the entry point and get the entry_point_info */
+ rc = psci_validate_entry_point(&ep, entrypoint, context_id);
+ if (rc != PSCI_E_SUCCESS)
+ return rc;
+
+ /*
+ * To turn this cpu on, specify which power
+ * levels need to be turned on
+ */
+ return psci_cpu_on_start(target_cpu, &ep);
+}
+
+unsigned int psci_version(void)
+{
+ return PSCI_MAJOR_VER | PSCI_MINOR_VER;
+}
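+
+/*
+ * Editorial note: per the PSCI specification the version is returned with
+ * the major number in bits[31:16] and the minor number in bits[15:0], so
+ * PSCI_MAJOR_VER is assumed to be pre-shifted. For example, a PSCI 1.0
+ * implementation returns 0x00010000.
+ */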
+
+int psci_cpu_suspend(unsigned int power_state,
+ uintptr_t entrypoint,
+ u_register_t context_id)
+{
+ int rc;
+ unsigned int target_pwrlvl, is_power_down_state;
+ entry_point_info_t ep;
+ psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
+ plat_local_state_t cpu_pd_state;
+
+ /* Validate the power_state parameter */
+ rc = psci_validate_power_state(power_state, &state_info);
+ if (rc != PSCI_E_SUCCESS) {
+ assert(rc == PSCI_E_INVALID_PARAMS);
+ return rc;
+ }
+
+ /*
+ * Get the value of the state type bit from the power state parameter.
+ */
+ is_power_down_state = psci_get_pstate_type(power_state);
+
+ /* Sanity check the requested suspend levels */
+ assert(psci_validate_suspend_req(&state_info, is_power_down_state)
+ == PSCI_E_SUCCESS);
+
+ target_pwrlvl = psci_find_target_suspend_lvl(&state_info);
+
+ /* Fast path for CPU standby.*/
+ if (is_cpu_standby_req(is_power_down_state, target_pwrlvl)) {
+ if (!psci_plat_pm_ops->cpu_standby)
+ return PSCI_E_INVALID_PARAMS;
+
+ /*
+ * Set the state of the CPU power domain to the platform
+ * specific retention state and enter the standby state.
+ */
+ cpu_pd_state = state_info.pwr_domain_state[PSCI_CPU_PWR_LVL];
+ psci_set_cpu_local_state(cpu_pd_state);
+
+#if ENABLE_PSCI_STAT
+ /*
+ * Capture time-stamp before CPU standby.
+ * No cache maintenance is needed as caches
+ * are ON throughout the CPU standby operation.
+ */
+ PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
+ PMF_NO_CACHE_MAINT);
+#endif
+
+ psci_plat_pm_ops->cpu_standby(cpu_pd_state);
+
+ /* Upon exit from standby, set the state back to RUN. */
+ psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
+
+#if ENABLE_PSCI_STAT
+ /* Capture time-stamp after CPU standby */
+ PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_EXIT_LOW_PWR,
+ PMF_NO_CACHE_MAINT);
+
+ /* Update PSCI stats */
+ psci_stats_update_pwr_up(PSCI_CPU_PWR_LVL, &state_info,
+ PMF_NO_CACHE_MAINT);
+#endif
+
+ return PSCI_E_SUCCESS;
+ }
+
+ /*
+ * If a power down state has been requested, we need to verify entry
+ * point and program entry information.
+ */
+ if (is_power_down_state) {
+ rc = psci_validate_entry_point(&ep, entrypoint, context_id);
+ if (rc != PSCI_E_SUCCESS)
+ return rc;
+ }
+
+ /*
+ * Do what is needed to enter the power down state. Upon success,
+ * enter the final wfi which will power down this CPU. This function
+ * might return if the power down was abandoned for any reason, e.g.
+ * arrival of an interrupt.
+ */
+ psci_cpu_suspend_start(&ep,
+ target_pwrlvl,
+ &state_info,
+ is_power_down_state);
+
+ return PSCI_E_SUCCESS;
+}
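+
+/*
+ * Editorial note: the exact power_state layout is validated by the
+ * platform via psci_validate_power_state(). As an illustration, in the
+ * original PSCI state format the state type bit distinguishes standby (0)
+ * from powerdown (1); a standby request whose deepest affected level is 0
+ * takes the CPU standby fast path above, while a powerdown request must
+ * also supply a valid non-secure entrypoint.
+ */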
+
+int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id)
+{
+ int rc;
+ psci_power_state_t state_info;
+ entry_point_info_t ep;
+
+ /* Check if the current CPU is the last ON CPU in the system */
+ if (!psci_is_last_on_cpu())
+ return PSCI_E_DENIED;
+
+ /* Validate the entry point and get the entry_point_info */
+ rc = psci_validate_entry_point(&ep, entrypoint, context_id);
+ if (rc != PSCI_E_SUCCESS)
+ return rc;
+
+ /* Query the psci_power_state for system suspend */
+ psci_query_sys_suspend_pwrstate(&state_info);
+
+ /* Ensure that the psci_power_state makes sense */
+ assert(psci_find_target_suspend_lvl(&state_info) == PLAT_MAX_PWR_LVL);
+ assert(psci_validate_suspend_req(&state_info, PSTATE_TYPE_POWERDOWN)
+ == PSCI_E_SUCCESS);
+ assert(is_local_state_off(state_info.pwr_domain_state[PLAT_MAX_PWR_LVL]));
+
+ /*
+ * Do what is needed to enter the system suspend state. This function
+ * might return if the power down was abandoned for any reason, e.g.
+ * arrival of an interrupt.
+ */
+ psci_cpu_suspend_start(&ep,
+ PLAT_MAX_PWR_LVL,
+ &state_info,
+ PSTATE_TYPE_POWERDOWN);
+
+ return PSCI_E_SUCCESS;
+}
+
+int psci_cpu_off(void)
+{
+ int rc;
+ unsigned int target_pwrlvl = PLAT_MAX_PWR_LVL;
+
+ /*
+ * Do what is needed to power off this CPU and possibly higher power
+ * levels if it is able to do so. Upon success, enter the final wfi
+ * which will power down this CPU.
+ */
+ rc = psci_do_cpu_off(target_pwrlvl);
+
+ /*
+ * The only error cpu_off can return is E_DENIED. So check if that's
+ * indeed the case.
+ */
+ assert(rc == PSCI_E_DENIED);
+
+ return rc;
+}
+
+int psci_affinity_info(u_register_t target_affinity,
+ unsigned int lowest_affinity_level)
+{
+ int target_idx;
+
+ /* We don't support levels higher than PSCI_CPU_PWR_LVL */
+ if (lowest_affinity_level > PSCI_CPU_PWR_LVL)
+ return PSCI_E_INVALID_PARAMS;
+
+ /* Calculate the cpu index of the target */
+ target_idx = plat_core_pos_by_mpidr(target_affinity);
+ if (target_idx < 0)
+ return PSCI_E_INVALID_PARAMS;
+
+ return psci_get_aff_info_state_by_idx(target_idx);
+}
+
+int psci_migrate(u_register_t target_cpu)
+{
+ int rc;
+ u_register_t resident_cpu_mpidr;
+
+ rc = psci_spd_migrate_info(&resident_cpu_mpidr);
+ if (rc != PSCI_TOS_UP_MIG_CAP)
+ return (rc == PSCI_TOS_NOT_UP_MIG_CAP) ?
+ PSCI_E_DENIED : PSCI_E_NOT_SUPPORTED;
+
+ /*
+ * Migrate should only be invoked on the CPU where
+ * the Secure OS is resident.
+ */
+ if (resident_cpu_mpidr != read_mpidr_el1())
+ return PSCI_E_NOT_PRESENT;
+
+ /* Check the validity of the specified target cpu */
+ rc = psci_validate_mpidr(target_cpu);
+ if (rc != PSCI_E_SUCCESS)
+ return PSCI_E_INVALID_PARAMS;
+
+ assert(psci_spd_pm && psci_spd_pm->svc_migrate);
+
+ rc = psci_spd_pm->svc_migrate(read_mpidr_el1(), target_cpu);
+ assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
+
+ return rc;
+}
+
+int psci_migrate_info_type(void)
+{
+ u_register_t resident_cpu_mpidr;
+
+ return psci_spd_migrate_info(&resident_cpu_mpidr);
+}
+
+long psci_migrate_info_up_cpu(void)
+{
+ u_register_t resident_cpu_mpidr;
+ int rc;
+
+ /*
+ * The return value of this function depends upon what
+ * psci_spd_migrate_info() returns.
+ */
+ rc = psci_spd_migrate_info(&resident_cpu_mpidr);
+ if (rc != PSCI_TOS_NOT_UP_MIG_CAP && rc != PSCI_TOS_UP_MIG_CAP)
+ return PSCI_E_INVALID_PARAMS;
+
+ return resident_cpu_mpidr;
+}
+
+int psci_features(unsigned int psci_fid)
+{
+ unsigned int local_caps = psci_caps;
+
+ /* Check if it is a 64 bit function */
+ if (((psci_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64)
+ local_caps &= PSCI_CAP_64BIT_MASK;
+
+ /* Check for invalid fid */
+ if (!(is_std_svc_call(psci_fid) && is_valid_fast_smc(psci_fid)
+ && is_psci_fid(psci_fid)))
+ return PSCI_E_NOT_SUPPORTED;
+
+ /* Check if the psci fid is supported or not */
+ if (!(local_caps & define_psci_cap(psci_fid)))
+ return PSCI_E_NOT_SUPPORTED;
+
+ /* Format the feature flags */
+ if (psci_fid == PSCI_CPU_SUSPEND_AARCH32 ||
+ psci_fid == PSCI_CPU_SUSPEND_AARCH64) {
+ /*
+ * The trusted firmware does not support OS Initiated Mode.
+ */
+ return (FF_PSTATE << FF_PSTATE_SHIFT) |
+ ((!FF_SUPPORTS_OS_INIT_MODE) << FF_MODE_SUPPORT_SHIFT);
+ }
+
+ /* Return 0 for all other fid's */
+ return PSCI_E_SUCCESS;
+}
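+
+/*
+ * Example (editorial): for CPU_SUSPEND the returned feature flags encode
+ * the power_state format at FF_PSTATE_SHIFT and OS-initiated mode support
+ * at FF_MODE_SUPPORT_SHIFT; since this firmware does not support
+ * OS-initiated mode, that flag is always returned as 0. Every other
+ * supported function simply returns PSCI_E_SUCCESS (0).
+ */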
+
+/*******************************************************************************
+ * PSCI top level handler for servicing SMCs.
+ ******************************************************************************/
+uintptr_t psci_smc_handler(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags)
+{
+ if (is_caller_secure(flags))
+ SMC_RET1(handle, SMC_UNK);
+
+ /* Check the fid against the capabilities */
+ if (!(psci_caps & define_psci_cap(smc_fid)))
+ SMC_RET1(handle, SMC_UNK);
+
+ if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
+ /* 32-bit PSCI function, clear top parameter bits */
+
+ x1 = (uint32_t)x1;
+ x2 = (uint32_t)x2;
+ x3 = (uint32_t)x3;
+
+ switch (smc_fid) {
+ case PSCI_VERSION:
+ SMC_RET1(handle, psci_version());
+
+ case PSCI_CPU_OFF:
+ SMC_RET1(handle, psci_cpu_off());
+
+ case PSCI_CPU_SUSPEND_AARCH32:
+ SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3));
+
+ case PSCI_CPU_ON_AARCH32:
+ SMC_RET1(handle, psci_cpu_on(x1, x2, x3));
+
+ case PSCI_AFFINITY_INFO_AARCH32:
+ SMC_RET1(handle, psci_affinity_info(x1, x2));
+
+ case PSCI_MIG_AARCH32:
+ SMC_RET1(handle, psci_migrate(x1));
+
+ case PSCI_MIG_INFO_TYPE:
+ SMC_RET1(handle, psci_migrate_info_type());
+
+ case PSCI_MIG_INFO_UP_CPU_AARCH32:
+ SMC_RET1(handle, psci_migrate_info_up_cpu());
+
+ case PSCI_SYSTEM_SUSPEND_AARCH32:
+ SMC_RET1(handle, psci_system_suspend(x1, x2));
+
+ case PSCI_SYSTEM_OFF:
+ psci_system_off();
+ /* We should never return from psci_system_off() */
+
+ case PSCI_SYSTEM_RESET:
+ psci_system_reset();
+ /* We should never return from psci_system_reset() */
+
+ case PSCI_FEATURES:
+ SMC_RET1(handle, psci_features(x1));
+
+#if ENABLE_PSCI_STAT
+ case PSCI_STAT_RESIDENCY_AARCH32:
+ SMC_RET1(handle, psci_stat_residency(x1, x2));
+
+ case PSCI_STAT_COUNT_AARCH32:
+ SMC_RET1(handle, psci_stat_count(x1, x2));
+#endif
+
+ default:
+ break;
+ }
+ } else {
+ /* 64-bit PSCI function */
+
+ switch (smc_fid) {
+ case PSCI_CPU_SUSPEND_AARCH64:
+ SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3));
+
+ case PSCI_CPU_ON_AARCH64:
+ SMC_RET1(handle, psci_cpu_on(x1, x2, x3));
+
+ case PSCI_AFFINITY_INFO_AARCH64:
+ SMC_RET1(handle, psci_affinity_info(x1, x2));
+
+ case PSCI_MIG_AARCH64:
+ SMC_RET1(handle, psci_migrate(x1));
+
+ case PSCI_MIG_INFO_UP_CPU_AARCH64:
+ SMC_RET1(handle, psci_migrate_info_up_cpu());
+
+ case PSCI_SYSTEM_SUSPEND_AARCH64:
+ SMC_RET1(handle, psci_system_suspend(x1, x2));
+
+#if ENABLE_PSCI_STAT
+ case PSCI_STAT_RESIDENCY_AARCH64:
+ SMC_RET1(handle, psci_stat_residency(x1, x2));
+
+ case PSCI_STAT_COUNT_AARCH64:
+ SMC_RET1(handle, psci_stat_count(x1, x2));
+#endif
+
+ default:
+ break;
+ }
+ }
+
+ WARN("Unimplemented PSCI Call: 0x%x\n", smc_fid);
+ SMC_RET1(handle, SMC_UNK);
+}
diff --git a/lib/psci/psci_off.c b/lib/psci/psci_off.c
new file mode 100644
index 00000000..471141dd
--- /dev/null
+++ b/lib/psci/psci_off.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <platform.h>
+#include <string.h>
+#include "psci_private.h"
+
+/******************************************************************************
+ * Construct the psci_power_state to request power OFF at all power levels.
+ ******************************************************************************/
+static void psci_set_power_off_state(psci_power_state_t *state_info)
+{
+ int lvl;
+
+ for (lvl = PSCI_CPU_PWR_LVL; lvl <= PLAT_MAX_PWR_LVL; lvl++)
+ state_info->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE;
+}
+
+/******************************************************************************
+ * Top level handler which is called when a cpu wants to power itself down.
+ * It's assumed that along with turning the cpu power domain off, power
+ * domains at higher levels will be turned off as far as possible. It finds
+ * the highest level where a domain has to be powered off by traversing the
+ * node information and then performs generic, architectural, platform setup
+ * and state management required to turn OFF that power domain and domains
+ * below it. e.g. For a cpu that's to be powered OFF, it could mean programming
+ * the power controller whereas for a cluster that's to be powered off, it will
+ * call the platform specific code which will disable coherency at the
+ * interconnect level if the cpu is the last in the cluster and also
+ * program the power controller.
+ ******************************************************************************/
+int psci_do_cpu_off(unsigned int end_pwrlvl)
+{
+ int rc = PSCI_E_SUCCESS, idx = plat_my_core_pos();
+ psci_power_state_t state_info;
+
+ /*
+ * This function must only be called on platforms where the
+ * CPU_OFF platform hooks have been implemented.
+ */
+ assert(psci_plat_pm_ops->pwr_domain_off);
+
+ /*
+ * This function acquires the lock corresponding to each power
+ * level so that by the time all locks are taken, the system topology
+ * is snapshot and state management can be done safely.
+ */
+ psci_acquire_pwr_domain_locks(end_pwrlvl,
+ idx);
+
+ /*
+ * Call the cpu off handler registered by the Secure Payload Dispatcher
+ * to let it do any bookkeeping. Assume that the SPD always reports an
+ * E_DENIED error if the SP refuses to power down.
+ */
+ if (psci_spd_pm && psci_spd_pm->svc_off) {
+ rc = psci_spd_pm->svc_off(0);
+ if (rc)
+ goto exit;
+ }
+
+ /* Construct the psci_power_state for CPU_OFF */
+ psci_set_power_off_state(&state_info);
+
+ /*
+ * This function is passed the requested state info and
+ * it returns the negotiated state info for each power level up to
+ * the end level specified.
+ */
+ psci_do_state_coordination(end_pwrlvl, &state_info);
+
+#if ENABLE_PSCI_STAT
+ /* Update the last cpu for each level till end_pwrlvl */
+ psci_stats_update_pwr_down(end_pwrlvl, &state_info);
+#endif
+
+ /*
+ * Arch. management. Perform the necessary steps to flush all
+ * cpu caches.
+ */
+ psci_do_pwrdown_cache_maintenance(psci_find_max_off_lvl(&state_info));
+
+ /*
+ * Plat. management: Perform platform specific actions to turn this
+ * cpu off e.g. exit cpu coherency, program the power controller etc.
+ */
+ psci_plat_pm_ops->pwr_domain_off(&state_info);
+
+#if ENABLE_PSCI_STAT
+ /*
+ * Capture time-stamp while entering low power state.
+ * No cache maintenance needed because caches are off
+ * and writes are direct to main memory.
+ */
+ PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
+ PMF_NO_CACHE_MAINT);
+#endif
+
+exit:
+ /*
+ * Release the locks corresponding to each power level in the
+ * reverse order to which they were acquired.
+ */
+ psci_release_pwr_domain_locks(end_pwrlvl,
+ idx);
+
+ /*
+ * Check if all actions needed to safely power down this cpu have
+ * successfully completed.
+ */
+ if (rc == PSCI_E_SUCCESS) {
+ /*
+ * Set the affinity info state to OFF. This writes directly to
+ * main memory as caches are disabled, so cache maintenance is
+ * required to ensure that later cached reads of aff_info_state
+ * return AFF_STATE_OFF. A dsbish() ensures ordering of the
+ * update to the affinity info state prior to cache line
+ * invalidation.
+ */
+ flush_cpu_data(psci_svc_cpu_data.aff_info_state);
+ psci_set_aff_info_state(AFF_STATE_OFF);
+ dsbish();
+ inv_cpu_data(psci_svc_cpu_data.aff_info_state);
+
+ if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi) {
+ /* This function must not return */
+ psci_plat_pm_ops->pwr_domain_pwr_down_wfi(&state_info);
+ } else {
+ /*
+ * Enter a wfi loop which will allow the power
+ * controller to physically power down this cpu.
+ */
+ psci_power_down_wfi();
+ }
+ }
+
+ return rc;
+}
diff --git a/lib/psci/psci_on.c b/lib/psci/psci_on.c
new file mode 100644
index 00000000..d4826ed8
--- /dev/null
+++ b/lib/psci/psci_on.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <bl31.h>
+#include <debug.h>
+#include <context_mgmt.h>
+#include <platform.h>
+#include <runtime_svc.h>
+#include <stddef.h>
+#include "psci_private.h"
+
+/*******************************************************************************
+ * This function checks whether a cpu which has been requested to be turned on
+ * is OFF to begin with.
+ ******************************************************************************/
+static int cpu_on_validate_state(aff_info_state_t aff_state)
+{
+ if (aff_state == AFF_STATE_ON)
+ return PSCI_E_ALREADY_ON;
+
+ if (aff_state == AFF_STATE_ON_PENDING)
+ return PSCI_E_ON_PENDING;
+
+ assert(aff_state == AFF_STATE_OFF);
+ return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * Generic handler which is called to physically power on a cpu identified by
+ * its mpidr. It performs the generic, architectural, platform setup and state
+ * management to power on the target cpu e.g. it will ensure that
+ * enough information is stashed for it to resume execution in the non-secure
+ * security state.
+ *
+ * The state of all the relevant power domains is changed after calling the
+ * platform handler, as it can return an error.
+ ******************************************************************************/
+int psci_cpu_on_start(u_register_t target_cpu,
+ entry_point_info_t *ep)
+{
+ int rc;
+ unsigned int target_idx = plat_core_pos_by_mpidr(target_cpu);
+ aff_info_state_t target_aff_state;
+
+ /* Calling function must supply valid input arguments */
+ assert((int) target_idx >= 0);
+ assert(ep != NULL);
+
+ /*
+ * This function must only be called on platforms where the
+ * CPU_ON platform hooks have been implemented.
+ */
+ assert(psci_plat_pm_ops->pwr_domain_on &&
+ psci_plat_pm_ops->pwr_domain_on_finish);
+
+ /* Protect against multiple CPUs trying to turn ON the same target CPU */
+ psci_spin_lock_cpu(target_idx);
+
+ /*
+ * Generic management: Ensure that the cpu is off before it
+ * can be turned on.
+ */
+ rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx));
+ if (rc != PSCI_E_SUCCESS)
+ goto exit;
+
+ /*
+ * Call the cpu on handler registered by the Secure Payload Dispatcher
+ * to let it do any bookkeeping. If the handler encounters an error, it's
+ * expected to assert within.
+ */
+ if (psci_spd_pm && psci_spd_pm->svc_on)
+ psci_spd_pm->svc_on(target_cpu);
+
+ /*
+ * Set the Affinity info state of the target cpu to ON_PENDING.
+ * Flush aff_info_state as it will be accessed with caches
+ * turned OFF.
+ */
+ psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
+ flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
+
+ /*
+ * The cache line invalidation by the target CPU after setting the
+ * state to OFF (see psci_do_cpu_off()), could cause the update to
+ * aff_info_state to be invalidated. Retry the update if the target
+ * CPU aff_info_state is not ON_PENDING.
+ */
+ target_aff_state = psci_get_aff_info_state_by_idx(target_idx);
+ if (target_aff_state != AFF_STATE_ON_PENDING) {
+ assert(target_aff_state == AFF_STATE_OFF);
+ psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
+ flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
+
+ assert(psci_get_aff_info_state_by_idx(target_idx) == AFF_STATE_ON_PENDING);
+ }
+
+ /*
+ * Perform generic, architecture and platform specific handling.
+ * Plat. management: Give the platform the current state
+ * of the target cpu to allow it to perform the necessary
+ * steps to power on.
+ */
+ rc = psci_plat_pm_ops->pwr_domain_on(target_cpu);
+ assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
+
+ if (rc == PSCI_E_SUCCESS)
+ /* Store the re-entry information for the non-secure world. */
+ cm_init_context_by_index(target_idx, ep);
+ else {
+ /* Restore the state on error. */
+ psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF);
+ flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
+ }
+
+exit:
+ psci_spin_unlock_cpu(target_idx);
+ return rc;
+}
+
+/*******************************************************************************
+ * The following function finishes an earlier power on request. It is
+ * called by the common finisher routine in psci_common.c. The `state_info`
+ * is the psci_power_state from which this CPU has woken up.
+ ******************************************************************************/
+void psci_cpu_on_finish(unsigned int cpu_idx,
+ psci_power_state_t *state_info)
+{
+ /*
+ * Plat. management: Perform the platform specific actions
+ * for this cpu e.g. enabling the gic or zeroing the mailbox
+ * register. The actual state of this cpu has already been
+ * changed.
+ */
+ psci_plat_pm_ops->pwr_domain_on_finish(state_info);
+
+ /*
+ * Arch. management: Enable data cache and manage stack memory
+ */
+ psci_do_pwrup_cache_maintenance();
+
+ /*
+ * All the platform specific actions for turning this cpu
+ * on have completed. Perform enough arch. initialization
+ * to run in the non-secure address space.
+ */
+ bl31_arch_setup();
+
+ /*
+ * Lock the CPU spin lock to make sure that the context initialization
+ * is done. Since the lock is only used in this function to create
+ * a synchronization point with cpu_on_start(), it can be released
+ * immediately.
+ */
+ psci_spin_lock_cpu(cpu_idx);
+ psci_spin_unlock_cpu(cpu_idx);
+
+ /* Ensure we have been explicitly woken up by another cpu */
+ assert(psci_get_aff_info_state() == AFF_STATE_ON_PENDING);
+
+ /*
+ * Call the cpu on finish handler registered by the Secure Payload
+ * Dispatcher to let it do any bookkeeping. If the handler encounters an
+ * error, it's expected to assert within.
+ */
+ if (psci_spd_pm && psci_spd_pm->svc_on_finish)
+ psci_spd_pm->svc_on_finish(0);
+
+ /*
+ * Populate the mpidr field within the cpu node array.
+ * This needs to be done only once.
+ */
+ psci_cpu_pd_nodes[cpu_idx].mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;
+
+ /*
+ * Generic management: Now we just need to retrieve the
+ * information that we had stashed away during the cpu_on
+ * call to set this cpu on its way.
+ */
+ cm_prepare_el3_exit(NON_SECURE);
+}
diff --git a/lib/psci/psci_private.h b/lib/psci/psci_private.h
new file mode 100644
index 00000000..f42ce551
--- /dev/null
+++ b/lib/psci/psci_private.h
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PSCI_PRIVATE_H__
+#define __PSCI_PRIVATE_H__
+
+#include <arch.h>
+#include <bakery_lock.h>
+#include <bl_common.h>
+#include <cpu_data.h>
+#include <pmf.h>
+#include <psci.h>
+#include <spinlock.h>
+
+/*
+ * The following helper macros abstract the interface to the Bakery
+ * Lock API.
+ */
+#define psci_lock_init(non_cpu_pd_node, idx) \
+ ((non_cpu_pd_node)[(idx)].lock_index = (idx))
+#define psci_lock_get(non_cpu_pd_node) \
+ bakery_lock_get(&psci_locks[(non_cpu_pd_node)->lock_index])
+#define psci_lock_release(non_cpu_pd_node) \
+ bakery_lock_release(&psci_locks[(non_cpu_pd_node)->lock_index])
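+
+/*
+ * Usage sketch (editorial): the macros resolve a non-CPU power domain node
+ * to its backing bakery lock via the node's lock_index, e.g.
+ *
+ *	psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
+ *	(critical section over that domain's state)
+ *	psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
+ */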
+
+/*
+ * The PSCI capabilities which are provided by the generic code and do not
+ * depend on the platform or SPD capabilities.
+ */
+#define PSCI_GENERIC_CAP \
+ (define_psci_cap(PSCI_VERSION) | \
+ define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) | \
+ define_psci_cap(PSCI_FEATURES))
+
+/*
+ * The PSCI capabilities mask for 64 bit functions.
+ */
+#define PSCI_CAP_64BIT_MASK \
+ (define_psci_cap(PSCI_CPU_SUSPEND_AARCH64) | \
+ define_psci_cap(PSCI_CPU_ON_AARCH64) | \
+ define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) | \
+ define_psci_cap(PSCI_MIG_AARCH64) | \
+ define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) | \
+ define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64) | \
+ define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64) | \
+ define_psci_cap(PSCI_STAT_COUNT_AARCH64))
+
+/*
+ * Helper macros to get/set the fields of PSCI per-cpu data.
+ */
+#define psci_set_aff_info_state(aff_state) \
+ set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state)
+#define psci_get_aff_info_state() \
+ get_cpu_data(psci_svc_cpu_data.aff_info_state)
+#define psci_get_aff_info_state_by_idx(idx) \
+ get_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state)
+#define psci_set_aff_info_state_by_idx(idx, aff_state) \
+ set_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state,\
+ aff_state)
+#define psci_get_suspend_pwrlvl() \
+ get_cpu_data(psci_svc_cpu_data.target_pwrlvl)
+#define psci_set_suspend_pwrlvl(target_lvl) \
+ set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl)
+#define psci_set_cpu_local_state(state) \
+ set_cpu_data(psci_svc_cpu_data.local_state, state)
+#define psci_get_cpu_local_state() \
+ get_cpu_data(psci_svc_cpu_data.local_state)
+#define psci_get_cpu_local_state_by_idx(idx) \
+ get_cpu_data_by_index(idx, psci_svc_cpu_data.local_state)
+
+/*
+ * Helper macros for the CPU level spinlocks
+ */
+#define psci_spin_lock_cpu(idx) spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock)
+#define psci_spin_unlock_cpu(idx) spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock)
+
+/* Helper macro to identify a CPU standby request in PSCI Suspend call */
+#define is_cpu_standby_req(is_power_down_state, retn_lvl) \
+ (((!(is_power_down_state)) && ((retn_lvl) == 0)) ? 1 : 0)
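+
+/*
+ * Editorial example: a suspend request is a plain CPU standby when the
+ * state type bit is clear and the deepest affected level is 0, i.e.
+ * is_cpu_standby_req(0, 0) == 1, whereas is_cpu_standby_req(0, 1) and
+ * is_cpu_standby_req(1, 0) both evaluate to 0.
+ */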
+
+/* Following are used as IDs to capture time-stamps */
+#define PSCI_STAT_ID_ENTER_LOW_PWR 0
+#define PSCI_STAT_ID_EXIT_LOW_PWR 1
+#define PSCI_STAT_TOTAL_IDS 2
+
+/* Declare PMF service functions for PSCI */
+PMF_DECLARE_CAPTURE_TIMESTAMP(psci_svc)
+PMF_DECLARE_GET_TIMESTAMP(psci_svc)
+
+/*******************************************************************************
+ * The following two data structures implement the power domain tree. The tree
+ * is used to track the state of all the nodes i.e. power domain instances
+ * described by the platform. The tree consists of nodes that describe CPU power
+ * domains i.e. leaf nodes and all other power domains which are parents of a
+ * CPU power domain i.e. non-leaf nodes.
+ ******************************************************************************/
+typedef struct non_cpu_pwr_domain_node {
+ /*
+	 * Index of the first CPU power domain node (at level 0) which has
+	 * this node as its parent.
+ */
+ unsigned int cpu_start_idx;
+
+ /*
+ * Number of CPU power domains which are siblings of the domain indexed
+ * by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
+ * -> cpu_start_idx + ncpus' have this node as their parent.
+ */
+ unsigned int ncpus;
+
+ /*
+ * Index of the parent power domain node.
+	 * TODO: Figure out whether using a pointer is more efficient.
+ */
+ unsigned int parent_node;
+
+ plat_local_state_t local_state;
+
+ unsigned char level;
+
+	/* For indexing the psci_lock array */
+ unsigned char lock_index;
+} non_cpu_pd_node_t;
+
+typedef struct cpu_pwr_domain_node {
+ u_register_t mpidr;
+
+ /*
+ * Index of the parent power domain node.
+	 * TODO: Figure out whether using a pointer is more efficient.
+ */
+ unsigned int parent_node;
+
+ /*
+ * A CPU power domain does not require state coordination like its
+ * parent power domains. Hence this node does not include a bakery
+ * lock. A spinlock is required by the CPU_ON handler to prevent a race
+ * when multiple CPUs try to turn ON the same target CPU.
+ */
+ spinlock_t cpu_lock;
+} cpu_pd_node_t;
+
+/*******************************************************************************
+ * Data prototypes
+ ******************************************************************************/
+extern const plat_psci_ops_t *psci_plat_pm_ops;
+extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS];
+extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
+extern unsigned int psci_caps;
+
+/* One bakery lock is required for each non-cpu power domain */
+DECLARE_BAKERY_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
+
+/*******************************************************************************
+ * SPD's power management hooks registered with PSCI
+ ******************************************************************************/
+extern const spd_pm_ops_t *psci_spd_pm;
+
+/*******************************************************************************
+ * Function prototypes
+ ******************************************************************************/
+/* Private exported functions from psci_common.c */
+int psci_validate_power_state(unsigned int power_state,
+ psci_power_state_t *state_info);
+void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info);
+int psci_validate_mpidr(u_register_t mpidr);
+void psci_init_req_local_pwr_states(void);
+void psci_power_up_finish(void);
+int psci_validate_entry_point(entry_point_info_t *ep,
+ uintptr_t entrypoint, u_register_t context_id);
+void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
+ unsigned int end_lvl,
+ unsigned int node_index[]);
+void psci_do_state_coordination(unsigned int end_pwrlvl,
+ psci_power_state_t *state_info);
+void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
+ unsigned int cpu_idx);
+void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
+ unsigned int cpu_idx);
+int psci_validate_suspend_req(const psci_power_state_t *state_info,
+ unsigned int is_power_down_state_req);
+unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info);
+unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info);
+void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl);
+void psci_print_power_domain_map(void);
+unsigned int psci_is_last_on_cpu(void);
+int psci_spd_migrate_info(u_register_t *mpidr);
+
+/* Private exported functions from psci_on.c */
+int psci_cpu_on_start(u_register_t target_cpu,
+ entry_point_info_t *ep);
+
+void psci_cpu_on_finish(unsigned int cpu_idx,
+ psci_power_state_t *state_info);
+
+/* Private exported functions from psci_off.c */
+int psci_do_cpu_off(unsigned int end_pwrlvl);
+
+/* Private exported functions from psci_suspend.c */
+void psci_cpu_suspend_start(entry_point_info_t *ep,
+ unsigned int end_pwrlvl,
+ psci_power_state_t *state_info,
+ unsigned int is_power_down_state_req);
+
+void psci_cpu_suspend_finish(unsigned int cpu_idx,
+ psci_power_state_t *state_info);
+
+/* Private exported functions from psci_helpers.S */
+void psci_do_pwrdown_cache_maintenance(unsigned int pwr_level);
+void psci_do_pwrup_cache_maintenance(void);
+
+/* Private exported functions from psci_system_off.c */
+void __dead2 psci_system_off(void);
+void __dead2 psci_system_reset(void);
+
+/* Private exported functions from psci_stat.c */
+void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
+ const psci_power_state_t *state_info);
+void psci_stats_update_pwr_up(unsigned int end_pwrlvl,
+ const psci_power_state_t *state_info,
+ unsigned int flags);
+u_register_t psci_stat_residency(u_register_t target_cpu,
+ unsigned int power_state);
+u_register_t psci_stat_count(u_register_t target_cpu,
+ unsigned int power_state);
+
+#endif /* __PSCI_PRIVATE_H__ */
diff --git a/lib/psci/psci_setup.c b/lib/psci/psci_setup.c
new file mode 100644
index 00000000..fac0edec
--- /dev/null
+++ b/lib/psci/psci_setup.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <bl_common.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <platform.h>
+#include <stddef.h>
+#include "psci_private.h"
+
+/*******************************************************************************
+ * Per cpu non-secure contexts used to program the architectural state prior
+ * to returning to the normal world.
+ * TODO: Use the memory allocator to set aside memory for the contexts instead
+ * of relying on platform defined constants.
+ ******************************************************************************/
+static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT];
+
+/******************************************************************************
+ * Define the psci capability variable.
+ *****************************************************************************/
+unsigned int psci_caps;
+
+/*******************************************************************************
+ * Function which initializes the entry in 'psci_non_cpu_pd_nodes' or
+ * 'psci_cpu_pd_nodes' corresponding to the given power level.
+ ******************************************************************************/
+static void psci_init_pwr_domain_node(unsigned int node_idx,
+ unsigned int parent_idx,
+ unsigned int level)
+{
+ if (level > PSCI_CPU_PWR_LVL) {
+ psci_non_cpu_pd_nodes[node_idx].level = level;
+ psci_lock_init(psci_non_cpu_pd_nodes, node_idx);
+ psci_non_cpu_pd_nodes[node_idx].parent_node = parent_idx;
+ psci_non_cpu_pd_nodes[node_idx].local_state =
+ PLAT_MAX_OFF_STATE;
+ } else {
+ psci_cpu_data_t *svc_cpu_data;
+
+ psci_cpu_pd_nodes[node_idx].parent_node = parent_idx;
+
+ /* Initialize with an invalid mpidr */
+ psci_cpu_pd_nodes[node_idx].mpidr = PSCI_INVALID_MPIDR;
+
+ svc_cpu_data =
+ &(_cpu_data_by_index(node_idx)->psci_svc_cpu_data);
+
+ /* Set the Affinity Info for the cores as OFF */
+ svc_cpu_data->aff_info_state = AFF_STATE_OFF;
+
+ /* Invalidate the suspend level for the cpu */
+ svc_cpu_data->target_pwrlvl = PSCI_INVALID_PWR_LVL;
+
+ /* Set the power state to OFF state */
+ svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;
+
+ flush_dcache_range((uintptr_t)svc_cpu_data,
+ sizeof(*svc_cpu_data));
+
+ cm_set_context_by_index(node_idx,
+ (void *) &psci_ns_context[node_idx],
+ NON_SECURE);
+ }
+}
+
+/*******************************************************************************
+ * This function updates the cpu_start_idx and ncpus fields for each node in
+ * psci_non_cpu_pd_nodes[]. It does so by comparing the parent nodes of each
+ * CPU with those of the previous CPU. The basic assumption is that children
+ * of the same parent are allocated adjacent indices. The platform should
+ * ensure this through proper mapping of the CPUs to indices via the
+ * plat_core_pos_by_mpidr() and plat_my_core_pos() APIs.
+ ******************************************************************************/
+static void psci_update_pwrlvl_limits(void)
+{
+ int j;
+ unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0};
+ unsigned int temp_index[PLAT_MAX_PWR_LVL], cpu_idx;
+
+ for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
+ psci_get_parent_pwr_domain_nodes(cpu_idx,
+ PLAT_MAX_PWR_LVL,
+ temp_index);
+ for (j = PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
+ if (temp_index[j] != nodes_idx[j]) {
+ nodes_idx[j] = temp_index[j];
+ psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx
+ = cpu_idx;
+ }
+ psci_non_cpu_pd_nodes[nodes_idx[j]].ncpus++;
+ }
+ }
+}
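+
+/*
+ * Worked example (hypothetical two-cluster topology, matching the layout
+ * illustrated before psci_setup() below): with CPUs 0-1 parented by cluster
+ * node 1 and CPUs 2-3 by cluster node 2, the loop above leaves
+ *
+ *	psci_non_cpu_pd_nodes[0]: cpu_start_idx == 0, ncpus == 4 (system)
+ *	psci_non_cpu_pd_nodes[1]: cpu_start_idx == 0, ncpus == 2 (cluster 0)
+ *	psci_non_cpu_pd_nodes[2]: cpu_start_idx == 2, ncpus == 2 (cluster 1)
+ */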
+
+/*******************************************************************************
+ * Core routine to populate the power domain tree. The tree descriptor passed
+ * by the platform is parsed breadth-first and the first entry in the map
+ * gives the number of root power domains. The parent nodes of the root nodes
+ * point to an invalid entry (-1).
+ ******************************************************************************/
+static void populate_power_domain_tree(const unsigned char *topology)
+{
+ unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl;
+ unsigned int node_index = 0, parent_node_index = 0, num_children;
+ int level = PLAT_MAX_PWR_LVL;
+
+ /*
+	 * For each level the inputs are:
+	 * - Number of nodes at this level in the topology array i.e.
+	 *   num_nodes_at_lvl. This is the sum of the values of the nodes at
+	 *   the parent level.
+	 * - Index of the first entry at this level in the topology array i.e.
+	 *   parent_node_index.
+	 * - Index of the first free entry in psci_non_cpu_pd_nodes[] or
+	 *   psci_cpu_pd_nodes[] i.e. node_index depending upon the level.
+ */
+ while (level >= PSCI_CPU_PWR_LVL) {
+ num_nodes_at_next_lvl = 0;
+ /*
+		 * For each entry (parent node) at this level in the topology array:
+ * - Find the number of children
+ * - Allocate a node in a power domain array for each child
+ * - Set the parent of the child to the parent_node_index - 1
+ * - Increment parent_node_index to point to the next parent
+ * - Accumulate the number of children at next level.
+ */
+ for (i = 0; i < num_nodes_at_lvl; i++) {
+ assert(parent_node_index <=
+ PSCI_NUM_NON_CPU_PWR_DOMAINS);
+ num_children = topology[parent_node_index];
+
+ for (j = node_index;
+ j < node_index + num_children; j++)
+ psci_init_pwr_domain_node(j,
+ parent_node_index - 1,
+ level);
+
+ node_index = j;
+ num_nodes_at_next_lvl += num_children;
+ parent_node_index++;
+ }
+
+ num_nodes_at_lvl = num_nodes_at_next_lvl;
+ level--;
+
+ /* Reset the index for the cpu power domain array */
+ if (level == PSCI_CPU_PWR_LVL)
+ node_index = 0;
+ }
+
+	/* Validate the sanity of the array exported by the platform */
+ assert(j == PLATFORM_CORE_COUNT);
+}
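+
+/*
+ * A sketch of the breadth-first descriptor consumed above (hypothetical
+ * platform code) for the 2-cluster, 2-CPUs-per-cluster system illustrated
+ * before psci_setup() below. The first entry is the number of root power
+ * domains; each subsequent entry gives the number of children of the next
+ * parent node.
+ *
+ *	static const unsigned char plat_pd_tree_desc[] = {1, 2, 2, 2};
+ *
+ *	const unsigned char *plat_get_power_domain_tree_desc(void)
+ *	{
+ *		return plat_pd_tree_desc;
+ *	}
+ */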
+
+/*******************************************************************************
+ * This function initializes the power domain topology tree by querying the
+ * platform. The power domain nodes higher than the CPU are populated in the
+ * array psci_non_cpu_pd_nodes[] and the CPU power domains are populated in
+ * psci_cpu_pd_nodes[]. The platform exports its static topology map through
+ * the plat_get_power_domain_tree_desc() API. The algorithm populates the
+ * psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this
+ * topology map. On a platform that implements two clusters of 2 cpus each,
+ * and supports 3 domain levels, the populated psci_non_cpu_pd_nodes would
+ * look like this:
+ *
+ * ---------------------------------------------------
+ * | system node | cluster 0 node | cluster 1 node |
+ * ---------------------------------------------------
+ *
+ * And the populated psci_cpu_pd_nodes would look like this:
+ * <- cpus cluster0 -><- cpus cluster1 ->
+ * ------------------------------------------------
+ * | CPU 0 | CPU 1 | CPU 2 | CPU 3 |
+ * ------------------------------------------------
+ ******************************************************************************/
+int psci_setup(void)
+{
+ const unsigned char *topology_tree;
+
+ /* Query the topology map from the platform */
+ topology_tree = plat_get_power_domain_tree_desc();
+
+ /* Populate the power domain arrays using the platform topology map */
+ populate_power_domain_tree(topology_tree);
+
+ /* Update the CPU limits for each node in psci_non_cpu_pd_nodes */
+ psci_update_pwrlvl_limits();
+
+ /* Populate the mpidr field of cpu node for this CPU */
+ psci_cpu_pd_nodes[plat_my_core_pos()].mpidr =
+ read_mpidr() & MPIDR_AFFINITY_MASK;
+
+ psci_init_req_local_pwr_states();
+
+ /*
+ * Set the requested and target state of this CPU and all the higher
+ * power domain levels for this CPU to run.
+ */
+ psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);
+
+ plat_setup_psci_ops((uintptr_t)psci_entrypoint,
+ &psci_plat_pm_ops);
+ assert(psci_plat_pm_ops);
+
+ /* Initialize the psci capability */
+ psci_caps = PSCI_GENERIC_CAP;
+
+ if (psci_plat_pm_ops->pwr_domain_off)
+ psci_caps |= define_psci_cap(PSCI_CPU_OFF);
+ if (psci_plat_pm_ops->pwr_domain_on &&
+ psci_plat_pm_ops->pwr_domain_on_finish)
+ psci_caps |= define_psci_cap(PSCI_CPU_ON_AARCH64);
+ if (psci_plat_pm_ops->pwr_domain_suspend &&
+ psci_plat_pm_ops->pwr_domain_suspend_finish) {
+ psci_caps |= define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
+ if (psci_plat_pm_ops->get_sys_suspend_power_state)
+ psci_caps |= define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
+ }
+ if (psci_plat_pm_ops->system_off)
+ psci_caps |= define_psci_cap(PSCI_SYSTEM_OFF);
+ if (psci_plat_pm_ops->system_reset)
+ psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET);
+
+#if ENABLE_PSCI_STAT
+ psci_caps |= define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64);
+ psci_caps |= define_psci_cap(PSCI_STAT_COUNT_AARCH64);
+#endif
+
+ return 0;
+}
diff --git a/lib/psci/psci_stat.c b/lib/psci/psci_stat.c
new file mode 100644
index 00000000..155bbb07
--- /dev/null
+++ b/lib/psci/psci_stat.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <debug.h>
+#include <platform.h>
+#include <platform_def.h>
+#include "psci_private.h"
+
+#ifndef PLAT_MAX_PWR_LVL_STATES
+#define PLAT_MAX_PWR_LVL_STATES 2
+#endif
+
+/* Number of ticks per second for a 1 MHz signal */
+#define MHZ_TICKS_PER_SEC 1000000
+
+/* The following structure is used for PSCI STAT */
+typedef struct psci_stat {
+ u_register_t residency;
+ u_register_t count;
+} psci_stat_t;
+
+/*
+ * The following is used to keep track of the last cpu
+ * that goes to power down in non cpu power domains. Each entry must start
+ * at -1 ("no CPU recorded"); a plain {-1} initializer would only set the
+ * first element, hence the designated range initializer.
+ */
+static int last_cpu_in_non_cpu_pd[PSCI_NUM_NON_CPU_PWR_DOMAINS] = {
+		[0 ... PSCI_NUM_NON_CPU_PWR_DOMAINS - 1] = -1};
+
+/*
+ * The following arrays are used to store PSCI STAT values for
+ * CPU and non-CPU power domains.
+ */
+static psci_stat_t psci_cpu_stat[PLATFORM_CORE_COUNT]
+ [PLAT_MAX_PWR_LVL_STATES];
+static psci_stat_t psci_non_cpu_stat[PSCI_NUM_NON_CPU_PWR_DOMAINS]
+ [PLAT_MAX_PWR_LVL_STATES];
+
+/* Register PMF PSCI service */
+PMF_REGISTER_SERVICE(psci_svc, PMF_PSCI_STAT_SVC_ID,
+ PSCI_STAT_TOTAL_IDS, PMF_STORE_ENABLE)
+
+/* The divisor to use to convert raw timestamp into microseconds */
+u_register_t residency_div;
+
+/*
+ * This macro calculates the stats residency in microseconds,
+ * taking into account the wrap-around condition.
+ */
+#define calc_stat_residency(_pwrupts, _pwrdnts, _res)		\
+	do {							\
+		if ((_pwrupts) < (_pwrdnts))			\
+			_res = UINT64_MAX - (_pwrdnts) + (_pwrupts); \
+		else						\
+			_res = (_pwrupts) - (_pwrdnts);		\
+		/* Convert the tick delta into microseconds */	\
+		_res = (_res) / residency_div;			\
+	} while (0)
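+
+/*
+ * Worked example (values assumed for illustration): with a 50 MHz system
+ * counter, residency_div is 50000000 / MHZ_TICKS_PER_SEC == 50 ticks per
+ * microsecond, so
+ *
+ *	calc_stat_residency(6000, 1000, res);	res == (6000 - 1000) / 50
+ *						       == 100 microseconds
+ *
+ * If the counter wrapped between power down and power up (_pwrupts <
+ * _pwrdnts), the UINT64_MAX correction restores the true tick delta before
+ * the division.
+ */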
+
+/*
+ * This function returns the index into the `psci_stat_t` array given the
+ * local power state and power domain level. If the platform implements the
+ * `get_pwr_lvl_state_idx` pm hook, then that will be used to return the index.
+ */
+static int get_stat_idx(plat_local_state_t local_state, int pwr_lvl)
+{
+ int idx;
+
+ if (psci_plat_pm_ops->get_pwr_lvl_state_idx == NULL) {
+ assert(PLAT_MAX_PWR_LVL_STATES == 2);
+ if (is_local_state_retn(local_state))
+ return 0;
+
+ assert(is_local_state_off(local_state));
+ return 1;
+ }
+
+ idx = psci_plat_pm_ops->get_pwr_lvl_state_idx(local_state, pwr_lvl);
+ assert((idx >= 0) && (idx < PLAT_MAX_PWR_LVL_STATES));
+ return idx;
+}
+
+/*******************************************************************************
+ * This function is passed the target local power states for each power
+ * domain (state_info) between the current CPU domain and its ancestors until
+ * the target power level (end_pwrlvl).
+ *
+ * Then, for each level (apart from the CPU level) up to the 'end_pwrlvl', it
+ * updates `last_cpu_in_non_cpu_pd[]` with the ID of the last CPU to power
+ * down.
+ *
+ * This function will only be invoked with data cache enabled and while
+ * powering down a core.
+ ******************************************************************************/
+void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
+ const psci_power_state_t *state_info)
+{
+ int lvl, parent_idx, cpu_idx = plat_my_core_pos();
+
+ assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
+ assert(state_info);
+
+ parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
+
+ for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+
+ /* Break early if the target power state is RUN */
+ if (is_local_state_run(state_info->pwr_domain_state[lvl]))
+ break;
+
+ /*
+ * The power domain is entering a low power state, so this is
+ * the last CPU for this power domain
+ */
+ last_cpu_in_non_cpu_pd[parent_idx] = cpu_idx;
+
+ parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+ }
+
+}
+
+/*******************************************************************************
+ * This function updates the PSCI STATS (residency time and count) for CPU
+ * and non-CPU power domains.
+ * It is called with caches enabled and locks acquired (for non-CPU domains).
+ ******************************************************************************/
+void psci_stats_update_pwr_up(unsigned int end_pwrlvl,
+ const psci_power_state_t *state_info,
+ unsigned int flags)
+{
+ int parent_idx, cpu_idx = plat_my_core_pos();
+ int lvl, stat_idx;
+ plat_local_state_t local_state;
+ unsigned long long pwrup_ts = 0, pwrdn_ts = 0;
+ u_register_t residency;
+
+ assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
+ assert(state_info);
+
+ /* Initialize the residency divisor if not already initialized */
+ if (!residency_div) {
+		/*
+		 * Pre-calculate the divisor so that it can be used directly
+		 * to convert time-stamps into microseconds.
+		 */
+ residency_div = read_cntfrq_el0() / MHZ_TICKS_PER_SEC;
+ assert(residency_div);
+ }
+
+ /* Get power down time-stamp for current CPU */
+ PMF_GET_TIMESTAMP_BY_INDEX(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
+ cpu_idx, flags, pwrdn_ts);
+
+	/* In the case of the first power-on, just return */
+ if (!pwrdn_ts)
+ return;
+
+ /* Get power up time-stamp for current CPU */
+ PMF_GET_TIMESTAMP_BY_INDEX(psci_svc, PSCI_STAT_ID_EXIT_LOW_PWR,
+ cpu_idx, flags, pwrup_ts);
+
+ /* Get the index into the stats array */
+ local_state = state_info->pwr_domain_state[PSCI_CPU_PWR_LVL];
+ stat_idx = get_stat_idx(local_state, PSCI_CPU_PWR_LVL);
+
+ /* Calculate stats residency */
+ calc_stat_residency(pwrup_ts, pwrdn_ts, residency);
+
+ /* Update CPU stats. */
+ psci_cpu_stat[cpu_idx][stat_idx].residency += residency;
+ psci_cpu_stat[cpu_idx][stat_idx].count++;
+
+ /*
+	 * Check which power domains above the CPU were off
+ * prior to this CPU powering on.
+ */
+ parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
+ for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+ local_state = state_info->pwr_domain_state[lvl];
+ if (is_local_state_run(local_state)) {
+ /* Break early */
+ break;
+ }
+
+ assert(last_cpu_in_non_cpu_pd[parent_idx] != -1);
+
+ /* Get power down time-stamp for last CPU */
+ PMF_GET_TIMESTAMP_BY_INDEX(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
+ last_cpu_in_non_cpu_pd[parent_idx],
+ flags, pwrdn_ts);
+
+ /* Initialize back to reset value */
+ last_cpu_in_non_cpu_pd[parent_idx] = -1;
+
+ /* Get the index into the stats array */
+ stat_idx = get_stat_idx(local_state, lvl);
+
+ /* Calculate stats residency */
+ calc_stat_residency(pwrup_ts, pwrdn_ts, residency);
+
+ /* Update non cpu stats */
+ psci_non_cpu_stat[parent_idx][stat_idx].residency += residency;
+ psci_non_cpu_stat[parent_idx][stat_idx].count++;
+
+ parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+ }
+
+}
+
+/*******************************************************************************
+ * This function returns the appropriate count and residency time of the
+ * local state for the highest power level expressed in the `power_state`
+ * for the node represented by `target_cpu`.
+ ******************************************************************************/
+int psci_get_stat(u_register_t target_cpu, unsigned int power_state,
+ psci_stat_t *psci_stat)
+{
+ int rc, pwrlvl, lvl, parent_idx, stat_idx, target_idx;
+ psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
+ plat_local_state_t local_state;
+
+ /* Validate the target_cpu parameter and determine the cpu index */
+ target_idx = plat_core_pos_by_mpidr(target_cpu);
+ if (target_idx == -1)
+ return PSCI_E_INVALID_PARAMS;
+
+ /* Validate the power_state parameter */
+ if (!psci_plat_pm_ops->translate_power_state_by_mpidr)
+ rc = psci_validate_power_state(power_state, &state_info);
+ else
+ rc = psci_plat_pm_ops->translate_power_state_by_mpidr(
+ target_cpu, power_state, &state_info);
+
+ if (rc != PSCI_E_SUCCESS)
+ return PSCI_E_INVALID_PARAMS;
+
+ /* Find the highest power level */
+ pwrlvl = psci_find_target_suspend_lvl(&state_info);
+ if (pwrlvl == PSCI_INVALID_PWR_LVL)
+ return PSCI_E_INVALID_PARAMS;
+
+ /* Get the index into the stats array */
+ local_state = state_info.pwr_domain_state[pwrlvl];
+ stat_idx = get_stat_idx(local_state, pwrlvl);
+
+ if (pwrlvl > PSCI_CPU_PWR_LVL) {
+ /* Get the power domain index */
+ parent_idx = psci_cpu_pd_nodes[target_idx].parent_node;
+ for (lvl = PSCI_CPU_PWR_LVL + 1; lvl < pwrlvl; lvl++)
+ parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+
+ /* Get the non cpu power domain stats */
+ *psci_stat = psci_non_cpu_stat[parent_idx][stat_idx];
+ } else {
+ /* Get the cpu power domain stats */
+ *psci_stat = psci_cpu_stat[target_idx][stat_idx];
+ }
+
+ return PSCI_E_SUCCESS;
+}
+
+/* This is the top level function for PSCI_STAT_RESIDENCY SMC. */
+u_register_t psci_stat_residency(u_register_t target_cpu,
+ unsigned int power_state)
+{
+ psci_stat_t psci_stat;
+
+ int rc = psci_get_stat(target_cpu, power_state, &psci_stat);
+ if (rc == PSCI_E_SUCCESS)
+ return psci_stat.residency;
+ else
+ return 0;
+}
+
+/* This is the top level function for PSCI_STAT_COUNT SMC. */
+u_register_t psci_stat_count(u_register_t target_cpu,
+ unsigned int power_state)
+{
+ psci_stat_t psci_stat;
+
+ int rc = psci_get_stat(target_cpu, power_state, &psci_stat);
+ if (rc == PSCI_E_SUCCESS)
+ return psci_stat.count;
+ else
+ return 0;
+}
diff --git a/lib/psci/psci_suspend.c b/lib/psci/psci_suspend.c
new file mode 100644
index 00000000..ee1ccef2
--- /dev/null
+++ b/lib/psci/psci_suspend.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <bl_common.h>
+#include <arch.h>
+#include <arch_helpers.h>
+#include <context.h>
+#include <context_mgmt.h>
+#include <cpu_data.h>
+#include <debug.h>
+#include <platform.h>
+#include <runtime_svc.h>
+#include <stddef.h>
+#include "psci_private.h"
+
+/*******************************************************************************
+ * This function does generic and platform specific operations after a wake-up
+ * from standby/retention states at multiple power levels.
+ ******************************************************************************/
+static void psci_suspend_to_standby_finisher(unsigned int cpu_idx,
+ psci_power_state_t *state_info,
+ unsigned int end_pwrlvl)
+{
+ psci_acquire_pwr_domain_locks(end_pwrlvl,
+ cpu_idx);
+
+ /*
+ * Plat. management: Allow the platform to do operations
+ * on waking up from retention.
+ */
+ psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);
+
+ /*
+ * Set the requested and target state of this CPU and all the higher
+ * power domain levels for this CPU to run.
+ */
+ psci_set_pwr_domains_to_run(end_pwrlvl);
+
+ psci_release_pwr_domain_locks(end_pwrlvl,
+ cpu_idx);
+}
+
+/*******************************************************************************
+ * This function does generic and platform specific suspend to power down
+ * operations.
+ ******************************************************************************/
+static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
+ entry_point_info_t *ep,
+ psci_power_state_t *state_info)
+{
+ unsigned int max_off_lvl = psci_find_max_off_lvl(state_info);
+
+ /* Save PSCI target power level for the suspend finisher handler */
+ psci_set_suspend_pwrlvl(end_pwrlvl);
+
+ /*
+ * Flush the target power level as it will be accessed on power up with
+	 * the data cache disabled.
+ */
+ flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);
+
+ /*
+ * Call the cpu suspend handler registered by the Secure Payload
+ * Dispatcher to let it do any book-keeping. If the handler encounters an
+ * error, it's expected to assert within
+ */
+ if (psci_spd_pm && psci_spd_pm->svc_suspend)
+ psci_spd_pm->svc_suspend(max_off_lvl);
+
+ /*
+ * Store the re-entry information for the non-secure world.
+ */
+ cm_init_my_context(ep);
+
+ /*
+ * Arch. management. Perform the necessary steps to flush all
+	 * cpu caches. Currently we assume that the power level corresponds
+	 * to the cache level.
+	 * TODO: Introduce a mechanism to query the cache level to flush
+ * and the cpu-ops power down to perform from the platform.
+ */
+ psci_do_pwrdown_cache_maintenance(max_off_lvl);
+}
+
+/*******************************************************************************
+ * Top level handler which is called when a cpu wants to suspend its execution.
+ * It is assumed that along with suspending the cpu power domain, power domains
+ * at higher levels up to the target power level will be suspended as well. It
+ * coordinates with the platform to negotiate the target state for each power
+ * domain level up to the target power domain level. It then performs the
+ * generic, architectural and platform setup and state management required to
+ * suspend that power domain level and the power domain levels below it.
+ * e.g. For a cpu that's to be suspended, it could mean programming the
+ * power controller whereas for a cluster that's to be suspended, it will call
+ * the platform specific code which will disable coherency at the interconnect
+ * level if the cpu is the last in the cluster and also program the power
+ * controller.
+ *
+ * All the required parameter checks are performed at the beginning and after
+ * the state transition has been done, no further error is expected and it is
+ * not possible to undo any of the actions taken beyond that point.
+ ******************************************************************************/
+void psci_cpu_suspend_start(entry_point_info_t *ep,
+ unsigned int end_pwrlvl,
+ psci_power_state_t *state_info,
+ unsigned int is_power_down_state)
+{
+ int skip_wfi = 0;
+ unsigned int idx = plat_my_core_pos();
+
+ /*
+ * This function must only be called on platforms where the
+ * CPU_SUSPEND platform hooks have been implemented.
+ */
+ assert(psci_plat_pm_ops->pwr_domain_suspend &&
+ psci_plat_pm_ops->pwr_domain_suspend_finish);
+
+ /*
+ * This function acquires the lock corresponding to each power
+	 * level so that by the time all locks are taken, a consistent snapshot
+	 * of the system topology is held and state management can be done
+	 * safely.
+ */
+ psci_acquire_pwr_domain_locks(end_pwrlvl,
+ idx);
+
+ /*
+ * We check if there are any pending interrupts after the delay
+ * introduced by lock contention to increase the chances of early
+ * detection that a wake-up interrupt has fired.
+ */
+ if (read_isr_el1()) {
+ skip_wfi = 1;
+ goto exit;
+ }
+
+ /*
+ * This function is passed the requested state info and
+	 * it returns the negotiated state info for each power level up to
+ * the end level specified.
+ */
+ psci_do_state_coordination(end_pwrlvl, state_info);
+
+#if ENABLE_PSCI_STAT
+	/* Update the last cpu for each level up to end_pwrlvl */
+ psci_stats_update_pwr_down(end_pwrlvl, state_info);
+#endif
+
+ if (is_power_down_state)
+ psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info);
+
+ /*
+ * Plat. management: Allow the platform to perform the
+ * necessary actions to turn off this cpu e.g. set the
+ * platform defined mailbox with the psci entrypoint,
+ * program the power controller etc.
+ */
+ psci_plat_pm_ops->pwr_domain_suspend(state_info);
+
+#if ENABLE_PSCI_STAT
+ /*
+ * Capture time-stamp while entering low power state.
+ * No cache maintenance needed because caches are off
+ * and writes are direct to main memory.
+ */
+ PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
+ PMF_NO_CACHE_MAINT);
+#endif
+
+exit:
+ /*
+ * Release the locks corresponding to each power level in the
+ * reverse order to which they were acquired.
+ */
+ psci_release_pwr_domain_locks(end_pwrlvl,
+ idx);
+ if (skip_wfi)
+ return;
+
+ if (is_power_down_state) {
+ /* The function calls below must not return */
+ if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi)
+ psci_plat_pm_ops->pwr_domain_pwr_down_wfi(state_info);
+ else
+ psci_power_down_wfi();
+ }
+
+ /*
+ * We will reach here if only retention/standby states have been
+ * requested at multiple power levels. This means that the cpu
+ * context will be preserved.
+ */
+ wfi();
+
+ /*
+ * After we wake up from context retaining suspend, call the
+ * context retaining suspend finisher.
+ */
+ psci_suspend_to_standby_finisher(idx, state_info, end_pwrlvl);
+}
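+
+/*
+ * A minimal sketch (hypothetical platform code; the plat_pwrc_* helper is
+ * assumed, not part of this patch) of the pwr_domain_suspend hook driven
+ * above. It typically programs the power controller according to the
+ * negotiated state for each level, e.g.
+ *
+ *	static void plat_pwr_domain_suspend(const psci_power_state_t *target)
+ *	{
+ *		if (is_local_state_off(
+ *				target->pwr_domain_state[PSCI_CPU_PWR_LVL]))
+ *			plat_pwrc_program_cpu_off(read_mpidr());
+ *	}
+ */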
+
+/*******************************************************************************
+ * This function finishes an earlier suspend request. It is called by the
+ * common finisher routine in psci_common.c. `state_info` is the
+ * psci_power_state from which this CPU has woken up.
+ ******************************************************************************/
+void psci_cpu_suspend_finish(unsigned int cpu_idx,
+ psci_power_state_t *state_info)
+{
+ unsigned int counter_freq;
+ unsigned int max_off_lvl;
+
+ /* Ensure we have been woken up from a suspended state */
+	assert(psci_get_aff_info_state() == AFF_STATE_ON &&
+		is_local_state_off(
+			state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]));
+
+ /*
+ * Plat. management: Perform the platform specific actions
+ * before we change the state of the cpu e.g. enabling the
+ * gic or zeroing the mailbox register. If anything goes
+ * wrong then assert as there is no way to recover from this
+ * situation.
+ */
+ psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);
+
+ /*
+ * Arch. management: Enable the data cache, manage stack memory and
+ * restore the stashed EL3 architectural context from the 'cpu_context'
+ * structure for this cpu.
+ */
+ psci_do_pwrup_cache_maintenance();
+
+ /* Re-init the cntfrq_el0 register */
+ counter_freq = plat_get_syscnt_freq2();
+ write_cntfrq_el0(counter_freq);
+
+ /*
+ * Call the cpu suspend finish handler registered by the Secure Payload
+	 * Dispatcher to let it do any book-keeping. If the handler encounters
+	 * an error, it is expected to assert from within.
+ */
+ if (psci_spd_pm && psci_spd_pm->svc_suspend) {
+ max_off_lvl = psci_find_max_off_lvl(state_info);
+		assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
+ psci_spd_pm->svc_suspend_finish(max_off_lvl);
+ }
+
+ /* Invalidate the suspend level for the cpu */
+ psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);
+
+ /*
+ * Generic management: Now we just need to retrieve the
+ * information that we had stashed away during the suspend
+ * call to set this cpu on its way.
+ */
+ cm_prepare_el3_exit(NON_SECURE);
+}
diff --git a/lib/psci/psci_system_off.c b/lib/psci/psci_system_off.c
new file mode 100644
index 00000000..de9ec643
--- /dev/null
+++ b/lib/psci/psci_system_off.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stddef.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <debug.h>
+#include <platform.h>
+#include "psci_private.h"
+
+void __dead2 psci_system_off(void)
+{
+ psci_print_power_domain_map();
+
+ assert(psci_plat_pm_ops->system_off);
+
+ /* Notify the Secure Payload Dispatcher */
+ if (psci_spd_pm && psci_spd_pm->svc_system_off) {
+ psci_spd_pm->svc_system_off();
+ }
+
+ /* Call the platform specific hook */
+ psci_plat_pm_ops->system_off();
+
+ /* This function does not return. We should never get here */
+}
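+
+/*
+ * A minimal sketch (hypothetical platform code; the PLAT_PWRC_* register
+ * names are assumed) of the system_off hook invoked above. The hook itself
+ * must not return, hence the terminal wfi()/panic() sequence.
+ *
+ *	static void __dead2 plat_system_off(void)
+ *	{
+ *		mmio_write_32(PLAT_PWRC_BASE + PWRC_SYS_OFF_REG, 1U);
+ *		wfi();
+ *		panic();
+ *	}
+ */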
+
+void __dead2 psci_system_reset(void)
+{
+ psci_print_power_domain_map();
+
+ assert(psci_plat_pm_ops->system_reset);
+
+ /* Notify the Secure Payload Dispatcher */
+ if (psci_spd_pm && psci_spd_pm->svc_system_reset) {
+ psci_spd_pm->svc_system_reset();
+ }
+
+ /* Call the platform specific hook */
+ psci_plat_pm_ops->system_reset();
+
+ /* This function does not return. We should never get here */
+}