author    Yatharth Kochar <yatharth.kochar@arm.com>	2016-06-28 16:58:26 +0100
committer Yatharth Kochar <yatharth.kochar@arm.com>	2016-09-21 16:27:15 +0100
commit    1a0a3f0622e4b569513304109d9a0d093b71228a (patch)
tree      9850566923168a92a6ec7ebbabe03119f14b2786 /lib
parent    a8aa7fec1d4a6df8617c0d0463f1e10f1827a609 (diff)
AArch32: Common changes needed for BL1/BL2
This patch adds common changes to support AArch32 state in BL1 and BL2. Following are the changes:

* Added functions for disabling the MMU from Secure state.
* Added an AArch32-specific SMC function.
* Added semihosting support.
* Added reporting of unhandled exceptions.
* Added uniprocessor stack support.
* Added an `el3_entrypoint_common` macro that can be shared by the BL1 and BL32 (SP_MIN) BL stages. It is similar to the AArch64 counterpart, with the main differences being the assembly instructions and the registers that are relevant to the AArch32 execution state.
* Enabled the `LOAD_IMAGE_V2` flag in the Makefile for `ARCH=aarch32` and added a check to ensure that the platform has not overridden it to disable the flag.

Change-Id: I33c6d8dfefb2e5d142fdfd06a0f4a7332962e1a3
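The AArch32 `smc` helper added in this patch follows the AAPCS: the first four arguments arrive in r0-r3 and any further arguments are placed on the caller's stack, which is why the assembly reloads r4-r6 from the stack before issuing `smc #0`. A minimal sketch of how a C caller might see it, assuming a seven-argument prototype (the exact header and signature in the tree may differ):

#include <stdint.h>

/*
 * Assumed prototype for the AArch32 smc() helper: per the AAPCS,
 * arg0-arg3 travel in r0-r3 and arg4-arg6 are pushed onto the stack
 * by the caller; the assembly loads them into r4-r6 before "smc #0".
 */
void smc(uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3,
	 uint32_t arg4, uint32_t arg5, uint32_t arg6);

/* Illustrative call site: issue an SMC with a hypothetical function
 * ID in r0 and no other arguments. */
static void example_issue_smc(void)
{
	smc(0x84000000u /* hypothetical SMC function ID */, 0, 0, 0, 0, 0, 0);
}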
Diffstat (limited to 'lib')
-rw-r--r--  lib/aarch32/misc_helpers.S                  36
-rw-r--r--  lib/cpus/aarch32/cpu_helpers.S               6
-rw-r--r--  lib/semihosting/aarch32/semihosting_call.S  38
3 files changed, 80 insertions, 0 deletions
diff --git a/lib/aarch32/misc_helpers.S b/lib/aarch32/misc_helpers.S
index 63ac1a7e..fd7c6dd1 100644
--- a/lib/aarch32/misc_helpers.S
+++ b/lib/aarch32/misc_helpers.S
@@ -32,7 +32,21 @@
#include <asm_macros.S>
#include <assert_macros.S>
+ .globl smc
.globl zeromem
+ .globl disable_mmu_icache_secure
+ .globl disable_mmu_secure
+
+func smc
+ /*
+ * For AArch32, only the first four arguments are passed in r0-r3;
+ * the remaining ones are placed on the stack by the caller, so
+ * load them from the stack into r4-r6 explicitly here.
+ * Clobbers: r4-r6
+ */
+ ldm sp, {r4, r5, r6}
+ smc #0
+endfunc smc
/* -----------------------------------------------------------------------
* void zeromem(void *mem, unsigned int length);
@@ -58,3 +72,25 @@ z_loop:
z_end:
bx lr
endfunc zeromem
+
+/* ---------------------------------------------------------------------------
+ * Disable the MMU in Secure State
+ * ---------------------------------------------------------------------------
+ */
+
+func disable_mmu_secure
+ mov r1, #(SCTLR_M_BIT | SCTLR_C_BIT)
+do_disable_mmu:
+ ldcopr r0, SCTLR
+ bic r0, r0, r1
+ stcopr r0, SCTLR
+ isb // ensure MMU is off
+ dsb sy
+ bx lr
+endfunc disable_mmu_secure
+
+
+func disable_mmu_icache_secure
+ ldr r1, =(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
+ b do_disable_mmu
+endfunc disable_mmu_icache_secure
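
The two MMU-disable entry points above differ only in which SCTLR bits they clear: disable_mmu_secure clears M (MMU) and C (data cache), while disable_mmu_icache_secure additionally clears I (instruction cache) before falling through to the shared do_disable_mmu tail. A rough sketch of the prototypes as a C caller would see them (the header placement and call site below are illustrative assumptions):

/* Assumed prototypes, typically exposed via an arch helpers header. */
void disable_mmu_secure(void);        /* clears SCTLR.M and SCTLR.C  */
void disable_mmu_icache_secure(void); /* additionally clears SCTLR.I */

/* Illustrative call site: turn off the MMU and caches before handing
 * control to the next image. */
static void example_prepare_handover(void)
{
	disable_mmu_icache_secure();
}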
diff --git a/lib/cpus/aarch32/cpu_helpers.S b/lib/cpus/aarch32/cpu_helpers.S
index 927a6f50..042ffbdd 100644
--- a/lib/cpus/aarch32/cpu_helpers.S
+++ b/lib/cpus/aarch32/cpu_helpers.S
@@ -34,6 +34,7 @@
#include <cpu_data.h>
#include <cpu_macros.S>
+#if IMAGE_BL1 || IMAGE_BL32
/*
* The reset handler common to all platforms. After a matching
* cpu_ops structure entry is found, the corresponding reset_handler
@@ -65,6 +66,9 @@ func reset_handler
bx lr
endfunc reset_handler
+#endif /* IMAGE_BL1 || IMAGE_BL32 */
+
+#if IMAGE_BL32 /* Core and cluster power down handling is needed only in BL32 */
/*
* The prepare core power down function for all platforms. After
* the cpu_ops pointer is retrieved from cpu_data, the corresponding
@@ -132,6 +136,8 @@ func init_cpu_ops
pop {r4 - r6, pc}
endfunc init_cpu_ops
+#endif /* IMAGE_BL32 */
+
/*
* The below function returns the cpu_ops structure matching the
* midr of the core. It reads the MIDR and finds the matching
diff --git a/lib/semihosting/aarch32/semihosting_call.S b/lib/semihosting/aarch32/semihosting_call.S
new file mode 100644
index 00000000..0cc707a0
--- /dev/null
+++ b/lib/semihosting/aarch32/semihosting_call.S
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm_macros.S>
+
+ .globl semihosting_call
+
+func semihosting_call
+ svc #0x123456
+ bx lr
+endfunc semihosting_call
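
For reference, `svc #0x123456` is the architected semihosting trap for A32 (ARM) state; Thumb state uses `svc #0xAB` and AArch64 uses `hlt #0xf000`. The host debugger expects the operation number in r0 and the argument (usually a pointer to a parameter block) in r1, and places the result back in r0. A hedged sketch of how the generic semihosting layer might drive this helper; the exact prototype and constant names in the tree may differ:

#include <stdint.h>

/* Assumed prototype for the assembly helper: operation code in r0,
 * argument in r1, result returned in r0. */
long semihosting_call(unsigned long operation, uintptr_t arg);

/* SYS_WRITE0 (0x04) writes the NUL-terminated string pointed to by
 * the argument to the debugger console. */
#define SEMIHOSTING_SYS_WRITE0	0x04UL

/* Illustrative use: print a string on the host debugger console. */
static void example_semihosting_puts(const char *str)
{
	(void)semihosting_call(SEMIHOSTING_SYS_WRITE0, (uintptr_t)str);
}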