diff --git a/include/common/bl_common.h b/include/common/bl_common.h index 3aa0836..942843c 100644 --- a/include/common/bl_common.h +++ b/include/common/bl_common.h @@ -50,7 +50,11 @@ * 'entry_point_info' structure at their correct offsets. ******************************************************************************/ #define ENTRY_POINT_INFO_PC_OFFSET 0x08 +#ifdef AARCH32 +#define ENTRY_POINT_INFO_ARGS_OFFSET 0x10 +#else #define ENTRY_POINT_INFO_ARGS_OFFSET 0x18 +#endif /* The following are used to set/get image attributes. */ #define PARAM_EP_SECURITY_MASK (0x1) @@ -192,6 +196,13 @@ u_register_t arg7; } aapcs64_params_t; +typedef struct aapcs32_params { + u_register_t arg0; + u_register_t arg1; + u_register_t arg2; + u_register_t arg3; +} aapcs32_params_t; + /*************************************************************************** * This structure provides version information and the size of the * structure, attributes for the structure it represents @@ -216,7 +227,11 @@ param_header_t h; uintptr_t pc; uint32_t spsr; +#ifdef AARCH32 + aapcs32_params_t args; +#else aapcs64_params_t args; +#endif } entry_point_info_t; /***************************************************************************** diff --git a/include/lib/cpus/aarch32/aem_generic.h b/include/lib/cpus/aarch32/aem_generic.h new file mode 100644 index 0000000..9b31367 --- /dev/null +++ b/include/lib/cpus/aarch32/aem_generic.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __AEM_GENERIC_H__ +#define __AEM_GENERIC_H__ + +/* BASE AEM midr for revision 0 */ +#define BASE_AEM_MIDR 0x410FD0F0 + +#endif /* __AEM_GENERIC_H__ */ diff --git a/include/lib/cpus/aarch32/cpu_macros.S b/include/lib/cpus/aarch32/cpu_macros.S new file mode 100644 index 0000000..f58f3e9 --- /dev/null +++ b/include/lib/cpus/aarch32/cpu_macros.S @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef __CPU_MACROS_S__ +#define __CPU_MACROS_S__ + +#include <arch.h> + +#define CPU_IMPL_PN_MASK (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) | \ + (MIDR_PN_MASK << MIDR_PN_SHIFT) + + /* + * Define the offsets to the fields in cpu_ops structure. + */ + .struct 0 +CPU_MIDR: /* cpu_ops midr */ + .space 4 +/* Reset fn is needed during reset */ +CPU_RESET_FUNC: /* cpu_ops reset_func */ + .space 4 +CPU_PWR_DWN_CORE: /* cpu_ops core_pwr_dwn */ + .space 4 +CPU_PWR_DWN_CLUSTER: /* cpu_ops cluster_pwr_dwn */ + .space 4 +CPU_OPS_SIZE = . + + /* + * Convenience macro to declare cpu_ops structure. + * Make sure the structure fields are as per the offsets + * defined above. + */ + .macro declare_cpu_ops _name:req, _midr:req, _noresetfunc = 0 + .section cpu_ops, "a" + .align 2 + .type cpu_ops_\_name, %object + .word \_midr + .if \_noresetfunc + .word 0 + .else + .word \_name\()_reset_func + .endif + .word \_name\()_core_pwr_dwn + .word \_name\()_cluster_pwr_dwn + .endm + +#endif /* __CPU_MACROS_S__ */ diff --git a/include/lib/el3_runtime/aarch32/context.h b/include/lib/el3_runtime/aarch32/context.h new file mode 100644 index 0000000..5108141 --- /dev/null +++ b/include/lib/el3_runtime/aarch32/context.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission.
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __CONTEXT_H__ +#define __CONTEXT_H__ + +/******************************************************************************* + * Constants that allow assembler code to access members of the 'regs' + * structure at their correct offsets. + ******************************************************************************/ +#define CTX_REGS_OFFSET 0x0 +#define CTX_GPREG_R0 0x0 +#define CTX_GPREG_R1 0x4 +#define CTX_GPREG_R2 0x8 +#define CTX_GPREG_R3 0xC +#define CTX_LR 0x10 +#define CTX_SCR 0x14 +#define CTX_SPSR 0x18 +#define CTX_NS_SCTLR 0x1C +#define CTX_REGS_END 0x20 + +#ifndef __ASSEMBLY__ + +#include <cassert.h> +#include <stdint.h> + +/* + * Common constants to help define the 'cpu_context' structure and its + * members below. + */ +#define WORD_SHIFT 2 +#define DEFINE_REG_STRUCT(name, num_regs) \ + typedef struct name { \ + uint32_t _regs[num_regs]; \ + } __aligned(8) name##_t + +/* Constants to determine the size of individual context structures */ +#define CTX_REG_ALL (CTX_REGS_END >> WORD_SHIFT) + +DEFINE_REG_STRUCT(regs, CTX_REG_ALL); + +#undef CTX_REG_ALL + +#define read_ctx_reg(ctx, offset) ((ctx)->_regs[offset >> WORD_SHIFT]) +#define write_ctx_reg(ctx, offset, val) (((ctx)->_regs[offset >> WORD_SHIFT]) \ + = val) +typedef struct cpu_context { + regs_t regs_ctx; +} cpu_context_t; + +/* Macros to access members of the 'cpu_context_t' structure */ +#define get_regs_ctx(h) (&((cpu_context_t *) h)->regs_ctx) + +/* + * Compile time assertions related to the 'cpu_context' structure to + * ensure that the assembler and the compiler view of the offsets of + * the structure members is the same.
+ */ +CASSERT(CTX_REGS_OFFSET == __builtin_offsetof(cpu_context_t, regs_ctx), \ + assert_core_context_regs_offset_mismatch); + +#endif /* __ASSEMBLY__ */ + +#endif /* __CONTEXT_H__ */ diff --git a/include/lib/el3_runtime/context_mgmt.h b/include/lib/el3_runtime/context_mgmt.h index 672ea11..b264fc3 100644 --- a/include/lib/el3_runtime/context_mgmt.h +++ b/include/lib/el3_runtime/context_mgmt.h @@ -42,11 +42,6 @@ * Function & variable prototypes ******************************************************************************/ void cm_init(void); -void *cm_get_context_by_mpidr(uint64_t mpidr, - uint32_t security_state) __deprecated; -void cm_set_context_by_mpidr(uint64_t mpidr, - void *context, - uint32_t security_state) __deprecated; void *cm_get_context_by_index(unsigned int cpu_idx, unsigned int security_state); void cm_set_context_by_index(unsigned int cpu_idx, @@ -54,12 +49,12 @@ unsigned int security_state); void *cm_get_context(uint32_t security_state); void cm_set_context(void *context, uint32_t security_state); -void cm_init_context(uint64_t mpidr, - const struct entry_point_info *ep) __deprecated; void cm_init_my_context(const struct entry_point_info *ep); void cm_init_context_by_index(unsigned int cpu_idx, const struct entry_point_info *ep); void cm_prepare_el3_exit(uint32_t security_state); + +#ifndef AARCH32 void cm_el1_sysregs_context_save(uint32_t security_state); void cm_el1_sysregs_context_restore(uint32_t security_state); void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint); @@ -71,6 +66,16 @@ void cm_set_next_eret_context(uint32_t security_state); uint32_t cm_get_scr_el3(uint32_t security_state); + +void cm_init_context(uint64_t mpidr, + const struct entry_point_info *ep) __deprecated; + +void *cm_get_context_by_mpidr(uint64_t mpidr, + uint32_t security_state) __deprecated; +void cm_set_context_by_mpidr(uint64_t mpidr, + void *context, + uint32_t security_state) __deprecated; + /* Inline definitions */ /******************************************************************************* @@ -98,4 +103,5 @@ "msr spsel, #0\n" : : "r" (context)); } +#endif /* AARCH32 */ #endif /* __CM_H__ */ diff --git a/include/lib/el3_runtime/cpu_data.h b/include/lib/el3_runtime/cpu_data.h index 4fc801b..910b153 100644 --- a/include/lib/el3_runtime/cpu_data.h +++ b/include/lib/el3_runtime/cpu_data.h @@ -31,16 +31,28 @@ #ifndef __CPU_DATA_H__ #define __CPU_DATA_H__ +#ifdef AARCH32 + +#if CRASH_REPORTING +#error "Crash reporting is not supported in AArch32" +#endif +#define CPU_DATA_CPU_OPS_PTR 0x0 + +#else /* AARCH32 */ + /* Offsets for the cpu_data structure */ #define CPU_DATA_CRASH_BUF_OFFSET 0x18 +/* need enough space in crash buffer to save 8 registers */ +#define CPU_DATA_CRASH_BUF_SIZE 64 +#define CPU_DATA_CPU_OPS_PTR 0x10 + +#endif /* AARCH32 */ + #if CRASH_REPORTING #define CPU_DATA_LOG2SIZE 7 #else #define CPU_DATA_LOG2SIZE 6 #endif -/* need enough space in crash buffer to save 8 registers */ -#define CPU_DATA_CRASH_BUF_SIZE 64 -#define CPU_DATA_CPU_OPS_PTR 0x10 #ifndef __ASSEMBLY__ @@ -77,7 +89,9 @@ * used for this. ******************************************************************************/ typedef struct cpu_data { +#ifndef AARCH32 void *cpu_context[2]; +#endif uintptr_t cpu_ops_ptr; #if CRASH_REPORTING u_register_t crash_buf[CPU_DATA_CRASH_BUF_SIZE >> 3]; @@ -104,12 +118,15 @@ struct cpu_data *_cpu_data_by_index(uint32_t cpu_index); +#ifndef AARCH32 /* Return the cpu_data structure for the current CPU. 
*/ static inline struct cpu_data *_cpu_data(void) { return (cpu_data_t *)read_tpidr_el3(); } - +#else +struct cpu_data *_cpu_data(void); +#endif /************************************************************************** * APIs for initialising and accessing per-cpu data diff --git a/lib/cpus/aarch32/aem_generic.S b/lib/cpus/aarch32/aem_generic.S new file mode 100644 index 0000000..10ea4e4 --- /dev/null +++ b/lib/cpus/aarch32/aem_generic.S @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include <aem_generic.h> +#include <arch.h> +#include <asm_macros.S> +#include <assert_macros.S> +#include <cpu_macros.S> + +func aem_generic_core_pwr_dwn + /* Assert if cache is enabled */ +#if ASM_ASSERTION + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + /* --------------------------------------------- + * Flush L1 cache to PoU. + * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + b dcsw_op_louis +endfunc aem_generic_core_pwr_dwn + + +func aem_generic_cluster_pwr_dwn + /* Assert if cache is enabled */ +#if ASM_ASSERTION + ldcopr r0, SCTLR + tst r0, #SCTLR_C_BIT + ASM_ASSERT(eq) +#endif + /* --------------------------------------------- + * Flush L1 and L2 caches to PoC. + * --------------------------------------------- + */ + mov r0, #DC_OP_CISW + b dcsw_op_all +endfunc aem_generic_cluster_pwr_dwn + +/* cpu_ops for Base AEM FVP */ +declare_cpu_ops aem_generic, BASE_AEM_MIDR, 1 diff --git a/lib/cpus/aarch32/cpu_helpers.S b/lib/cpus/aarch32/cpu_helpers.S new file mode 100644 index 0000000..927a6f5 --- /dev/null +++ b/lib/cpus/aarch32/cpu_helpers.S @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer.
+ * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <arch.h> +#include <asm_macros.S> +#include <assert_macros.S> +#include <cpu_data.h> +#include <cpu_macros.S> + + /* + * The reset handler common to all platforms. After a matching + * cpu_ops structure entry is found, the corresponding reset_handler + * in the cpu_ops is invoked. The reset handler is invoked very early + * in the boot sequence and it is assumed that we can clobber r0 - r10 + * without the need to follow AAPCS. + * Clobbers: r0 - r10 + */ + .globl reset_handler +func reset_handler + mov r10, lr + + /* The plat_reset_handler can clobber r0 - r9 */ + bl plat_reset_handler + + /* Get the matching cpu_ops pointer (clobbers: r0 - r5) */ + bl get_cpu_ops_ptr + +#if ASM_ASSERTION + cmp r0, #0 + ASM_ASSERT(ne) +#endif + + /* Get the cpu_ops reset handler */ + ldr r1, [r0, #CPU_RESET_FUNC] + cmp r1, #0 + mov lr, r10 + bxne r1 + bx lr +endfunc reset_handler + + /* + * The prepare core power down function for all platforms. After + * the cpu_ops pointer is retrieved from cpu_data, the corresponding + * pwr_dwn_core in the cpu_ops is invoked. Follows AAPCS. + */ + .globl prepare_core_pwr_dwn +func prepare_core_pwr_dwn + push {lr} + bl _cpu_data + pop {lr} + + ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR] +#if ASM_ASSERTION + cmp r1, #0 + ASM_ASSERT(ne) +#endif + + /* Get the cpu_ops core_pwr_dwn handler */ + ldr r0, [r1, #CPU_PWR_DWN_CORE] + bx r0 +endfunc prepare_core_pwr_dwn + + /* + * The prepare cluster power down function for all platforms. After + * the cpu_ops pointer is retrieved from cpu_data, the corresponding + * pwr_dwn_cluster in the cpu_ops is invoked. Follows AAPCS. + */ + .globl prepare_cluster_pwr_dwn +func prepare_cluster_pwr_dwn + push {lr} + bl _cpu_data + pop {lr} + + ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR] +#if ASM_ASSERTION + cmp r1, #0 + ASM_ASSERT(ne) +#endif + + /* Get the cpu_ops cluster_pwr_dwn handler */ + ldr r0, [r1, #CPU_PWR_DWN_CLUSTER] + bx r0 +endfunc prepare_cluster_pwr_dwn + + /* + * Initializes the cpu_ops_ptr if not already initialized + * in cpu_data. This must only be called after the data cache + * is enabled. AAPCS is followed. + */ + .globl init_cpu_ops +func init_cpu_ops + push {r4 - r6, lr} + bl _cpu_data + mov r6, r0 + ldr r1, [r0, #CPU_DATA_CPU_OPS_PTR] + cmp r1, #0 + bne 1f + bl get_cpu_ops_ptr +#if ASM_ASSERTION + cmp r0, #0 + ASM_ASSERT(ne) +#endif + str r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
+1: + pop {r4 - r6, pc} +endfunc init_cpu_ops + + /* + * The below function returns the cpu_ops structure matching the + * midr of the core. It reads the MIDR and finds the matching + * entry in cpu_ops entries. Only the implementation and part number + * are used to match the entries. + * Return : + * r0 - The matching cpu_ops pointer on Success + * r0 - 0 on failure. + * Clobbers: r0 - r5 + */ + .globl get_cpu_ops_ptr +func get_cpu_ops_ptr + /* Get the cpu_ops start and end locations */ + ldr r4, =(__CPU_OPS_START__ + CPU_MIDR) + ldr r5, =(__CPU_OPS_END__ + CPU_MIDR) + + /* Initialize the return parameter */ + mov r0, #0 + + /* Read the MIDR_EL1 */ + ldcopr r2, MIDR + ldr r3, =CPU_IMPL_PN_MASK + + /* Retain only the implementation and part number using mask */ + and r2, r2, r3 +1: + /* Check if we have reached end of list */ + cmp r4, r5 + bge error_exit + + /* load the midr from the cpu_ops */ + ldr r1, [r4], #CPU_OPS_SIZE + and r1, r1, r3 + + /* Check if midr matches to midr of this core */ + cmp r1, r2 + bne 1b + + /* Subtract the increment and offset to get the cpu-ops pointer */ + sub r0, r4, #(CPU_OPS_SIZE + CPU_MIDR) +error_exit: + bx lr +endfunc get_cpu_ops_ptr diff --git a/lib/el3_runtime/aarch32/context_mgmt.c b/lib/el3_runtime/aarch32/context_mgmt.c new file mode 100644 index 0000000..6915ded --- /dev/null +++ b/lib/el3_runtime/aarch32/context_mgmt.c @@ -0,0 +1,235 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/******************************************************************************* + * Context management library initialisation routine. This library is used by + * runtime services to share pointers to 'cpu_context' structures for the secure + * and non-secure states. Management of the structures and their associated + * memory is not done by the context management library e.g. 
the PSCI service + * manages the cpu context used for entry from and exit to the non-secure state. + * The Secure payload manages the context(s) corresponding to the secure state. + * It also uses this library to get access to the non-secure + * state cpu context pointers. + ******************************************************************************/ +void cm_init(void) +{ + /* + * The context management library has only global data to initialize, but + * that will be done when the BSS is zeroed out + */ +} + +/******************************************************************************* + * The following function initializes the cpu_context 'ctx' for + * first use, and sets the initial entrypoint state as specified by the + * entry_point_info structure. + * + * The security state to initialize is determined by the SECURE attribute + * of the entry_point_info. The function returns a pointer to the initialized + * context and sets this as the next context to return to. + * + * The EE and ST attributes are used to configure the endianness and secure + * timer availability for the new execution context. + * + * To prepare the register state for entry call cm_prepare_el3_exit() and + * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to + * cm_el1_sysregs_context_restore(). + ******************************************************************************/ +static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep) +{ + unsigned int security_state; + uint32_t scr, sctlr; + regs_t *reg_ctx; + + assert(ctx); + + security_state = GET_SECURITY_STATE(ep->h.attr); + + /* Clear any residual register values from the context */ + memset(ctx, 0, sizeof(*ctx)); + + reg_ctx = get_regs_ctx(ctx); + + /* + * Base the context SCR on the current value, adjust for entry point + * specific requirements + */ + scr = read_scr(); + scr &= ~(SCR_NS_BIT | SCR_HCE_BIT); + + if (security_state != SECURE) + scr |= SCR_NS_BIT; + + /* + * Set up SCTLR for the Non Secure context. + * EE bit is taken from the entrypoint attributes + * M, C and I bits must be zero (as required by PSCI specification) + * + * The target exception level is based on the spsr mode requested. + * If execution is requested to hyp mode, HVC is enabled + * via SCR.HCE. + * + * Always compute the SCTLR value and save it in the cpu_context + * - the HYP registers are set up by cm_prepare_el3_exit() as they + * are not part of the stored cpu_context + * + * TODO: In debug builds the spsr should be validated and checked + * against the CPU support, security state, endianness and pc + */ + if (security_state != SECURE) { + sctlr = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0; + sctlr |= SCTLR_RES1; + write_ctx_reg(reg_ctx, CTX_NS_SCTLR, sctlr); + } + + if (GET_M32(ep->spsr) == MODE32_hyp) + scr |= SCR_HCE_BIT; + + write_ctx_reg(reg_ctx, CTX_SCR, scr); + write_ctx_reg(reg_ctx, CTX_LR, ep->pc); + write_ctx_reg(reg_ctx, CTX_SPSR, ep->spsr); + + /* + * Store the r0-r3 value from the entrypoint into the context + * Use memcpy as we are in control of the layout of the structures + */ + memcpy((void *)reg_ctx, (void *)&ep->args, sizeof(aapcs32_params_t)); +} + +/******************************************************************************* + * The following function initializes the cpu_context for a CPU specified by + * its `cpu_idx` for first use, and sets the initial entrypoint state as + * specified by the entry_point_info structure.
+ ******************************************************************************/ +void cm_init_context_by_index(unsigned int cpu_idx, + const entry_point_info_t *ep) +{ + cpu_context_t *ctx; + ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr)); + cm_init_context_common(ctx, ep); +} + +/******************************************************************************* + * The following function initializes the cpu_context for the current CPU + * for first use, and sets the initial entrypoint state as specified by the + * entry_point_info structure. + ******************************************************************************/ +void cm_init_my_context(const entry_point_info_t *ep) +{ + cpu_context_t *ctx; + ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr)); + cm_init_context_common(ctx, ep); +} + +/******************************************************************************* + * Prepare the CPU system registers for first entry into secure or normal world + * + * If execution is requested to hyp mode, HSCTLR is initialized + * If execution is requested to non-secure PL1, and the CPU supports + * HYP mode then HYP mode is disabled by configuring all necessary HYP mode + * registers. + ******************************************************************************/ +void cm_prepare_el3_exit(uint32_t security_state) +{ + uint32_t sctlr, scr, hcptr; + cpu_context_t *ctx = cm_get_context(security_state); + + assert(ctx); + + if (security_state == NON_SECURE) { + scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR); + if (scr & SCR_HCE_BIT) { + /* Use SCTLR value to initialize HSCTLR */ + sctlr = read_ctx_reg(get_regs_ctx(ctx), + CTX_NS_SCTLR); + sctlr |= HSCTLR_RES1; + /* Temporarily set the NS bit to access HSCTLR */ + write_scr(read_scr() | SCR_NS_BIT); + /* + * Make sure the write to SCR is complete so that + * we can access HSCTLR + */ + isb(); + write_hsctlr(sctlr); + isb(); + + write_scr(read_scr() & ~SCR_NS_BIT); + isb(); + } else if (read_id_pfr1() & + (ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) { + /* Set the NS bit to access HCR, HCPTR, CNTHCTL, VPIDR, VMPIDR */ + write_scr(read_scr() | SCR_NS_BIT); + isb(); + + /* PL2 present but unused, need to disable safely */ + write_hcr(0); + + /* HSCTLR : can be ignored when bypassing */ + + /* HCPTR : disable all traps TCPAC, TTA, TCP */ + hcptr = read_hcptr(); + hcptr &= ~(TCPAC_BIT | TTA_BIT | TCP11_BIT | TCP10_BIT); + write_hcptr(hcptr); + + /* Enable EL1 access to timer */ + write_cnthctl(PL1PCEN_BIT | PL1PCTEN_BIT); + + /* Reset CNTVOFF_EL2 */ + write64_cntvoff(0); + + /* Set VPIDR, VMPIDR to match MIDR, MPIDR */ + write_vpidr(read_midr()); + write_vmpidr(read_mpidr()); + + /* + * Reset VTTBR. + * Needed because cache maintenance operations depend on + * the VMID even when non-secure EL1&0 stage 2 address + * translation are disabled. + */ + write64_vttbr(0); + isb(); + + write_scr(read_scr() & ~SCR_NS_BIT); + isb(); + } + } +} diff --git a/lib/el3_runtime/aarch32/cpu_data.S b/lib/el3_runtime/aarch32/cpu_data.S new file mode 100644 index 0000000..b97911f --- /dev/null +++ b/lib/el3_runtime/aarch32/cpu_data.S @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. 
+ * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <asm_macros.S> +#include <cpu_data.h> + + .globl _cpu_data + .globl _cpu_data_by_index + +/* ----------------------------------------------------------------- + * cpu_data_t *_cpu_data(void) + * + * Return the cpu_data structure for the current CPU. + * ----------------------------------------------------------------- + */ +func _cpu_data + push {lr} + bl plat_my_core_pos + pop {lr} + b _cpu_data_by_index +endfunc _cpu_data + +/* ----------------------------------------------------------------- + * cpu_data_t *_cpu_data_by_index(uint32_t cpu_index) + * + * Return the cpu_data structure for the CPU with given linear index + * + * This can be called without a valid stack. + * clobbers: r0, r1 + * ----------------------------------------------------------------- + */ +func _cpu_data_by_index + ldr r1, =percpu_data + add r0, r1, r0, LSL #CPU_DATA_LOG2SIZE + bx lr +endfunc _cpu_data_by_index diff --git a/lib/locks/exclusive/aarch32/spinlock.S b/lib/locks/exclusive/aarch32/spinlock.S new file mode 100644 index 0000000..f3a2bc3 --- /dev/null +++ b/lib/locks/exclusive/aarch32/spinlock.S @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <asm_macros.S> + + .globl spin_lock + .globl spin_unlock + + +func spin_lock + mov r2, #1 +1: + ldrex r1, [r0] + cmp r1, #0 + wfene + strexeq r1, r2, [r0] + cmpeq r1, #0 + bne 1b + dmb + bx lr +endfunc spin_lock + + +func spin_unlock + mov r1, #0 + stl r1, [r0] + bx lr +endfunc spin_unlock
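
Usage sketch (illustrative, not part of the patch): a runtime service built on top of these AArch32 additions would describe the next image with an entry_point_info_t, let the context-management code fill the per-CPU 'regs' context, and then program the PL2/PL1 registers before leaving monitor mode. The sketch assumes the usual Trusted Firmware headers (bl_common.h, context_mgmt.h, spinlock.h, stdint.h, string.h); prepare_ns_entry_sketch() and bookkeeping_lock are hypothetical names, not symbols introduced by this patch.

#include <stdint.h>
#include <string.h>

#include <bl_common.h>
#include <context_mgmt.h>
#include <spinlock.h>

static spinlock_t bookkeeping_lock;	/* hypothetical lock for shared state */

static void prepare_ns_entry_sketch(uintptr_t ns_pc, uint32_t ns_spsr)
{
	entry_point_info_t ns_ep;

	/* Describe the non-secure entry point: PC, SPSR and r0-r3 arguments */
	memset(&ns_ep, 0, sizeof(ns_ep));
	SET_PARAM_HEAD(&ns_ep, PARAM_EP, VERSION_1, NON_SECURE);
	ns_ep.pc = ns_pc;
	ns_ep.spsr = ns_spsr;

	/* Fill this CPU's non-secure context: SCR, LR (entrypoint), SPSR, r0-r3 */
	cm_init_my_context(&ns_ep);

	/* Program HSCTLR/HCR/CNTHCTL etc., or cleanly disable PL2, before exit */
	cm_prepare_el3_exit(NON_SECURE);

	/* The new AArch32 spinlock serialises any cross-CPU bookkeeping */
	spin_lock(&bookkeeping_lock);
	/* ... shared state updates would go here ... */
	spin_unlock(&bookkeeping_lock);
}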