diff --git a/bl1/aarch64/bl1_exceptions.S b/bl1/aarch64/bl1_exceptions.S index 5415d39..de3086d 100644 --- a/bl1/aarch64/bl1_exceptions.S +++ b/bl1/aarch64/bl1_exceptions.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -31,7 +31,6 @@ #include #include #include -#include .globl bl1_exceptions diff --git a/bl31/aarch64/context.S b/bl31/aarch64/context.S deleted file mode 100644 index a72879b..0000000 --- a/bl31/aarch64/context.S +++ /dev/null @@ -1,305 +0,0 @@ -/* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include - -/* ----------------------------------------------------- - * The following function strictly follows the AArch64 - * PCS to use x9-x17 (temporary caller-saved registers) - * to save EL1 system register context. It assumes that - * 'x0' is pointing to a 'el1_sys_regs' structure where - * the register context will be saved. 
- * ----------------------------------------------------- - */ - .global el1_sysregs_context_save -func el1_sysregs_context_save - - mrs x9, spsr_el1 - mrs x10, elr_el1 - stp x9, x10, [x0, #CTX_SPSR_EL1] - - mrs x11, spsr_abt - mrs x12, spsr_und - stp x11, x12, [x0, #CTX_SPSR_ABT] - - mrs x13, spsr_irq - mrs x14, spsr_fiq - stp x13, x14, [x0, #CTX_SPSR_IRQ] - - mrs x15, sctlr_el1 - mrs x16, actlr_el1 - stp x15, x16, [x0, #CTX_SCTLR_EL1] - - mrs x17, cpacr_el1 - mrs x9, csselr_el1 - stp x17, x9, [x0, #CTX_CPACR_EL1] - - mrs x10, sp_el1 - mrs x11, esr_el1 - stp x10, x11, [x0, #CTX_SP_EL1] - - mrs x12, ttbr0_el1 - mrs x13, ttbr1_el1 - stp x12, x13, [x0, #CTX_TTBR0_EL1] - - mrs x14, mair_el1 - mrs x15, amair_el1 - stp x14, x15, [x0, #CTX_MAIR_EL1] - - mrs x16, tcr_el1 - mrs x17, tpidr_el1 - stp x16, x17, [x0, #CTX_TCR_EL1] - - mrs x9, tpidr_el0 - mrs x10, tpidrro_el0 - stp x9, x10, [x0, #CTX_TPIDR_EL0] - - mrs x11, dacr32_el2 - mrs x12, ifsr32_el2 - stp x11, x12, [x0, #CTX_DACR32_EL2] - - mrs x13, par_el1 - mrs x14, far_el1 - stp x13, x14, [x0, #CTX_PAR_EL1] - - mrs x15, afsr0_el1 - mrs x16, afsr1_el1 - stp x15, x16, [x0, #CTX_AFSR0_EL1] - - mrs x17, contextidr_el1 - mrs x9, vbar_el1 - stp x17, x9, [x0, #CTX_CONTEXTIDR_EL1] - - /* Save NS timer registers if the build has instructed so */ -#if NS_TIMER_SWITCH - mrs x10, cntp_ctl_el0 - mrs x11, cntp_cval_el0 - stp x10, x11, [x0, #CTX_CNTP_CTL_EL0] - - mrs x12, cntv_ctl_el0 - mrs x13, cntv_cval_el0 - stp x12, x13, [x0, #CTX_CNTV_CTL_EL0] - - mrs x14, cntkctl_el1 - str x14, [x0, #CTX_CNTKCTL_EL1] -#endif - - mrs x15, fpexc32_el2 - str x15, [x0, #CTX_FP_FPEXC32_EL2] - - ret -endfunc el1_sysregs_context_save - -/* ----------------------------------------------------- - * The following function strictly follows the AArch64 - * PCS to use x9-x17 (temporary caller-saved registers) - * to restore EL1 system register context. 
It assumes - * that 'x0' is pointing to a 'el1_sys_regs' structure - * from where the register context will be restored - * ----------------------------------------------------- - */ - .global el1_sysregs_context_restore -func el1_sysregs_context_restore - - ldp x9, x10, [x0, #CTX_SPSR_EL1] - msr spsr_el1, x9 - msr elr_el1, x10 - - ldp x11, x12, [x0, #CTX_SPSR_ABT] - msr spsr_abt, x11 - msr spsr_und, x12 - - ldp x13, x14, [x0, #CTX_SPSR_IRQ] - msr spsr_irq, x13 - msr spsr_fiq, x14 - - ldp x15, x16, [x0, #CTX_SCTLR_EL1] - msr sctlr_el1, x15 - msr actlr_el1, x16 - - ldp x17, x9, [x0, #CTX_CPACR_EL1] - msr cpacr_el1, x17 - msr csselr_el1, x9 - - ldp x10, x11, [x0, #CTX_SP_EL1] - msr sp_el1, x10 - msr esr_el1, x11 - - ldp x12, x13, [x0, #CTX_TTBR0_EL1] - msr ttbr0_el1, x12 - msr ttbr1_el1, x13 - - ldp x14, x15, [x0, #CTX_MAIR_EL1] - msr mair_el1, x14 - msr amair_el1, x15 - - ldp x16, x17, [x0, #CTX_TCR_EL1] - msr tcr_el1, x16 - msr tpidr_el1, x17 - - ldp x9, x10, [x0, #CTX_TPIDR_EL0] - msr tpidr_el0, x9 - msr tpidrro_el0, x10 - - ldp x11, x12, [x0, #CTX_DACR32_EL2] - msr dacr32_el2, x11 - msr ifsr32_el2, x12 - - ldp x13, x14, [x0, #CTX_PAR_EL1] - msr par_el1, x13 - msr far_el1, x14 - - ldp x15, x16, [x0, #CTX_AFSR0_EL1] - msr afsr0_el1, x15 - msr afsr1_el1, x16 - - ldp x17, x9, [x0, #CTX_CONTEXTIDR_EL1] - msr contextidr_el1, x17 - msr vbar_el1, x9 - - /* Restore NS timer registers if the build has instructed so */ -#if NS_TIMER_SWITCH - ldp x10, x11, [x0, #CTX_CNTP_CTL_EL0] - msr cntp_ctl_el0, x10 - msr cntp_cval_el0, x11 - - ldp x12, x13, [x0, #CTX_CNTV_CTL_EL0] - msr cntv_ctl_el0, x12 - msr cntv_cval_el0, x13 - - ldr x14, [x0, #CTX_CNTKCTL_EL1] - msr cntkctl_el1, x14 -#endif - - ldr x15, [x0, #CTX_FP_FPEXC32_EL2] - msr fpexc32_el2, x15 - - /* No explict ISB required here as ERET covers it */ - - ret -endfunc el1_sysregs_context_restore - -/* ----------------------------------------------------- - * The following function follows the aapcs_64 strictly - * to use x9-x17 (temporary caller-saved registers - * according to AArch64 PCS) to save floating point - * register context. It assumes that 'x0' is pointing to - * a 'fp_regs' structure where the register context will - * be saved. - * - * Access to VFP registers will trap if CPTR_EL3.TFP is - * set. However currently we don't use VFP registers - * nor set traps in Trusted Firmware, and assume it's - * cleared - * - * TODO: Revisit when VFP is used in secure world - * ----------------------------------------------------- - */ -#if CTX_INCLUDE_FPREGS - .global fpregs_context_save -func fpregs_context_save - stp q0, q1, [x0, #CTX_FP_Q0] - stp q2, q3, [x0, #CTX_FP_Q2] - stp q4, q5, [x0, #CTX_FP_Q4] - stp q6, q7, [x0, #CTX_FP_Q6] - stp q8, q9, [x0, #CTX_FP_Q8] - stp q10, q11, [x0, #CTX_FP_Q10] - stp q12, q13, [x0, #CTX_FP_Q12] - stp q14, q15, [x0, #CTX_FP_Q14] - stp q16, q17, [x0, #CTX_FP_Q16] - stp q18, q19, [x0, #CTX_FP_Q18] - stp q20, q21, [x0, #CTX_FP_Q20] - stp q22, q23, [x0, #CTX_FP_Q22] - stp q24, q25, [x0, #CTX_FP_Q24] - stp q26, q27, [x0, #CTX_FP_Q26] - stp q28, q29, [x0, #CTX_FP_Q28] - stp q30, q31, [x0, #CTX_FP_Q30] - - mrs x9, fpsr - str x9, [x0, #CTX_FP_FPSR] - - mrs x10, fpcr - str x10, [x0, #CTX_FP_FPCR] - - ret -endfunc fpregs_context_save - -/* ----------------------------------------------------- - * The following function follows the aapcs_64 strictly - * to use x9-x17 (temporary caller-saved registers - * according to AArch64 PCS) to restore floating point - * register context. 
It assumes that 'x0' is pointing to
- * a 'fp_regs' structure from where the register context
- * will be restored.
- *
- * Access to VFP registers will trap if CPTR_EL3.TFP is
- * set. However currently we don't use VFP registers
- * nor set traps in Trusted Firmware, and assume it's
- * cleared
- *
- * TODO: Revisit when VFP is used in secure world
- * -----------------------------------------------------
- */
-	.global	fpregs_context_restore
-func fpregs_context_restore
-	ldp	q0, q1, [x0, #CTX_FP_Q0]
-	ldp	q2, q3, [x0, #CTX_FP_Q2]
-	ldp	q4, q5, [x0, #CTX_FP_Q4]
-	ldp	q6, q7, [x0, #CTX_FP_Q6]
-	ldp	q8, q9, [x0, #CTX_FP_Q8]
-	ldp	q10, q11, [x0, #CTX_FP_Q10]
-	ldp	q12, q13, [x0, #CTX_FP_Q12]
-	ldp	q14, q15, [x0, #CTX_FP_Q14]
-	ldp	q16, q17, [x0, #CTX_FP_Q16]
-	ldp	q18, q19, [x0, #CTX_FP_Q18]
-	ldp	q20, q21, [x0, #CTX_FP_Q20]
-	ldp	q22, q23, [x0, #CTX_FP_Q22]
-	ldp	q24, q25, [x0, #CTX_FP_Q24]
-	ldp	q26, q27, [x0, #CTX_FP_Q26]
-	ldp	q28, q29, [x0, #CTX_FP_Q28]
-	ldp	q30, q31, [x0, #CTX_FP_Q30]
-
-	ldr	x9, [x0, #CTX_FP_FPSR]
-	msr	fpsr, x9
-
-	ldr	x10, [x0, #CTX_FP_FPCR]
-	msr	fpcr, x10
-
-	/*
-	 * No explict ISB required here as ERET to
-	 * swtich to secure EL1 or non-secure world
-	 * covers it
-	 */
-
-	ret
-endfunc fpregs_context_restore
-#endif /* CTX_INCLUDE_FPREGS */
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index 2835320..dc11e0a 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
@@ -36,7 +36,6 @@
 #include

	.globl	runtime_exceptions
-	.globl	el3_exit

 /* -----------------------------------------------------
 * Handle SMC exceptions separately from other sync.
@@ -426,38 +425,7 @@
 #endif
	blr	x15

-	/* -----------------------------------------------------
-	 * This routine assumes that the SP_EL3 is pointing to
-	 * a valid context structure from where the gp regs and
-	 * other special registers can be retrieved.
-	 *
-	 * Keep it in the same section as smc_handler as this
-	 * function uses a fall-through to el3_exit
-	 * -----------------------------------------------------
-	 */
-el3_exit: ; .type el3_exit, %function
-	/* -----------------------------------------------------
-	 * Save the current SP_EL0 i.e. the EL3 runtime stack
-	 * which will be used for handling the next SMC. Then
-	 * switch to SP_EL3
-	 * -----------------------------------------------------
-	 */
-	mov	x17, sp
-	msr	spsel, #1
-	str	x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
-
-	/* -----------------------------------------------------
-	 * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
-	 * -----------------------------------------------------
-	 */
-	ldr	x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
-	ldp	x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
-	msr	scr_el3, x18
-	msr	spsr_el3, x16
-	msr	elr_el3, x17
-
-	/* Restore saved general purpose registers and return */
-	b	restore_gp_registers_eret
+	b	el3_exit

 smc_unknown:
	/*
@@ -479,51 +447,3 @@
	msr	spsel, #1	/* Switch to SP_ELx */
	bl	report_unhandled_exception
 endfunc smc_handler
-
-	/* -----------------------------------------------------
-	 * The following functions are used to saved and restore
-	 * all the general pupose registers. Ideally we would
-	 * only save and restore the callee saved registers when
-	 * a world switch occurs but that type of implementation
-	 * is more complex. So currently we will always save and
-	 * restore these registers on entry and exit of EL3.
-	 * These are not macros to ensure their invocation fits
-	 * within the 32 instructions per exception vector.
-	 * -----------------------------------------------------
-	 */
-func save_gp_registers
-	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
-	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
-	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
-	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
-	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
-	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
-	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
-	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
-	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
-	save_x18_to_x29_sp_el0
-	ret
-endfunc save_gp_registers
-
-func restore_gp_registers_eret
-	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
-	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
-
-restore_gp_registers_callee_eret:
-	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
-	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
-	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
-	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
-	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
-	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
-	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
-	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
-	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
-	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
-	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
-	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
-	ldp	x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
-	msr	sp_el0, x17
-	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
-	eret
-endfunc restore_gp_registers_eret
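The smc_handler fast path above now ends with blr x15 followed by a plain branch to the common el3_exit, so a runtime service only has to hand back the context pointer it wants unwound. A minimal sketch of a fast SMC handler under that contract, using the standard handler signature and the SMC_RET1 convention from the runtime services framework (my_svc_smc_handler and the returned value are illustrative, not part of this patch):

	/* Hypothetical fast SMC handler: returns a single value in x0. */
	uint64_t my_svc_smc_handler(uint32_t smc_fid,
				    uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4,
				    void *cookie, void *handle, uint64_t flags)
	{
		/* 'handle' is the cpu_context_t saved on SP_EL3 by smc_handler.
		 * SMC_RET1 writes the return value into the saved x0 and returns
		 * the handle; smc_handler then branches to el3_exit, which
		 * restores the GP registers and ERETs to the caller. */
		SMC_RET1(handle, 0);
	}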
diff --git a/bl31/bl31.mk b/bl31/bl31.mk
index a31c1f4..0c2b631 100644
--- a/bl31/bl31.mk
+++ b/bl31/bl31.mk
@@ -29,16 +29,17 @@
 #

 BL31_SOURCES	+=	bl31/bl31_main.c			\
-			bl31/context_mgmt.c			\
			bl31/cpu_data_array.c			\
			bl31/runtime_svc.c			\
			bl31/interrupt_mgmt.c			\
			bl31/aarch64/bl31_arch_setup.c		\
			bl31/aarch64/bl31_entrypoint.S		\
-			bl31/aarch64/context.S			\
			bl31/aarch64/cpu_data.S			\
			bl31/aarch64/runtime_exceptions.S	\
			bl31/aarch64/crash_reporting.S		\
+			bl31/bl31_context_mgmt.c		\
+			common/aarch64/context.S		\
+			common/context_mgmt.c			\
			lib/cpus/aarch64/cpu_helpers.S		\
			lib/locks/exclusive/spinlock.S		\
			services/std_svc/std_svc_setup.c	\
diff --git a/bl31/bl31_context_mgmt.c b/bl31/bl31_context_mgmt.c
new file mode 100644
index 0000000..ae24424
--- /dev/null
+++ b/bl31/bl31_context_mgmt.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+/*******************************************************************************
+ * This function returns a pointer to the most recent 'cpu_context' structure
+ * for the calling CPU that was set as the context for the specified security
+ * state. NULL is returned if no such structure has been specified.
+ ******************************************************************************/
+void *cm_get_context(uint32_t security_state)
+{
+	assert(security_state <= NON_SECURE);
+
+	return get_cpu_data(cpu_context[security_state]);
+}
+
+/*******************************************************************************
+ * This function sets the pointer to the current 'cpu_context' structure for the
+ * specified security state for the calling CPU
+ ******************************************************************************/
+void cm_set_context(void *context, uint32_t security_state)
+{
+	assert(security_state <= NON_SECURE);
+
+	set_cpu_data(cpu_context[security_state], context);
+}
+
+/*******************************************************************************
+ * This function returns a pointer to the most recent 'cpu_context' structure
+ * for the CPU identified by `cpu_idx` that was set as the context for the
+ * specified security state. NULL is returned if no such structure has been
+ * specified.
+ ******************************************************************************/
+void *cm_get_context_by_index(unsigned int cpu_idx,
+				unsigned int security_state)
+{
+	assert(sec_state_is_valid(security_state));
+
+	return get_cpu_data_by_index(cpu_idx, cpu_context[security_state]);
+}
+
+/*******************************************************************************
+ * This function sets the pointer to the current 'cpu_context' structure for the
+ * specified security state for the CPU identified by CPU index.
+ ******************************************************************************/
+void cm_set_context_by_index(unsigned int cpu_idx, void *context,
+				unsigned int security_state)
+{
+	assert(sec_state_is_valid(security_state));
+
+	set_cpu_data_by_index(cpu_idx, cpu_context[security_state], context);
+}
+
+#if !ERROR_DEPRECATED
+/*
+ * These context management helpers are deprecated but are maintained for use
+ * by SPDs which have not migrated to the new API. If ERROR_DEPRECATED
+ * is enabled, these are excluded from the build so as to force users to
+ * migrate to the new API.
+ */
+
+/*******************************************************************************
+ * This function returns a pointer to the most recent 'cpu_context' structure
+ * for the CPU identified by MPIDR that was set as the context for the specified
+ * security state. NULL is returned if no such structure has been specified.
+ ******************************************************************************/
+void *cm_get_context_by_mpidr(uint64_t mpidr, uint32_t security_state)
+{
+	assert(sec_state_is_valid(security_state));
+
+	return cm_get_context_by_index(platform_get_core_pos(mpidr), security_state);
+}
+
+/*******************************************************************************
+ * This function sets the pointer to the current 'cpu_context' structure for the
+ * specified security state for the CPU identified by MPIDR
+ ******************************************************************************/
+void cm_set_context_by_mpidr(uint64_t mpidr, void *context, uint32_t security_state)
+{
+	assert(sec_state_is_valid(security_state));
+
+	cm_set_context_by_index(platform_get_core_pos(mpidr),
+				context, security_state);
+}
+
+/*******************************************************************************
+ * The following function provides a compatibility function for SPDs using the
+ * existing cm library routines. This function is expected to be invoked for
+ * initializing the cpu_context for the CPU specified by MPIDR for first use.
+ ******************************************************************************/
+void cm_init_context(unsigned long mpidr, const entry_point_info_t *ep)
+{
+	if ((mpidr & MPIDR_AFFINITY_MASK) ==
+			(read_mpidr_el1() & MPIDR_AFFINITY_MASK))
+		cm_init_my_context(ep);
+	else
+		cm_init_context_by_index(platform_get_core_pos(mpidr), ep);
+}
+#endif
\ No newline at end of file
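Since the deprecated MPIDR-based helpers above are thin wrappers over the index-based API, migrating an SPD away from them is mechanical: convert the MPIDR to a core position once, or use the calling-CPU accessors directly. A hedged before/after sketch (target_mpidr is illustrative):

	/* Before (deprecated, kept only while !ERROR_DEPRECATED): */
	cpu_context_t *ctx = cm_get_context_by_mpidr(target_mpidr, SECURE);

	/* After (index-based API): */
	cpu_context_t *ctx_new =
		cm_get_context_by_index(platform_get_core_pos(target_mpidr), SECURE);

	/* For the calling CPU, the per-CPU accessor avoids the lookup: */
	cpu_context_t *my_ctx = cm_get_context(SECURE);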
diff --git a/bl31/context_mgmt.c b/bl31/context_mgmt.c
deleted file mode 100644
index 2b619aa..0000000
--- a/bl31/context_mgmt.c
+++ /dev/null
@@ -1,473 +0,0 @@
-/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-
-/*******************************************************************************
- * Context management library initialisation routine. This library is used by
- * runtime services to share pointers to 'cpu_context' structures for the secure
- * and non-secure states. Management of the structures and their associated
- * memory is not done by the context management library e.g. the PSCI service
- * manages the cpu context used for entry from and exit to the non-secure state.
- * The Secure payload dispatcher service manages the context(s) corresponding to
- * the secure state. It also uses this library to get access to the non-secure
- * state cpu context pointers.
- * Lastly, this library provides the api to make SP_EL3 point to the cpu context
- * which will used for programming an entry into a lower EL. The same context
- * will used to save state upon exception entry from that EL.
- ******************************************************************************/
-void cm_init(void)
-{
-	/*
-	 * The context management library has only global data to intialize, but
-	 * that will be done when the BSS is zeroed out
-	 */
-}
-
-/*******************************************************************************
- * This function returns a pointer to the most recent 'cpu_context' structure
- * for the CPU identified by `cpu_idx` that was set as the context for the
- * specified security state. NULL is returned if no such structure has been
- * specified.
- ******************************************************************************/
-void *cm_get_context_by_index(unsigned int cpu_idx,
-				unsigned int security_state)
-{
-	assert(sec_state_is_valid(security_state));
-
-	return get_cpu_data_by_index(cpu_idx, cpu_context[security_state]);
-}
-
-/*******************************************************************************
- * This function sets the pointer to the current 'cpu_context' structure for the
- * specified security state for the CPU identified by CPU index.
- ******************************************************************************/
-void cm_set_context_by_index(unsigned int cpu_idx, void *context,
-				unsigned int security_state)
-{
-	assert(sec_state_is_valid(security_state));
-
-	set_cpu_data_by_index(cpu_idx, cpu_context[security_state], context);
-}
-
-#if !ERROR_DEPRECATED
-/*
- * These context management helpers are deprecated but are maintained for use
- * by SPDs which have not migrated to the new API. If ERROR_DEPRECATED
- * is enabled, these are excluded from the build so as to force users to
- * migrate to the new API.
- */
-
-/*******************************************************************************
- * This function returns a pointer to the most recent 'cpu_context' structure
- * for the CPU identified by MPIDR that was set as the context for the specified
- * security state.
NULL is returned if no such structure has been specified. - ******************************************************************************/ -void *cm_get_context_by_mpidr(uint64_t mpidr, uint32_t security_state) -{ - assert(sec_state_is_valid(security_state)); - - return cm_get_context_by_index(platform_get_core_pos(mpidr), security_state); -} - -/******************************************************************************* - * This function sets the pointer to the current 'cpu_context' structure for the - * specified security state for the CPU identified by MPIDR - ******************************************************************************/ -void cm_set_context_by_mpidr(uint64_t mpidr, void *context, uint32_t security_state) -{ - assert(sec_state_is_valid(security_state)); - - cm_set_context_by_index(platform_get_core_pos(mpidr), - context, security_state); -} - -/******************************************************************************* - * The following function provides a compatibility function for SPDs using the - * existing cm library routines. This function is expected to be invoked for - * initializing the cpu_context for the CPU specified by MPIDR for first use. - ******************************************************************************/ -void cm_init_context(unsigned long mpidr, const entry_point_info_t *ep) -{ - if ((mpidr & MPIDR_AFFINITY_MASK) == - (read_mpidr_el1() & MPIDR_AFFINITY_MASK)) - cm_init_my_context(ep); - else - cm_init_context_by_index(platform_get_core_pos(mpidr), ep); -} -#endif - -/******************************************************************************* - * This function is used to program the context that's used for exception - * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for - * the required security state - ******************************************************************************/ -static inline void cm_set_next_context(void *context) -{ -#if DEBUG - uint64_t sp_mode; - - /* - * Check that this function is called with SP_EL0 as the stack - * pointer - */ - __asm__ volatile("mrs %0, SPSel\n" - : "=r" (sp_mode)); - - assert(sp_mode == MODE_SP_EL0); -#endif - - __asm__ volatile("msr spsel, #1\n" - "mov sp, %0\n" - "msr spsel, #0\n" - : : "r" (context)); -} - -/******************************************************************************* - * The following function initializes the cpu_context 'ctx' for - * first use, and sets the initial entrypoint state as specified by the - * entry_point_info structure. - * - * The security state to initialize is determined by the SECURE attribute - * of the entry_point_info. The function returns a pointer to the initialized - * context and sets this as the next context to return to. - * - * The EE and ST attributes are used to configure the endianess and secure - * timer availability for the new execution context. - * - * To prepare the register state for entry call cm_prepare_el3_exit() and - * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to - * cm_e1_sysreg_context_restore(). 
- ******************************************************************************/ -static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep) -{ - unsigned int security_state; - uint32_t scr_el3; - el3_state_t *state; - gp_regs_t *gp_regs; - unsigned long sctlr_elx; - - assert(ctx); - - security_state = GET_SECURITY_STATE(ep->h.attr); - - /* Clear any residual register values from the context */ - memset(ctx, 0, sizeof(*ctx)); - - /* - * Base the context SCR on the current value, adjust for entry point - * specific requirements and set trap bits from the IMF - * TODO: provide the base/global SCR bits using another mechanism? - */ - scr_el3 = read_scr(); - scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT | - SCR_ST_BIT | SCR_HCE_BIT); - - if (security_state != SECURE) - scr_el3 |= SCR_NS_BIT; - - if (GET_RW(ep->spsr) == MODE_RW_64) - scr_el3 |= SCR_RW_BIT; - - if (EP_GET_ST(ep->h.attr)) - scr_el3 |= SCR_ST_BIT; - - scr_el3 |= get_scr_el3_from_routing_model(security_state); - - /* - * Set up SCTLR_ELx for the target exception level: - * EE bit is taken from the entrpoint attributes - * M, C and I bits must be zero (as required by PSCI specification) - * - * The target exception level is based on the spsr mode requested. - * If execution is requested to EL2 or hyp mode, HVC is enabled - * via SCR_EL3.HCE. - * - * Always compute the SCTLR_EL1 value and save in the cpu_context - * - the EL2 registers are set up by cm_preapre_ns_entry() as they - * are not part of the stored cpu_context - * - * TODO: In debug builds the spsr should be validated and checked - * against the CPU support, security state, endianess and pc - */ - sctlr_elx = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0; - if (GET_RW(ep->spsr) == MODE_RW_64) - sctlr_elx |= SCTLR_EL1_RES1; - else - sctlr_elx |= SCTLR_AARCH32_EL1_RES1; - write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx); - - if ((GET_RW(ep->spsr) == MODE_RW_64 - && GET_EL(ep->spsr) == MODE_EL2) - || (GET_RW(ep->spsr) != MODE_RW_64 - && GET_M32(ep->spsr) == MODE32_hyp)) { - scr_el3 |= SCR_HCE_BIT; - } - - /* Populate EL3 state so that we've the right context before doing ERET */ - state = get_el3state_ctx(ctx); - write_ctx_reg(state, CTX_SCR_EL3, scr_el3); - write_ctx_reg(state, CTX_ELR_EL3, ep->pc); - write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr); - - /* - * Store the X0-X7 value from the entrypoint into the context - * Use memcpy as we are in control of the layout of the structures - */ - gp_regs = get_gpregs_ctx(ctx); - memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t)); -} - -/******************************************************************************* - * The following function initializes the cpu_context for a CPU specified by - * its `cpu_idx` for first use, and sets the initial entrypoint state as - * specified by the entry_point_info structure. - ******************************************************************************/ -void cm_init_context_by_index(unsigned int cpu_idx, - const entry_point_info_t *ep) -{ - cpu_context_t *ctx; - ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr)); - cm_init_context_common(ctx, ep); -} - -/******************************************************************************* - * The following function initializes the cpu_context for the current CPU - * for first use, and sets the initial entrypoint state as specified by the - * entry_point_info structure. 
- ******************************************************************************/ -void cm_init_my_context(const entry_point_info_t *ep) -{ - cpu_context_t *ctx; - ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr)); - cm_init_context_common(ctx, ep); -} - -/******************************************************************************* - * Prepare the CPU system registers for first entry into secure or normal world - * - * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized - * If execution is requested to non-secure EL1 or svc mode, and the CPU supports - * EL2 then EL2 is disabled by configuring all necessary EL2 registers. - * For all entries, the EL1 registers are initialized from the cpu_context - ******************************************************************************/ -void cm_prepare_el3_exit(uint32_t security_state) -{ - uint32_t sctlr_elx, scr_el3, cptr_el2; - cpu_context_t *ctx = cm_get_context(security_state); - - assert(ctx); - - if (security_state == NON_SECURE) { - scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3); - if (scr_el3 & SCR_HCE_BIT) { - /* Use SCTLR_EL1.EE value to initialise sctlr_el2 */ - sctlr_elx = read_ctx_reg(get_sysregs_ctx(ctx), - CTX_SCTLR_EL1); - sctlr_elx &= ~SCTLR_EE_BIT; - sctlr_elx |= SCTLR_EL2_RES1; - write_sctlr_el2(sctlr_elx); - } else if (read_id_aa64pfr0_el1() & - (ID_AA64PFR0_ELX_MASK << ID_AA64PFR0_EL2_SHIFT)) { - /* EL2 present but unused, need to disable safely */ - - /* HCR_EL2 = 0, except RW bit set to match SCR_EL3 */ - write_hcr_el2((scr_el3 & SCR_RW_BIT) ? HCR_RW_BIT : 0); - - /* SCTLR_EL2 : can be ignored when bypassing */ - - /* CPTR_EL2 : disable all traps TCPAC, TTA, TFP */ - cptr_el2 = read_cptr_el2(); - cptr_el2 &= ~(TCPAC_BIT | TTA_BIT | TFP_BIT); - write_cptr_el2(cptr_el2); - - /* Enable EL1 access to timer */ - write_cnthctl_el2(EL1PCEN_BIT | EL1PCTEN_BIT); - - /* Reset CNTVOFF_EL2 */ - write_cntvoff_el2(0); - - /* Set VPIDR, VMPIDR to match MIDR, MPIDR */ - write_vpidr_el2(read_midr_el1()); - write_vmpidr_el2(read_mpidr_el1()); - - /* - * Reset VTTBR_EL2. - * Needed because cache maintenance operations depend on - * the VMID even when non-secure EL1&0 stage 2 address - * translation are disabled. - */ - write_vttbr_el2(0); - } - } - - el1_sysregs_context_restore(get_sysregs_ctx(ctx)); - - cm_set_next_context(ctx); -} - -/******************************************************************************* - * The next four functions are used by runtime services to save and restore - * EL1 context on the 'cpu_context' structure for the specified security - * state. 
- ******************************************************************************/ -void cm_el1_sysregs_context_save(uint32_t security_state) -{ - cpu_context_t *ctx; - - ctx = cm_get_context(security_state); - assert(ctx); - - el1_sysregs_context_save(get_sysregs_ctx(ctx)); -} - -void cm_el1_sysregs_context_restore(uint32_t security_state) -{ - cpu_context_t *ctx; - - ctx = cm_get_context(security_state); - assert(ctx); - - el1_sysregs_context_restore(get_sysregs_ctx(ctx)); -} - -/******************************************************************************* - * This function populates ELR_EL3 member of 'cpu_context' pertaining to the - * given security state with the given entrypoint - ******************************************************************************/ -void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint) -{ - cpu_context_t *ctx; - el3_state_t *state; - - ctx = cm_get_context(security_state); - assert(ctx); - - /* Populate EL3 state so that ERET jumps to the correct entry */ - state = get_el3state_ctx(ctx); - write_ctx_reg(state, CTX_ELR_EL3, entrypoint); -} - -/******************************************************************************* - * This function populates ELR_EL3 and SPSR_EL3 members of 'cpu_context' - * pertaining to the given security state - ******************************************************************************/ -void cm_set_elr_spsr_el3(uint32_t security_state, - uint64_t entrypoint, uint32_t spsr) -{ - cpu_context_t *ctx; - el3_state_t *state; - - ctx = cm_get_context(security_state); - assert(ctx); - - /* Populate EL3 state so that ERET jumps to the correct entry */ - state = get_el3state_ctx(ctx); - write_ctx_reg(state, CTX_ELR_EL3, entrypoint); - write_ctx_reg(state, CTX_SPSR_EL3, spsr); -} - -/******************************************************************************* - * This function updates a single bit in the SCR_EL3 member of the 'cpu_context' - * pertaining to the given security state using the value and bit position - * specified in the parameters. It preserves all other bits. - ******************************************************************************/ -void cm_write_scr_el3_bit(uint32_t security_state, - uint32_t bit_pos, - uint32_t value) -{ - cpu_context_t *ctx; - el3_state_t *state; - uint32_t scr_el3; - - ctx = cm_get_context(security_state); - assert(ctx); - - /* Ensure that the bit position is a valid one */ - assert((1 << bit_pos) & SCR_VALID_BIT_MASK); - - /* Ensure that the 'value' is only a bit wide */ - assert(value <= 1); - - /* - * Get the SCR_EL3 value from the cpu context, clear the desired bit - * and set it to its new value. - */ - state = get_el3state_ctx(ctx); - scr_el3 = read_ctx_reg(state, CTX_SCR_EL3); - scr_el3 &= ~(1 << bit_pos); - scr_el3 |= value << bit_pos; - write_ctx_reg(state, CTX_SCR_EL3, scr_el3); -} - -/******************************************************************************* - * This function retrieves SCR_EL3 member of 'cpu_context' pertaining to the - * given security state. 
- ******************************************************************************/ -uint32_t cm_get_scr_el3(uint32_t security_state) -{ - cpu_context_t *ctx; - el3_state_t *state; - - ctx = cm_get_context(security_state); - assert(ctx); - - /* Populate EL3 state so that ERET jumps to the correct entry */ - state = get_el3state_ctx(ctx); - return read_ctx_reg(state, CTX_SCR_EL3); -} - -/******************************************************************************* - * This function is used to program the context that's used for exception - * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for - * the required security state - ******************************************************************************/ -void cm_set_next_eret_context(uint32_t security_state) -{ - cpu_context_t *ctx; - - ctx = cm_get_context(security_state); - assert(ctx); - - cm_set_next_context(ctx); -} diff --git a/common/aarch64/context.S b/common/aarch64/context.S new file mode 100644 index 0000000..3d13a80 --- /dev/null +++ b/common/aarch64/context.S @@ -0,0 +1,401 @@ +/* + * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include + + .global el1_sysregs_context_save + .global el1_sysregs_context_restore +#if CTX_INCLUDE_FPREGS + .global fpregs_context_save + .global fpregs_context_restore +#endif + .global save_gp_registers + .global restore_gp_registers_eret + .global restore_gp_registers_callee_eret + .global el3_exit + +/* ----------------------------------------------------- + * The following function strictly follows the AArch64 + * PCS to use x9-x17 (temporary caller-saved registers) + * to save EL1 system register context. It assumes that + * 'x0' is pointing to a 'el1_sys_regs' structure where + * the register context will be saved. 
+ * -----------------------------------------------------
+ */
+func el1_sysregs_context_save
+
+	mrs	x9, spsr_el1
+	mrs	x10, elr_el1
+	stp	x9, x10, [x0, #CTX_SPSR_EL1]
+
+	mrs	x11, spsr_abt
+	mrs	x12, spsr_und
+	stp	x11, x12, [x0, #CTX_SPSR_ABT]
+
+	mrs	x13, spsr_irq
+	mrs	x14, spsr_fiq
+	stp	x13, x14, [x0, #CTX_SPSR_IRQ]
+
+	mrs	x15, sctlr_el1
+	mrs	x16, actlr_el1
+	stp	x15, x16, [x0, #CTX_SCTLR_EL1]
+
+	mrs	x17, cpacr_el1
+	mrs	x9, csselr_el1
+	stp	x17, x9, [x0, #CTX_CPACR_EL1]
+
+	mrs	x10, sp_el1
+	mrs	x11, esr_el1
+	stp	x10, x11, [x0, #CTX_SP_EL1]
+
+	mrs	x12, ttbr0_el1
+	mrs	x13, ttbr1_el1
+	stp	x12, x13, [x0, #CTX_TTBR0_EL1]
+
+	mrs	x14, mair_el1
+	mrs	x15, amair_el1
+	stp	x14, x15, [x0, #CTX_MAIR_EL1]
+
+	mrs	x16, tcr_el1
+	mrs	x17, tpidr_el1
+	stp	x16, x17, [x0, #CTX_TCR_EL1]
+
+	mrs	x9, tpidr_el0
+	mrs	x10, tpidrro_el0
+	stp	x9, x10, [x0, #CTX_TPIDR_EL0]
+
+	mrs	x11, dacr32_el2
+	mrs	x12, ifsr32_el2
+	stp	x11, x12, [x0, #CTX_DACR32_EL2]
+
+	mrs	x13, par_el1
+	mrs	x14, far_el1
+	stp	x13, x14, [x0, #CTX_PAR_EL1]
+
+	mrs	x15, afsr0_el1
+	mrs	x16, afsr1_el1
+	stp	x15, x16, [x0, #CTX_AFSR0_EL1]
+
+	mrs	x17, contextidr_el1
+	mrs	x9, vbar_el1
+	stp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
+
+	/* Save NS timer registers if the build has instructed so */
+#if NS_TIMER_SWITCH
+	mrs	x10, cntp_ctl_el0
+	mrs	x11, cntp_cval_el0
+	stp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]
+
+	mrs	x12, cntv_ctl_el0
+	mrs	x13, cntv_cval_el0
+	stp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]
+
+	mrs	x14, cntkctl_el1
+	str	x14, [x0, #CTX_CNTKCTL_EL1]
+#endif
+
+	mrs	x15, fpexc32_el2
+	str	x15, [x0, #CTX_FP_FPEXC32_EL2]
+
+	ret
+endfunc el1_sysregs_context_save
+
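On the C side these routines are reached through the sysregs accessor of a cpu_context, not with the context pointer itself; the cm_el1_sysregs_* wrappers later in this patch call them exactly that way. A minimal sketch of the assumed C binding (the prototypes mirror what context.h is expected to declare; save_secure_el1_state is illustrative):

	/* Assumed prototypes for the assembly routines in this file. */
	void el1_sysregs_context_save(el1_sys_regs_t *regs);
	void el1_sysregs_context_restore(el1_sys_regs_t *regs);

	static void save_secure_el1_state(void)
	{
		cpu_context_t *ctx = cm_get_context(SECURE);

		assert(ctx);
		/* Pass the EL1 system register sub-structure, not the
		 * whole context, so x0 points at the el1_sys_regs area. */
		el1_sysregs_context_save(get_sysregs_ctx(ctx));
	}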
+/* -----------------------------------------------------
+ * The following function strictly follows the AArch64
+ * PCS to use x9-x17 (temporary caller-saved registers)
+ * to restore EL1 system register context. It assumes
+ * that 'x0' is pointing to an 'el1_sys_regs' structure
+ * from where the register context will be restored
+ * -----------------------------------------------------
+ */
+func el1_sysregs_context_restore
+
+	ldp	x9, x10, [x0, #CTX_SPSR_EL1]
+	msr	spsr_el1, x9
+	msr	elr_el1, x10
+
+	ldp	x11, x12, [x0, #CTX_SPSR_ABT]
+	msr	spsr_abt, x11
+	msr	spsr_und, x12
+
+	ldp	x13, x14, [x0, #CTX_SPSR_IRQ]
+	msr	spsr_irq, x13
+	msr	spsr_fiq, x14
+
+	ldp	x15, x16, [x0, #CTX_SCTLR_EL1]
+	msr	sctlr_el1, x15
+	msr	actlr_el1, x16
+
+	ldp	x17, x9, [x0, #CTX_CPACR_EL1]
+	msr	cpacr_el1, x17
+	msr	csselr_el1, x9
+
+	ldp	x10, x11, [x0, #CTX_SP_EL1]
+	msr	sp_el1, x10
+	msr	esr_el1, x11
+
+	ldp	x12, x13, [x0, #CTX_TTBR0_EL1]
+	msr	ttbr0_el1, x12
+	msr	ttbr1_el1, x13
+
+	ldp	x14, x15, [x0, #CTX_MAIR_EL1]
+	msr	mair_el1, x14
+	msr	amair_el1, x15
+
+	ldp	x16, x17, [x0, #CTX_TCR_EL1]
+	msr	tcr_el1, x16
+	msr	tpidr_el1, x17
+
+	ldp	x9, x10, [x0, #CTX_TPIDR_EL0]
+	msr	tpidr_el0, x9
+	msr	tpidrro_el0, x10
+
+	ldp	x11, x12, [x0, #CTX_DACR32_EL2]
+	msr	dacr32_el2, x11
+	msr	ifsr32_el2, x12
+
+	ldp	x13, x14, [x0, #CTX_PAR_EL1]
+	msr	par_el1, x13
+	msr	far_el1, x14
+
+	ldp	x15, x16, [x0, #CTX_AFSR0_EL1]
+	msr	afsr0_el1, x15
+	msr	afsr1_el1, x16
+
+	ldp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
+	msr	contextidr_el1, x17
+	msr	vbar_el1, x9
+
+	/* Restore NS timer registers if the build has instructed so */
+#if NS_TIMER_SWITCH
+	ldp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]
+	msr	cntp_ctl_el0, x10
+	msr	cntp_cval_el0, x11
+
+	ldp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]
+	msr	cntv_ctl_el0, x12
+	msr	cntv_cval_el0, x13
+
+	ldr	x14, [x0, #CTX_CNTKCTL_EL1]
+	msr	cntkctl_el1, x14
+#endif
+
+	ldr	x15, [x0, #CTX_FP_FPEXC32_EL2]
+	msr	fpexc32_el2, x15
+
+	/* No explicit ISB required here as ERET covers it */
+
+	ret
+endfunc el1_sysregs_context_restore
+
+/* -----------------------------------------------------
+ * The following function follows the aapcs_64 strictly
+ * to use x9-x17 (temporary caller-saved registers
+ * according to AArch64 PCS) to save floating point
+ * register context. It assumes that 'x0' is pointing to
+ * a 'fp_regs' structure where the register context will
+ * be saved.
+ *
+ * Access to VFP registers will trap if CPTR_EL3.TFP is
+ * set. However currently we don't use VFP registers
+ * nor set traps in Trusted Firmware, and assume it's
+ * cleared
+ *
+ * TODO: Revisit when VFP is used in secure world
+ * -----------------------------------------------------
+ */
+#if CTX_INCLUDE_FPREGS
+func fpregs_context_save
+	stp	q0, q1, [x0, #CTX_FP_Q0]
+	stp	q2, q3, [x0, #CTX_FP_Q2]
+	stp	q4, q5, [x0, #CTX_FP_Q4]
+	stp	q6, q7, [x0, #CTX_FP_Q6]
+	stp	q8, q9, [x0, #CTX_FP_Q8]
+	stp	q10, q11, [x0, #CTX_FP_Q10]
+	stp	q12, q13, [x0, #CTX_FP_Q12]
+	stp	q14, q15, [x0, #CTX_FP_Q14]
+	stp	q16, q17, [x0, #CTX_FP_Q16]
+	stp	q18, q19, [x0, #CTX_FP_Q18]
+	stp	q20, q21, [x0, #CTX_FP_Q20]
+	stp	q22, q23, [x0, #CTX_FP_Q22]
+	stp	q24, q25, [x0, #CTX_FP_Q24]
+	stp	q26, q27, [x0, #CTX_FP_Q26]
+	stp	q28, q29, [x0, #CTX_FP_Q28]
+	stp	q30, q31, [x0, #CTX_FP_Q30]
+
+	mrs	x9, fpsr
+	str	x9, [x0, #CTX_FP_FPSR]
+
+	mrs	x10, fpcr
+	str	x10, [x0, #CTX_FP_FPCR]
+
+	ret
+endfunc fpregs_context_save
+
+/* -----------------------------------------------------
+ * The following function follows the aapcs_64 strictly
+ * to use x9-x17 (temporary caller-saved registers
+ * according to AArch64 PCS) to restore floating point
+ * register context. It assumes that 'x0' is pointing to
+ * a 'fp_regs' structure from where the register context
+ * will be restored.
+ *
+ * Access to VFP registers will trap if CPTR_EL3.TFP is
+ * set. However currently we don't use VFP registers
+ * nor set traps in Trusted Firmware, and assume it's
+ * cleared
+ *
+ * TODO: Revisit when VFP is used in secure world
+ * -----------------------------------------------------
+ */
+func fpregs_context_restore
+	ldp	q0, q1, [x0, #CTX_FP_Q0]
+	ldp	q2, q3, [x0, #CTX_FP_Q2]
+	ldp	q4, q5, [x0, #CTX_FP_Q4]
+	ldp	q6, q7, [x0, #CTX_FP_Q6]
+	ldp	q8, q9, [x0, #CTX_FP_Q8]
+	ldp	q10, q11, [x0, #CTX_FP_Q10]
+	ldp	q12, q13, [x0, #CTX_FP_Q12]
+	ldp	q14, q15, [x0, #CTX_FP_Q14]
+	ldp	q16, q17, [x0, #CTX_FP_Q16]
+	ldp	q18, q19, [x0, #CTX_FP_Q18]
+	ldp	q20, q21, [x0, #CTX_FP_Q20]
+	ldp	q22, q23, [x0, #CTX_FP_Q22]
+	ldp	q24, q25, [x0, #CTX_FP_Q24]
+	ldp	q26, q27, [x0, #CTX_FP_Q26]
+	ldp	q28, q29, [x0, #CTX_FP_Q28]
+	ldp	q30, q31, [x0, #CTX_FP_Q30]
+
+	ldr	x9, [x0, #CTX_FP_FPSR]
+	msr	fpsr, x9
+
+	ldr	x10, [x0, #CTX_FP_FPCR]
+	msr	fpcr, x10
+
+	/*
+	 * No explicit ISB required here as ERET to
+	 * switch to secure EL1 or non-secure world
+	 * covers it
+	 */
+
+	ret
+endfunc fpregs_context_restore
+#endif /* CTX_INCLUDE_FPREGS */
+
+/* -----------------------------------------------------
+ * The following functions are used to save and restore
+ * all the general purpose registers. Ideally we would
+ * only save and restore the callee saved registers when
+ * a world switch occurs but that type of implementation
+ * is more complex. So currently we will always save and
+ * restore these registers on entry and exit of EL3.
+ * These are not macros to ensure their invocation fits
+ * within the 32 instructions per exception vector.
+ * clobbers: x18
+ * -----------------------------------------------------
+ */
+func save_gp_registers
+	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+	mrs	x18, sp_el0
+	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
+	ret
+endfunc save_gp_registers
+
+func restore_gp_registers_eret
+	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+	b	restore_gp_registers_callee_eret
+endfunc restore_gp_registers_eret
+
+func restore_gp_registers_callee_eret
+	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+	ldp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+	ldp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+	ldp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+	ldp	x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+	msr	sp_el0, x17
+	ldp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+	eret
+endfunc restore_gp_registers_callee_eret
+
+	/* -----------------------------------------------------
+	 * This routine assumes that the SP_EL3 is pointing to
+	 * a valid context structure from where the gp regs and
+	 * other special registers can be retrieved.
+	 * -----------------------------------------------------
+	 */
+func el3_exit
+	/* -----------------------------------------------------
+	 * Save the current SP_EL0 i.e. the EL3 runtime stack
+	 * which will be used for handling the next SMC. Then
+	 * switch to SP_EL3
+	 * -----------------------------------------------------
+	 */
+	mov	x17, sp
+	msr	spsel, #1
+	str	x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
+
+	/* -----------------------------------------------------
+	 * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET
+	 * -----------------------------------------------------
+	 */
+	ldr	x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
+	ldp	x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+	msr	scr_el3, x18
+	msr	spsr_el3, x16
+	msr	elr_el3, x17
+
+	/* Restore saved general purpose registers and return */
+	b	restore_gp_registers_eret
+endfunc el3_exit
diff --git a/common/aarch64/early_exceptions.S b/common/aarch64/early_exceptions.S
index 90f5421..780a38f 100644
--- a/common/aarch64/early_exceptions.S
+++ b/common/aarch64/early_exceptions.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
@@ -29,7 +29,7 @@
 */

 #include
-#include
+#include

 .globl early_exceptions
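Together, the routines above give a dispatcher everything a world switch needs: bank the outgoing world's EL1 system registers, load the incoming world's, and point SP_EL3 at the context that el3_exit should unwind. A hedged sketch of the usual pattern on the current CPU (enter_secure_world is illustrative; the cm_* wrappers are defined in the context management file that follows):

	static void enter_secure_world(void)
	{
		cm_el1_sysregs_context_save(NON_SECURE);   /* bank outgoing EL1 state */
		cm_el1_sysregs_context_restore(SECURE);    /* load secure EL1 state */
		cm_set_next_eret_context(SECURE);          /* SP_EL3 -> secure context */
		/* The exception return path now runs el3_exit, which restores
		 * the GP registers from the secure context and ERETs. */
	}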
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/******************************************************************************* + * Context management library initialisation routine. This library is used by + * runtime services to share pointers to 'cpu_context' structures for the secure + * and non-secure states. Management of the structures and their associated + * memory is not done by the context management library e.g. the PSCI service + * manages the cpu context used for entry from and exit to the non-secure state. + * The Secure payload dispatcher service manages the context(s) corresponding to + * the secure state. It also uses this library to get access to the non-secure + * state cpu context pointers. + * Lastly, this library provides the api to make SP_EL3 point to the cpu context + * which will used for programming an entry into a lower EL. The same context + * will used to save state upon exception entry from that EL. + ******************************************************************************/ +void cm_init(void) +{ + /* + * The context management library has only global data to intialize, but + * that will be done when the BSS is zeroed out + */ +} + +/******************************************************************************* + * The following function initializes the cpu_context 'ctx' for + * first use, and sets the initial entrypoint state as specified by the + * entry_point_info structure. + * + * The security state to initialize is determined by the SECURE attribute + * of the entry_point_info. The function returns a pointer to the initialized + * context and sets this as the next context to return to. + * + * The EE and ST attributes are used to configure the endianess and secure + * timer availability for the new execution context. + * + * To prepare the register state for entry call cm_prepare_el3_exit() and + * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to + * cm_e1_sysreg_context_restore(). + ******************************************************************************/ +static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep) +{ + unsigned int security_state; + uint32_t scr_el3; + el3_state_t *state; + gp_regs_t *gp_regs; + unsigned long sctlr_elx; + + assert(ctx); + + security_state = GET_SECURITY_STATE(ep->h.attr); + + /* Clear any residual register values from the context */ + memset(ctx, 0, sizeof(*ctx)); + + /* + * Base the context SCR on the current value, adjust for entry point + * specific requirements and set trap bits from the IMF + * TODO: provide the base/global SCR bits using another mechanism? 
+ */ + scr_el3 = read_scr(); + scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT | + SCR_ST_BIT | SCR_HCE_BIT); + + if (security_state != SECURE) + scr_el3 |= SCR_NS_BIT; + + if (GET_RW(ep->spsr) == MODE_RW_64) + scr_el3 |= SCR_RW_BIT; + + if (EP_GET_ST(ep->h.attr)) + scr_el3 |= SCR_ST_BIT; + +#if IMAGE_BL31 + /* + * IRQ/FIQ bits only need setting if interrupt routing + * model has been set up for BL31. + */ + scr_el3 |= get_scr_el3_from_routing_model(security_state); +#endif + + /* + * Set up SCTLR_ELx for the target exception level: + * EE bit is taken from the entrpoint attributes + * M, C and I bits must be zero (as required by PSCI specification) + * + * The target exception level is based on the spsr mode requested. + * If execution is requested to EL2 or hyp mode, HVC is enabled + * via SCR_EL3.HCE. + * + * Always compute the SCTLR_EL1 value and save in the cpu_context + * - the EL2 registers are set up by cm_preapre_ns_entry() as they + * are not part of the stored cpu_context + * + * TODO: In debug builds the spsr should be validated and checked + * against the CPU support, security state, endianess and pc + */ + sctlr_elx = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0; + if (GET_RW(ep->spsr) == MODE_RW_64) + sctlr_elx |= SCTLR_EL1_RES1; + else + sctlr_elx |= SCTLR_AARCH32_EL1_RES1; + write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx); + + if ((GET_RW(ep->spsr) == MODE_RW_64 + && GET_EL(ep->spsr) == MODE_EL2) + || (GET_RW(ep->spsr) != MODE_RW_64 + && GET_M32(ep->spsr) == MODE32_hyp)) { + scr_el3 |= SCR_HCE_BIT; + } + + /* Populate EL3 state so that we've the right context before doing ERET */ + state = get_el3state_ctx(ctx); + write_ctx_reg(state, CTX_SCR_EL3, scr_el3); + write_ctx_reg(state, CTX_ELR_EL3, ep->pc); + write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr); + + /* + * Store the X0-X7 value from the entrypoint into the context + * Use memcpy as we are in control of the layout of the structures + */ + gp_regs = get_gpregs_ctx(ctx); + memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t)); +} + +/******************************************************************************* + * The following function initializes the cpu_context for a CPU specified by + * its `cpu_idx` for first use, and sets the initial entrypoint state as + * specified by the entry_point_info structure. + ******************************************************************************/ +void cm_init_context_by_index(unsigned int cpu_idx, + const entry_point_info_t *ep) +{ + cpu_context_t *ctx; + ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr)); + cm_init_context_common(ctx, ep); +} + +/******************************************************************************* + * The following function initializes the cpu_context for the current CPU + * for first use, and sets the initial entrypoint state as specified by the + * entry_point_info structure. 
+/******************************************************************************* + * The following function initializes the cpu_context for the current CPU + * for first use, and sets the initial entrypoint state as specified by the + * entry_point_info structure. + ******************************************************************************/ +void cm_init_my_context(const entry_point_info_t *ep) +{ + cpu_context_t *ctx; + ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr)); + cm_init_context_common(ctx, ep); +} + +/******************************************************************************* + * Prepare the CPU system registers for first entry into secure or normal world + * + * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized + * If execution is requested to non-secure EL1 or svc mode, and the CPU supports + * EL2, then EL2 is disabled by configuring all necessary EL2 registers. + * For all entries, the EL1 registers are initialized from the cpu_context + ******************************************************************************/ +void cm_prepare_el3_exit(uint32_t security_state) +{ + uint32_t sctlr_elx, scr_el3, cptr_el2; + cpu_context_t *ctx = cm_get_context(security_state); + + assert(ctx); + + if (security_state == NON_SECURE) { + scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3); + if (scr_el3 & SCR_HCE_BIT) { + /* Use SCTLR_EL1.EE value to initialise sctlr_el2 */ + sctlr_elx = read_ctx_reg(get_sysregs_ctx(ctx), + CTX_SCTLR_EL1); + sctlr_elx &= ~SCTLR_EE_BIT; + sctlr_elx |= SCTLR_EL2_RES1; + write_sctlr_el2(sctlr_elx); + } else if (read_id_aa64pfr0_el1() & + (ID_AA64PFR0_ELX_MASK << ID_AA64PFR0_EL2_SHIFT)) { + /* EL2 present but unused, need to disable safely */ + + /* HCR_EL2 = 0, except RW bit set to match SCR_EL3 */ + write_hcr_el2((scr_el3 & SCR_RW_BIT) ? HCR_RW_BIT : 0); + + /* SCTLR_EL2 : can be ignored when bypassing */ + + /* CPTR_EL2 : disable all traps TCPAC, TTA, TFP */ + cptr_el2 = read_cptr_el2(); + cptr_el2 &= ~(TCPAC_BIT | TTA_BIT | TFP_BIT); + write_cptr_el2(cptr_el2); + + /* Enable EL1 access to timer */ + write_cnthctl_el2(EL1PCEN_BIT | EL1PCTEN_BIT); + + /* Reset CNTVOFF_EL2 */ + write_cntvoff_el2(0); + + /* Set VPIDR, VMPIDR to match MIDR, MPIDR */ + write_vpidr_el2(read_midr_el1()); + write_vmpidr_el2(read_mpidr_el1()); + + /* + * Reset VTTBR_EL2. + * Needed because cache maintenance operations depend on + * the VMID even when non-secure EL1&0 stage 2 address + * translation is disabled. + */ + write_vttbr_el2(0); + } + } + + el1_sysregs_context_restore(get_sysregs_ctx(ctx)); + + cm_set_next_context(ctx); +}
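As a usage sketch, this is roughly how a BL31-style image would combine the two functions above when leaving EL3 for the first time. next_image_info() is a hypothetical helper standing in for whatever returns the entry_point_info of the image to run next.

	static void example_first_exit_to_normal_world(void)
	{
		const entry_point_info_t *ep = next_image_info(); /* hypothetical */

		assert(GET_SECURITY_STATE(ep->h.attr) == NON_SECURE);

		/* Fill this CPU's non-secure context from the entrypoint info */
		cm_init_my_context(ep);

		/*
		 * Program the EL1 (and, if needed, EL2) system registers and make
		 * SP_EL3 point at the context; el3_exit() then performs the ERET.
		 */
		cm_prepare_el3_exit(NON_SECURE);
	}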
+/******************************************************************************* + * The next two functions are used by runtime services to save and restore + * EL1 context on the 'cpu_context' structure for the specified security + * state. + ******************************************************************************/ +void cm_el1_sysregs_context_save(uint32_t security_state) +{ + cpu_context_t *ctx; + + ctx = cm_get_context(security_state); + assert(ctx); + + el1_sysregs_context_save(get_sysregs_ctx(ctx)); +} + +void cm_el1_sysregs_context_restore(uint32_t security_state) +{ + cpu_context_t *ctx; + + ctx = cm_get_context(security_state); + assert(ctx); + + el1_sysregs_context_restore(get_sysregs_ctx(ctx)); +} + +/******************************************************************************* + * This function populates the ELR_EL3 member of 'cpu_context' pertaining to + * the given security state with the given entrypoint + ******************************************************************************/ +void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint) +{ + cpu_context_t *ctx; + el3_state_t *state; + + ctx = cm_get_context(security_state); + assert(ctx); + + /* Populate EL3 state so that ERET jumps to the correct entry */ + state = get_el3state_ctx(ctx); + write_ctx_reg(state, CTX_ELR_EL3, entrypoint); +} + +/******************************************************************************* + * This function populates the ELR_EL3 and SPSR_EL3 members of 'cpu_context' + * pertaining to the given security state + ******************************************************************************/ +void cm_set_elr_spsr_el3(uint32_t security_state, + uint64_t entrypoint, uint32_t spsr) +{ + cpu_context_t *ctx; + el3_state_t *state; + + ctx = cm_get_context(security_state); + assert(ctx); + + /* Populate EL3 state so that ERET jumps to the correct entry */ + state = get_el3state_ctx(ctx); + write_ctx_reg(state, CTX_ELR_EL3, entrypoint); + write_ctx_reg(state, CTX_SPSR_EL3, spsr); +} + +/******************************************************************************* + * This function updates a single bit in the SCR_EL3 member of the 'cpu_context' + * pertaining to the given security state using the value and bit position + * specified in the parameters. It preserves all other bits. + ******************************************************************************/ +void cm_write_scr_el3_bit(uint32_t security_state, + uint32_t bit_pos, + uint32_t value) +{ + cpu_context_t *ctx; + el3_state_t *state; + uint32_t scr_el3; + + ctx = cm_get_context(security_state); + assert(ctx); + + /* Ensure that the bit position is a valid one */ + assert((1 << bit_pos) & SCR_VALID_BIT_MASK); + + /* Ensure that the 'value' is only one bit wide */ + assert(value <= 1); + + /* + * Get the SCR_EL3 value from the cpu context, clear the desired bit + * and set it to its new value. + */ + state = get_el3state_ctx(ctx); + scr_el3 = read_ctx_reg(state, CTX_SCR_EL3); + scr_el3 &= ~(1 << bit_pos); + scr_el3 |= value << bit_pos; + write_ctx_reg(state, CTX_SCR_EL3, scr_el3); +}
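For illustration, an interrupt framework client could use cm_write_scr_el3_bit() like this to change how FIQs taken from the non-secure world are routed. The FIQ bit position (2) follows the SCR_EL3 layout in the ARMv8 architecture, but treat the literal here as an assumption rather than a constant exported by this patch.

	/* Route NS-world FIQs to EL3 by setting SCR_EL3.FIQ in the stored context */
	static void example_route_ns_fiq_to_el3(void)
	{
		cm_write_scr_el3_bit(NON_SECURE, 2 /* SCR_EL3.FIQ bit position, assumed */, 1);
	}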
+/******************************************************************************* + * This function retrieves the SCR_EL3 member of 'cpu_context' pertaining to + * the given security state. + ******************************************************************************/ +uint32_t cm_get_scr_el3(uint32_t security_state) +{ + cpu_context_t *ctx; + el3_state_t *state; + + ctx = cm_get_context(security_state); + assert(ctx); + + /* Read the SCR_EL3 value stored in the cpu context */ + state = get_el3state_ctx(ctx); + return read_ctx_reg(state, CTX_SCR_EL3); +} + +/******************************************************************************* + * This function is used to program the context that's used for exception + * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for + * the required security state + ******************************************************************************/ +void cm_set_next_eret_context(uint32_t security_state) +{ + cpu_context_t *ctx; + + ctx = cm_get_context(security_state); + assert(ctx); + + cm_set_next_context(ctx); +}
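Putting the save/restore and ERET-context calls together, a secure payload dispatcher would typically perform a world switch along these lines. This is a hedged sketch using only the functions defined above; error handling and the el3_exit() assembly plumbing are omitted.

	/* On an SMC from the normal world that must run secure-world code */
	static void *example_switch_to_secure_world(void)
	{
		/* Preserve the non-secure caller's EL1 system registers */
		cm_el1_sysregs_context_save(NON_SECURE);

		/* Restore the secure world's EL1 state and target its context */
		cm_el1_sysregs_context_restore(SECURE);
		cm_set_next_eret_context(SECURE);

		/* The SMC handler returns this handle; el3_exit() then ERETs */
		return cm_get_context(SECURE);
	}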
diff --git a/include/bl31/context.h b/include/bl31/context.h deleted file mode 100644 index 0dfebe0..0000000 --- a/include/bl31/context.h +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __CONTEXT_H__ -#define __CONTEXT_H__ - -/******************************************************************************* - * Constants that allow assembler code to access members of and the 'gp_regs' - * structure at their correct offsets. - ******************************************************************************/ -#define CTX_GPREGS_OFFSET 0x0 -#define CTX_GPREG_X0 0x0 -#define CTX_GPREG_X1 0x8 -#define CTX_GPREG_X2 0x10 -#define CTX_GPREG_X3 0x18 -#define CTX_GPREG_X4 0x20 -#define CTX_GPREG_X5 0x28 -#define CTX_GPREG_X6 0x30 -#define CTX_GPREG_X7 0x38 -#define CTX_GPREG_X8 0x40 -#define CTX_GPREG_X9 0x48 -#define CTX_GPREG_X10 0x50 -#define CTX_GPREG_X11 0x58 -#define CTX_GPREG_X12 0x60 -#define CTX_GPREG_X13 0x68 -#define CTX_GPREG_X14 0x70 -#define CTX_GPREG_X15 0x78 -#define CTX_GPREG_X16 0x80 -#define CTX_GPREG_X17 0x88 -#define CTX_GPREG_X18 0x90 -#define CTX_GPREG_X19 0x98 -#define CTX_GPREG_X20 0xa0 -#define CTX_GPREG_X21 0xa8 -#define CTX_GPREG_X22 0xb0 -#define CTX_GPREG_X23 0xb8 -#define CTX_GPREG_X24 0xc0 -#define CTX_GPREG_X25 0xc8 -#define CTX_GPREG_X26 0xd0 -#define CTX_GPREG_X27 0xd8 -#define CTX_GPREG_X28 0xe0 -#define CTX_GPREG_X29 0xe8 -#define CTX_GPREG_LR 0xf0 -#define CTX_GPREG_SP_EL0 0xf8 -#define CTX_GPREGS_END 0x100 - -/******************************************************************************* - * Constants that allow assembler code to access members of and the 'el3_state' - * structure at their correct offsets. Note that some of the registers are only - * 32-bits wide but are stored as 64-bit values for convenience - ******************************************************************************/ -#define CTX_EL3STATE_OFFSET (CTX_GPREGS_OFFSET + CTX_GPREGS_END) -#define CTX_SCR_EL3 0x0 -#define CTX_RUNTIME_SP 0x8 -#define CTX_SPSR_EL3 0x10 -#define CTX_ELR_EL3 0x18 -#define CTX_EL3STATE_END 0x20 - -/******************************************************************************* - * Constants that allow assembler code to access members of and the - * 'el1_sys_regs' structure at their correct offsets. Note that some of the - * registers are only 32-bits wide but are stored as 64-bit values for - * convenience - ******************************************************************************/ -#define CTX_SYSREGS_OFFSET (CTX_EL3STATE_OFFSET + CTX_EL3STATE_END) -#define CTX_SPSR_EL1 0x0 -#define CTX_ELR_EL1 0x8 -#define CTX_SPSR_ABT 0x10 -#define CTX_SPSR_UND 0x18 -#define CTX_SPSR_IRQ 0x20 -#define CTX_SPSR_FIQ 0x28 -#define CTX_SCTLR_EL1 0x30 -#define CTX_ACTLR_EL1 0x38 -#define CTX_CPACR_EL1 0x40 -#define CTX_CSSELR_EL1 0x48 -#define CTX_SP_EL1 0x50 -#define CTX_ESR_EL1 0x58 -#define CTX_TTBR0_EL1 0x60 -#define CTX_TTBR1_EL1 0x68 -#define CTX_MAIR_EL1 0x70 -#define CTX_AMAIR_EL1 0x78 -#define CTX_TCR_EL1 0x80 -#define CTX_TPIDR_EL1 0x88 -#define CTX_TPIDR_EL0 0x90 -#define CTX_TPIDRRO_EL0 0x98 -#define CTX_DACR32_EL2 0xa0 -#define CTX_IFSR32_EL2 0xa8 -#define CTX_PAR_EL1 0xb0 -#define CTX_FAR_EL1 0xb8 -#define CTX_AFSR0_EL1 0xc0 -#define CTX_AFSR1_EL1 0xc8 -#define CTX_CONTEXTIDR_EL1 0xd0 -#define CTX_VBAR_EL1 0xd8 -/* - * If the timer registers aren't saved and restored, we don't have to reserve - * space for them in the context - */ -#if NS_TIMER_SWITCH -#define CTX_CNTP_CTL_EL0 0xe0 -#define CTX_CNTP_CVAL_EL0 0xe8 -#define CTX_CNTV_CTL_EL0 0xf0 -#define CTX_CNTV_CVAL_EL0 0xf8 -#define CTX_CNTKCTL_EL1 0x100 -#define CTX_FP_FPEXC32_EL2 0x108 -#define CTX_SYSREGS_END 0x110 -#else -#define CTX_FP_FPEXC32_EL2 0xe0 -#define CTX_SYSREGS_END 0xf0 -#endif - -/******************************************************************************* - * Constants that allow assembler code to access members of and the 'fp_regs' - * structure at their correct offsets.
- ******************************************************************************/ -#if CTX_INCLUDE_FPREGS -#define CTX_FPREGS_OFFSET (CTX_SYSREGS_OFFSET + CTX_SYSREGS_END) -#define CTX_FP_Q0 0x0 -#define CTX_FP_Q1 0x10 -#define CTX_FP_Q2 0x20 -#define CTX_FP_Q3 0x30 -#define CTX_FP_Q4 0x40 -#define CTX_FP_Q5 0x50 -#define CTX_FP_Q6 0x60 -#define CTX_FP_Q7 0x70 -#define CTX_FP_Q8 0x80 -#define CTX_FP_Q9 0x90 -#define CTX_FP_Q10 0xa0 -#define CTX_FP_Q11 0xb0 -#define CTX_FP_Q12 0xc0 -#define CTX_FP_Q13 0xd0 -#define CTX_FP_Q14 0xe0 -#define CTX_FP_Q15 0xf0 -#define CTX_FP_Q16 0x100 -#define CTX_FP_Q17 0x110 -#define CTX_FP_Q18 0x120 -#define CTX_FP_Q19 0x130 -#define CTX_FP_Q20 0x140 -#define CTX_FP_Q21 0x150 -#define CTX_FP_Q22 0x160 -#define CTX_FP_Q23 0x170 -#define CTX_FP_Q24 0x180 -#define CTX_FP_Q25 0x190 -#define CTX_FP_Q26 0x1a0 -#define CTX_FP_Q27 0x1b0 -#define CTX_FP_Q28 0x1c0 -#define CTX_FP_Q29 0x1d0 -#define CTX_FP_Q30 0x1e0 -#define CTX_FP_Q31 0x1f0 -#define CTX_FP_FPSR 0x200 -#define CTX_FP_FPCR 0x208 -#define CTX_FPREGS_END 0x210 -#endif - -#ifndef __ASSEMBLY__ - -#include -#include /* for CACHE_WRITEBACK_GRANULE */ -#include - -/* - * Common constants to help define the 'cpu_context' structure and its - * members below. - */ -#define DWORD_SHIFT 3 -#define DEFINE_REG_STRUCT(name, num_regs) \ - typedef struct name { \ - uint64_t _regs[num_regs]; \ - } __aligned(16) name##_t - -/* Constants to determine the size of individual context structures */ -#define CTX_GPREG_ALL (CTX_GPREGS_END >> DWORD_SHIFT) -#define CTX_SYSREG_ALL (CTX_SYSREGS_END >> DWORD_SHIFT) -#if CTX_INCLUDE_FPREGS -#define CTX_FPREG_ALL (CTX_FPREGS_END >> DWORD_SHIFT) -#endif -#define CTX_EL3STATE_ALL (CTX_EL3STATE_END >> DWORD_SHIFT) - -/* - * AArch64 general purpose register context structure. Usually x0-x18, - * lr are saved as the compiler is expected to preserve the remaining - * callee saved registers if used by the C runtime and the assembler - * does not touch the remaining. But in case of world switch during - * exception handling, we need to save the callee registers too. - */ -DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL); - -/* - * AArch64 EL1 system register context structure for preserving the - * architectural state during switches from one security state to - * another in EL1. - */ -DEFINE_REG_STRUCT(el1_sys_regs, CTX_SYSREG_ALL); - -/* - * AArch64 floating point register context structure for preserving - * the floating point state during switches from one security state to - * another. - */ -#if CTX_INCLUDE_FPREGS -DEFINE_REG_STRUCT(fp_regs, CTX_FPREG_ALL); -#endif - -/* - * Miscellaneous registers used by EL3 firmware to maintain its state - * across exception entries and exits - */ -DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL); - -/* - * Macros to access members of any of the above structures using their - * offsets - */ -#define read_ctx_reg(ctx, offset) ((ctx)->_regs[offset >> DWORD_SHIFT]) -#define write_ctx_reg(ctx, offset, val) (((ctx)->_regs[offset >> DWORD_SHIFT]) \ - = val) - -/* - * Top-level context structure which is used by EL3 firmware to - * preserve the state of a core at EL1 in one of the two security - * states and save enough EL3 meta data to be able to return to that - * EL and security state. The context management library will be used - * to ensure that SP_EL3 always points to an instance of this - * structure at exception entry and exit. Each instance will - * correspond to either the secure or the non-secure state. 
- */ -typedef struct cpu_context { - gp_regs_t gpregs_ctx; - el3_state_t el3state_ctx; - el1_sys_regs_t sysregs_ctx; -#if CTX_INCLUDE_FPREGS - fp_regs_t fpregs_ctx; -#endif -} cpu_context_t; - -/* Macros to access members of the 'cpu_context_t' structure */ -#define get_el3state_ctx(h) (&((cpu_context_t *) h)->el3state_ctx) -#if CTX_INCLUDE_FPREGS -#define get_fpregs_ctx(h) (&((cpu_context_t *) h)->fpregs_ctx) -#endif -#define get_sysregs_ctx(h) (&((cpu_context_t *) h)->sysregs_ctx) -#define get_gpregs_ctx(h) (&((cpu_context_t *) h)->gpregs_ctx) - -/* - * Compile time assertions related to the 'cpu_context' structure to - * ensure that the assembler and the compiler view of the offsets of - * the structure members is the same. - */ -CASSERT(CTX_GPREGS_OFFSET == __builtin_offsetof(cpu_context_t, gpregs_ctx), \ - assert_core_context_gp_offset_mismatch); -CASSERT(CTX_SYSREGS_OFFSET == __builtin_offsetof(cpu_context_t, sysregs_ctx), \ - assert_core_context_sys_offset_mismatch); -#if CTX_INCLUDE_FPREGS -CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context_t, fpregs_ctx), \ - assert_core_context_fp_offset_mismatch); -#endif -CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context_t, el3state_ctx), \ - assert_core_context_el3state_offset_mismatch); - -/* - * Helper macro to set the general purpose registers that correspond to - * parameters in an aapcs_64 call i.e. x0-x7 - */ -#define set_aapcs_args0(ctx, x0) do { \ - write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0, x0); \ - } while (0); -#define set_aapcs_args1(ctx, x0, x1) do { \ - write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X1, x1); \ - set_aapcs_args0(ctx, x0); \ - } while (0); -#define set_aapcs_args2(ctx, x0, x1, x2) do { \ - write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X2, x2); \ - set_aapcs_args1(ctx, x0, x1); \ - } while (0); -#define set_aapcs_args3(ctx, x0, x1, x2, x3) do { \ - write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X3, x3); \ - set_aapcs_args2(ctx, x0, x1, x2); \ - } while (0); -#define set_aapcs_args4(ctx, x0, x1, x2, x3, x4) do { \ - write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X4, x4); \ - set_aapcs_args3(ctx, x0, x1, x2, x3); \ - } while (0); -#define set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5) do { \ - write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X5, x5); \ - set_aapcs_args4(ctx, x0, x1, x2, x3, x4); \ - } while (0); -#define set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6) do { \ - write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X6, x6); \ - set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5); \ - } while (0); -#define set_aapcs_args7(ctx, x0, x1, x2, x3, x4, x5, x6, x7) do { \ - write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X7, x7); \ - set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6); \ - } while (0); - -/******************************************************************************* - * Function prototypes - ******************************************************************************/ -void el1_sysregs_context_save(el1_sys_regs_t *regs); -void el1_sysregs_context_restore(el1_sys_regs_t *regs); -#if CTX_INCLUDE_FPREGS -void fpregs_context_save(fp_regs_t *regs); -void fpregs_context_restore(fp_regs_t *regs); -#endif - - -#undef CTX_SYSREG_ALL -#if CTX_INCLUDE_FPREGS -#undef CTX_FPREG_ALL -#endif -#undef CTX_GPREG_ALL -#undef CTX_EL3STATE_ALL - -#endif /* __ASSEMBLY__ */ - -#endif /* __CONTEXT_H__ */ diff --git a/include/bl31/context_mgmt.h b/include/bl31/context_mgmt.h deleted file mode 100644 index 1ef4076..0000000 --- a/include/bl31/context_mgmt.h +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright (c) 2013-2015, 
ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __CM_H__ -#define __CM_H__ - -#include -#include -#include - -/******************************************************************************* - * Forward declarations - ******************************************************************************/ -struct entry_point_info; - -/******************************************************************************* - * Function & variable prototypes - ******************************************************************************/ -void cm_init(void); -void *cm_get_context_by_mpidr(uint64_t mpidr, - uint32_t security_state) __warn_deprecated; -static inline void *cm_get_context(uint32_t security_state); -void cm_set_context_by_mpidr(uint64_t mpidr, - void *context, - uint32_t security_state) __warn_deprecated; -void *cm_get_context_by_index(unsigned int cpu_idx, - unsigned int security_state); -void cm_set_context_by_index(unsigned int cpu_idx, - void *context, - unsigned int security_state); -static inline void cm_set_context(void *context, uint32_t security_state); -void cm_init_context(uint64_t mpidr, - const struct entry_point_info *ep) __warn_deprecated; -void cm_init_my_context(const struct entry_point_info *ep); -void cm_init_context_by_index(unsigned int cpu_idx, - const struct entry_point_info *ep); -void cm_prepare_el3_exit(uint32_t security_state); -void cm_el1_sysregs_context_save(uint32_t security_state); -void cm_el1_sysregs_context_restore(uint32_t security_state); -void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint); -void cm_set_elr_spsr_el3(uint32_t security_state, - uint64_t entrypoint, uint32_t spsr); -void cm_write_scr_el3_bit(uint32_t security_state, - uint32_t bit_pos, - uint32_t value); -void cm_set_next_eret_context(uint32_t security_state); -uint32_t cm_get_scr_el3(uint32_t security_state); - -/* Inline definitions */ - -/******************************************************************************* - * This function returns a pointer to the most recent 'cpu_context' 
structure - * for the calling CPU that was set as the context for the specified security - * state. NULL is returned if no such structure has been specified. - ******************************************************************************/ -void *cm_get_context(uint32_t security_state) -{ - assert(security_state <= NON_SECURE); - - return get_cpu_data(cpu_context[security_state]); -} - -/******************************************************************************* - * This function sets the pointer to the current 'cpu_context' structure for the - * specified security state for the calling CPU - ******************************************************************************/ -void cm_set_context(void *context, uint32_t security_state) -{ - assert(security_state <= NON_SECURE); - - set_cpu_data(cpu_context[security_state], context); -} - - -#endif /* __CM_H__ */ diff --git a/include/bl31/runtime_svc.h b/include/bl31/runtime_svc.h index f112418..30ba29f 100644 --- a/include/bl31/runtime_svc.h +++ b/include/bl31/runtime_svc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -31,74 +31,9 @@ #ifndef __RUNTIME_SVC_H__ #define __RUNTIME_SVC_H__ -/******************************************************************************* - * Bit definitions inside the function id as per the SMC calling convention - ******************************************************************************/ -#define FUNCID_TYPE_SHIFT 31 -#define FUNCID_CC_SHIFT 30 -#define FUNCID_OEN_SHIFT 24 -#define FUNCID_NUM_SHIFT 0 +#include /* to include exception types */ +#include /* to include SMCC definitions */ -#define FUNCID_TYPE_MASK 0x1 -#define FUNCID_CC_MASK 0x1 -#define FUNCID_OEN_MASK 0x3f -#define FUNCID_NUM_MASK 0xffff - -#define FUNCID_TYPE_WIDTH 1 -#define FUNCID_CC_WIDTH 1 -#define FUNCID_OEN_WIDTH 6 -#define FUNCID_NUM_WIDTH 16 - -#define GET_SMC_CC(id) ((id >> FUNCID_CC_SHIFT) & \ - FUNCID_CC_MASK) -#define GET_SMC_TYPE(id) ((id >> FUNCID_TYPE_SHIFT) & \ - FUNCID_TYPE_MASK) - -#define SMC_64 1 -#define SMC_32 0 -#define SMC_UNK 0xffffffff -#define SMC_TYPE_FAST 1 -#define SMC_TYPE_STD 0 -#define SMC_PREEMPTED 0xfffffffe -/******************************************************************************* - * Owning entity number definitions inside the function id as per the SMC - * calling convention - ******************************************************************************/ -#define OEN_ARM_START 0 -#define OEN_ARM_END 0 -#define OEN_CPU_START 1 -#define OEN_CPU_END 1 -#define OEN_SIP_START 2 -#define OEN_SIP_END 2 -#define OEN_OEM_START 3 -#define OEN_OEM_END 3 -#define OEN_STD_START 4 /* Standard Calls */ -#define OEN_STD_END 4 -#define OEN_TAP_START 48 /* Trusted Applications */ -#define OEN_TAP_END 49 -#define OEN_TOS_START 50 /* Trusted OS */ -#define OEN_TOS_END 63 -#define OEN_LIMIT 64 - -/******************************************************************************* - * Constants to indicate type of exception to the common exception handler. 
- ******************************************************************************/ -#define SYNC_EXCEPTION_SP_EL0 0x0 -#define IRQ_SP_EL0 0x1 -#define FIQ_SP_EL0 0x2 -#define SERROR_SP_EL0 0x3 -#define SYNC_EXCEPTION_SP_ELX 0x4 -#define IRQ_SP_ELX 0x5 -#define FIQ_SP_ELX 0x6 -#define SERROR_SP_ELX 0x7 -#define SYNC_EXCEPTION_AARCH64 0x8 -#define IRQ_AARCH64 0x9 -#define FIQ_AARCH64 0xa -#define SERROR_AARCH64 0xb -#define SYNC_EXCEPTION_AARCH32 0xc -#define IRQ_AARCH32 0xd -#define FIQ_AARCH32 0xe -#define SERROR_AARCH32 0xf /******************************************************************************* * Structure definition, typedefs & constants for the runtime service framework @@ -122,68 +57,9 @@ #ifndef __ASSEMBLY__ -#include -#include -#include - -/* Various flags passed to SMC handlers */ -#define SMC_FROM_SECURE (0 << 0) -#define SMC_FROM_NON_SECURE (1 << 0) - -#define is_caller_non_secure(_f) (!!(_f & SMC_FROM_NON_SECURE)) -#define is_caller_secure(_f) (!(is_caller_non_secure(_f))) - /* Prototype for runtime service initializing function */ typedef int32_t (*rt_svc_init_t)(void); -/* Convenience macros to return from SMC handler */ -#define SMC_RET0(_h) { \ - return (uint64_t) (_h); \ -} -#define SMC_RET1(_h, _x0) { \ - write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X0, (_x0)); \ - SMC_RET0(_h); \ -} -#define SMC_RET2(_h, _x0, _x1) { \ - write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X1, (_x1)); \ - SMC_RET1(_h, (_x0)); \ -} -#define SMC_RET3(_h, _x0, _x1, _x2) { \ - write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X2, (_x2)); \ - SMC_RET2(_h, (_x0), (_x1)); \ -} -#define SMC_RET4(_h, _x0, _x1, _x2, _x3) { \ - write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X3, (_x3)); \ - SMC_RET3(_h, (_x0), (_x1), (_x2)); \ -} - - -/* - * Convenience macros to access general purpose registers using handle provided - * to SMC handler. These takes the offset values defined in context.h - */ -#define SMC_GET_GP(_h, _g) \ - read_ctx_reg(get_gpregs_ctx(_h), (_g)); -#define SMC_SET_GP(_h, _g, _v) \ - write_ctx_reg(get_gpregs_ctx(_h), (_g), (_v)); - -/* - * Convenience macros to access EL3 context registers using handle provided to - * SMC handler. These takes the offset values defined in context.h - */ -#define SMC_GET_EL3(_h, _e) \ - read_ctx_reg(get_el3state_ctx(_h), (_e)); -#define SMC_SET_EL3(_h, _e, _v) \ - write_ctx_reg(get_el3state_ctx(_h), (_e), (_v)); - -/* The macro below is used to identify a Standard Service SMC call */ -#define is_std_svc_call(_fid) ((((_fid) >> FUNCID_OEN_SHIFT) & \ - FUNCID_OEN_MASK) == OEN_STD_START) - -/* The macro below is used to identify a valid Fast SMC call */ -#define is_valid_fast_smc(_fid) ((!(((_fid) >> 16) & 0xff)) && \ - (GET_SMC_TYPE(_fid) == SMC_TYPE_FAST)) - /* * Prototype for runtime service SMC handler function. x0 (SMC Function ID) to * x4 are as passed by the caller. Rest of the arguments to SMC and the context @@ -247,28 +123,6 @@ ((call_type & FUNCID_TYPE_MASK) \ << FUNCID_OEN_WIDTH)) - -/* - * Macro to define UUID for services. Apart from defining and initializing a - * uuid_t structure, this macro verifies that the first word of the defined UUID - * does not equal SMC_UNK. 
This is to ensure that the caller won't mistake the - * returned UUID in x0 for an invalid SMC error return - */ -#define DEFINE_SVC_UUID(_name, _tl, _tm, _th, _cl, _ch, \ - _n0, _n1, _n2, _n3, _n4, _n5) \ - CASSERT(_tl != SMC_UNK, invalid_svc_uuid);\ - static const uuid_t _name = { \ - _tl, _tm, _th, _cl, _ch, \ - { _n0, _n1, _n2, _n3, _n4, _n5 } \ - } - -/* Return a UUID in the SMC return registers */ -#define SMC_UUID_RET(_h, _uuid) \ - SMC_RET4(handle, ((const uint32_t *) &(_uuid))[0], \ - ((const uint32_t *) &(_uuid))[1], \ - ((const uint32_t *) &(_uuid))[2], \ - ((const uint32_t *) &(_uuid))[3]) - /******************************************************************************* * Function & variable prototypes ******************************************************************************/ diff --git a/include/common/bl_common.h b/include/common/bl_common.h index 0eec989..d8741c9 100644 --- a/include/common/bl_common.h +++ b/include/common/bl_common.h @@ -89,6 +89,26 @@ (_p)->h.attr = (uint32_t)(_attr) ; \ } while (0) +/******************************************************************************* + * Constants to indicate type of exception to the common exception handler. + ******************************************************************************/ +#define SYNC_EXCEPTION_SP_EL0 0x0 +#define IRQ_SP_EL0 0x1 +#define FIQ_SP_EL0 0x2 +#define SERROR_SP_EL0 0x3 +#define SYNC_EXCEPTION_SP_ELX 0x4 +#define IRQ_SP_ELX 0x5 +#define FIQ_SP_ELX 0x6 +#define SERROR_SP_ELX 0x7 +#define SYNC_EXCEPTION_AARCH64 0x8 +#define IRQ_AARCH64 0x9 +#define FIQ_AARCH64 0xa +#define SERROR_AARCH64 0xb +#define SYNC_EXCEPTION_AARCH32 0xc +#define IRQ_AARCH32 0xd +#define FIQ_AARCH32 0xe +#define SERROR_AARCH32 0xf + #ifndef __ASSEMBLY__ #include /* For __dead2 */ #include diff --git a/include/common/context.h b/include/common/context.h new file mode 100644 index 0000000..0dfebe0 --- /dev/null +++ b/include/common/context.h @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __CONTEXT_H__ +#define __CONTEXT_H__ + +/******************************************************************************* + * Constants that allow assembler code to access members of and the 'gp_regs' + * structure at their correct offsets. + ******************************************************************************/ +#define CTX_GPREGS_OFFSET 0x0 +#define CTX_GPREG_X0 0x0 +#define CTX_GPREG_X1 0x8 +#define CTX_GPREG_X2 0x10 +#define CTX_GPREG_X3 0x18 +#define CTX_GPREG_X4 0x20 +#define CTX_GPREG_X5 0x28 +#define CTX_GPREG_X6 0x30 +#define CTX_GPREG_X7 0x38 +#define CTX_GPREG_X8 0x40 +#define CTX_GPREG_X9 0x48 +#define CTX_GPREG_X10 0x50 +#define CTX_GPREG_X11 0x58 +#define CTX_GPREG_X12 0x60 +#define CTX_GPREG_X13 0x68 +#define CTX_GPREG_X14 0x70 +#define CTX_GPREG_X15 0x78 +#define CTX_GPREG_X16 0x80 +#define CTX_GPREG_X17 0x88 +#define CTX_GPREG_X18 0x90 +#define CTX_GPREG_X19 0x98 +#define CTX_GPREG_X20 0xa0 +#define CTX_GPREG_X21 0xa8 +#define CTX_GPREG_X22 0xb0 +#define CTX_GPREG_X23 0xb8 +#define CTX_GPREG_X24 0xc0 +#define CTX_GPREG_X25 0xc8 +#define CTX_GPREG_X26 0xd0 +#define CTX_GPREG_X27 0xd8 +#define CTX_GPREG_X28 0xe0 +#define CTX_GPREG_X29 0xe8 +#define CTX_GPREG_LR 0xf0 +#define CTX_GPREG_SP_EL0 0xf8 +#define CTX_GPREGS_END 0x100 + +/******************************************************************************* + * Constants that allow assembler code to access members of and the 'el3_state' + * structure at their correct offsets. Note that some of the registers are only + * 32-bits wide but are stored as 64-bit values for convenience + ******************************************************************************/ +#define CTX_EL3STATE_OFFSET (CTX_GPREGS_OFFSET + CTX_GPREGS_END) +#define CTX_SCR_EL3 0x0 +#define CTX_RUNTIME_SP 0x8 +#define CTX_SPSR_EL3 0x10 +#define CTX_ELR_EL3 0x18 +#define CTX_EL3STATE_END 0x20 + +/******************************************************************************* + * Constants that allow assembler code to access members of and the + * 'el1_sys_regs' structure at their correct offsets. 
Note that some of the + * registers are only 32-bits wide but are stored as 64-bit values for + * convenience + ******************************************************************************/ +#define CTX_SYSREGS_OFFSET (CTX_EL3STATE_OFFSET + CTX_EL3STATE_END) +#define CTX_SPSR_EL1 0x0 +#define CTX_ELR_EL1 0x8 +#define CTX_SPSR_ABT 0x10 +#define CTX_SPSR_UND 0x18 +#define CTX_SPSR_IRQ 0x20 +#define CTX_SPSR_FIQ 0x28 +#define CTX_SCTLR_EL1 0x30 +#define CTX_ACTLR_EL1 0x38 +#define CTX_CPACR_EL1 0x40 +#define CTX_CSSELR_EL1 0x48 +#define CTX_SP_EL1 0x50 +#define CTX_ESR_EL1 0x58 +#define CTX_TTBR0_EL1 0x60 +#define CTX_TTBR1_EL1 0x68 +#define CTX_MAIR_EL1 0x70 +#define CTX_AMAIR_EL1 0x78 +#define CTX_TCR_EL1 0x80 +#define CTX_TPIDR_EL1 0x88 +#define CTX_TPIDR_EL0 0x90 +#define CTX_TPIDRRO_EL0 0x98 +#define CTX_DACR32_EL2 0xa0 +#define CTX_IFSR32_EL2 0xa8 +#define CTX_PAR_EL1 0xb0 +#define CTX_FAR_EL1 0xb8 +#define CTX_AFSR0_EL1 0xc0 +#define CTX_AFSR1_EL1 0xc8 +#define CTX_CONTEXTIDR_EL1 0xd0 +#define CTX_VBAR_EL1 0xd8 +/* + * If the timer registers aren't saved and restored, we don't have to reserve + * space for them in the context + */ +#if NS_TIMER_SWITCH +#define CTX_CNTP_CTL_EL0 0xe0 +#define CTX_CNTP_CVAL_EL0 0xe8 +#define CTX_CNTV_CTL_EL0 0xf0 +#define CTX_CNTV_CVAL_EL0 0xf8 +#define CTX_CNTKCTL_EL1 0x100 +#define CTX_FP_FPEXC32_EL2 0x108 +#define CTX_SYSREGS_END 0x110 +#else +#define CTX_FP_FPEXC32_EL2 0xe0 +#define CTX_SYSREGS_END 0xf0 +#endif + +/******************************************************************************* + * Constants that allow assembler code to access members of and the 'fp_regs' + * structure at their correct offsets. + ******************************************************************************/ +#if CTX_INCLUDE_FPREGS +#define CTX_FPREGS_OFFSET (CTX_SYSREGS_OFFSET + CTX_SYSREGS_END) +#define CTX_FP_Q0 0x0 +#define CTX_FP_Q1 0x10 +#define CTX_FP_Q2 0x20 +#define CTX_FP_Q3 0x30 +#define CTX_FP_Q4 0x40 +#define CTX_FP_Q5 0x50 +#define CTX_FP_Q6 0x60 +#define CTX_FP_Q7 0x70 +#define CTX_FP_Q8 0x80 +#define CTX_FP_Q9 0x90 +#define CTX_FP_Q10 0xa0 +#define CTX_FP_Q11 0xb0 +#define CTX_FP_Q12 0xc0 +#define CTX_FP_Q13 0xd0 +#define CTX_FP_Q14 0xe0 +#define CTX_FP_Q15 0xf0 +#define CTX_FP_Q16 0x100 +#define CTX_FP_Q17 0x110 +#define CTX_FP_Q18 0x120 +#define CTX_FP_Q19 0x130 +#define CTX_FP_Q20 0x140 +#define CTX_FP_Q21 0x150 +#define CTX_FP_Q22 0x160 +#define CTX_FP_Q23 0x170 +#define CTX_FP_Q24 0x180 +#define CTX_FP_Q25 0x190 +#define CTX_FP_Q26 0x1a0 +#define CTX_FP_Q27 0x1b0 +#define CTX_FP_Q28 0x1c0 +#define CTX_FP_Q29 0x1d0 +#define CTX_FP_Q30 0x1e0 +#define CTX_FP_Q31 0x1f0 +#define CTX_FP_FPSR 0x200 +#define CTX_FP_FPCR 0x208 +#define CTX_FPREGS_END 0x210 +#endif + +#ifndef __ASSEMBLY__ + +#include +#include /* for CACHE_WRITEBACK_GRANULE */ +#include + +/* + * Common constants to help define the 'cpu_context' structure and its + * members below. + */ +#define DWORD_SHIFT 3 +#define DEFINE_REG_STRUCT(name, num_regs) \ + typedef struct name { \ + uint64_t _regs[num_regs]; \ + } __aligned(16) name##_t + +/* Constants to determine the size of individual context structures */ +#define CTX_GPREG_ALL (CTX_GPREGS_END >> DWORD_SHIFT) +#define CTX_SYSREG_ALL (CTX_SYSREGS_END >> DWORD_SHIFT) +#if CTX_INCLUDE_FPREGS +#define CTX_FPREG_ALL (CTX_FPREGS_END >> DWORD_SHIFT) +#endif +#define CTX_EL3STATE_ALL (CTX_EL3STATE_END >> DWORD_SHIFT) + +/* + * AArch64 general purpose register context structure. 
Usually x0-x18, + * lr are saved as the compiler is expected to preserve the remaining + * callee saved registers if used by the C runtime and the assembler + * does not touch the remaining. But in case of world switch during + * exception handling, we need to save the callee registers too. + */ +DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL); + +/* + * AArch64 EL1 system register context structure for preserving the + * architectural state during switches from one security state to + * another in EL1. + */ +DEFINE_REG_STRUCT(el1_sys_regs, CTX_SYSREG_ALL); + +/* + * AArch64 floating point register context structure for preserving + * the floating point state during switches from one security state to + * another. + */ +#if CTX_INCLUDE_FPREGS +DEFINE_REG_STRUCT(fp_regs, CTX_FPREG_ALL); +#endif + +/* + * Miscellaneous registers used by EL3 firmware to maintain its state + * across exception entries and exits + */ +DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL); + +/* + * Macros to access members of any of the above structures using their + * offsets + */ +#define read_ctx_reg(ctx, offset) ((ctx)->_regs[offset >> DWORD_SHIFT]) +#define write_ctx_reg(ctx, offset, val) (((ctx)->_regs[offset >> DWORD_SHIFT]) \ + = val) + +/* + * Top-level context structure which is used by EL3 firmware to + * preserve the state of a core at EL1 in one of the two security + * states and save enough EL3 meta data to be able to return to that + * EL and security state. The context management library will be used + * to ensure that SP_EL3 always points to an instance of this + * structure at exception entry and exit. Each instance will + * correspond to either the secure or the non-secure state. + */ +typedef struct cpu_context { + gp_regs_t gpregs_ctx; + el3_state_t el3state_ctx; + el1_sys_regs_t sysregs_ctx; +#if CTX_INCLUDE_FPREGS + fp_regs_t fpregs_ctx; +#endif +} cpu_context_t; + +/* Macros to access members of the 'cpu_context_t' structure */ +#define get_el3state_ctx(h) (&((cpu_context_t *) h)->el3state_ctx) +#if CTX_INCLUDE_FPREGS +#define get_fpregs_ctx(h) (&((cpu_context_t *) h)->fpregs_ctx) +#endif +#define get_sysregs_ctx(h) (&((cpu_context_t *) h)->sysregs_ctx) +#define get_gpregs_ctx(h) (&((cpu_context_t *) h)->gpregs_ctx) + +/* + * Compile time assertions related to the 'cpu_context' structure to + * ensure that the assembler and the compiler view of the offsets of + * the structure members is the same. + */ +CASSERT(CTX_GPREGS_OFFSET == __builtin_offsetof(cpu_context_t, gpregs_ctx), \ + assert_core_context_gp_offset_mismatch); +CASSERT(CTX_SYSREGS_OFFSET == __builtin_offsetof(cpu_context_t, sysregs_ctx), \ + assert_core_context_sys_offset_mismatch); +#if CTX_INCLUDE_FPREGS +CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context_t, fpregs_ctx), \ + assert_core_context_fp_offset_mismatch); +#endif +CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context_t, el3state_ctx), \ + assert_core_context_el3state_offset_mismatch); + +/* + * Helper macro to set the general purpose registers that correspond to + * parameters in an aapcs_64 call i.e. 
x0-x7 + */ +#define set_aapcs_args0(ctx, x0) do { \ + write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0, x0); \ + } while (0); +#define set_aapcs_args1(ctx, x0, x1) do { \ + write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X1, x1); \ + set_aapcs_args0(ctx, x0); \ + } while (0); +#define set_aapcs_args2(ctx, x0, x1, x2) do { \ + write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X2, x2); \ + set_aapcs_args1(ctx, x0, x1); \ + } while (0); +#define set_aapcs_args3(ctx, x0, x1, x2, x3) do { \ + write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X3, x3); \ + set_aapcs_args2(ctx, x0, x1, x2); \ + } while (0); +#define set_aapcs_args4(ctx, x0, x1, x2, x3, x4) do { \ + write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X4, x4); \ + set_aapcs_args3(ctx, x0, x1, x2, x3); \ + } while (0); +#define set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5) do { \ + write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X5, x5); \ + set_aapcs_args4(ctx, x0, x1, x2, x3, x4); \ + } while (0); +#define set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6) do { \ + write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X6, x6); \ + set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5); \ + } while (0); +#define set_aapcs_args7(ctx, x0, x1, x2, x3, x4, x5, x6, x7) do { \ + write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X7, x7); \ + set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6); \ + } while (0); + +/******************************************************************************* + * Function prototypes + ******************************************************************************/ +void el1_sysregs_context_save(el1_sys_regs_t *regs); +void el1_sysregs_context_restore(el1_sys_regs_t *regs); +#if CTX_INCLUDE_FPREGS +void fpregs_context_save(fp_regs_t *regs); +void fpregs_context_restore(fp_regs_t *regs); +#endif + + +#undef CTX_SYSREG_ALL +#if CTX_INCLUDE_FPREGS +#undef CTX_FPREG_ALL +#endif +#undef CTX_GPREG_ALL +#undef CTX_EL3STATE_ALL + +#endif /* __ASSEMBLY__ */ + +#endif /* __CONTEXT_H__ */
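As a quick illustration of these accessors, the hedged snippet below seeds the AAPCS argument registers of a context and reads one back. It assumes ctx is a valid cpu_context_t obtained from the context management library, and the function id value is arbitrary.

	static void example_seed_smc_args(cpu_context_t *ctx)
	{
		uint64_t x0;

		/* Seed x0 (arbitrary function id) and x1 (cookie) in the saved gp_regs */
		set_aapcs_args1(ctx, 0xC2000000UL, 0UL);

		/* read_ctx_reg indexes _regs[] by (byte offset >> DWORD_SHIFT) */
		x0 = read_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0);
		assert(x0 == 0xC2000000UL);
	}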
diff --git a/include/common/context_mgmt.h b/include/common/context_mgmt.h new file mode 100644 index 0000000..141b348 --- /dev/null +++ b/include/common/context_mgmt.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __CM_H__ +#define __CM_H__ + +#include +#include +#include + +/******************************************************************************* + * Forward declarations + ******************************************************************************/ +struct entry_point_info; + +/******************************************************************************* + * Function & variable prototypes + ******************************************************************************/ +void cm_init(void); +void *cm_get_context_by_mpidr(uint64_t mpidr, + uint32_t security_state) __warn_deprecated; +void cm_set_context_by_mpidr(uint64_t mpidr, + void *context, + uint32_t security_state) __warn_deprecated; +void *cm_get_context_by_index(unsigned int cpu_idx, + unsigned int security_state); +void cm_set_context_by_index(unsigned int cpu_idx, + void *context, + unsigned int security_state); +void *cm_get_context(uint32_t security_state); +void cm_set_context(void *context, uint32_t security_state); +static inline void cm_set_next_context(void *context); +void cm_init_context(uint64_t mpidr, + const struct entry_point_info *ep) __warn_deprecated; +void cm_init_my_context(const struct entry_point_info *ep); +void cm_init_context_by_index(unsigned int cpu_idx, + const struct entry_point_info *ep); +void cm_prepare_el3_exit(uint32_t security_state); +void cm_el1_sysregs_context_save(uint32_t security_state); +void cm_el1_sysregs_context_restore(uint32_t security_state); +void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint); +void cm_set_elr_spsr_el3(uint32_t security_state, + uint64_t entrypoint, uint32_t spsr); +void cm_write_scr_el3_bit(uint32_t security_state, + uint32_t bit_pos, + uint32_t value); +void cm_set_next_eret_context(uint32_t security_state); +uint32_t cm_get_scr_el3(uint32_t security_state); + +/* Inline definitions */ + +/******************************************************************************* + * This function is used to program the context that's used for exception + * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for + * the required security state + ******************************************************************************/ +static inline void cm_set_next_context(void *context) +{ +#if DEBUG + uint64_t sp_mode; + + /* + * Check that this function is called with SP_EL0 as the stack + * pointer + */ + __asm__ volatile("mrs %0, SPSel\n" + : "=r" (sp_mode)); + + assert(sp_mode == MODE_SP_EL0); +#endif + + __asm__ volatile("msr spsel, #1\n" + "mov sp, %0\n" + "msr spsel, #0\n" + : : "r" (context)); +} +#endif /* __CM_H__ */ diff --git a/include/common/smcc_helpers.h b/include/common/smcc_helpers.h new file mode 100644 index 0000000..6a07b01 --- /dev/null +++ b/include/common/smcc_helpers.h @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __SMCC_HELPERS_H__ +#define __SMCC_HELPERS_H__ + +/******************************************************************************* + * Bit definitions inside the function id as per the SMC calling convention + ******************************************************************************/ +#define FUNCID_TYPE_SHIFT 31 +#define FUNCID_CC_SHIFT 30 +#define FUNCID_OEN_SHIFT 24 +#define FUNCID_NUM_SHIFT 0 + +#define FUNCID_TYPE_MASK 0x1 +#define FUNCID_CC_MASK 0x1 +#define FUNCID_OEN_MASK 0x3f +#define FUNCID_NUM_MASK 0xffff + +#define FUNCID_TYPE_WIDTH 1 +#define FUNCID_CC_WIDTH 1 +#define FUNCID_OEN_WIDTH 6 +#define FUNCID_NUM_WIDTH 16 + +#define GET_SMC_CC(id) ((id >> FUNCID_CC_SHIFT) & \ + FUNCID_CC_MASK) +#define GET_SMC_TYPE(id) ((id >> FUNCID_TYPE_SHIFT) & \ + FUNCID_TYPE_MASK) + +#define SMC_64 1 +#define SMC_32 0 +#define SMC_UNK 0xffffffff +#define SMC_TYPE_FAST 1 +#define SMC_TYPE_STD 0 +#define SMC_PREEMPTED 0xfffffffe +/******************************************************************************* + * Owning entity number definitions inside the function id as per the SMC + * calling convention + ******************************************************************************/ +#define OEN_ARM_START 0 +#define OEN_ARM_END 0 +#define OEN_CPU_START 1 +#define OEN_CPU_END 1 +#define OEN_SIP_START 2 +#define OEN_SIP_END 2 +#define OEN_OEM_START 3 +#define OEN_OEM_END 3 +#define OEN_STD_START 4 /* Standard Calls */ +#define OEN_STD_END 4 +#define OEN_TAP_START 48 /* Trusted Applications */ +#define OEN_TAP_END 49 +#define OEN_TOS_START 50 /* Trusted OS */ +#define OEN_TOS_END 63 +#define OEN_LIMIT 64 + +#ifndef __ASSEMBLY__ + +#include +#include +#include + +/* Various flags passed to SMC handlers */ +#define SMC_FROM_SECURE (0 << 0) +#define SMC_FROM_NON_SECURE (1 << 0) + +#define is_caller_non_secure(_f) (!!(_f & SMC_FROM_NON_SECURE)) +#define is_caller_secure(_f) (!(is_caller_non_secure(_f))) + +/* Convenience macros to return from SMC handler */ +#define SMC_RET0(_h) { \ + return (uint64_t) 
(_h); \ +} +#define SMC_RET1(_h, _x0) { \ + write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X0, (_x0)); \ + SMC_RET0(_h); \ +} +#define SMC_RET2(_h, _x0, _x1) { \ + write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X1, (_x1)); \ + SMC_RET1(_h, (_x0)); \ +} +#define SMC_RET3(_h, _x0, _x1, _x2) { \ + write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X2, (_x2)); \ + SMC_RET2(_h, (_x0), (_x1)); \ +} +#define SMC_RET4(_h, _x0, _x1, _x2, _x3) { \ + write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X3, (_x3)); \ + SMC_RET3(_h, (_x0), (_x1), (_x2)); \ +} + +/* + * Convenience macros to access general purpose registers using handle provided + * to SMC handler. These take the offset values defined in context.h + */ +#define SMC_GET_GP(_h, _g) \ + read_ctx_reg(get_gpregs_ctx(_h), (_g)) +#define SMC_SET_GP(_h, _g, _v) \ + write_ctx_reg(get_gpregs_ctx(_h), (_g), (_v)) + +/* + * Convenience macros to access EL3 context registers using handle provided to + * SMC handler. These take the offset values defined in context.h + */ +#define SMC_GET_EL3(_h, _e) \ + read_ctx_reg(get_el3state_ctx(_h), (_e)) +#define SMC_SET_EL3(_h, _e, _v) \ + write_ctx_reg(get_el3state_ctx(_h), (_e), (_v)) + +/* The macro below is used to identify a Standard Service SMC call */ +#define is_std_svc_call(_fid) ((((_fid) >> FUNCID_OEN_SHIFT) & \ + FUNCID_OEN_MASK) == OEN_STD_START) + +/* The macro below is used to identify a valid Fast SMC call */ +#define is_valid_fast_smc(_fid) ((!(((_fid) >> 16) & 0xff)) && \ + (GET_SMC_TYPE(_fid) == SMC_TYPE_FAST)) + +/* + * Macro to define UUID for services. Apart from defining and initializing a + * uuid_t structure, this macro verifies that the first word of the defined UUID + * does not equal SMC_UNK. This is to ensure that the caller won't mistake the + * returned UUID in x0 for an invalid SMC error return + */ +#define DEFINE_SVC_UUID(_name, _tl, _tm, _th, _cl, _ch, \ + _n0, _n1, _n2, _n3, _n4, _n5) \ + CASSERT(_tl != SMC_UNK, invalid_svc_uuid);\ + static const uuid_t _name = { \ + _tl, _tm, _th, _cl, _ch, \ + { _n0, _n1, _n2, _n3, _n4, _n5 } \ + } + +/* Return a UUID in the SMC return registers */ +#define SMC_UUID_RET(_h, _uuid) \ + SMC_RET4((_h), ((const uint32_t *) &(_uuid))[0], \ + ((const uint32_t *) &(_uuid))[1], \ + ((const uint32_t *) &(_uuid))[2], \ + ((const uint32_t *) &(_uuid))[3]) + +#endif /*__ASSEMBLY__*/ +#endif /* __SMCC_HELPERS_H__ */
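To show these helpers in action, here is a hedged sketch of a runtime service SMC handler built on the macros above. The handler name and its semantics are invented for illustration; the parameter list follows the handler prototype described earlier in this patch (x0, the SMC Function ID, through x4 as passed by the caller, plus cookie, context handle and flags).

	static uint64_t example_svc_smc_handler(uint32_t smc_fid,
						uint64_t x1,
						uint64_t x2,
						uint64_t x3,
						uint64_t x4,
						void *cookie,
						void *handle,
						uint64_t flags)
	{
		/* Reject anything that is not a valid fast call */
		if (!is_valid_fast_smc(smc_fid))
			SMC_RET1(handle, SMC_UNK);

		/* This example service only accepts normal-world callers */
		if (is_caller_secure(flags))
			SMC_RET1(handle, SMC_UNK);

		/* Return a zero status in x0 and echo the caller's x1 back,
		   read from the saved context rather than the argument */
		SMC_RET2(handle, 0, SMC_GET_GP(handle, CTX_GPREG_X1));
	}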