diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S
index c0e8855..87bdae5 100644
--- a/bl31/aarch64/bl31_entrypoint.S
+++ b/bl31/aarch64/bl31_entrypoint.S
@@ -31,8 +31,6 @@
 #include
 #include
 #include
-#include
-
	.globl	bl31_entrypoint
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index b6dcccb..4789b33 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -30,14 +30,118 @@
 #include
 #include
-#include
 #include
+#include
 #include
 #include
 
 	.globl	runtime_exceptions
 	.globl	el3_exit
 
+	/* -----------------------------------------------------
+	 * Handle SMC exceptions separately from other sync.
+	 * exceptions.
+	 * -----------------------------------------------------
+	 */
+	.macro	handle_sync_exception
+	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+	mrs	x30, esr_el3
+	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
+
+	cmp	x30, #EC_AARCH32_SMC
+	b.eq	smc_handler32
+
+	cmp	x30, #EC_AARCH64_SMC
+	b.eq	smc_handler64
+
+	/* -----------------------------------------------------
+	 * The following code handles any synchronous exception
+	 * that is not an SMC.
+	 * -----------------------------------------------------
+	 */
+
+	bl	dump_state_and_die
+	.endm
+
+
+	/* -----------------------------------------------------
+	 * This macro handles FIQ or IRQ interrupts i.e. EL3,
+	 * S-EL1 and NS interrupts.
+	 * -----------------------------------------------------
+	 */
+	.macro	handle_interrupt_exception label
+	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+	bl	save_gp_registers
+
+	/* Switch to the runtime stack i.e. SP_EL0 */
+	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
+	mov	x20, sp
+	msr	spsel, #0
+	mov	sp, x2
+
+	/*
+	 * Find out whether this is a valid interrupt type. If the
+	 * interrupt controller reports a spurious interrupt then
+	 * return to where we came from.
+	 */
+	bl	ic_get_pending_interrupt_type
+	cmp	x0, #INTR_TYPE_INVAL
+	b.eq	interrupt_exit_\label
+
+	/*
+	 * Get the registered handler for this interrupt type. A
+	 * NULL return value implies that an interrupt was generated
+	 * for which there is no handler registered, or that the
+	 * interrupt was routed incorrectly. This is a problem of the
+	 * framework, so report it as an error.
+	 */
+	bl	get_interrupt_type_handler
+	cbz	x0, interrupt_error_\label
+	mov	x21, x0
+
+	mov	x0, #INTR_ID_UNAVAILABLE
+#if IMF_READ_INTERRUPT_ID
+	/*
+	 * Read the id of the highest priority pending interrupt. If
+	 * no interrupt is asserted then return to where we came from.
+	 */
+	bl	ic_get_pending_interrupt_id
+	cmp	x0, #INTR_ID_UNAVAILABLE
+	b.eq	interrupt_exit_\label
+#endif
+
+	/*
+	 * Save the EL3 system registers needed to return from
+	 * this exception.
+	 */
+	mrs	x3, spsr_el3
+	mrs	x4, elr_el3
+	stp	x3, x4, [x20, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
+
+	/* Set the current security state in the 'flags' parameter */
+	mrs	x2, scr_el3
+	ubfx	x1, x2, #0, #1
+
+	/* Restore the reference to the 'handle' i.e. SP_EL3 */
+	mov	x2, x20
+
+	/* Call the interrupt type handler */
+	blr	x21
+
+interrupt_exit_\label:
+	/* Return from exception, possibly in a different security state */
+	b	el3_exit
+
+	/*
+	 * This label signifies a problem with the interrupt management
+	 * framework where it is not safe to go back to the instruction
+	 * where the interrupt was generated.
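At the `blr x21` above the arguments are already marshalled for C: x0 holds the interrupt id (or `INTR_ID_UNAVAILABLE`), x1 the security state flag, x2 the 'handle' (SP_EL3) and x3 a cookie. A minimal sketch of a matching handler, using the `interrupt_type_handler_t` prototype and accessors this patch adds in `interrupt_mgmt.h`; the handler name is illustrative only:

```c
#include <interrupt_mgmt.h>

/* Illustrative handler: 'id' may be INTR_ID_UNAVAILABLE depending on
 * IMF_READ_INTERRUPT_ID, bit[0] of 'flags' is the interrupted security
 * state (copied from SCR_EL3.NS by the macro above) and 'handle' is the
 * 'cpu_context' that el3_exit will restore. */
static uint64_t example_interrupt_handler(uint32_t id, uint32_t flags,
					  void *handle, void *cookie)
{
	/* 0 (SECURE) or 1 (NON_SECURE) */
	uint32_t interrupted_ss = get_interrupt_src_ss(flags);

	(void)interrupted_ss;
	/* ... service or hand over the interrupt here ... */

	/* The macro falls through to el3_exit whatever is returned */
	return 0;
}
```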
+ */ +interrupt_error_\label: + bl dump_intr_state_and_die + .endm + + .macro save_x18_to_x29_sp_el0 stp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18] stp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20] @@ -140,12 +244,12 @@ * ----------------------------------------------------- */ irq_aarch64: - bl dump_intr_state_and_die + handle_interrupt_exception irq_aarch64 check_vector_size irq_aarch64 .align 7 fiq_aarch64: - bl dump_intr_state_and_die + handle_interrupt_exception fiq_aarch64 check_vector_size fiq_aarch64 .align 7 @@ -177,12 +281,12 @@ * ----------------------------------------------------- */ irq_aarch32: - bl dump_intr_state_and_die + handle_interrupt_exception irq_aarch32 check_vector_size irq_aarch32 .align 7 fiq_aarch32: - bl dump_intr_state_and_die + handle_interrupt_exception fiq_aarch32 check_vector_size fiq_aarch32 .align 7 diff --git a/bl31/bl31.mk b/bl31/bl31.mk index 6c9650f..8155f3d 100644 --- a/bl31/bl31.mk +++ b/bl31/bl31.mk @@ -31,6 +31,7 @@ BL31_SOURCES += bl31/bl31_main.c \ bl31/context_mgmt.c \ bl31/runtime_svc.c \ + bl31/interrupt_mgmt.c \ bl31/aarch64/bl31_arch_setup.c \ bl31/aarch64/bl31_entrypoint.S \ bl31/aarch64/context.S \ @@ -50,3 +51,11 @@ services/std_svc/psci/psci_setup.c BL31_LINKERFILE := bl31/bl31.ld.S + +# Flag used by the generic interrupt management framework to determine if +# upon the assertion of an interrupt, it should pass the interrupt id or not +IMF_READ_INTERRUPT_ID := 0 + +$(eval $(call assert_boolean,IMF_READ_INTERRUPT_ID)) +$(eval $(call add_define,IMF_READ_INTERRUPT_ID)) + diff --git a/bl31/bl31_main.c b/bl31/bl31_main.c index 55bf7ce..02815ff 100644 --- a/bl31/bl31_main.c +++ b/bl31/bl31_main.c @@ -168,9 +168,15 @@ assert(next_image_info); scr = read_scr(); + scr &= ~SCR_NS_BIT; if (image_type == NON_SECURE) scr |= SCR_NS_BIT; + scr &= ~SCR_RW_BIT; + if ((next_image_info->spsr & (1 << MODE_RW_SHIFT)) == + (MODE_RW_64 << MODE_RW_SHIFT)) + scr |= SCR_RW_BIT; + /* * Tell the context mgmt. library to ensure that SP_EL3 points to * the right context to exit from EL3 correctly. diff --git a/bl31/context_mgmt.c b/bl31/context_mgmt.c index eae608c..78bfa89 100644 --- a/bl31/context_mgmt.c +++ b/bl31/context_mgmt.c @@ -28,12 +28,14 @@ * POSSIBILITY OF SUCH DAMAGE. 
*/ +#include #include #include #include #include #include #include +#include #include #include @@ -145,10 +147,10 @@ } /******************************************************************************* - * This function function populates 'cpu_context' pertaining to the given - * security state with the entrypoint, SPSR and SCR values so that an ERET from - * this securit state correctly restores corresponding values to drop the CPU to - * the next exception level + * This function populates 'cpu_context' pertaining to the given security state + * with the entrypoint, SPSR and SCR values so that an ERET from this security + * state correctly restores corresponding values to drop the CPU to the next + * exception level ******************************************************************************/ void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint, uint32_t spsr, uint32_t scr) @@ -159,6 +161,11 @@ ctx = cm_get_context(read_mpidr(), security_state); assert(ctx); + /* Program the interrupt routing model for this security state */ + scr &= ~SCR_FIQ_BIT; + scr &= ~SCR_IRQ_BIT; + scr |= get_scr_el3_from_routing_model(security_state); + /* Populate EL3 state so that we've the right context before doing ERET */ state = get_el3state_ctx(ctx); write_ctx_reg(state, CTX_SPSR_EL3, spsr); @@ -167,10 +174,10 @@ } /******************************************************************************* - * This function function populates ELR_EL3 member of 'cpu_context' pertaining - * to the given security state with the given entrypoint + * This function populates ELR_EL3 member of 'cpu_context' pertaining to the + * given security state with the given entrypoint ******************************************************************************/ -void cm_set_el3_elr(uint32_t security_state, uint64_t entrypoint) +void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint) { cpu_context_t *ctx; el3_state_t *state; @@ -184,6 +191,53 @@ } /******************************************************************************* + * This function updates a single bit in the SCR_EL3 member of the 'cpu_context' + * pertaining to the given security state using the value and bit position + * specified in the parameters. It preserves all other bits. + ******************************************************************************/ +void cm_write_scr_el3_bit(uint32_t security_state, + uint32_t bit_pos, + uint32_t value) +{ + cpu_context_t *ctx; + el3_state_t *state; + uint32_t scr_el3; + + ctx = cm_get_context(read_mpidr(), security_state); + assert(ctx); + + /* Ensure that the bit position is a valid one */ + assert((1 << bit_pos) & SCR_VALID_BIT_MASK); + + /* + * Get the SCR_EL3 value from the cpu context, clear the desired bit + * and set it to its new value. + */ + state = get_el3state_ctx(ctx); + scr_el3 = read_ctx_reg(state, CTX_SCR_EL3); + scr_el3 &= ~(1 << bit_pos); + scr_el3 |= value << bit_pos; + write_ctx_reg(state, CTX_SCR_EL3, scr_el3); +} + +/******************************************************************************* + * This function retrieves SCR_EL3 member of 'cpu_context' pertaining to the + * given security state. 
+ ******************************************************************************/
+uint32_t cm_get_scr_el3(uint32_t security_state)
+{
+	cpu_context_t *ctx;
+	el3_state_t *state;
+
+	ctx = cm_get_context(read_mpidr(), security_state);
+	assert(ctx);
+
+	/* Get the SCR_EL3 value from the cpu context */
+	state = get_el3state_ctx(ctx);
+	return read_ctx_reg(state, CTX_SCR_EL3);
+}
+
+/*******************************************************************************
  * This function is used to program the context that's used for exception
  * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
  * the required security state
diff --git a/bl31/interrupt_mgmt.c b/bl31/interrupt_mgmt.c
new file mode 100644
index 0000000..2b0c797
--- /dev/null
+++ b/bl31/interrupt_mgmt.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/*******************************************************************************
+ * Local structure and corresponding array to keep track of the state of the
+ * registered interrupt handlers for each interrupt type.
+ * The field descriptions are:
+ *
+ * 'flags' : Bit[0], Routing model for this interrupt type when execution is
+ *           not in EL3 in the secure state. '1' implies that this
+ *           interrupt will be routed to EL3. '0' implies that this
+ *           interrupt will be routed to the current exception level.
+ *
+ *           Bit[1], Routing model for this interrupt type when execution is
+ *           not in EL3 in the non-secure state. '1' implies that this
+ *           interrupt will be routed to EL3. '0' implies that this
+ *           interrupt will be routed to the current exception level.
+ *
+ *           All other bits are reserved and SBZ.
+ *
+ * 'scr_el3[2]' : Mapping of the routing model in the 'flags' field to the
+ *                value of the SCR_EL3.IRQ or FIQ bit for each security state.
+ *                There are two instances of this field corresponding to the
+ *                two security states.
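To make the 'flags' to 'scr_el3[2]' mapping concrete, a worked example, assuming the architectural SCR_EL3 layout (IRQ at bit 1, FIQ at bit 2) and SECURE/NON_SECURE as the two array indices:

```c
/*
 * S-EL1 interrupts signalled on the FIQ line, routed to EL3 only from
 * the non-secure state:
 *
 *   flags = 0x2    bit[0] = 0: while secure, take to S-EL1
 *                  bit[1] = 1: while non-secure, route to EL3
 *
 * set_scr_el3_from_rm() (further down) then caches, with bit_pos = 2:
 *
 *   scr_el3[SECURE]     = 0 << 2   SCR_EL3.FIQ clear while secure
 *   scr_el3[NON_SECURE] = 1 << 2   SCR_EL3.FIQ set while non-secure
 */
```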
+ ******************************************************************************/ +typedef struct intr_type_desc { + interrupt_type_handler_t handler; + uint32_t flags; + uint32_t scr_el3[2]; +} intr_type_desc_t; + +static intr_type_desc_t intr_type_descs[MAX_INTR_TYPES]; + +/******************************************************************************* + * This function validates the interrupt type. EL3 interrupts are currently not + * supported. + ******************************************************************************/ +static int32_t validate_interrupt_type(uint32_t type) +{ + if (type == INTR_TYPE_EL3) + return -ENOTSUP; + + if (type != INTR_TYPE_S_EL1 && type != INTR_TYPE_NS) + return -EINVAL; + + return 0; +} + +/******************************************************************************* +* This function validates the routing model for this type of interrupt + ******************************************************************************/ +static int32_t validate_routing_model(uint32_t type, uint32_t flags) +{ + flags >>= INTR_RM_FLAGS_SHIFT; + flags &= INTR_RM_FLAGS_MASK; + + if (type == INTR_TYPE_S_EL1) + return validate_sel1_interrupt_rm(flags); + + if (type == INTR_TYPE_NS) + return validate_ns_interrupt_rm(flags); + + return -EINVAL; +} + +/******************************************************************************* + * This function returns the cached copy of the SCR_EL3 which contains the + * routing model (expressed through the IRQ and FIQ bits) for a security state + * which was stored through a call to 'set_routing_model()' earlier. + ******************************************************************************/ +uint32_t get_scr_el3_from_routing_model(uint32_t security_state) +{ + uint32_t scr_el3; + + assert(security_state <= NON_SECURE); + scr_el3 = intr_type_descs[INTR_TYPE_NS].scr_el3[security_state]; + scr_el3 |= intr_type_descs[INTR_TYPE_S_EL1].scr_el3[security_state]; + scr_el3 |= intr_type_descs[INTR_TYPE_EL3].scr_el3[security_state]; + return scr_el3; +} + +/******************************************************************************* + * This function uses the 'interrupt_type_flags' parameter to obtain the value + * of the trap bit (IRQ/FIQ) in the SCR_EL3 for a security state for this + * interrupt type. It uses it to update the SCR_EL3 in the cpu context and the + * 'intr_type_desc' for that security state. + ******************************************************************************/ +static void set_scr_el3_from_rm(uint32_t type, + uint32_t interrupt_type_flags, + uint32_t security_state) +{ + uint32_t flag, bit_pos; + + flag = get_interrupt_rm_flag(interrupt_type_flags, security_state); + bit_pos = plat_interrupt_type_to_line(type, security_state); + intr_type_descs[type].scr_el3[security_state] = flag << bit_pos; + cm_write_scr_el3_bit(security_state, bit_pos, flag); +} + +/******************************************************************************* + * This function validates the routing model specified in the 'flags' and + * updates internal data structures to reflect the new routing model. It also + * updates the copy of SCR_EL3 for each security state with the new routing + * model in the 'cpu_context' structure for this cpu. 
+ ******************************************************************************/
+int32_t set_routing_model(uint32_t type, uint32_t flags)
+{
+	int32_t rc;
+
+	rc = validate_interrupt_type(type);
+	if (rc)
+		return rc;
+
+	rc = validate_routing_model(type, flags);
+	if (rc)
+		return rc;
+
+	/* Update the routing model in internal data structures */
+	intr_type_descs[type].flags = flags;
+	set_scr_el3_from_rm(type, flags, SECURE);
+	set_scr_el3_from_rm(type, flags, NON_SECURE);
+
+	return 0;
+}
+
+/*******************************************************************************
+ * This function registers a handler for the 'type' of interrupt specified. It
+ * also validates the routing model specified in the 'flags' for this type of
+ * interrupt.
+ ******************************************************************************/
+int32_t register_interrupt_type_handler(uint32_t type,
+					interrupt_type_handler_t handler,
+					uint32_t flags)
+{
+	int32_t rc;
+
+	/* Validate the 'handler' parameter */
+	if (!handler)
+		return -EINVAL;
+
+	/* Validate the 'flags' parameter */
+	if (flags & INTR_TYPE_FLAGS_MASK)
+		return -EINVAL;
+
+	/* Check if a handler has already been registered */
+	if (intr_type_descs[type].handler)
+		return -EALREADY;
+
+	rc = set_routing_model(type, flags);
+	if (rc)
+		return rc;
+
+	/* Save the handler */
+	intr_type_descs[type].handler = handler;
+
+	return 0;
+}
+
+/*******************************************************************************
+ * This function is called when an interrupt is generated and returns the
+ * handler for the interrupt type (if registered). It returns NULL if the
+ * interrupt type is not supported or its handler has not been registered.
+ ******************************************************************************/
+interrupt_type_handler_t get_interrupt_type_handler(uint32_t type)
+{
+	if (validate_interrupt_type(type))
+		return NULL;
+
+	return intr_type_descs[type].handler;
+}
+
diff --git a/bl31/runtime_svc.c b/bl31/runtime_svc.c
index b2ba685..08cd2d8 100644
--- a/bl31/runtime_svc.c
+++ b/bl31/runtime_svc.c
@@ -109,26 +109,35 @@
 			goto error;
 		}
 
-		/* Call the initialisation routine for this runtime service */
-		rc = rt_svc_descs[index].init();
-		if (rc) {
-			ERROR("Error initializing runtime service %s\n",
-					rt_svc_descs[index].name);
-		} else {
-			/*
-			 * Fill the indices corresponding to the start and end
-			 * owning entity numbers with the index of the
-			 * descriptor which will handle the SMCs for this owning
-			 * entity range.
-			 */
-			start_idx = get_unique_oen(rt_svc_descs[index].start_oen,
-					rt_svc_descs[index].call_type);
-			end_idx = get_unique_oen(rt_svc_descs[index].end_oen,
-					rt_svc_descs[index].call_type);
-
-			for (; start_idx <= end_idx; start_idx++)
-				rt_svc_descs_indices[start_idx] = index;
+		/*
+		 * The runtime service may have separate rt_svc_desc_t
+		 * for its fast smc and standard smc. Since the service itself
+		 * needs to be initialized only once, only one of them will have
+		 * an initialisation routine defined. Call the initialisation
+		 * routine for this runtime service, if it is defined.
+		 */
+		if (rt_svc_descs[index].init) {
+			rc = rt_svc_descs[index].init();
+			if (rc) {
+				ERROR("Error initializing runtime service %s\n",
+						rt_svc_descs[index].name);
+				continue;
+			}
 		}
+
+		/*
+		 * Fill the indices corresponding to the start and end
+		 * owning entity numbers with the index of the
+		 * descriptor which will handle the SMCs for this owning
+		 * entity range.
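Tying the pieces of the interrupt management API above together, a sketch of how a service (an SPD, say) might register for S-EL1 interrupts; only the constants and helpers are from this patch, the function names are illustrative:

```c
#include <errno.h>
#include <interrupt_mgmt.h>

static uint64_t example_sel1_handler(uint32_t id, uint32_t flags,
				     void *handle, void *cookie);

static int32_t example_register_sel1_handler(void)
{
	uint32_t flags = 0;

	/* Route S-EL1 interrupts to EL3 only while in the non-secure state */
	set_interrupt_rm_flag(flags, NON_SECURE);

	/* Returns -EINVAL, -ENOTSUP or -EALREADY on a bad request */
	return register_interrupt_type_handler(INTR_TYPE_S_EL1,
					       example_sel1_handler,
					       flags);
}
```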
+ */ + start_idx = get_unique_oen(rt_svc_descs[index].start_oen, + rt_svc_descs[index].call_type); + end_idx = get_unique_oen(rt_svc_descs[index].end_oen, + rt_svc_descs[index].call_type); + + for (; start_idx <= end_idx; start_idx++) + rt_svc_descs_indices[start_idx] = index; } return; diff --git a/bl32/tsp/aarch64/tsp_entrypoint.S b/bl32/tsp/aarch64/tsp_entrypoint.S index fab64cf..9999c43 100644 --- a/bl32/tsp/aarch64/tsp_entrypoint.S +++ b/bl32/tsp/aarch64/tsp_entrypoint.S @@ -39,6 +39,10 @@ .globl tsp_cpu_suspend_entry .globl tsp_cpu_resume_entry .globl tsp_fast_smc_entry + .globl tsp_std_smc_entry + .globl tsp_fiq_entry + + /* --------------------------------------------- * Populate the params in x0-x7 from the pointer @@ -53,6 +57,22 @@ smc #0 .endm + .macro save_eret_context reg1 reg2 + mrs \reg1, elr_el1 + mrs \reg2, spsr_el1 + stp \reg1, \reg2, [sp, #-0x10]! + stp x30, x18, [sp, #-0x10]! + .endm + + .macro restore_eret_context reg1 reg2 + ldp x30, x18, [sp], #0x10 + ldp \reg1, \reg2, [sp], #0x10 + msr elr_el1, \reg1 + msr spsr_el1, \reg2 + .endm + + .section .text, "ax" + .align 3 func tsp_entrypoint @@ -70,7 +90,7 @@ * Set the exception vector to something sane. * --------------------------------------------- */ - adr x0, early_exceptions + adr x0, tsp_exceptions msr vbar_el1, x0 /* --------------------------------------------- @@ -167,7 +187,7 @@ * Set the exception vector to something sane. * --------------------------------------------- */ - adr x0, early_exceptions + adr x0, tsp_exceptions msr vbar_el1, x0 /* --------------------------------------------- @@ -227,6 +247,58 @@ restore_args_call_smc /*--------------------------------------------- + * This entrypoint is used by the TSPD to pass + * control for handling a pending S-EL1 FIQ. + * 'x0' contains a magic number which indicates + * this. TSPD expects control to be handed back + * at the end of FIQ processing. This is done + * through an SMC. The handover agreement is: + * + * 1. PSTATE.DAIF are set upon entry. 'x1' has + * the ELR_EL3 from the non-secure state. + * 2. TSP has to preserve the callee saved + * general purpose registers, SP_EL1/EL0 and + * LR. + * 3. TSP has to preserve the system and vfp + * registers (if applicable). + * 4. TSP can use 'x0-x18' to enable its C + * runtime. + * 5. TSP returns to TSPD using an SMC with + * 'x0' = TSP_HANDLED_S_EL1_FIQ + * --------------------------------------------- + */ +func tsp_fiq_entry +#if DEBUG + mov x2, #(TSP_HANDLE_FIQ_AND_RETURN & ~0xffff) + movk x2, #(TSP_HANDLE_FIQ_AND_RETURN & 0xffff) + cmp x0, x2 + b.ne tsp_fiq_entry_panic +#endif + /*--------------------------------------------- + * Save any previous context needed to perform + * an exception return from S-EL1 e.g. context + * from a previous IRQ. Update statistics and + * handle the FIQ before returning to the TSPD. + * IRQ/FIQs are not enabled since that will + * complicate the implementation. Execution + * will be transferred back to the normal world + * in any case. A non-zero return value from the + * fiq handler is an error. 
+ * --------------------------------------------- + */ + save_eret_context x2 x3 + bl tsp_update_sync_fiq_stats + bl tsp_fiq_handler + cbnz x0, tsp_fiq_entry_panic + restore_eret_context x2 x3 + mov x0, #(TSP_HANDLED_S_EL1_FIQ & ~0xffff) + movk x0, #(TSP_HANDLED_S_EL1_FIQ & 0xffff) + smc #0 + +tsp_fiq_entry_panic: + b tsp_fiq_entry_panic + + /*--------------------------------------------- * This entrypoint is used by the TSPD when this * cpu resumes execution after an earlier * CPU_SUSPEND psci call to ask the TSP to @@ -248,8 +320,22 @@ * --------------------------------------------- */ func tsp_fast_smc_entry - bl tsp_fast_smc_handler + bl tsp_smc_handler restore_args_call_smc tsp_fast_smc_entry_panic: b tsp_fast_smc_entry_panic + /*--------------------------------------------- + * This entrypoint is used by the TSPD to ask + * the TSP to service a std smc request. + * We will enable preemption during execution + * of tsp_smc_handler. + * --------------------------------------------- + */ +func tsp_std_smc_entry + msr daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT + bl tsp_smc_handler + msr daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT + restore_args_call_smc +tsp_std_smc_entry_panic: + b tsp_std_smc_entry_panic diff --git a/bl32/tsp/aarch64/tsp_exceptions.S b/bl32/tsp/aarch64/tsp_exceptions.S new file mode 100644 index 0000000..f84b5e0 --- /dev/null +++ b/bl32/tsp/aarch64/tsp_exceptions.S @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include + + + /* ---------------------------------------------------- + * The caller-saved registers x0-x18 and LR are saved + * here. 
+	 * ----------------------------------------------------
+	 */
+
+#define SCRATCH_REG_SIZE	#(20 * 8)
+
+	.macro save_caller_regs_and_lr
+	sub	sp, sp, SCRATCH_REG_SIZE
+	stp	x0, x1, [sp]
+	stp	x2, x3, [sp, #0x10]
+	stp	x4, x5, [sp, #0x20]
+	stp	x6, x7, [sp, #0x30]
+	stp	x8, x9, [sp, #0x40]
+	stp	x10, x11, [sp, #0x50]
+	stp	x12, x13, [sp, #0x60]
+	stp	x14, x15, [sp, #0x70]
+	stp	x16, x17, [sp, #0x80]
+	stp	x18, x30, [sp, #0x90]
+	.endm
+
+	.macro restore_caller_regs_and_lr
+	ldp	x0, x1, [sp]
+	ldp	x2, x3, [sp, #0x10]
+	ldp	x4, x5, [sp, #0x20]
+	ldp	x6, x7, [sp, #0x30]
+	ldp	x8, x9, [sp, #0x40]
+	ldp	x10, x11, [sp, #0x50]
+	ldp	x12, x13, [sp, #0x60]
+	ldp	x14, x15, [sp, #0x70]
+	ldp	x16, x17, [sp, #0x80]
+	ldp	x18, x30, [sp, #0x90]
+	add	sp, sp, SCRATCH_REG_SIZE
+	.endm
+
+	.globl	tsp_exceptions
+
+	/* -----------------------------------------------------
+	 * TSP exception handlers.
+	 * -----------------------------------------------------
+	 */
+	.section	.vectors, "ax"; .align 11
+
+	.align	7
+tsp_exceptions:
+	/* -----------------------------------------------------
+	 * Current EL with _sp_el0 : 0x0 - 0x180. No exceptions
+	 * are expected and treated as irrecoverable errors.
+	 * -----------------------------------------------------
+	 */
+sync_exception_sp_el0:
+	wfi
+	b	sync_exception_sp_el0
+	check_vector_size sync_exception_sp_el0
+
+	.align	7
+
+irq_sp_el0:
+	b	irq_sp_el0
+	check_vector_size irq_sp_el0
+
+	.align	7
+fiq_sp_el0:
+	b	fiq_sp_el0
+	check_vector_size fiq_sp_el0
+
+	.align	7
+serror_sp_el0:
+	b	serror_sp_el0
+	check_vector_size serror_sp_el0
+
+
+	/* -----------------------------------------------------
+	 * Current EL with SPx: 0x200 - 0x380. Only IRQs/FIQs
+	 * are expected and handled
+	 * -----------------------------------------------------
+	 */
+	.align	7
+sync_exception_sp_elx:
+	wfi
+	b	sync_exception_sp_elx
+	check_vector_size sync_exception_sp_elx
+
+	.align	7
+irq_sp_elx:
+	save_caller_regs_and_lr
+	/* We just update some statistics in the handler */
+	bl	tsp_irq_received
+	/* Hand over control to the normal world to handle the IRQ */
+	smc	#0
+	/* The resume std smc starts from here */
+	restore_caller_regs_and_lr
+	eret
+	check_vector_size irq_sp_elx
+
+	.align	7
+fiq_sp_elx:
+	save_caller_regs_and_lr
+	bl	tsp_fiq_handler
+	cbz	x0, fiq_sp_elx_done
+
+	/*
+	 * This FIQ was not targeted to S-EL1 so send it to
+	 * the monitor and wait for execution to resume.
+	 */
+	smc	#0
+fiq_sp_elx_done:
+	restore_caller_regs_and_lr
+	eret
+	check_vector_size fiq_sp_elx
+
+	.align	7
+serror_sp_elx:
+	b	serror_sp_elx
+	check_vector_size serror_sp_elx
+
+
+	/* -----------------------------------------------------
+	 * Lower EL using AArch64 : 0x400 - 0x580. No exceptions
+	 * are handled since TSP does not implement a lower EL
+	 * -----------------------------------------------------
+	 */
+	.align	7
+sync_exception_aarch64:
+	wfi
+	b	sync_exception_aarch64
+	check_vector_size sync_exception_aarch64
+
+	.align	7
+irq_aarch64:
+	b	irq_aarch64
+	check_vector_size irq_aarch64
+
+	.align	7
+fiq_aarch64:
+	b	fiq_aarch64
+	check_vector_size fiq_aarch64
+
+	.align	7
+serror_aarch64:
+	b	serror_aarch64
+	check_vector_size serror_aarch64
+
+
+	/* -----------------------------------------------------
+	 * Lower EL using AArch32 : 0x600 - 0x780. No exceptions
+	 * are handled since the TSP does not implement a lower EL.
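The `smc #0` in `irq_sp_elx` above hands a `TSP_PREEMPTED` result to the TSPD, which is expected to surface the preemption to the normal world as `SMC_PREEMPTED`. A sketch of the resulting normal-world calling pattern; `smc64()` stands in for whatever SMC invocation primitive the caller has and is not provided by this patch:

```c
/* Issue a standard (preemptible) TSP SMC and resume it until it completes */
uint64_t ret = smc64(TSP_STD_FID(TSP_ADD), arg1, arg2, 0, 0, 0, 0, 0);

while (ret == SMC_PREEMPTED)
	ret = smc64(TSP_FID_RESUME, 0, 0, 0, 0, 0, 0, 0);
```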
+ * ----------------------------------------------------- + */ + .align 7 +sync_exception_aarch32: + wfi + b sync_exception_aarch32 + check_vector_size sync_exception_aarch32 + + .align 7 +irq_aarch32: + b irq_aarch32 + check_vector_size irq_aarch32 + + .align 7 +fiq_aarch32: + b fiq_aarch32 + check_vector_size fiq_aarch32 + + .align 7 +serror_aarch32: + b serror_aarch32 + check_vector_size serror_aarch32 + .align 7 diff --git a/bl32/tsp/tsp-fvp.mk b/bl32/tsp/tsp-fvp.mk index 5d8a0e3..b1d0afe 100644 --- a/bl32/tsp/tsp-fvp.mk +++ b/bl32/tsp/tsp-fvp.mk @@ -29,7 +29,9 @@ # # TSP source files specific to FVP platform -BL32_SOURCES += plat/common/aarch64/platform_mp_stack.S \ - plat/fvp/bl32_plat_setup.c \ +BL32_SOURCES += drivers/arm/gic/gic_v2.c \ + plat/common/aarch64/platform_mp_stack.S \ plat/fvp/aarch64/plat_common.c \ - plat/fvp/aarch64/plat_helpers.S + plat/fvp/aarch64/plat_helpers.S \ + plat/fvp/bl32_plat_setup.c \ + plat/fvp/plat_gic.c diff --git a/bl32/tsp/tsp.mk b/bl32/tsp/tsp.mk index c478b43..b9084d5 100644 --- a/bl32/tsp/tsp.mk +++ b/bl32/tsp/tsp.mk @@ -30,7 +30,10 @@ BL32_SOURCES += bl32/tsp/tsp_main.c \ bl32/tsp/aarch64/tsp_entrypoint.S \ + bl32/tsp/aarch64/tsp_exceptions.S \ bl32/tsp/aarch64/tsp_request.S \ + bl32/tsp/tsp_interrupt.c \ + bl32/tsp/tsp_timer.c \ common/aarch64/early_exceptions.S \ lib/locks/exclusive/spinlock.S diff --git a/bl32/tsp/tsp_interrupt.c b/bl32/tsp/tsp_interrupt.c new file mode 100644 index 0000000..5719c06 --- /dev/null +++ b/bl32/tsp/tsp_interrupt.c @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include + +/******************************************************************************* + * This function updates the TSP statistics for FIQs handled synchronously i.e + * the ones that have been handed over by the TSPD. It also keeps count of the + * number of times control was passed back to the TSPD after handling an FIQ. 
+ * In the future it will be possible that the TSPD hands over an FIQ to the TSP
+ * but does not expect it to return execution. This statistic will be useful to
+ * distinguish between these two models of synchronous FIQ handling.
+ * The 'elr_el3' parameter contains the address of the instruction in normal
+ * world where this FIQ was generated.
+ ******************************************************************************/
+void tsp_update_sync_fiq_stats(uint32_t type, uint64_t elr_el3)
+{
+	uint64_t mpidr = read_mpidr();
+	uint32_t linear_id = platform_get_core_pos(mpidr);
+
+	tsp_stats[linear_id].sync_fiq_count++;
+	if (type == TSP_HANDLE_FIQ_AND_RETURN)
+		tsp_stats[linear_id].sync_fiq_ret_count++;
+
+	spin_lock(&console_lock);
+	printf("TSP: cpu 0x%x sync fiq request from 0x%llx \n\r",
+	       mpidr, elr_el3);
+	INFO("cpu 0x%x: %d sync fiq requests, %d sync fiq returns\n",
+	     mpidr,
+	     tsp_stats[linear_id].sync_fiq_count,
+	     tsp_stats[linear_id].sync_fiq_ret_count);
+	spin_unlock(&console_lock);
+}
+
+/*******************************************************************************
+ * TSP FIQ handler called as a part of both synchronous and asynchronous
+ * handling of FIQ interrupts. It returns 0 upon successfully handling an S-EL1
+ * FIQ and treats all other FIQs as EL3 interrupts. It assumes that the GIC
+ * architecture version is v2.0 and the secure physical timer interrupt is the
+ * only S-EL1 interrupt that it needs to handle.
+ ******************************************************************************/
+int32_t tsp_fiq_handler()
+{
+	uint64_t mpidr = read_mpidr();
+	uint32_t linear_id = platform_get_core_pos(mpidr), id;
+
+	/*
+	 * Get the highest priority pending interrupt id and see if it is the
+	 * secure physical generic timer interrupt in which case, handle it.
+	 * Otherwise throw this interrupt at the EL3 firmware.
+	 */
+	id = ic_get_pending_interrupt_id();
+
+	/* TSP can only handle the secure physical timer interrupt */
+	if (id != IRQ_SEC_PHY_TIMER)
+		return TSP_EL3_FIQ;
+
+	/*
+	 * Handle the interrupt. Also sanity check if it has been preempted by
+	 * another secure interrupt through an assertion.
+	 */
+	id = ic_acknowledge_interrupt();
+	assert(id == IRQ_SEC_PHY_TIMER);
+	tsp_generic_timer_handler();
+	ic_end_of_interrupt(id);
+
+	/* Update the statistics and print some messages */
+	tsp_stats[linear_id].fiq_count++;
+	spin_lock(&console_lock);
+	printf("TSP: cpu 0x%x handled fiq %d \n\r",
+	       mpidr, id);
+	INFO("cpu 0x%x: %d fiq requests \n",
+	     mpidr, tsp_stats[linear_id].fiq_count);
+	spin_unlock(&console_lock);
+
+	return 0;
+}
+
+int32_t tsp_irq_received()
+{
+	uint64_t mpidr = read_mpidr();
+	uint32_t linear_id = platform_get_core_pos(mpidr);
+
+	tsp_stats[linear_id].irq_count++;
+	spin_lock(&console_lock);
+	printf("TSP: cpu 0x%x received irq\n\r", mpidr);
+	INFO("cpu 0x%x: %d irq requests \n",
+	     mpidr, tsp_stats[linear_id].irq_count);
+	spin_unlock(&console_lock);
+
+	return TSP_PREEMPTED;
+}
diff --git a/bl32/tsp/tsp_main.c b/bl32/tsp/tsp_main.c
index a7c7386..1c3f3b9 100644
--- a/bl32/tsp/tsp_main.c
+++ b/bl32/tsp/tsp_main.c
@@ -58,7 +58,7 @@
 /*******************************************************************************
  * Per cpu data structure to keep track of TSP activity
  ******************************************************************************/
-static work_statistics_t tsp_stats[PLATFORM_CORE_COUNT];
+work_statistics_t tsp_stats[PLATFORM_CORE_COUNT];
 
 /*******************************************************************************
  * Single reference to the various entry points exported by the test secure
@@ -66,11 +66,13 @@
  * to change.
  ******************************************************************************/
 static const entry_info_t tsp_entry_info = {
+	tsp_std_smc_entry,
 	tsp_fast_smc_entry,
 	tsp_cpu_on_entry,
 	tsp_cpu_off_entry,
 	tsp_cpu_resume_entry,
 	tsp_cpu_suspend_entry,
+	tsp_fiq_entry,
 };
 
 
@@ -127,6 +129,7 @@
 	bl32_platform_setup();
 
 	/* Initialize secure/applications state here */
+	tsp_generic_timer_start();
 
 	/* Update this cpu's statistics */
 	tsp_stats[linear_id].smc_count++;
@@ -162,6 +165,9 @@
 	uint64_t mpidr = read_mpidr();
 	uint32_t linear_id = platform_get_core_pos(mpidr);
 
+	/* Initialize secure/applications state here */
+	tsp_generic_timer_start();
+
 	/* Update this cpu's statistics */
 	tsp_stats[linear_id].smc_count++;
 	tsp_stats[linear_id].eret_count++;
@@ -195,6 +201,13 @@
 	uint64_t mpidr = read_mpidr();
 	uint32_t linear_id = platform_get_core_pos(mpidr);
 
+	/*
+	 * This cpu is being turned off, so disable the timer to prevent the
+	 * secure timer interrupt from interfering with power down. A pending
+	 * interrupt will be lost but we do not care as we are turning off.
+	 */
+	tsp_generic_timer_stop();
+
 	/* Update this cpu's statistics */
 	tsp_stats[linear_id].smc_count++;
 	tsp_stats[linear_id].eret_count++;
@@ -230,6 +243,13 @@
 	uint64_t mpidr = read_mpidr();
 	uint32_t linear_id = platform_get_core_pos(mpidr);
 
+	/*
+	 * Save the timer context and disable it to prevent the secure timer
+	 * interrupt from interfering with wakeup from the suspend state.
+	 */
+	tsp_generic_timer_save();
+	tsp_generic_timer_stop();
+
 	/* Update this cpu's statistics */
 	tsp_stats[linear_id].smc_count++;
 	tsp_stats[linear_id].eret_count++;
@@ -265,6 +285,9 @@
 	uint64_t mpidr = read_mpidr();
 	uint32_t linear_id = platform_get_core_pos(mpidr);
 
+	/* Restore the generic timer context */
+	tsp_generic_timer_restore();
+
 	/* Update this cpu's statistics */
 	tsp_stats[linear_id].smc_count++;
 	tsp_stats[linear_id].eret_count++;
@@ -287,9 +310,9 @@
  * TSP fast smc handler. The secure monitor jumps to this function by
  * doing the ERET after populating X0-X7 registers.
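The renaming below reflects that one handler now serves both fast and standard SMCs, distinguished by bit 31 of the function id; the arithmetic follows from the FID macros this patch adds to `tsp.h`:

```c
/*
 * TSP_FAST_FID(TSP_ADD) = 0x2000 | 0x72000000 | (1u << 31) = 0xf2002000
 *                         (the same value as the old TSP_FID_ADD)
 * TSP_STD_FID(TSP_ADD)  = 0x2000 | 0x72000000 | (0u << 31) = 0x72002000
 *
 * ((func >> 31) & 1) therefore identifies a fast SMC, and
 * TSP_BARE_FID(func) recovers the service id (here TSP_ADD = 0x2000).
 */
```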
The arguments are received * in the function arguments in order. Once the service is rendered, this - * function returns to Secure Monitor by raising SMC + * function returns to Secure Monitor by raising SMC. ******************************************************************************/ -tsp_args_t *tsp_fast_smc_handler(uint64_t func, +tsp_args_t *tsp_smc_handler(uint64_t func, uint64_t arg1, uint64_t arg2, uint64_t arg3, @@ -302,18 +325,20 @@ uint64_t service_args[2]; uint64_t mpidr = read_mpidr(); uint32_t linear_id = platform_get_core_pos(mpidr); + const char *smc_type; /* Update this cpu's statistics */ tsp_stats[linear_id].smc_count++; tsp_stats[linear_id].eret_count++; - printf("SP: cpu 0x%x received fast smc 0x%x\n", read_mpidr(), func); + smc_type = ((func >> 31) & 1) == 1 ? "fast" : "standard"; + + printf("SP: cpu 0x%x received %s smc 0x%x\n", read_mpidr(), smc_type, func); INFO("cpu 0x%x: %d smcs, %d erets\n", mpidr, tsp_stats[linear_id].smc_count, tsp_stats[linear_id].eret_count); /* Render secure services and obtain results here */ - results[0] = arg1; results[1] = arg2; @@ -324,20 +349,20 @@ tsp_get_magic(service_args); /* Determine the function to perform based on the function ID */ - switch (func) { - case TSP_FID_ADD: + switch (TSP_BARE_FID(func)) { + case TSP_ADD: results[0] += service_args[0]; results[1] += service_args[1]; break; - case TSP_FID_SUB: + case TSP_SUB: results[0] -= service_args[0]; results[1] -= service_args[1]; break; - case TSP_FID_MUL: + case TSP_MUL: results[0] *= service_args[0]; results[1] *= service_args[1]; break; - case TSP_FID_DIV: + case TSP_DIV: results[0] /= service_args[0] ? service_args[0] : 1; results[1] /= service_args[1] ? service_args[1] : 1; break; @@ -345,9 +370,9 @@ break; } - return set_smc_args(func, + return set_smc_args(func, 0, results[0], results[1], - 0, 0, 0, 0, 0); + 0, 0, 0, 0); } diff --git a/bl32/tsp/tsp_timer.c b/bl32/tsp/tsp_timer.c new file mode 100644 index 0000000..f66ff9f --- /dev/null +++ b/bl32/tsp/tsp_timer.c @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include
+#include
+#include
+
+/*******************************************************************************
+ * Data structure to keep track of per-cpu secure generic timer context across
+ * power management operations.
+ ******************************************************************************/
+typedef struct timer_context {
+	uint64_t cval;
+	uint32_t ctl;
+} timer_context_t;
+
+static timer_context_t pcpu_timer_context[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * This function initializes the generic timer to fire every 0.5 second
+ ******************************************************************************/
+void tsp_generic_timer_start()
+{
+	uint64_t cval;
+	uint32_t ctl = 0;
+
+	/* The timer will fire every 0.5 second */
+	cval = read_cntpct_el0() + (read_cntfrq_el0() >> 1);
+	write_cntps_cval_el1(cval);
+
+	/* Enable the secure physical timer */
+	set_cntp_ctl_enable(ctl);
+	write_cntps_ctl_el1(ctl);
+}
+
+/*******************************************************************************
+ * This function deasserts the timer interrupt and sets it up again
+ ******************************************************************************/
+void tsp_generic_timer_handler()
+{
+	/* Ensure that the timer did assert the interrupt */
+	assert(get_cntp_ctl_istatus(read_cntps_ctl_el1()));
+
+	/* Disable the timer and reprogram it */
+	write_cntps_ctl_el1(0);
+	tsp_generic_timer_start();
+}
+
+/*******************************************************************************
+ * This function deasserts the timer interrupt prior to cpu power down
+ ******************************************************************************/
+void tsp_generic_timer_stop()
+{
+	/* Disable the timer */
+	write_cntps_ctl_el1(0);
+}
+
+/*******************************************************************************
+ * This function saves the timer context prior to cpu suspension
+ ******************************************************************************/
+void tsp_generic_timer_save()
+{
+	uint32_t linear_id = platform_get_core_pos(read_mpidr());
+
+	pcpu_timer_context[linear_id].cval = read_cntps_cval_el1();
+	pcpu_timer_context[linear_id].ctl = read_cntps_ctl_el1();
+	flush_dcache_range((uint64_t) &pcpu_timer_context[linear_id],
+			   sizeof(pcpu_timer_context[linear_id]));
+}
+
+/*******************************************************************************
+ * This function restores the timer context post cpu resumption
+ ******************************************************************************/
+void tsp_generic_timer_restore()
+{
+	uint32_t linear_id = platform_get_core_pos(read_mpidr());
+
+	write_cntps_cval_el1(pcpu_timer_context[linear_id].cval);
+	write_cntps_ctl_el1(pcpu_timer_context[linear_id].ctl);
+}
diff --git a/docs/firmware-design.md b/docs/firmware-design.md
index a40ddac..76c27f7 100644
--- a/docs/firmware-design.md
+++ b/docs/firmware-design.md
@@ -219,6 +219,15 @@
 abstraction layer is initialized which is used to load further bootloader
 images.
 
+#### BL3-0 (System Control Processor Firmware) image load
+
+Some systems have a separate System Control Processor (SCP) for power, clock,
+reset and system control. BL2 loads the optional BL3-0 image from platform
+storage into a platform-specific region of secure memory. The subsequent
+handling of BL3-0 is platform specific. Typically the image is transferred into
+SCP memory using a platform-specific protocol. The SCP executes BL3-0 and
+signals to the Application Processor (AP) for BL2 execution to continue.
+
 #### BL3-1 (EL3 Runtime Firmware) image load
 
 BL2 loads the BL3-1 image from platform storage into a platform-specific address
diff --git a/docs/user-guide.md b/docs/user-guide.md
index 47ddcfa..04df875 100644
--- a/docs/user-guide.md
+++ b/docs/user-guide.md
@@ -158,6 +158,15 @@
 *   `V`: Verbose build. If assigned anything other than 0, the build commands
     are printed. Default is 0
 
+*   `FVP_GIC_ARCH`: Choice of ARM GIC architecture version used by the FVP port
+    for implementing the platform GIC API. This API is used by the interrupt
+    management framework. Default is 2 i.e. version 2.0
+
+*   `IMF_READ_INTERRUPT_ID`: Boolean flag used by the interrupt management
+    framework to enable passing of the interrupt id to its handler. The id is
+    read using a platform GIC API. `INTR_ID_UNAVAILABLE` is passed instead if
+    this option is set to 0. Default is 0.
+
 ### Creating a Firmware Image Package
 
 FIPs are automatically created as part of the build instructions described in
diff --git a/drivers/arm/gic/gic_v2.c b/drivers/arm/gic/gic_v2.c
index 00464cb..27a39b9 100644
--- a/drivers/arm/gic/gic_v2.c
+++ b/drivers/arm/gic/gic_v2.c
@@ -28,8 +28,10 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include
 #include
 #include
+#include
 #include
 
 /*******************************************************************************
@@ -290,3 +292,27 @@
 			(1 << iface) << (byte_off << 3));
 }
 
+/*******************************************************************************
+ * This function allows the interrupt management framework to determine (through
+ * the platform) which interrupt line (IRQ/FIQ) to use for an interrupt type to
+ * route it to EL3. The interrupt line is represented as the bit position of the
+ * IRQ or FIQ bit in the SCR_EL3.
+ ******************************************************************************/
+uint32_t gicv2_interrupt_type_to_line(uint32_t cpuif_base, uint32_t type)
+{
+	uint32_t gicc_ctlr;
+
+	/* Non-secure interrupts are signalled on the IRQ line always */
+	if (type == INTR_TYPE_NS)
+		return __builtin_ctz(SCR_IRQ_BIT);
+
+	/*
+	 * Secure interrupts are signalled using the IRQ line if the FIQ_EN
+	 * bit is not set else they are signalled using the FIQ line.
+	 */
+	gicc_ctlr = gicc_read_ctlr(cpuif_base);
+	if (gicc_ctlr & FIQ_EN)
+		return __builtin_ctz(SCR_FIQ_BIT);
+	else
+		return __builtin_ctz(SCR_IRQ_BIT);
+}
diff --git a/include/bl31/cm_macros.S b/include/bl31/cm_macros.S
deleted file mode 100644
index f12f8c3..0000000
--- a/include/bl31/cm_macros.S
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ -#include -#include - - /* ----------------------------------------------------- - * Handle SMC exceptions seperately from other sync. - * exceptions. - * ----------------------------------------------------- - */ - .macro handle_sync_exception - str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR] - mrs x30, esr_el3 - ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH - - cmp x30, #EC_AARCH32_SMC - b.eq smc_handler32 - - cmp x30, #EC_AARCH64_SMC - b.eq smc_handler64 - - /* ----------------------------------------------------- - * The following code handles any synchronous exception - * that is not an SMC. - * ----------------------------------------------------- - */ - - bl dump_state_and_die - .endm - diff --git a/include/bl31/context_mgmt.h b/include/bl31/context_mgmt.h index d2598ee..ad9d785 100644 --- a/include/bl31/context_mgmt.h +++ b/include/bl31/context_mgmt.h @@ -47,10 +47,13 @@ extern void cm_el1_sysregs_context_restore(uint32_t security_state); extern void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint, uint32_t spsr, uint32_t scr); -extern void cm_set_el3_elr(uint32_t security_state, uint64_t entrypoint); +extern void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint); +extern void cm_write_scr_el3_bit(uint32_t security_state, + uint32_t bit_pos, + uint32_t value); extern void cm_set_next_eret_context(uint32_t security_state); extern void cm_init_pcpu_ptr_cache(); extern void cm_set_pcpu_ptr_cache(const void *pcpu_ptr); extern void *cm_get_pcpu_ptr_cache(void); - +extern uint32_t cm_get_scr_el3(uint32_t security_state); #endif /* __CM_H__ */ diff --git a/include/bl31/interrupt_mgmt.h b/include/bl31/interrupt_mgmt.h new file mode 100644 index 0000000..0b24f39 --- /dev/null +++ b/include/bl31/interrupt_mgmt.h @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. 
+ * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __INTERRUPT_MGMT_H__ +#define __INTERRUPT_MGMT_H__ + +#include + +/******************************************************************************* + * Constants for the types of interrupts recognised by the IM framework + ******************************************************************************/ +#define INTR_TYPE_S_EL1 0 +#define INTR_TYPE_EL3 1 +#define INTR_TYPE_NS 2 +#define MAX_INTR_TYPES 3 +#define INTR_TYPE_INVAL MAX_INTR_TYPES +/* + * Constant passed to the interrupt handler in the 'id' field when the + * framework does not read the gic registers to determine the interrupt id. + */ +#define INTR_ID_UNAVAILABLE 0xFFFFFFFF + + +/******************************************************************************* + * Mask for _both_ the routing model bits in the 'flags' parameter and + * constants to define the valid routing models for each supported interrupt + * type + ******************************************************************************/ +#define INTR_RM_FLAGS_SHIFT 0x0 +#define INTR_RM_FLAGS_MASK 0x3 +/* Routed to EL3 from NS. Taken to S-EL1 from Secure */ +#define INTR_SEL1_VALID_RM0 0x2 +/* Routed to EL3 from NS and Secure */ +#define INTR_SEL1_VALID_RM1 0x3 +/* Routed to EL1/EL2 from NS and to S-EL1 from Secure */ +#define INTR_NS_VALID_RM0 0x0 +/* Routed to EL1/EL2 from NS and to EL3 from Secure */ +#define INTR_NS_VALID_RM1 0x1 + + +/******************************************************************************* + * Constants for the _individual_ routing model bits in the 'flags' field for + * each interrupt type and mask to validate the 'flags' parameter while + * registering an interrupt handler + ******************************************************************************/ +#define INTR_TYPE_FLAGS_MASK 0xFFFFFFFC + +#define INTR_RM_FROM_SEC_SHIFT SECURE /* BIT[0] */ +#define INTR_RM_FROM_NS_SHIFT NON_SECURE /* BIT[1] */ +#define INTR_RM_FROM_FLAG_MASK 1 +#define get_interrupt_rm_flag(flag, ss) (((flag >> INTR_RM_FLAGS_SHIFT) >> ss) \ + & INTR_RM_FROM_FLAG_MASK) +#define set_interrupt_rm_flag(flag, ss) (flag |= 1 << ss) +#define clr_interrupt_rm_flag(flag, ss) (flag &= ~(1 << ss)) + + +/******************************************************************************* + * Macros to validate the routing model bits in the 'flags' for a type + * of interrupt. 
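Spelling the four valid encodings out (bit[0] = behaviour while secure, bit[1] = behaviour while non-secure):

```c
/*
 * INTR_SEL1_VALID_RM0 (0x2): secure -> S-EL1,  non-secure -> EL3
 * INTR_SEL1_VALID_RM1 (0x3): secure -> EL3,    non-secure -> EL3
 * INTR_NS_VALID_RM0   (0x0): secure -> S-EL1,  non-secure -> EL1/EL2
 * INTR_NS_VALID_RM1   (0x1): secure -> EL3,    non-secure -> EL1/EL2
 */
```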
If the model does not match one of the valid masks + * -EINVAL is returned. + ******************************************************************************/ +#define validate_sel1_interrupt_rm(x) (x == INTR_SEL1_VALID_RM0 ? 0 : \ + (x == INTR_SEL1_VALID_RM1 ? 0 :\ + -EINVAL)) + +#define validate_ns_interrupt_rm(x) (x == INTR_NS_VALID_RM0 ? 0 : \ + (x == INTR_NS_VALID_RM1 ? 0 :\ + -EINVAL)) + +/******************************************************************************* + * Macros to set the 'flags' parameter passed to an interrupt type handler. Only + * the flag to indicate the security state when the exception was generated is + * supported. + ******************************************************************************/ +#define INTR_SRC_SS_FLAG_SHIFT 0 /* BIT[0] */ +#define INTR_SRC_SS_FLAG_MASK 1 +#define set_interrupt_src_ss(flag, val) (flag |= val << INTR_SRC_SS_FLAG_SHIFT) +#define clr_interrupt_src_ss(flag) (flag &= ~(1 << INTR_SRC_SS_FLAG_SHIFT)) +#define get_interrupt_src_ss(flag) ((flag >> INTR_SRC_SS_FLAG_SHIFT) & \ + INTR_SRC_SS_FLAG_MASK) + +#ifndef __ASSEMBLY__ + +/* Prototype for defining a handler for an interrupt type */ +typedef uint64_t (*interrupt_type_handler_t)(uint32_t id, + uint32_t flags, + void *handle, + void *cookie); + +/******************************************************************************* + * Function & variable prototypes + ******************************************************************************/ +extern uint32_t get_scr_el3_from_routing_model(uint32_t security_state); +extern int32_t set_routing_model(uint32_t type, uint32_t flags); +extern int32_t register_interrupt_type_handler(uint32_t type, + interrupt_type_handler_t handler, + uint32_t flags); +extern interrupt_type_handler_t get_interrupt_type_handler(uint32_t interrupt_type); + +#endif /*__ASSEMBLY__*/ +#endif /* __INTERRUPT_MGMT_H__ */ diff --git a/include/bl31/runtime_svc.h b/include/bl31/runtime_svc.h index 6d70896..66562e1 100644 --- a/include/bl31/runtime_svc.h +++ b/include/bl31/runtime_svc.h @@ -51,13 +51,15 @@ #define GET_SMC_CC(id) ((id >> FUNCID_CC_SHIFT) & \ FUNCID_CC_MASK) +#define GET_SMC_TYPE(id) ((id >> FUNCID_TYPE_SHIFT) & \ + FUNCID_TYPE_MASK) #define SMC_64 1 #define SMC_32 0 #define SMC_UNK 0xffffffff #define SMC_TYPE_FAST 1 #define SMC_TYPE_STD 0 - +#define SMC_PREEMPTED 0xfffffffe /******************************************************************************* * Owning entity number definitions inside the function id as per the SMC * calling convention @@ -135,9 +137,12 @@ typedef int32_t (*rt_svc_init_t)(void); /* Convenience macros to return from SMC handler */ +#define SMC_RET0(_h) { \ + return (uint64_t) (_h); \ +} #define SMC_RET1(_h, _x0) { \ write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X0, (_x0)); \ - return _x0; \ + SMC_RET0(_h); \ } #define SMC_RET2(_h, _x0, _x1) { \ write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X1, (_x1)); \ diff --git a/include/bl31/services/psci.h b/include/bl31/services/psci.h index 570fe5b..b6e272c 100644 --- a/include/bl31/services/psci.h +++ b/include/bl31/services/psci.h @@ -190,6 +190,7 @@ extern int psci_cpu_on(unsigned long, unsigned long, unsigned long); +extern void __dead2 psci_power_down_wfi(void); extern void psci_aff_on_finish_entry(void); extern void psci_aff_suspend_finish_entry(void); extern void psci_register_spd_pm_hook(const spd_pm_ops_t *); diff --git a/include/bl32/payloads/tsp.h b/include/bl32/payloads/tsp.h index 1f542d5..2e32c77 100644 --- a/include/bl32/payloads/tsp.h +++ b/include/bl32/payloads/tsp.h @@ 
-40,16 +40,44 @@ #define TSP_OFF_DONE 0xf2000002 #define TSP_SUSPEND_DONE 0xf2000003 #define TSP_RESUME_DONE 0xf2000004 -#define TSP_WORK_DONE 0xf2000005 +#define TSP_PREEMPTED 0xf2000005 -/* SMC function ID that TSP uses to request service from secure montior */ +/* + * Function identifiers to handle FIQs through the synchronous handling model. + * If the TSP was previously interrupted then control has to be returned to + * the TSPD after handling the interrupt else execution can remain in the TSP. + */ +#define TSP_HANDLED_S_EL1_FIQ 0xf2000006 +#define TSP_EL3_FIQ 0xf2000007 + +/* SMC function ID that TSP uses to request service from secure monitor */ #define TSP_GET_ARGS 0xf2001000 -/* Function IDs for various TSP services */ -#define TSP_FID_ADD 0xf2002000 -#define TSP_FID_SUB 0xf2002001 -#define TSP_FID_MUL 0xf2002002 -#define TSP_FID_DIV 0xf2002003 +/* + * Identifiers for various TSP services. Corresponding function IDs (whether + * fast or standard) are generated by macros defined below + */ +#define TSP_ADD 0x2000 +#define TSP_SUB 0x2001 +#define TSP_MUL 0x2002 +#define TSP_DIV 0x2003 +#define TSP_HANDLE_FIQ_AND_RETURN 0x2004 + +/* + * Generate function IDs for TSP services to be used in SMC calls, by + * appropriately setting bit 31 to differentiate standard and fast SMC calls + */ +#define TSP_STD_FID(fid) ((fid) | 0x72000000 | (0 << 31)) +#define TSP_FAST_FID(fid) ((fid) | 0x72000000 | (1 << 31)) + +/* SMC function ID to request a previously preempted std smc */ +#define TSP_FID_RESUME TSP_STD_FID(0x3000) + +/* + * Identify a TSP service from function ID filtering the last 16 bits from the + * SMC function ID + */ +#define TSP_BARE_FID(fid) ((fid) & 0xffff) /* * Total number of function IDs implemented for services offered to NS clients. @@ -86,26 +114,33 @@ #include #include /* For CACHE_WRITEBACK_GRANULE */ +#include #include typedef void (*tsp_generic_fptr_t)(uint64_t arg0, - uint64_t arg1, - uint64_t arg2, - uint64_t arg3, - uint64_t arg4, - uint64_t arg5, - uint64_t arg6, - uint64_t arg7); + uint64_t arg1, + uint64_t arg2, + uint64_t arg3, + uint64_t arg4, + uint64_t arg5, + uint64_t arg6, + uint64_t arg7); typedef struct entry_info { + tsp_generic_fptr_t std_smc_entry; tsp_generic_fptr_t fast_smc_entry; tsp_generic_fptr_t cpu_on_entry; tsp_generic_fptr_t cpu_off_entry; tsp_generic_fptr_t cpu_resume_entry; tsp_generic_fptr_t cpu_suspend_entry; + tsp_generic_fptr_t fiq_entry; } entry_info_t; typedef struct work_statistics { + uint32_t fiq_count; /* Number of FIQs on this cpu */ + uint32_t irq_count; /* Number of IRQs on this cpu */ + uint32_t sync_fiq_count; /* Number of sync. 
fiqs on this cpu */ + uint32_t sync_fiq_ret_count; /* Number of fiq returns on this cpu */ uint32_t smc_count; /* Number of returns on this cpu */ uint32_t eret_count; /* Number of entries on this cpu */ uint32_t cpu_on_count; /* Number of cpu on requests */ @@ -120,7 +155,7 @@ /* Macros to access members of the above structure using their offsets */ #define read_sp_arg(args, offset) ((args)->_regs[offset >> 3]) -#define write_sp_arg(args, offset, val)(((args)->_regs[offset >> 3]) \ +#define write_sp_arg(args, offset, val) (((args)->_regs[offset >> 3]) \ = val) /* @@ -131,6 +166,22 @@ extern void tsp_get_magic(uint64_t args[4]); +extern void tsp_fiq_entry(uint64_t arg0, + uint64_t arg1, + uint64_t arg2, + uint64_t arg3, + uint64_t arg4, + uint64_t arg5, + uint64_t arg6, + uint64_t arg7); +extern void tsp_std_smc_entry(uint64_t arg0, + uint64_t arg1, + uint64_t arg2, + uint64_t arg3, + uint64_t arg4, + uint64_t arg5, + uint64_t arg6, + uint64_t arg7); extern void tsp_fast_smc_entry(uint64_t arg0, uint64_t arg1, uint64_t arg2, @@ -196,6 +247,20 @@ uint64_t arg5, uint64_t arg6, uint64_t arg7); + +/* Generic Timer functions */ +extern void tsp_generic_timer_start(void); +extern void tsp_generic_timer_handler(void); +extern void tsp_generic_timer_stop(void); +extern void tsp_generic_timer_save(void); +extern void tsp_generic_timer_restore(void); + +/* FIQ management functions */ +extern void tsp_update_sync_fiq_stats(uint32_t type, uint64_t elr_el3); + +/* Data structure to keep track of TSP statistics */ +extern spinlock_t console_lock; +extern work_statistics_t tsp_stats[PLATFORM_CORE_COUNT]; #endif /* __ASSEMBLY__ */ #endif /* __BL2_H__ */ diff --git a/include/drivers/arm/gic_v2.h b/include/drivers/arm/gic_v2.h index ccf3d32..e819676 100644 --- a/include/drivers/arm/gic_v2.h +++ b/include/drivers/arm/gic_v2.h @@ -43,6 +43,7 @@ #define GIC_LOWEST_SEC_PRIORITY 127 #define GIC_HIGHEST_NS_PRIORITY 128 #define GIC_LOWEST_NS_PRIORITY 254 /* 255 would disable an interrupt */ +#define GIC_SPURIOUS_INTERRUPT 1023 #define ENABLE_GRP0 (1 << 0) #define ENABLE_GRP1 (1 << 1) @@ -88,6 +89,7 @@ #define GICC_EOIR 0x10 #define GICC_RPR 0x14 #define GICC_HPPIR 0x18 +#define GICC_AHPPIR 0x28 #define GICC_IIDR 0xFC #define GICC_DIR 0x1000 #define GICC_PRIODROP GICC_EOIR @@ -247,6 +249,11 @@ return mmio_read_32(base + GICC_HPPIR); } +static inline unsigned int gicc_read_ahppir(unsigned int base) +{ + return mmio_read_32(base + GICC_AHPPIR); +} + static inline unsigned int gicc_read_dir(unsigned int base) { return mmio_read_32(base + GICC_DIR); @@ -298,6 +305,12 @@ mmio_write_32(base + GICC_DIR, val); } +/******************************************************************************* + * Prototype of function to map an interrupt type to the interrupt line used to + * signal it. 
+ ******************************************************************************/ +uint32_t gicv2_interrupt_type_to_line(uint32_t cpuif_base, uint32_t type); + #endif /*__ASSEMBLY__*/ #endif /* __GIC_V2_H__ */ diff --git a/include/lib/aarch64/arch.h b/include/lib/aarch64/arch.h index 9646b20..b44b0ca 100644 --- a/include/lib/aarch64/arch.h +++ b/include/lib/aarch64/arch.h @@ -148,6 +148,7 @@ #define SCR_FIQ_BIT (1 << 2) #define SCR_IRQ_BIT (1 << 1) #define SCR_NS_BIT (1 << 0) +#define SCR_VALID_BIT_MASK 0x2f8f /* HCR definitions */ #define HCR_RW_BIT (1ull << 31) @@ -264,6 +265,28 @@ ((aif) & SPSR_AIF_MASK) << SPSR_AIF_SHIFT) +/* Physical timer control register bit fields shifts and masks */ +#define CNTP_CTL_ENABLE_SHIFT 0 +#define CNTP_CTL_IMASK_SHIFT 1 +#define CNTP_CTL_ISTATUS_SHIFT 2 + +#define CNTP_CTL_ENABLE_MASK 1 +#define CNTP_CTL_IMASK_MASK 1 +#define CNTP_CTL_ISTATUS_MASK 1 + +#define get_cntp_ctl_enable(x) ((x >> CNTP_CTL_ENABLE_SHIFT) & \ + CNTP_CTL_ENABLE_MASK) +#define get_cntp_ctl_imask(x) ((x >> CNTP_CTL_IMASK_SHIFT) & \ + CNTP_CTL_IMASK_MASK) +#define get_cntp_ctl_istatus(x) ((x >> CNTP_CTL_ISTATUS_SHIFT) & \ + CNTP_CTL_ISTATUS_MASK) + +#define set_cntp_ctl_enable(x) (x |= 1 << CNTP_CTL_ENABLE_SHIFT) +#define set_cntp_ctl_imask(x) (x |= 1 << CNTP_CTL_IMASK_SHIFT) + +#define clr_cntp_ctl_enable(x) (x &= ~(1 << CNTP_CTL_ENABLE_SHIFT)) +#define clr_cntp_ctl_imask(x) (x &= ~(1 << CNTP_CTL_IMASK_SHIFT)) + /* Miscellaneous MMU related constants */ #define NUM_2MB_IN_GB (1 << 9) #define NUM_4K_IN_2MB (1 << 9) diff --git a/include/lib/aarch64/arch_helpers.h b/include/lib/aarch64/arch_helpers.h index 0a398d0..f30301d 100644 --- a/include/lib/aarch64/arch_helpers.h +++ b/include/lib/aarch64/arch_helpers.h @@ -202,6 +202,10 @@ extern unsigned long read_cpacr(void); extern unsigned long read_cpuectlr(void); extern unsigned int read_cntfrq_el0(void); +extern unsigned int read_cntps_ctl_el1(void); +extern unsigned int read_cntps_tval_el1(void); +extern unsigned long read_cntps_cval_el1(void); +extern unsigned long read_cntpct_el0(void); extern unsigned long read_cnthctl_el2(void); extern unsigned long read_tpidr_el3(void); @@ -210,6 +214,9 @@ extern void write_hcr(unsigned long); extern void write_cpacr(unsigned long); extern void write_cntfrq_el0(unsigned int); +extern void write_cntps_ctl_el1(unsigned int); +extern void write_cntps_tval_el1(unsigned int); +extern void write_cntps_cval_el1(unsigned long); extern void write_cnthctl_el2(unsigned long); extern void write_vbar_el1(unsigned long); diff --git a/lib/aarch64/sysreg_helpers.S b/lib/aarch64/sysreg_helpers.S index c86fdba..925e93e 100644 --- a/lib/aarch64/sysreg_helpers.S +++ b/lib/aarch64/sysreg_helpers.S @@ -142,6 +142,15 @@ .globl read_cntfrq_el0 .globl write_cntfrq_el0 + .globl read_cntps_ctl_el1 + .globl write_cntps_ctl_el1 + + .globl read_cntps_cval_el1 + .globl write_cntps_cval_el1 + + .globl read_cntps_tval_el1 + .globl write_cntps_tval_el1 + .globl read_scr .globl write_scr @@ -151,6 +160,7 @@ .globl read_midr .globl read_mpidr + .globl read_cntpct_el0 .globl read_current_el .globl read_id_pfr1_el1 .globl read_id_aa64pfr0_el1 @@ -672,6 +682,33 @@ msr cntfrq_el0, x0 ret +func read_cntps_ctl_el1 + mrs x0, cntps_ctl_el1 + ret + +func write_cntps_ctl_el1 + msr cntps_ctl_el1, x0 + ret + +func read_cntps_cval_el1 + mrs x0, cntps_cval_el1 + ret + +func write_cntps_cval_el1 + msr cntps_cval_el1, x0 + ret + +func read_cntps_tval_el1 + mrs x0, cntps_tval_el1 + ret + +func write_cntps_tval_el1 + msr cntps_tval_el1, x0 + ret + 
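(Aside: a minimal sketch of how the new secure physical timer accessors are meant to be combined with the CNTP_CTL_* field macros from the arch.h hunk above. The function name and the ~10ms period are illustrative assumptions, not part of this patch; the field macros are repeated so the sketch is self-contained.)

/* Illustrative only: arm the secure physical timer (CNTPS) for a one-shot interrupt */
#define CNTP_CTL_ENABLE_SHIFT	0
#define CNTP_CTL_IMASK_SHIFT	1
#define set_cntp_ctl_enable(x)	(x |= 1 << CNTP_CTL_ENABLE_SHIFT)
#define clr_cntp_ctl_imask(x)	(x &= ~(1 << CNTP_CTL_IMASK_SHIFT))

extern unsigned int read_cntfrq_el0(void);
extern void write_cntps_ctl_el1(unsigned int);
extern void write_cntps_tval_el1(unsigned int);

/* Hypothetical helper: fire a secure timer interrupt in ~10ms */
static void example_arm_secure_timer(void)
{
	unsigned int ctl = 0;

	/* TVAL counts down at the system counter frequency */
	write_cntps_tval_el1(read_cntfrq_el0() / 100);

	/* Enable the timer and leave its interrupt unmasked */
	set_cntp_ctl_enable(ctl);
	clr_cntp_ctl_imask(ctl);
	write_cntps_ctl_el1(ctl);
}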
+func read_cntpct_el0 + mrs x0, cntpct_el0 + ret func read_cpuectlr mrs x0, CPUECTLR_EL1 diff --git a/plat/fvp/bl32_plat_setup.c b/plat/fvp/bl32_plat_setup.c index 8406d31..772e972 100644 --- a/plat/fvp/bl32_plat_setup.c +++ b/plat/fvp/bl32_plat_setup.c @@ -73,6 +73,9 @@ * messages from TSP */ console_init(PL011_UART1_BASE); + + /* Initialize the platform config for future decision making */ + platform_config_setup(); } /******************************************************************************* diff --git a/plat/fvp/plat_gic.c b/plat/fvp/plat_gic.c index db3c9cf..7dec404 100644 --- a/plat/fvp/plat_gic.c +++ b/plat/fvp/plat_gic.c @@ -29,18 +29,15 @@ */ #include +#include +#include #include #include #include +#include #include #include - -/******************************************************************************* - * TODO: Revisit if priorities are being set such that no non-secure interrupt - * can have a higher priority than a secure one as recommended in the GICv2 spec - ******************************************************************************/ - /******************************************************************************* * This function does some minimal GICv3 configuration. The Firmware itself does * not fully support GICv3 at this time and relies on GICv2 emulation as @@ -284,3 +281,126 @@ gic_cpuif_setup(gicc_base); gic_distif_setup(gicd_base); } + +/******************************************************************************* + * An ARM processor signals interrupt exceptions through the IRQ and FIQ pins. + * The interrupt controller knows which pin/line it uses to signal a type of + * interrupt. The platform knows which interrupt controller type is being used + * in a particular security state e.g. with an ARM GIC, normal world could use + * the GICv2 features while the secure world could use GICv3 features and vice + * versa. + * This function is exported by the platform to let the interrupt management + * framework determine for a type of interrupt and security state, which line + * should be used in the SCR_EL3 to control its routing to EL3. The interrupt + * line is represented as the bit position of the IRQ or FIQ bit in the SCR_EL3. + ******************************************************************************/ +uint32_t plat_interrupt_type_to_line(uint32_t type, uint32_t security_state) +{ + uint32_t gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR); + + assert(type == INTR_TYPE_S_EL1 || + type == INTR_TYPE_EL3 || + type == INTR_TYPE_NS); + + assert(security_state == NON_SECURE || security_state == SECURE); + + /* + * We ignore the security state parameter under the assumption that + * both normal and secure worlds are using ARM GICv2. This parameter + * will be used when the secure world starts using GICv3. + */ +#if FVP_GIC_ARCH == 2 + return gicv2_interrupt_type_to_line(gicc_base, type); +#else +#error "Invalid GIC architecture version specified for FVP port" +#endif +} + +#if FVP_GIC_ARCH == 2 +/******************************************************************************* + * This function returns the type of the highest priority pending interrupt at + * the GIC cpu interface. INTR_TYPE_INVAL is returned when there is no + * interrupt pending. 
+ ******************************************************************************/ +uint32_t ic_get_pending_interrupt_type() +{ + uint32_t id, gicc_base; + + gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR); + id = gicc_read_hppir(gicc_base); + + /* Assume that all secure interrupts are S-EL1 interrupts */ + if (id < 1022) + return INTR_TYPE_S_EL1; + + if (id == GIC_SPURIOUS_INTERRUPT) + return INTR_TYPE_INVAL; + + return INTR_TYPE_NS; +} + +/******************************************************************************* + * This function returns the id of the highest priority pending interrupt at + * the GIC cpu interface. INTR_ID_UNAVAILABLE is returned when there is no + * interrupt pending. + ******************************************************************************/ +uint32_t ic_get_pending_interrupt_id() +{ + uint32_t id, gicc_base; + + gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR); + id = gicc_read_hppir(gicc_base); + + if (id < 1022) + return id; + + if (id == GIC_SPURIOUS_INTERRUPT) + return INTR_ID_UNAVAILABLE; + + /* + * Find out which non-secure interrupt it is under the assumption that + * the GICC_CTLR.AckCtl bit is 0. + */ + return gicc_read_ahppir(gicc_base); +} + +/******************************************************************************* + * This function reads the GIC cpu interface Interrupt Acknowledge register + * to start handling the pending interrupt. It returns the contents of the IAR. + ******************************************************************************/ +uint32_t ic_acknowledge_interrupt() +{ + return gicc_read_IAR(platform_get_cfgvar(CONFIG_GICC_ADDR)); +} + +/******************************************************************************* + * This function writes the GIC cpu interface End Of Interrupt register with + * the passed value to finish handling the active interrupt. + ******************************************************************************/ +void ic_end_of_interrupt(uint32_t id) +{ + gicc_write_EOIR(platform_get_cfgvar(CONFIG_GICC_ADDR), id); + return; +} + +/******************************************************************************* + * This function returns the type of the interrupt id depending upon the group + * this interrupt has been configured under by the interrupt controller, i.e. + * group0 or group1. 
+ ******************************************************************************/ +uint32_t ic_get_interrupt_type(uint32_t id) +{ + uint32_t group; + + group = gicd_get_igroupr(platform_get_cfgvar(CONFIG_GICD_ADDR), id); + + /* Assume that all secure interrupts are S-EL1 interrupts */ + if (group == GRP0) + return INTR_TYPE_S_EL1; + else + return INTR_TYPE_NS; +} + +#else +#error "Invalid GIC architecture version specified for FVP port" +#endif diff --git a/plat/fvp/platform.h b/plat/fvp/platform.h index 7f6ef41..68b5c11 100644 --- a/plat/fvp/platform.h +++ b/plat/fvp/platform.h @@ -445,13 +445,20 @@ #endif extern void plat_cci_setup(void); -/* Declarations for fvp_gic.c */ +/* Declarations for plat_gic.c */ +extern uint32_t ic_get_pending_interrupt_id(void); +extern uint32_t ic_get_pending_interrupt_type(void); +extern uint32_t ic_acknowledge_interrupt(void); +extern uint32_t ic_get_interrupt_type(uint32_t id); +extern void ic_end_of_interrupt(uint32_t id); extern void gic_cpuif_deactivate(unsigned int); extern void gic_cpuif_setup(unsigned int); extern void gic_pcpu_distif_setup(unsigned int); extern void gic_setup(void); +extern uint32_t plat_interrupt_type_to_line(uint32_t type, + uint32_t security_state); -/* Declarations for fvp_topology.c */ +/* Declarations for plat_topology.c */ extern int plat_setup_topology(void); extern int plat_get_max_afflvl(void); extern unsigned int plat_get_aff_count(unsigned int, unsigned long); diff --git a/plat/fvp/platform.mk b/plat/fvp/platform.mk index 82bafed..d752d65 100644 --- a/plat/fvp/platform.mk +++ b/plat/fvp/platform.mk @@ -86,3 +86,9 @@ BL31_SOURCES += drivers/arm/tzc400/tzc400.c \ plat/fvp/plat_security.c endif + + +# Flag used by the FVP port to determine the version of ARM GIC architecture +# to use for interrupt management in EL3. +FVP_GIC_ARCH := 2 +$(eval $(call add_define,FVP_GIC_ARCH)) diff --git a/services/spd/tspd/tspd_common.c b/services/spd/tspd/tspd_common.c index eafce65..b01ba6e 100644 --- a/services/spd/tspd/tspd_common.c +++ b/services/spd/tspd/tspd_common.c @@ -42,9 +42,9 @@ * programming an entry into the secure payload. ******************************************************************************/ int32_t tspd_init_secure_context(uint64_t entrypoint, - uint32_t rw, - uint64_t mpidr, - tsp_context_t *tsp_ctx) + uint32_t rw, + uint64_t mpidr, + tsp_context_t *tsp_ctx) { uint32_t scr, sctlr; el1_sys_regs_t *el1_state; @@ -65,10 +65,14 @@ */ memset(tsp_ctx, 0, sizeof(*tsp_ctx)); - /* Set the right security state and register width for the SP */ + /* + * Set the right security state, register width and enable access to + * the secure physical timer for the SP. + */ scr = read_scr(); scr &= ~SCR_NS_BIT; scr &= ~SCR_RW_BIT; + scr |= SCR_ST_BIT; if (rw == TSP_AARCH64) scr |= SCR_RW_BIT; @@ -85,7 +89,14 @@ write_ctx_reg(el1_state, CTX_SCTLR_EL1, sctlr); /* Set this context as ready to be initialised i.e OFF */ - tsp_ctx->state = TSP_STATE_OFF; + set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF); + + /* + * This context has not been used yet. It will become valid + * when the TSP is interrupted and wants the TSPD to preserve + * the context. 
+ */ + clr_std_smc_active_flag(tsp_ctx->state); /* Associate this context with the cpu specified */ tsp_ctx->mpidr = mpidr; diff --git a/services/spd/tspd/tspd_main.c b/services/spd/tspd/tspd_main.c index 4445e87..3766abb 100644 --- a/services/spd/tspd/tspd_main.c +++ b/services/spd/tspd/tspd_main.c @@ -43,6 +43,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -68,6 +71,75 @@ int32_t tspd_init(void); +/******************************************************************************* + * This function is the handler registered for S-EL1 interrupts by the TSPD. It + * validates the interrupt and upon success arranges entry into the TSP at + * 'tsp_fiq_entry()' for handling the interrupt. + ******************************************************************************/ +static uint64_t tspd_sel1_interrupt_handler(uint32_t id, + uint32_t flags, + void *handle, + void *cookie) +{ + uint32_t linear_id; + uint64_t mpidr; + tsp_context_t *tsp_ctx; + + /* Check the security state when the exception was generated */ + assert(get_interrupt_src_ss(flags) == NON_SECURE); + +#if IMF_READ_INTERRUPT_ID + /* Check the security status of the interrupt */ + assert(ic_get_interrupt_type(id) == INTR_TYPE_S_EL1); +#endif + + /* Sanity check the pointer to this cpu's context */ + mpidr = read_mpidr(); + assert(handle == cm_get_context(mpidr, NON_SECURE)); + + /* Save the non-secure context before entering the TSP */ + cm_el1_sysregs_context_save(NON_SECURE); + + /* Get a reference to this cpu's TSP context */ + linear_id = platform_get_core_pos(mpidr); + tsp_ctx = &tspd_sp_context[linear_id]; + assert(&tsp_ctx->cpu_ctx == cm_get_context(mpidr, SECURE)); + + /* + * Determine if the TSP was previously preempted. Its last known + * context has to be preserved in this case. + * The TSP should return control to the TSPD after handling this + * FIQ. Preserve essential EL3 context to allow entry into the + * TSP at the FIQ entry point using the 'cpu_context' structure. + * There is no need to save the secure system register context + * since the TSP is supposed to preserve it during S-EL1 interrupt + * handling. + */ + if (get_std_smc_active_flag(tsp_ctx->state)) { + tsp_ctx->saved_spsr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx, + CTX_SPSR_EL3); + tsp_ctx->saved_elr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx, + CTX_ELR_EL3); + } + + SMC_SET_EL3(&tsp_ctx->cpu_ctx, + CTX_SPSR_EL3, + SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTION)); + SMC_SET_EL3(&tsp_ctx->cpu_ctx, + CTX_ELR_EL3, + (uint64_t) tsp_entry_info->fiq_entry); + cm_el1_sysregs_context_restore(SECURE); + cm_set_next_eret_context(SECURE); + + /* + * Tell the TSP that it has to handle an FIQ synchronously. Also the + * address of the instruction in the normal world where the interrupt + * was generated is passed for debugging purposes. It is safe to + * retrieve this address from ELR_EL3 as the secure context will not + * take effect until el3_exit(). + */ + SMC_RET2(&tsp_ctx->cpu_ctx, TSP_HANDLE_FIQ_AND_RETURN, read_elr_el3()); +} /******************************************************************************* * Secure Payload Dispatcher setup. 
The SPD finds out the SP entrypoint and type @@ -131,7 +203,7 @@ int32_t tspd_init(void) { uint64_t mpidr = read_mpidr(); - uint32_t linear_id = platform_get_core_pos(mpidr); + uint32_t linear_id = platform_get_core_pos(mpidr), flags; uint64_t rc; tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; @@ -142,7 +214,7 @@ rc = tspd_synchronous_sp_entry(tsp_ctx); assert(rc != 0); if (rc) { - tsp_ctx->state = TSP_STATE_ON; + set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON); /* * TSP has been successfully initialized. Register power @@ -151,6 +223,18 @@ psci_register_spd_pm_hook(&tspd_pm); } + /* + * Register an interrupt handler for S-EL1 interrupts when generated + * during code executing in the non-secure state. + */ + flags = 0; + set_interrupt_rm_flag(flags, NON_SECURE); + rc = register_interrupt_type_handler(INTR_TYPE_S_EL1, + tspd_sel1_interrupt_handler, + flags); + if (rc) + panic(); + return rc; } @@ -173,7 +257,6 @@ uint64_t flags) { cpu_context_t *ns_cpu_context; - gp_regs_t *ns_gp_regs; unsigned long mpidr = read_mpidr(); uint32_t linear_id = platform_get_core_pos(mpidr), ns; tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; @@ -184,6 +267,98 @@ switch (smc_fid) { /* + * This function ID is used by the TSP to indicate that it was + * preempted by a normal world IRQ. + */ + case TSP_PREEMPTED: + if (ns) + SMC_RET1(handle, SMC_UNK); + + assert(handle == cm_get_context(mpidr, SECURE)); + cm_el1_sysregs_context_save(SECURE); + /* Get a reference to the non-secure context */ + ns_cpu_context = cm_get_context(mpidr, NON_SECURE); + assert(ns_cpu_context); + + /* + * Restore non-secure state. There is no need to save the + * secure system register context since the TSP was supposed + * to preserve it during S-EL1 interrupt handling. + */ + cm_el1_sysregs_context_restore(NON_SECURE); + cm_set_next_eret_context(NON_SECURE); + + SMC_RET1(ns_cpu_context, SMC_PREEMPTED); + + /* + * This function ID is used only by the TSP to indicate that it has + * finished handling an S-EL1 FIQ interrupt. Execution should resume + * in the normal world. + */ + case TSP_HANDLED_S_EL1_FIQ: + if (ns) + SMC_RET1(handle, SMC_UNK); + + assert(handle == cm_get_context(mpidr, SECURE)); + + /* + * Restore the relevant EL3 state which was saved to service + * this SMC. + */ + if (get_std_smc_active_flag(tsp_ctx->state)) { + SMC_SET_EL3(&tsp_ctx->cpu_ctx, + CTX_SPSR_EL3, + tsp_ctx->saved_spsr_el3); + SMC_SET_EL3(&tsp_ctx->cpu_ctx, + CTX_ELR_EL3, + tsp_ctx->saved_elr_el3); + } + + /* Get a reference to the non-secure context */ + ns_cpu_context = cm_get_context(mpidr, NON_SECURE); + assert(ns_cpu_context); + + /* + * Restore non-secure state. There is no need to save the + * secure system register context since the TSP was supposed + * to preserve it during S-EL1 interrupt handling. + */ + cm_el1_sysregs_context_restore(NON_SECURE); + cm_set_next_eret_context(NON_SECURE); + + SMC_RET0((uint64_t) ns_cpu_context); + + + /* + * This function ID is used only by the TSP to indicate that it was + * interrupted due to an EL3 FIQ interrupt. Execution should resume + * in the normal world. 
+ */ + case TSP_EL3_FIQ: + if (ns) + SMC_RET1(handle, SMC_UNK); + + assert(handle == cm_get_context(mpidr, SECURE)); + + /* Assert that standard SMC execution has been preempted */ + assert(get_std_smc_active_flag(tsp_ctx->state)); + + /* Save the secure system register state */ + cm_el1_sysregs_context_save(SECURE); + + /* Get a reference to the non-secure context */ + ns_cpu_context = cm_get_context(mpidr, NON_SECURE); + assert(ns_cpu_context); + + /* Restore non-secure state */ + cm_el1_sysregs_context_restore(NON_SECURE); + cm_set_next_eret_context(NON_SECURE); + + SMC_RET1(ns_cpu_context, TSP_EL3_FIQ); + + + /* * This function ID is used only by the SP to indicate it has * finished initialising itself after a cold boot */ @@ -206,9 +381,6 @@ */ tspd_synchronous_sp_exit(tsp_ctx, x1); - /* Should never reach here */ - assert(0); - /* * These function IDs is used only by the SP to indicate it has * finished: @@ -241,18 +413,20 @@ */ tspd_synchronous_sp_exit(tsp_ctx, x1); - /* Should never reach here */ - assert(0); - /* * Request from non-secure client to perform an * arithmetic operation or response from secure * payload to an earlier request. */ - case TSP_FID_ADD: - case TSP_FID_SUB: - case TSP_FID_MUL: - case TSP_FID_DIV: + case TSP_FAST_FID(TSP_ADD): + case TSP_FAST_FID(TSP_SUB): + case TSP_FAST_FID(TSP_MUL): + case TSP_FAST_FID(TSP_DIV): + + case TSP_STD_FID(TSP_ADD): + case TSP_STD_FID(TSP_SUB): + case TSP_STD_FID(TSP_MUL): + case TSP_STD_FID(TSP_DIV): if (ns) { /* * This is a fresh request from the non-secure client. @@ -261,11 +435,15 @@ * state and send the request to the secure payload. */ assert(handle == cm_get_context(mpidr, NON_SECURE)); + + /* Check if we are already preempted */ + if (get_std_smc_active_flag(tsp_ctx->state)) + SMC_RET1(handle, SMC_UNK); + cm_el1_sysregs_context_save(NON_SECURE); /* Save x1 and x2 for use by TSP_GET_ARGS call below */ - SMC_SET_GP(handle, CTX_GPREG_X1, x1); - SMC_SET_GP(handle, CTX_GPREG_X2, x2); + store_tsp_args(tsp_ctx, x1, x2); /* * We are done stashing the non-secure context. Ask the @@ -280,17 +458,27 @@ * from this function. */ assert(&tsp_ctx->cpu_ctx == cm_get_context(mpidr, SECURE)); - set_aapcs_args7(&tsp_ctx->cpu_ctx, smc_fid, x1, x2, 0, 0, - 0, 0, 0); - cm_set_el3_elr(SECURE, (uint64_t) tsp_entry_info->fast_smc_entry); + + /* Set appropriate entry for SMC. + * We expect the TSP to manage the PSTATE.I and PSTATE.F + * flags as appropriate. + */ + if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) { + cm_set_elr_el3(SECURE, (uint64_t) + tsp_entry_info->fast_smc_entry); + } else { + set_std_smc_active_flag(tsp_ctx->state); + cm_set_elr_el3(SECURE, (uint64_t) + tsp_entry_info->std_smc_entry); + } + cm_el1_sysregs_context_restore(SECURE); cm_set_next_eret_context(SECURE); - - return smc_fid; + SMC_RET3(&tsp_ctx->cpu_ctx, smc_fid, x1, x2); } else { /* * This is the result from the secure client of an - * earlier request. The results are in x1-x2. Copy it + * earlier request. The results are in x1-x3. Copy it * into the non-secure context, save the secure state * and return to the non-secure state. 
*/ @@ -300,18 +488,53 @@ /* Get a reference to the non-secure context */ ns_cpu_context = cm_get_context(mpidr, NON_SECURE); assert(ns_cpu_context); - ns_gp_regs = get_gpregs_ctx(ns_cpu_context); /* Restore non-secure state */ cm_el1_sysregs_context_restore(NON_SECURE); cm_set_next_eret_context(NON_SECURE); - - SMC_RET2(ns_gp_regs, x1, x2); + if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_STD) + clr_std_smc_active_flag(tsp_ctx->state); + SMC_RET3(ns_cpu_context, x1, x2, x3); } break; /* + * Request from the non-secure world to resume the preempted + * Standard SMC call. + */ + case TSP_FID_RESUME: + /* RESUME should be invoked only by normal world */ + if (!ns) { + assert(0); + break; + } + + /* + * This is a resume request from the non-secure client. + * Save the non-secure state and send the request to + * the secure payload. + */ + assert(handle == cm_get_context(mpidr, NON_SECURE)); + + /* Check if we are already preempted before resume */ + if (!get_std_smc_active_flag(tsp_ctx->state)) + SMC_RET1(handle, SMC_UNK); + + cm_el1_sysregs_context_save(NON_SECURE); + + /* + * We are done stashing the non-secure context. Ask the + * secure payload to do the work now. + */ + + /* We just need to return to the preempted point in + * the TSP and the execution will resume as normal. + */ + cm_el1_sysregs_context_restore(SECURE); + cm_set_next_eret_context(SECURE); + + /* * This is a request from the secure payload for more arguments * for an ongoing arithmetic operation requested by the * non-secure world. Simply return the arguments from the non- @@ -324,10 +547,9 @@ /* Get a reference to the non-secure context */ ns_cpu_context = cm_get_context(mpidr, NON_SECURE); assert(ns_cpu_context); - ns_gp_regs = get_gpregs_ctx(ns_cpu_context); - SMC_RET2(handle, read_ctx_reg(ns_gp_regs, CTX_GPREG_X1), - read_ctx_reg(ns_gp_regs, CTX_GPREG_X2)); + get_tsp_args(tsp_ctx, x1, x2); + SMC_RET2(handle, x1, x2); case TOS_CALL_COUNT: /* @@ -351,9 +573,9 @@ SMC_RET1(handle, SMC_UNK); } -/* Define a SPD runtime service descriptor */ +/* Define an SPD runtime service descriptor for fast SMC calls */ DECLARE_RT_SVC( - spd, + tspd_fast, OEN_TOS_START, OEN_TOS_END, @@ -361,3 +583,14 @@ tspd_setup, tspd_smc_handler ); + +/* Define an SPD runtime service descriptor for standard SMC calls */ +DECLARE_RT_SVC( + tspd_std, + + OEN_TOS_START, + OEN_TOS_END, + SMC_TYPE_STD, + NULL, + tspd_smc_handler +); diff --git a/services/spd/tspd/tspd_pm.c b/services/spd/tspd/tspd_pm.c index 2447d9e..d99aa22 100644 --- a/services/spd/tspd/tspd_pm.c +++ b/services/spd/tspd/tspd_pm.c @@ -56,10 +56,10 @@ tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; assert(tsp_entry_info); - assert(tsp_ctx->state == TSP_STATE_ON); + assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON); /* Program the entry point and enter the TSP */ - cm_set_el3_elr(SECURE, (uint64_t) tsp_entry_info->cpu_off_entry); + cm_set_elr_el3(SECURE, (uint64_t) tsp_entry_info->cpu_off_entry); rc = tspd_synchronous_sp_entry(tsp_ctx); /* @@ -73,7 +73,7 @@ * Reset TSP's context for a fresh start when this cpu is turned on * subsequently. 
*/ - tsp_ctx->state = TSP_STATE_OFF; + set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF); return 0; } @@ -90,13 +90,13 @@ tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; assert(tsp_entry_info); - assert(tsp_ctx->state == TSP_STATE_ON); + assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_ON); /* Program the entry point, power_state parameter and enter the TSP */ write_ctx_reg(get_gpregs_ctx(&tsp_ctx->cpu_ctx), CTX_GPREG_X0, power_state); - cm_set_el3_elr(SECURE, (uint64_t) tsp_entry_info->cpu_suspend_entry); + cm_set_elr_el3(SECURE, (uint64_t) tsp_entry_info->cpu_suspend_entry); rc = tspd_synchronous_sp_entry(tsp_ctx); /* @@ -107,7 +107,7 @@ panic(); /* Update its context to reflect the state the TSP is in */ - tsp_ctx->state = TSP_STATE_SUSPEND; + set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_SUSPEND); } /******************************************************************************* @@ -124,7 +124,7 @@ tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; assert(tsp_entry_info); - assert(tsp_ctx->state == TSP_STATE_OFF); + assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_OFF); /* Initialise this cpu's secure context */ tspd_init_secure_context((uint64_t) tsp_entry_info->cpu_on_entry, @@ -143,7 +143,7 @@ panic(); /* Update its context to reflect the state the SP is in */ - tsp_ctx->state = TSP_STATE_ON; + set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON); } /******************************************************************************* @@ -159,13 +159,13 @@ tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id]; assert(tsp_entry_info); - assert(tsp_ctx->state == TSP_STATE_SUSPEND); + assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_SUSPEND); /* Program the entry point, suspend_level and enter the SP */ write_ctx_reg(get_gpregs_ctx(&tsp_ctx->cpu_ctx), CTX_GPREG_X0, suspend_level); - cm_set_el3_elr(SECURE, (uint64_t) tsp_entry_info->cpu_resume_entry); + cm_set_elr_el3(SECURE, (uint64_t) tsp_entry_info->cpu_resume_entry); rc = tspd_synchronous_sp_entry(tsp_ctx); /* @@ -176,7 +176,7 @@ panic(); /* Update its context to reflect the state the SP is in */ - tsp_ctx->state = TSP_STATE_ON; + set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON); } /******************************************************************************* diff --git a/services/spd/tspd/tspd_private.h b/services/spd/tspd/tspd_private.h index 81484e1..7395bb9 100644 --- a/services/spd/tspd/tspd_private.h +++ b/services/spd/tspd/tspd_private.h @@ -33,15 +33,47 @@ #include #include +#include #include #include /******************************************************************************* * Secure Payload PM state information e.g. SP is suspended, uninitialised etc + * and macros to access the state information in the per-cpu 'state' flags ******************************************************************************/ -#define TSP_STATE_OFF 0 -#define TSP_STATE_ON 1 -#define TSP_STATE_SUSPEND 2 +#define TSP_PSTATE_OFF 0 +#define TSP_PSTATE_ON 1 +#define TSP_PSTATE_SUSPEND 2 +#define TSP_PSTATE_SHIFT 0 +#define TSP_PSTATE_MASK 0x3 +#define get_tsp_pstate(state) ((state >> TSP_PSTATE_SHIFT) & TSP_PSTATE_MASK) +#define clr_tsp_pstate(state) (state &= ~(TSP_PSTATE_MASK \ + << TSP_PSTATE_SHIFT)) +#define set_tsp_pstate(st, pst) do { \ + clr_tsp_pstate(st); \ + st |= (pst & TSP_PSTATE_MASK) << \ + TSP_PSTATE_SHIFT; \ + } while (0) + + +/* + * This flag is used by the TSPD to determine if the TSP is servicing a standard + * SMC request prior to programming the next entry into the TSP e.g. 
if TSP + * execution is preempted by a non-secure interrupt and handed control to the + * normal world. If another request which is distinct from what the TSP was + * previously doing arrives, then this flag will help the TSPD to either + * reject the new request or service it while ensuring that the previous context + * is not corrupted. + */ +#define STD_SMC_ACTIVE_FLAG_SHIFT 2 +#define STD_SMC_ACTIVE_FLAG_MASK 1 +#define get_std_smc_active_flag(state) ((state >> STD_SMC_ACTIVE_FLAG_SHIFT) \ + & STD_SMC_ACTIVE_FLAG_MASK) +#define set_std_smc_active_flag(state) (state |= \ + 1 << STD_SMC_ACTIVE_FLAG_SHIFT) +#define clr_std_smc_active_flag(state) (state &= \ + ~(STD_SMC_ACTIVE_FLAG_MASK \ + << STD_SMC_ACTIVE_FLAG_SHIFT)) /******************************************************************************* * Secure Payload execution state information i.e. aarch32 or aarch64 @@ -93,6 +125,12 @@ #include #include +/* + * The number of arguments to save during an SMC call for TSP. + * Currently only x1 and x2 are used by TSP. + */ +#define TSP_NUM_ARGS 0x2 + /* AArch64 callee saved general purpose register context structure. */ DEFINE_REG_STRUCT(c_rt_regs, TSPD_C_RT_CTX_ENTRIES); @@ -106,19 +144,39 @@ /******************************************************************************* * Structure which helps the SPD to maintain the per-cpu state of the SP. - * 'state' - collection of flags to track SP state e.g. on/off - * 'mpidr' - mpidr to associate a context with a cpu - * 'c_rt_ctx' - stack address to restore C runtime context from after returning - * from a synchronous entry into the SP. - * 'cpu_ctx' - space to maintain SP architectural state + * 'saved_spsr_el3' - temporary copy to allow FIQ handling when the TSP has been + * preempted. + * 'saved_elr_el3' - temporary copy to allow FIQ handling when the TSP has been + * preempted. + * 'state' - collection of flags to track SP state e.g. on/off + * 'mpidr' - mpidr to associate a context with a cpu + * 'c_rt_ctx' - stack address to restore C runtime context from after + * returning from a synchronous entry into the SP. + * 'cpu_ctx' - space to maintain SP architectural state + * 'saved_tsp_args' - space to store arguments for TSP arithmetic operations + * which will be queried by the TSP using the TSP_GET_ARGS SMC. ******************************************************************************/ typedef struct tsp_context { + uint64_t saved_elr_el3; + uint32_t saved_spsr_el3; uint32_t state; uint64_t mpidr; uint64_t c_rt_ctx; cpu_context_t cpu_ctx; + uint64_t saved_tsp_args[TSP_NUM_ARGS]; } tsp_context_t; +/* Helper macros to store and retrieve tsp args from tsp_context */ +#define store_tsp_args(tsp_ctx, x1, x2) do {\ + tsp_ctx->saved_tsp_args[0] = x1;\ + tsp_ctx->saved_tsp_args[1] = x2;\ + } while (0) + +#define get_tsp_args(tsp_ctx, x1, x2) do {\ + x1 = tsp_ctx->saved_tsp_args[0];\ + x2 = tsp_ctx->saved_tsp_args[1];\ + } while (0) + /* TSPD power management handlers */ extern const spd_pm_ops_t tspd_pm; diff --git a/services/std_svc/psci/psci_entry.S b/services/std_svc/psci/psci_entry.S index 256c538..bc8d900 100644 --- a/services/std_svc/psci/psci_entry.S +++ b/services/std_svc/psci/psci_entry.S @@ -30,13 +30,13 @@ #include #include -#include #include .globl psci_aff_on_finish_entry .globl psci_aff_suspend_finish_entry .globl __psci_cpu_off .globl __psci_cpu_suspend + .globl psci_power_down_wfi /* ----------------------------------------------------- * This cpu has been physically powered up. 
Depending @@ -120,9 +120,6 @@ mrs x0, mpidr_el1 bl platform_set_coherent_stack bl psci_cpu_off - mov x1, #PSCI_E_SUCCESS - cmp x0, x1 - b.eq final_wfi mov sp, x19 ldp x19, x20, [sp,#0] add sp, sp, #0x10 @@ -144,9 +141,6 @@ mov x1, x21 mov x2, x22 bl psci_cpu_suspend - mov x1, #PSCI_E_SUCCESS - cmp x0, x1 - b.eq final_wfi mov sp, x19 ldp x21, x22, [sp,#0x10] ldp x19, x20, [sp,#0] @@ -154,7 +148,16 @@ func_epilogue ret -func final_wfi + /* -------------------------------------------- + * This function is called to indicate to the + * power controller that it is safe to power + * down this cpu. It should not exit the wfi + * and will be released from reset upon power + * up. 'wfi_spill' is used to catch erroneous + * exits from wfi. + * -------------------------------------------- + */ +func psci_power_down_wfi dsb sy // ensure write buffer empty wfi wfi_spill: diff --git a/services/std_svc/psci/psci_main.c b/services/std_svc/psci/psci_main.c index 1bcf216..c0866fb 100644 --- a/services/std_svc/psci/psci_main.c +++ b/services/std_svc/psci/psci_main.c @@ -90,23 +90,37 @@ if (target_afflvl > MPIDR_MAX_AFFLVL) return PSCI_E_INVALID_PARAMS; + /* Determine the 'state type' in the 'power_state' parameter */ pstate_type = psci_get_pstate_type(power_state); + + /* + * Ensure that we have a platform specific handler for entering + * a standby state. + */ if (pstate_type == PSTATE_TYPE_STANDBY) { - if (psci_plat_pm_ops->affinst_standby) - rc = psci_plat_pm_ops->affinst_standby(power_state); - else + if (!psci_plat_pm_ops->affinst_standby) return PSCI_E_INVALID_PARAMS; - } else { - mpidr = read_mpidr(); - rc = psci_afflvl_suspend(mpidr, - entrypoint, - context_id, - power_state, - MPIDR_AFFLVL0, - target_afflvl); + + rc = psci_plat_pm_ops->affinst_standby(power_state); + assert(rc == PSCI_E_INVALID_PARAMS || rc == PSCI_E_SUCCESS); + return rc; } - assert(rc == PSCI_E_INVALID_PARAMS || rc == PSCI_E_SUCCESS); + /* + * Do what is needed to enter the power down state. Upon success, + * enter the final wfi which will power down this cpu, else return + * an error. + */ + mpidr = read_mpidr(); + rc = psci_afflvl_suspend(mpidr, + entrypoint, + context_id, + power_state, + MPIDR_AFFLVL0, + target_afflvl); + if (rc == PSCI_E_SUCCESS) + psci_power_down_wfi(); + assert(rc == PSCI_E_INVALID_PARAMS); return rc; } @@ -127,10 +141,18 @@ rc = psci_afflvl_off(mpidr, MPIDR_AFFLVL0, target_afflvl); /* + * Check if all actions needed to safely power down this cpu have + * successfully completed. Enter a wfi loop which will allow the + * power controller to physically power down this cpu. + */ + if (rc == PSCI_E_SUCCESS) + psci_power_down_wfi(); + + /* * The only error cpu_off can return is E_DENIED. So check if that's * indeed the case. */ - assert (rc == PSCI_E_SUCCESS || rc == PSCI_E_DENIED); + assert (rc == PSCI_E_DENIED); return rc; } diff --git a/tools/fip_create/fip_create.c b/tools/fip_create/fip_create.c index c97204a..d1802b7 100644 --- a/tools/fip_create/fip_create.c +++ b/tools/fip_create/fip_create.c @@ -53,7 +53,7 @@ * const char* format_type_str[] = { "RAW", "ELF", "PIC" }; */ -/* Currently only BL2 and BL31 images are supported. */ +/* The images used depend on the platform. */ static entry_lookup_list_t toc_entry_lookup_list[] = { { "Trusted Boot Firmware BL2", UUID_TRUSTED_BOOT_FIRMWARE_BL2, "bl2", NULL, FLAG_FILENAME },
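Taken together, the TSPD changes above give standard SMCs a simple preemption protocol: if a standard TSP call is preempted by a non-secure interrupt, the caller receives SMC_PREEMPTED in x0 and must re-enter the TSP with TSP_FID_RESUME until the original request completes. A minimal normal-world sketch under assumed names (the smc() helper and tsp_add_blocking() are not part of this patch; the constants are copied from the tsp.h and runtime_svc.h hunks above):

#include <stdint.h>

#define SMC_PREEMPTED		0xfffffffe
#define TSP_ADD			0x2000
#define TSP_STD_FID(fid)	((fid) | 0x72000000 | (0 << 31))
#define TSP_FID_RESUME		TSP_STD_FID(0x3000)

/* Assumed helper that issues an SMC and returns the value in x0 */
extern uint64_t smc(uint64_t fid, uint64_t x1, uint64_t x2);

uint64_t tsp_add_blocking(uint64_t x1, uint64_t x2)
{
	uint64_t ret = smc(TSP_STD_FID(TSP_ADD), x1, x2);

	/* A preempted standard SMC must be resumed until it completes */
	while (ret == SMC_PREEMPTED)
		ret = smc(TSP_FID_RESUME, 0, 0);

	return ret;
}

This mirrors the handler logic above: while get_std_smc_active_flag() is set, the TSPD rejects a fresh arithmetic request with SMC_UNK, so a well-behaved client resumes the preempted call rather than issuing a new one.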