diff --git a/bl31/context_mgmt.c b/bl31/context_mgmt.c index 44ef0d9..60c3492 100644 --- a/bl31/context_mgmt.c +++ b/bl31/context_mgmt.c @@ -165,6 +165,23 @@ } /******************************************************************************* + * This function populates ELR_EL3 member of 'cpu_context' pertaining + * to the given security state with the given entrypoint + ******************************************************************************/ +void cm_set_el3_elr(uint32_t security_state, uint64_t entrypoint) +{ + cpu_context *ctx; + el3_state *state; + + ctx = cm_get_context(read_mpidr(), security_state); + assert(ctx); + + /* Populate EL3 state so that ERET jumps to the correct entry */ + state = get_el3state_ctx(ctx); + write_ctx_reg(state, CTX_ELR_EL3, entrypoint); +} + +/******************************************************************************* * This function is used to program the context that's used for exception * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for * the required security state diff --git a/bl32/tsp/tsp_main.c b/bl32/tsp/tsp_main.c index 258bee4..f6640ed 100644 --- a/bl32/tsp/tsp_main.c +++ b/bl32/tsp/tsp_main.c @@ -199,10 +199,7 @@ spin_unlock(&console_lock); - /* - * Indicate to the SPD that we have completed - * this initialisation request. - */ + /* Indicate to the SPD that we have completed this request */ return set_smc_args(TSP_OFF_DONE, 0, 0, 0, 0, 0, 0, 0); } @@ -237,10 +234,7 @@ tsp_stats[linear_id].cpu_suspend_count); spin_unlock(&console_lock); - /* - * Indicate to the SPD that we have completed - * this initialisation request. - */ + /* Indicate to the SPD that we have completed this request */ return set_smc_args(TSP_SUSPEND_DONE, 0, 0, 0, 0, 0, 0, 0); } @@ -275,10 +269,7 @@ tsp_stats[linear_id].cpu_suspend_count); spin_unlock(&console_lock); - /* - * Indicate to the SPD that we have completed - * this initialisation request. 
- */ + /* Indicate to the SPD that we have completed this request */ return set_smc_args(TSP_RESUME_DONE, 0, 0, 0, 0, 0, 0, 0); } diff --git a/include/context.h b/include/context.h index 082e511..cb4cd8e 100644 --- a/include/context.h +++ b/include/context.h @@ -259,6 +259,42 @@ CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context, el3state_ctx), \ assert_core_context_el3state_offset_mismatch); +/* + * Helper macro to set the general purpose registers that correspond to + * parameters in an aapcs_64 call i.e. x0-x7 + */ +#define set_aapcs_args0(ctx, x0) do { \ + write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0, x0); \ + } while (0); +#define set_aapcs_args1(ctx, x0, x1) do { \ + write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X1, x1); \ + set_aapcs_args0(ctx, x0); \ + } while (0); +#define set_aapcs_args2(ctx, x0, x1, x2) do { \ + write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X2, x2); \ + set_aapcs_args1(ctx, x0, x1); \ + } while (0); +#define set_aapcs_args3(ctx, x0, x1, x2, x3) do { \ + write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X3, x3); \ + set_aapcs_args2(ctx, x0, x1, x2); \ + } while (0); +#define set_aapcs_args4(ctx, x0, x1, x2, x3, x4) do { \ + write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X4, x4); \ + set_aapcs_args3(ctx, x0, x1, x2, x3); \ + } while (0); +#define set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5) do { \ + write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X5, x5); \ + set_aapcs_args4(ctx, x0, x1, x2, x3, x4); \ + } while (0); +#define set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6) do { \ + write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X6, x6); \ + set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5); \ + } while (0); +#define set_aapcs_args7(ctx, x0, x1, x2, x3, x4, x5, x6, x7) do { \ + write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X7, x7); \ + set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6); \ + } while (0); + /******************************************************************************* * Function prototypes 
******************************************************************************/ diff --git a/include/context_mgmt.h b/include/context_mgmt.h index b8c8077..35f7c8c 100644 --- a/include/context_mgmt.h +++ b/include/context_mgmt.h @@ -48,6 +48,7 @@ extern void cm_el1_sysregs_context_restore(uint32_t security_state); extern void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint, uint32_t spsr, uint32_t scr); +extern void cm_set_el3_elr(uint32_t security_state, uint64_t entrypoint); extern void cm_set_next_eret_context(uint32_t security_state); extern void cm_init_exception_stack(uint64_t mpidr, uint32_t security_state); #endif /*__ASSEMBLY__*/ diff --git a/include/psci.h b/include/psci.h index e14c60b..3a040f9 100644 --- a/include/psci.h +++ b/include/psci.h @@ -55,7 +55,7 @@ ******************************************************************************/ #define PSCI_TOS_UP_MIG_CAP 0 #define PSCI_TOS_NOT_UP_MIG_CAP 1 -#define PSCI_TOS_NOT_PRESENT 2 +#define PSCI_TOS_NOT_PRESENT_MP 2 /******************************************************************************* * PSCI CPU_SUSPEND 'power_state' parameter specific defines @@ -140,6 +140,22 @@ } plat_pm_ops; /******************************************************************************* + * Optional structure populated by the Secure Payload Dispatcher to be given a + * chance to perform any bookkeeping before PSCI executes a power mgmt. + * operation. It also allows PSCI to determine certain properties of the SP e.g. + * migrate capability etc. 
 + ******************************************************************************/ +typedef struct { + void (*svc_on)(uint64_t target_cpu); + int32_t (*svc_off)(uint64_t __unused); + void (*svc_suspend)(uint64_t power_state); + void (*svc_on_finish)(uint64_t __unused); + void (*svc_suspend_finish)(uint64_t suspend_level); + void (*svc_migrate)(uint64_t __unused1, uint64_t __unused2); + int32_t (*svc_migrate_info)(uint64_t *__unused); +} spd_pm_ops; + +/******************************************************************************* * Function & Data prototypes ******************************************************************************/ extern unsigned int psci_version(void); diff --git a/services/psci/psci_afflvl_off.c b/services/psci/psci_afflvl_off.c index 72557aa..24c212f 100644 --- a/services/psci/psci_afflvl_off.c +++ b/services/psci/psci_afflvl_off.c @@ -56,9 +56,21 @@ psci_set_state(cpu_node, PSCI_STATE_OFF); /* - * Generic management: Get the index for clearing any - * lingering re-entry information + * Generic management: Get the index for clearing any lingering re-entry + * information and allow the secure world to switch itself off */ + + /* + * Call the cpu off handler registered by the Secure Payload Dispatcher + * to let it do any bookkeeping. Assume that the SPD always reports an + * E_DENIED error if the SP refuses to power down + */ + if (spd_pm.svc_off) { + rc = spd_pm.svc_off(0); + if (rc) + return rc; + } + index = cpu_node->data; memset(&psci_ns_entry_info[index], 0, sizeof(psci_ns_entry_info[index])); diff --git a/services/psci/psci_afflvl_on.c b/services/psci/psci_afflvl_on.c index d22904c..ee16c73 100644 --- a/services/psci/psci_afflvl_on.c +++ b/services/psci/psci_afflvl_on.c @@ -91,6 +91,14 @@ return rc; /* + * Call the cpu on handler registered by the Secure Payload Dispatcher + * to let it do any bookkeeping. 
If the handler encounters an error, it's + * expected to assert within + */ + if (spd_pm.svc_on) + spd_pm.svc_on(target_cpu); + + /* * Arch. management: Derive the re-entry information for * the non-secure world from the non-secure state from * where this call originated. @@ -365,6 +373,24 @@ bl31_arch_setup(); /* + * Use the more complex exception vectors to enable SPD + * initialisation. SP_EL3 should point to a 'cpu_context' + * structure which has an exception stack allocated. The + * calling cpu should have set the context already + */ + assert(cm_get_context(mpidr, NON_SECURE)); + cm_set_next_eret_context(NON_SECURE); + write_vbar_el3((uint64_t) runtime_exceptions); + + /* + * Call the cpu on finish handler registered by the Secure Payload + * Dispatcher to let it do any bookkeeping. If the handler encounters an + * error, it's expected to assert within + */ + if (spd_pm.svc_on_finish) + spd_pm.svc_on_finish(0); + + /* * Generic management: Now we just need to retrieve the * information that we had stashed away during the cpu_on * call to set this cpu on its way. First get the index diff --git a/services/psci/psci_afflvl_suspend.c b/services/psci/psci_afflvl_suspend.c index 4391580..62d270f 100644 --- a/services/psci/psci_afflvl_suspend.c +++ b/services/psci/psci_afflvl_suspend.c @@ -94,6 +94,19 @@ /* Sanity check to safeguard against data corruption */ assert(cpu_node->level == MPIDR_AFFLVL0); + /* + * Generic management: Store the re-entry information for the non-secure + * world and allow the secure world to suspend itself + */ + + /* + * Call the cpu suspend handler registered by the Secure Payload + * Dispatcher to let it do any bookkeeping. 
If the handler encounters an + * error, it's expected to assert within + */ + if (spd_pm.svc_suspend) + spd_pm.svc_suspend(power_state); + /* State management: mark this cpu as suspended */ psci_set_state(cpu_node, PSCI_STATE_SUSPEND); @@ -395,6 +408,7 @@ aff_map_node *cpu_node) { unsigned int index, plat_state, state, rc = PSCI_E_SUCCESS; + int32_t suspend_level; assert(cpu_node->level == MPIDR_AFFLVL0); @@ -430,6 +444,27 @@ rc = PSCI_E_SUCCESS; /* + * Use the more complex exception vectors to enable SPD + * initialisation. SP_EL3 should point to a 'cpu_context' + * structure which has an exception stack allocated. The + * non-secure context should have been set on this cpu + * prior to suspension. + */ + assert(cm_get_context(mpidr, NON_SECURE)); + cm_set_next_eret_context(NON_SECURE); + write_vbar_el3((uint64_t) runtime_exceptions); + + /* + * Call the cpu suspend finish handler registered by the Secure Payload + * Dispatcher to let it do any bookkeeping. If the handler encounters an + * error, it's expected to assert within + */ + if (spd_pm.svc_suspend_finish) { + suspend_level = psci_get_suspend_afflvl(cpu_node); + spd_pm.svc_suspend_finish(suspend_level); + } + + /* * Generic management: Now we just need to retrieve the * information that we had stashed away during the suspend * call to set this cpu on its way. diff --git a/services/psci/psci_common.c b/services/psci/psci_common.c index 214db78..cacd97e 100644 --- a/services/psci/psci_common.c +++ b/services/psci/psci_common.c @@ -40,6 +40,12 @@ #include #include "debug.h" +/* + * Provide a null weak instantiation for SPD power management operations. An SPD + * can define its own instance overriding this one + */ +const spd_pm_ops __attribute__((weak)) spd_pm = {0}; + /******************************************************************************* * Arrays that contains information needs to resume a cpu's execution when woken * out of suspend or off states. 
'psci_ns_einfo_idx' keeps track of the next diff --git a/services/psci/psci_entry.S b/services/psci/psci_entry.S index 15e074c..361dfde 100644 --- a/services/psci/psci_entry.S +++ b/services/psci/psci_entry.S @@ -109,18 +109,6 @@ mov x0, x19 bl platform_set_stack - /* --------------------------------------------- - * Now that the context management has been set - * up, enable full runtime exception handling. - * SP_EL3 is pointing to a 'cpu_context' - * structure which has an exception stack - * allocated. Since we're just about to leave - * this EL with ERET, we don't need an ISB here - * --------------------------------------------- - */ - adr x0, runtime_exceptions - msr vbar_el3, x0 - zero_callee_saved_regs b el3_exit _panic: diff --git a/services/psci/psci_main.c b/services/psci/psci_main.c index 67f189d..ca3a5a0 100644 --- a/services/psci/psci_main.c +++ b/services/psci/psci_main.c @@ -178,7 +178,7 @@ /* Unimplemented */ unsigned int psci_migrate_info_type(void) { - return PSCI_TOS_NOT_PRESENT; + return PSCI_TOS_NOT_PRESENT_MP; } unsigned long psci_migrate_info_up_cpu(void) diff --git a/services/psci/psci_private.h b/services/psci/psci_private.h index 3d7ae74..351cbe8 100644 --- a/services/psci/psci_private.h +++ b/services/psci/psci_private.h @@ -96,6 +96,12 @@ extern afflvl_power_on_finisher psci_afflvl_sus_finish_handlers[]; /******************************************************************************* + * Weak declarations to allow PSCI to cope on a system where the Secure Payload + * Dispatcher is missing. An SPD will define this structure when present. 
 + ******************************************************************************/ +extern const spd_pm_ops spd_pm; + +/******************************************************************************* * Function prototypes ******************************************************************************/ /* Private exported functions from psci_common.c */ diff --git a/services/spd/tspd/tspd.mk b/services/spd/tspd/tspd.mk index 60a51a6..ee6400d 100644 --- a/services/spd/tspd/tspd.mk +++ b/services/spd/tspd/tspd.mk @@ -34,6 +34,7 @@ SPD_OBJS := tspd_common.o \ tspd_main.o \ + tspd_pm.o \ tspd_helpers.o vpath %.c ${TSPD_DIR} diff --git a/services/spd/tspd/tspd_main.c b/services/spd/tspd/tspd_main.c index 6896379..15b3922 100644 --- a/services/spd/tspd/tspd_main.c +++ b/services/spd/tspd/tspd_main.c @@ -129,7 +129,10 @@ CTX_GPREG_X0, (uint64_t) bl32_meminfo); - /* Arrange for an entry into the secure payload */ + /* + * Arrange for an entry into the test secure payload. We expect an array + * of vectors in return + */ rc = tspd_synchronous_sp_entry(tsp_ctx); assert(rc != 0); if (rc) @@ -189,11 +192,46 @@ /* Should never reach here */ assert(0); + /* + * These function IDs are used only by the SP to indicate it has + * finished: + * 1. turning itself on in response to an earlier psci + * cpu_on request + * 2. resuming itself after an earlier psci cpu_suspend + * request. + */ + case TSP_ON_DONE: + case TSP_RESUME_DONE: + + /* + * These function IDs are used only by the SP to indicate it has + * finished: + * 1. suspending itself after an earlier psci cpu_suspend + * request. + * 2. turning itself off in response to an earlier psci + * cpu_off request. + */ + case TSP_OFF_DONE: + case TSP_SUSPEND_DONE: + if (ns) + SMC_RET1(handle, SMC_UNK); + + /* + * SP reports completion. The SPD must have initiated the + * original request through a synchronous entry into the SP. 
+ * Jump back to the original C runtime context, and pass x1 as + * return value to the caller + */ + tspd_synchronous_sp_exit(&tspd_sp_context[linear_id], x1); + + /* Should never reach here */ + assert(0); + default: - panic(); + break; } - SMC_RET1(handle, 0); + SMC_RET1(handle, SMC_UNK); } /* Define a SPD runtime service descriptor */ diff --git a/services/spd/tspd/tspd_pm.c b/services/spd/tspd/tspd_pm.c new file mode 100644 index 0000000..9e2f6c2 --- /dev/null +++ b/services/spd/tspd/tspd_pm.c @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/******************************************************************************* + * The target cpu is being turned on. Allow the TSPD/TSP to perform any actions + * needed. Nothing at the moment. + ******************************************************************************/ +static void tspd_cpu_on_handler(uint64_t target_cpu) +{ +} + +/******************************************************************************* + * This cpu is being turned off. Allow the TSPD/TSP to perform any actions + * needed + ******************************************************************************/ +static int32_t tspd_cpu_off_handler(uint64_t cookie) +{ + int32_t rc = 0; + uint64_t mpidr = read_mpidr(); + uint32_t linear_id = platform_get_core_pos(mpidr); + tsp_context *tsp_ctx = &tspd_sp_context[linear_id]; + + assert(tsp_entry_info); + assert(tsp_ctx->state == TSP_STATE_ON); + + /* Program the entry point and enter the TSP */ + cm_set_el3_elr(SECURE, (uint64_t) tsp_entry_info->cpu_off_entry); + rc = tspd_synchronous_sp_entry(tsp_ctx); + + /* + * Read the response from the TSP. A non-zero return means that + * something went wrong while communicating with the TSP. + */ + if (rc != 0) + panic(); + + /* + * Reset TSP's context for a fresh start when this cpu is turned on + * subsequently. 
+ */ + tsp_ctx->state = TSP_STATE_OFF; + + return 0; +} + +/******************************************************************************* + * This cpu is being suspended. S-EL1 state must have been saved in the + * resident cpu (mpidr format) if it is a UP/UP migratable TSP. + ******************************************************************************/ +static void tspd_cpu_suspend_handler(uint64_t power_state) +{ + int32_t rc = 0; + uint64_t mpidr = read_mpidr(); + uint32_t linear_id = platform_get_core_pos(mpidr); + tsp_context *tsp_ctx = &tspd_sp_context[linear_id]; + + assert(tsp_entry_info); + assert(tsp_ctx->state == TSP_STATE_ON); + + /* Program the entry point, power_state parameter and enter the TSP */ + write_ctx_reg(get_gpregs_ctx(&tsp_ctx->cpu_ctx), + CTX_GPREG_X0, + power_state); + cm_set_el3_elr(SECURE, (uint64_t) tsp_entry_info->cpu_suspend_entry); + rc = tspd_synchronous_sp_entry(tsp_ctx); + + /* + * Read the response from the TSP. A non-zero return means that + * something went wrong while communicating with the TSP. + */ + if (rc != 0) + panic(); + + /* Update its context to reflect the state the TSP is in */ + tsp_ctx->state = TSP_STATE_SUSPEND; +} + +/******************************************************************************* + * This cpu has been turned on. Enter the TSP to initialise S-EL1 and other bits + * before passing control back to the Secure Monitor. Entry in S-El1 is done + * after initialising minimal architectural state that guarantees safe + * execution. 
+ ******************************************************************************/ +static void tspd_cpu_on_finish_handler(uint64_t cookie) +{ + int32_t rc = 0; + uint64_t mpidr = read_mpidr(); + uint32_t linear_id = platform_get_core_pos(mpidr); + tsp_context *tsp_ctx = &tspd_sp_context[linear_id]; + + assert(tsp_entry_info); + assert(tsp_ctx->state == TSP_STATE_OFF); + + /* Initialise this cpu's secure context */ + tspd_init_secure_context((uint64_t) tsp_entry_info->cpu_on_entry, + TSP_AARCH64, + mpidr, + tsp_ctx); + + /* Enter the TSP */ + rc = tspd_synchronous_sp_entry(tsp_ctx); + + /* + * Read the response from the TSP. A non-zero return means that + * something went wrong while communicating with the SP. + */ + if (rc != 0) + panic(); + + /* Update its context to reflect the state the SP is in */ + tsp_ctx->state = TSP_STATE_ON; +} + +/******************************************************************************* + * This cpu has resumed from suspend. The SPD saved the TSP context when it + * completed the preceding suspend call. Use that context to program an entry + * into the TSP to allow it to do any remaining book keeping + ******************************************************************************/ +static void tspd_cpu_suspend_finish_handler(uint64_t suspend_level) +{ + int32_t rc = 0; + uint64_t mpidr = read_mpidr(); + uint32_t linear_id = platform_get_core_pos(mpidr); + tsp_context *tsp_ctx = &tspd_sp_context[linear_id]; + + assert(tsp_entry_info); + assert(tsp_ctx->state == TSP_STATE_SUSPEND); + + /* Program the entry point, suspend_level and enter the SP */ + write_ctx_reg(get_gpregs_ctx(&tsp_ctx->cpu_ctx), + CTX_GPREG_X0, + suspend_level); + cm_set_el3_elr(SECURE, (uint64_t) tsp_entry_info->cpu_resume_entry); + rc = tspd_synchronous_sp_entry(tsp_ctx); + + /* + * Read the response from the TSP. A non-zero return means that + * something went wrong while communicating with the TSP. 
+ */ + if (rc != 0) + panic(); + + /* Update its context to reflect the state the SP is in */ + tsp_ctx->state = TSP_STATE_ON; +} + +/******************************************************************************* + * Return the type of TSP the TSPD is dealing with. Report the current resident + * cpu (mpidr format) if it is a UP/UP migratable TSP. + ******************************************************************************/ +static int32_t tspd_cpu_migrate_info(uint64_t *resident_cpu) +{ + return TSP_MIGRATE_INFO; +} + +/******************************************************************************* + * Structure populated by the TSP Dispatcher to be given a chance to perform any + * TSP bookkeeping before PSCI executes a power mgmt. operation. + ******************************************************************************/ +const spd_pm_ops spd_pm = { + tspd_cpu_on_handler, + tspd_cpu_off_handler, + tspd_cpu_suspend_handler, + tspd_cpu_on_finish_handler, + tspd_cpu_suspend_finish_handler, + NULL, + tspd_cpu_migrate_info +}; +