diff --git a/include/bl31/services/psci1.0/psci.h b/include/bl31/services/psci1.0/psci.h new file mode 100644 index 0000000..dd1891c --- /dev/null +++ b/include/bl31/services/psci1.0/psci.h @@ -0,0 +1,259 @@ +/* + * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __PSCI_H__ +#define __PSCI_H__ + +#include <bakery_lock.h> +#include <platform_def.h> /* for PLATFORM_NUM_AFFS */ + +/******************************************************************************* + * Number of affinity instances whose state this PSCI implementation
can track + ******************************************************************************/ +#ifdef PLATFORM_NUM_AFFS +#define PSCI_NUM_AFFS PLATFORM_NUM_AFFS +#else +#define PSCI_NUM_AFFS (2 * PLATFORM_CORE_COUNT) +#endif + +/******************************************************************************* + * Defines for runtime services func ids + ******************************************************************************/ +#define PSCI_VERSION 0x84000000 +#define PSCI_CPU_SUSPEND_AARCH32 0x84000001 +#define PSCI_CPU_SUSPEND_AARCH64 0xc4000001 +#define PSCI_CPU_OFF 0x84000002 +#define PSCI_CPU_ON_AARCH32 0x84000003 +#define PSCI_CPU_ON_AARCH64 0xc4000003 +#define PSCI_AFFINITY_INFO_AARCH32 0x84000004 +#define PSCI_AFFINITY_INFO_AARCH64 0xc4000004 +#define PSCI_MIG_AARCH32 0x84000005 +#define PSCI_MIG_AARCH64 0xc4000005 +#define PSCI_MIG_INFO_TYPE 0x84000006 +#define PSCI_MIG_INFO_UP_CPU_AARCH32 0x84000007 +#define PSCI_MIG_INFO_UP_CPU_AARCH64 0xc4000007 +#define PSCI_SYSTEM_OFF 0x84000008 +#define PSCI_SYSTEM_RESET 0x84000009 +#define PSCI_FEATURES 0x8400000A +#define PSCI_SYSTEM_SUSPEND_AARCH32 0x8400000E +#define PSCI_SYSTEM_SUSPEND_AARCH64 0xc400000E + +/* Macro to help build the psci capabilities bitfield */ +#define define_psci_cap(x) (1 << (x & 0x1f)) + +/* + * Number of PSCI calls (above) implemented + */ +#define PSCI_NUM_CALLS 18 + +/******************************************************************************* + * PSCI Migrate and friends + ******************************************************************************/ +#define PSCI_TOS_UP_MIG_CAP 0 +#define PSCI_TOS_NOT_UP_MIG_CAP 1 +#define PSCI_TOS_NOT_PRESENT_MP 2 + +/******************************************************************************* + * PSCI CPU_SUSPEND 'power_state' parameter specific defines + ******************************************************************************/ +#define PSTATE_ID_SHIFT 0 +#define PSTATE_TYPE_SHIFT 16 +#define PSTATE_AFF_LVL_SHIFT 24 + +#define PSTATE_ID_MASK 0xffff +#define PSTATE_TYPE_MASK 0x1 +#define PSTATE_AFF_LVL_MASK 0x3 +#define PSTATE_VALID_MASK 0xFCFE0000 + +#define PSTATE_TYPE_STANDBY 0x0 +#define PSTATE_TYPE_POWERDOWN 0x1 + +#define psci_get_pstate_id(pstate) (((pstate) >> PSTATE_ID_SHIFT) & \ + PSTATE_ID_MASK) +#define psci_get_pstate_type(pstate) (((pstate) >> PSTATE_TYPE_SHIFT) & \ + PSTATE_TYPE_MASK) +#define psci_get_pstate_afflvl(pstate) (((pstate) >> PSTATE_AFF_LVL_SHIFT) & \ + PSTATE_AFF_LVL_MASK) +#define psci_make_powerstate(state_id, type, afflvl) \ + (((state_id) & PSTATE_ID_MASK) << PSTATE_ID_SHIFT) |\ + (((type) & PSTATE_TYPE_MASK) << PSTATE_TYPE_SHIFT) |\ + (((afflvl) & PSTATE_AFF_LVL_MASK) << PSTATE_AFF_LVL_SHIFT) + +/******************************************************************************* + * PSCI CPU_FEATURES feature flag specific defines + ******************************************************************************/ +/* Features flags for CPU SUSPEND power state parameter format. Bits [1:1] */ +#define FF_PSTATE_SHIFT 1 +#define FF_PSTATE_ORIG 0 +#define FF_PSTATE_EXTENDED 1 + +/* Features flags for CPU SUSPEND OS Initiated mode support. 
Bits [0:0] */ +#define FF_MODE_SUPPORT_SHIFT 0 +#define FF_SUPPORTS_OS_INIT_MODE 1 + +/******************************************************************************* + * PSCI version + ******************************************************************************/ +#define PSCI_MAJOR_VER (1 << 16) +#define PSCI_MINOR_VER 0x0 + +/******************************************************************************* + * PSCI error codes + ******************************************************************************/ +#define PSCI_E_SUCCESS 0 +#define PSCI_E_NOT_SUPPORTED -1 +#define PSCI_E_INVALID_PARAMS -2 +#define PSCI_E_DENIED -3 +#define PSCI_E_ALREADY_ON -4 +#define PSCI_E_ON_PENDING -5 +#define PSCI_E_INTERN_FAIL -6 +#define PSCI_E_NOT_PRESENT -7 +#define PSCI_E_DISABLED -8 + +/******************************************************************************* + * PSCI affinity state related constants. An affinity instance could be present + * or absent physically to cater for asymmetric topologies. If present then it + * could be in one of the 4 further defined states. + ******************************************************************************/ +#define PSCI_STATE_SHIFT 1 +#define PSCI_STATE_MASK 0xff + +#define PSCI_AFF_ABSENT 0x0 +#define PSCI_AFF_PRESENT 0x1 +#define PSCI_STATE_ON 0x0 +#define PSCI_STATE_OFF 0x1 +#define PSCI_STATE_ON_PENDING 0x2 +#define PSCI_STATE_SUSPEND 0x3 + +#define PSCI_INVALID_DATA -1 + +#define get_phys_state(x) ((x) != PSCI_STATE_ON ? \ + PSCI_STATE_OFF : PSCI_STATE_ON) + +#define psci_validate_power_state(pstate) ((pstate) & PSTATE_VALID_MASK) + + +#ifndef __ASSEMBLY__ + +#include <stdint.h> + +/******************************************************************************* + * Structure used to store per-cpu information relevant to the PSCI service. + * It is populated in the per-cpu data array. In return we get a guarantee that + * this information will not reside on a cache line shared with another cpu.
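The power_state packing defined above is easiest to see in a worked example. The sketch below is illustrative only (the wrapper function name is hypothetical) and assumes nothing beyond the macros defined in this header. Note that psci_make_powerstate() expands to an unparenthesised OR-chain, so it is safest used on its own as an initialiser or argument, not embedded in a larger expression.

#include <assert.h>

/* Request a powerdown of the whole cluster (affinity level 1) with a
 * platform-defined state id of 0. This encodes to 0x01010000. */
static int example_make_cluster_powerdown(unsigned int *pstate_out)
{
	unsigned int pstate = psci_make_powerstate(0, PSTATE_TYPE_POWERDOWN, 1);

	/* Any bits set outside the defined fields make the parameter invalid */
	if (psci_validate_power_state(pstate))
		return PSCI_E_INVALID_PARAMS;

	/* The decode macros recover the individual fields */
	assert(psci_get_pstate_type(pstate) == PSTATE_TYPE_POWERDOWN);
	assert(psci_get_pstate_afflvl(pstate) == 1);

	*pstate_out = pstate;
	return PSCI_E_SUCCESS;
}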
+ ******************************************************************************/ +typedef struct psci_cpu_data { + uint32_t power_state; + uint32_t max_phys_off_afflvl; /* Highest affinity level in physically + powered off state */ +#if !USE_COHERENT_MEM + bakery_info_t pcpu_bakery_info[PSCI_NUM_AFFS]; +#endif +} psci_cpu_data_t; + +/******************************************************************************* + * Structure populated by platform specific code to export routines which + * perform common low level pm functions + ******************************************************************************/ +typedef struct plat_pm_ops { + void (*affinst_standby)(unsigned int power_state); + int (*affinst_on)(unsigned long mpidr, + unsigned long sec_entrypoint, + unsigned int afflvl, + unsigned int state); + void (*affinst_off)(unsigned int afflvl, unsigned int state); + void (*affinst_suspend)(unsigned long sec_entrypoint, + unsigned int afflvl, + unsigned int state); + void (*affinst_on_finish)(unsigned int afflvl, unsigned int state); + void (*affinst_suspend_finish)(unsigned int afflvl, + unsigned int state); + void (*system_off)(void) __dead2; + void (*system_reset)(void) __dead2; + int (*validate_power_state)(unsigned int power_state); + int (*validate_ns_entrypoint)(unsigned long ns_entrypoint); + unsigned int (*get_sys_suspend_power_state)(void); +} plat_pm_ops_t; + +/******************************************************************************* + * Optional structure populated by the Secure Payload Dispatcher to be given a + * chance to perform any bookkeeping before PSCI executes a power mgmt. + * operation. It also allows PSCI to determine certain properties of the SP e.g. + * migrate capability etc. + ******************************************************************************/ +typedef struct spd_pm_ops { + void (*svc_on)(uint64_t target_cpu); + int32_t (*svc_off)(uint64_t __unused); + void (*svc_suspend)(uint64_t __unused); + void (*svc_on_finish)(uint64_t __unused); + void (*svc_suspend_finish)(uint64_t suspend_level); + int32_t (*svc_migrate)(uint64_t from_cpu, uint64_t to_cpu); + int32_t (*svc_migrate_info)(uint64_t *resident_cpu); + void (*svc_system_off)(void); + void (*svc_system_reset)(void); +} spd_pm_ops_t; + +/******************************************************************************* + * Function & Data prototypes + ******************************************************************************/ +unsigned int psci_version(void); +int psci_affinity_info(unsigned long, unsigned int); +int psci_migrate(unsigned long); +int psci_migrate_info_type(void); +long psci_migrate_info_up_cpu(void); +int psci_cpu_on(unsigned long, + unsigned long, + unsigned long); +void __dead2 psci_power_down_wfi(void); +void psci_aff_on_finish_entry(void); +void psci_aff_suspend_finish_entry(void); +void psci_register_spd_pm_hook(const spd_pm_ops_t *); +int psci_get_suspend_stateid_by_mpidr(unsigned long); +int psci_get_suspend_stateid(void); +int psci_get_suspend_afflvl(void); +uint32_t psci_get_max_phys_off_afflvl(void); + +uint64_t psci_smc_handler(uint32_t smc_fid, + uint64_t x1, + uint64_t x2, + uint64_t x3, + uint64_t x4, + void *cookie, + void *handle, + uint64_t flags); + +/* PSCI setup function */ +int32_t psci_setup(void); + + +#endif /*__ASSEMBLY__*/ + + +#endif /* __PSCI_H__ */ diff --git a/include/plat/common/psci1.0/platform.h b/include/plat/common/psci1.0/platform.h new file mode 100644 index 0000000..469d46b --- /dev/null +++ b/include/plat/common/psci1.0/platform.h @@ 
-0,0 +1,204 @@ +/* + * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __PLATFORM_H__ +#define __PLATFORM_H__ + +#include + + +/******************************************************************************* + * Forward declarations + ******************************************************************************/ +struct plat_pm_ops; +struct meminfo; +struct image_info; +struct entry_point_info; +struct bl31_params; + +/******************************************************************************* + * plat_get_rotpk_info() flags + ******************************************************************************/ +#define ROTPK_IS_HASH (1 << 0) + +/******************************************************************************* + * Function declarations + ******************************************************************************/ +/******************************************************************************* + * Mandatory common functions + ******************************************************************************/ +uint64_t plat_get_syscnt_freq(void); +int plat_get_image_source(unsigned int image_id, + uintptr_t *dev_handle, + uintptr_t *image_spec); +unsigned long plat_get_ns_image_entrypoint(void); + +/******************************************************************************* + * Mandatory interrupt management functions + ******************************************************************************/ +uint32_t plat_ic_get_pending_interrupt_id(void); +uint32_t plat_ic_get_pending_interrupt_type(void); +uint32_t plat_ic_acknowledge_interrupt(void); +uint32_t plat_ic_get_interrupt_type(uint32_t id); +void plat_ic_end_of_interrupt(uint32_t id); +uint32_t plat_interrupt_type_to_line(uint32_t type, + uint32_t security_state); + +/******************************************************************************* + * Optional common functions (may be overridden) + ******************************************************************************/ +unsigned int 
platform_get_core_pos(unsigned long mpidr); +unsigned long platform_get_stack(unsigned long mpidr); +void plat_report_exception(unsigned long); +int plat_crash_console_init(void); +int plat_crash_console_putc(int c); + +/******************************************************************************* + * Mandatory BL1 functions + ******************************************************************************/ +void bl1_early_platform_setup(void); +void bl1_plat_arch_setup(void); +void bl1_platform_setup(void); +struct meminfo *bl1_plat_sec_mem_layout(void); + +/* + * This function allows the platform to change the entrypoint information for + * BL2, after BL1 has loaded BL2 into memory but before BL2 is executed. + */ +void bl1_plat_set_bl2_ep_info(struct image_info *image, + struct entry_point_info *ep); + +/******************************************************************************* + * Optional BL1 functions (may be overridden) + ******************************************************************************/ +void bl1_init_bl2_mem_layout(const struct meminfo *bl1_mem_layout, + struct meminfo *bl2_mem_layout); + +/******************************************************************************* + * Mandatory BL2 functions + ******************************************************************************/ +void bl2_early_platform_setup(struct meminfo *mem_layout); +void bl2_plat_arch_setup(void); +void bl2_platform_setup(void); +struct meminfo *bl2_plat_sec_mem_layout(void); + +/* + * This function returns a pointer to the shared memory that the platform has + * kept aside to pass trusted firmware related information that BL3-1 + * could need + */ +struct bl31_params *bl2_plat_get_bl31_params(void); + +/* + * This function returns a pointer to the shared memory that the platform + * has kept to point to entry point information of BL31 to BL2 + */ +struct entry_point_info *bl2_plat_get_bl31_ep_info(void); + +/* + * This function flushes to main memory all the params that are + * passed to BL3-1 + */ +void bl2_plat_flush_bl31_params(void); + +/* + * The next 2 functions allow the platform to change the entrypoint information + * for the mandatory 3rd level BL images, BL3-1 and BL3-3. This is done after + * BL2 has loaded those images into memory but before BL3-1 is executed. + */ +void bl2_plat_set_bl31_ep_info(struct image_info *image, + struct entry_point_info *ep); + +void bl2_plat_set_bl33_ep_info(struct image_info *image, + struct entry_point_info *ep); + +/* Gets the memory layout for BL3-3 */ +void bl2_plat_get_bl33_meminfo(struct meminfo *mem_info); + +/******************************************************************************* + * Conditionally mandatory BL2 functions: must be implemented if BL3-0 image + * is supported + ******************************************************************************/ +/* Gets the memory layout for BL3-0 */ +void bl2_plat_get_bl30_meminfo(struct meminfo *mem_info); + +/* + * This function is called after loading BL3-0 image and it is used to perform + * any platform-specific actions required to handle the SCP firmware. 
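As a porting illustration for the meminfo-filling hooks above, here is a minimal sketch of bl2_plat_get_bl33_meminfo(). It assumes the conventional total/free base-and-size fields of struct meminfo; the PLAT_NS_DRAM_BASE and PLAT_NS_DRAM_SIZE constants are hypothetical placeholders for the platform's own defines.

void bl2_plat_get_bl33_meminfo(struct meminfo *mem_info)
{
	/* BL3-3 may be loaded anywhere in non-secure DRAM */
	mem_info->total_base = PLAT_NS_DRAM_BASE;
	mem_info->total_size = PLAT_NS_DRAM_SIZE;
	mem_info->free_base = PLAT_NS_DRAM_BASE;
	mem_info->free_size = PLAT_NS_DRAM_SIZE;
}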
+ */ +int bl2_plat_handle_bl30(struct image_info *bl30_image_info); + +/******************************************************************************* + * Conditionally mandatory BL2 functions: must be implemented if BL3-2 image + * is supported + ******************************************************************************/ +void bl2_plat_set_bl32_ep_info(struct image_info *image, + struct entry_point_info *ep); + +/* Gets the memory layout for BL3-2 */ +void bl2_plat_get_bl32_meminfo(struct meminfo *mem_info); + +/******************************************************************************* + * Optional BL2 functions (may be overridden) + ******************************************************************************/ + +/******************************************************************************* + * Mandatory BL3-1 functions + ******************************************************************************/ +void bl31_early_platform_setup(struct bl31_params *from_bl2, + void *plat_params_from_bl2); +void bl31_plat_arch_setup(void); +void bl31_platform_setup(void); +struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type); + +/******************************************************************************* + * Mandatory PSCI functions (BL3-1) + ******************************************************************************/ +int platform_setup_pm(const struct plat_pm_ops **); +unsigned int plat_get_aff_count(unsigned int, unsigned long); +unsigned int plat_get_aff_state(unsigned int, unsigned long); + +/******************************************************************************* + * Optional BL3-1 functions (may be overridden) + ******************************************************************************/ +void bl31_plat_enable_mmu(uint32_t flags); + +/******************************************************************************* + * Optional BL3-2 functions (may be overridden) + ******************************************************************************/ +void bl32_plat_enable_mmu(uint32_t flags); + +/******************************************************************************* + * Trusted Board Boot functions + ******************************************************************************/ +int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len, + unsigned int *flags); + +#endif /* __PLATFORM_H__ */ diff --git a/services/std_svc/psci1.0/psci_afflvl_off.c b/services/std_svc/psci1.0/psci_afflvl_off.c new file mode 100644 index 0000000..7eb9688 --- /dev/null +++ b/services/std_svc/psci1.0/psci_afflvl_off.c @@ -0,0 +1,248 @@ +/* + * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. 
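Bridging the two headers: BL3-1 calls platform_setup_pm(), declared in platform.h above, to fetch the plat_pm_ops table declared in psci.h. A minimal registration sketch follows; all the my_plat_* hook names are hypothetical and stand in for the platform port's actual handlers.

static const plat_pm_ops_t my_plat_pm_ops = {
	.affinst_on		= my_plat_affinst_on,
	.affinst_off		= my_plat_affinst_off,
	.affinst_suspend	= my_plat_affinst_suspend,
	.affinst_on_finish	= my_plat_affinst_on_finish,
	.affinst_suspend_finish	= my_plat_affinst_suspend_finish,
	.validate_power_state	= my_plat_validate_power_state,
};

int platform_setup_pm(const plat_pm_ops_t **plat_ops)
{
	/* Hand PSCI a pointer to the statically allocated ops table */
	*plat_ops = &my_plat_pm_ops;
	return 0;
}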
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include "psci_private.h" + +typedef void (*afflvl_off_handler_t)(aff_map_node_t *node); + +/******************************************************************************* + * The next three functions implement a handler for each supported affinity + * level which is called when that affinity level is turned off. + ******************************************************************************/ +static void psci_afflvl0_off(aff_map_node_t *cpu_node) +{ + assert(cpu_node->level == MPIDR_AFFLVL0); + + /* + * Arch. management. Perform the necessary steps to flush all + * cpu caches. + */ + psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL0); + + /* + * Plat. management: Perform platform specific actions to turn this + * cpu off e.g. exit cpu coherency, program the power controller etc. + */ + psci_plat_pm_ops->affinst_off(cpu_node->level, + psci_get_phys_state(cpu_node)); +} + +static void psci_afflvl1_off(aff_map_node_t *cluster_node) +{ + /* Sanity check the cluster level */ + assert(cluster_node->level == MPIDR_AFFLVL1); + + /* + * Arch. Management. Flush all levels of caches to PoC if + * the cluster is to be shutdown. + */ + psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL1); + + /* + * Plat. Management. Allow the platform to do its cluster + * specific bookeeping e.g. turn off interconnect coherency, + * program the power controller etc. + */ + psci_plat_pm_ops->affinst_off(cluster_node->level, + psci_get_phys_state(cluster_node)); +} + +static void psci_afflvl2_off(aff_map_node_t *system_node) +{ + /* Cannot go beyond this level */ + assert(system_node->level == MPIDR_AFFLVL2); + + /* + * Keep the physical state of the system handy to decide what + * action needs to be taken + */ + + /* + * Arch. Management. Flush all levels of caches to PoC if + * the system is to be shutdown. + */ + psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL2); + + /* + * Plat. 
Management: Allow the platform to do its bookkeeping + * at this affinity level + */ + psci_plat_pm_ops->affinst_off(system_node->level, + psci_get_phys_state(system_node)); +} + +static const afflvl_off_handler_t psci_afflvl_off_handlers[] = { + psci_afflvl0_off, + psci_afflvl1_off, + psci_afflvl2_off, +}; + +/******************************************************************************* + * This function takes an array of pointers to affinity instance nodes in the + * topology tree and calls the off handler for the corresponding affinity + * levels + ******************************************************************************/ +static void psci_call_off_handlers(aff_map_node_t *mpidr_nodes[], + int start_afflvl, + int end_afflvl) +{ + int level; + aff_map_node_t *node; + + for (level = start_afflvl; level <= end_afflvl; level++) { + node = mpidr_nodes[level]; + if (node == NULL) + continue; + + psci_afflvl_off_handlers[level](node); + } +} + +/******************************************************************************* + * Top level handler which is called when a cpu wants to power itself down. + * It's assumed that along with turning the cpu off, higher affinity levels will + * be turned off as far as possible. It traverses through all the affinity + * levels performing generic, architectural, platform setup and state management + * e.g. for a cluster that's to be powered off, it will call the platform + * specific code which will disable coherency at the interconnect level if the + * cpu is the last in the cluster. For a cpu it could mean programming the + * power controller etc. + * + * The state of all the relevant affinity levels is changed prior to calling the + * affinity level specific handlers as their actions would depend upon the state + * the affinity level is about to enter. + * + * The affinity level specific handlers are called in ascending order i.e. from + * the lowest to the highest affinity level implemented by the platform because + * to turn off affinity level X it is necessary to turn off affinity level X - 1 + * first. + ******************************************************************************/ +int psci_afflvl_off(int start_afflvl, + int end_afflvl) +{ + int rc; + mpidr_aff_map_nodes_t mpidr_nodes; + unsigned int max_phys_off_afflvl; + + /* + * This function must only be called on platforms where the + * CPU_OFF platform hooks have been implemented. + */ + assert(psci_plat_pm_ops->affinst_off); + + /* + * Collect the pointers to the nodes in the topology tree for + * each affinity instance in the mpidr. If this function does + * not return successfully then either the mpidr or the affinity + * levels are incorrect. Either way, this is an internal TF error + * therefore assert. + */ + rc = psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK, + start_afflvl, + end_afflvl, + mpidr_nodes); + assert(rc == PSCI_E_SUCCESS); + + /* + * This function acquires the lock corresponding to each affinity + * level so that by the time all locks are taken, the system topology + * is snapshotted and state management can be done safely. + */ + psci_acquire_afflvl_locks(start_afflvl, + end_afflvl, + mpidr_nodes); + + + /* + * Call the cpu off handler registered by the Secure Payload Dispatcher + * to let it do any bookkeeping.
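The acquire/release pairing used here and in the exit path below is symmetric by construction. A sketch of the two walks, assuming the psci_lock_get()/psci_lock_release() primitives from psci_private.h:

void psci_acquire_afflvl_locks(int start_afflvl, int end_afflvl,
			       mpidr_aff_map_nodes_t mpidr_nodes)
{
	int level;

	/* Take locks bottom-up so every cpu contends in the same order */
	for (level = start_afflvl; level <= end_afflvl; level++) {
		if (mpidr_nodes[level] == NULL)
			continue;
		psci_lock_get(mpidr_nodes[level]);
	}
}

void psci_release_afflvl_locks(int start_afflvl, int end_afflvl,
			       mpidr_aff_map_nodes_t mpidr_nodes)
{
	int level;

	/* Drop them top-down, the exact reverse, before leaving */
	for (level = end_afflvl; level >= start_afflvl; level--) {
		if (mpidr_nodes[level] == NULL)
			continue;
		psci_lock_release(mpidr_nodes[level]);
	}
}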
Assume that the SPD always reports an + * E_DENIED error if the SP refuses to power down + */ + if (psci_spd_pm && psci_spd_pm->svc_off) { + rc = psci_spd_pm->svc_off(0); + if (rc) + goto exit; + } + + /* + * This function updates the state of each affinity instance + * corresponding to the mpidr in the range of affinity levels + * specified. + */ + psci_do_afflvl_state_mgmt(start_afflvl, + end_afflvl, + mpidr_nodes, + PSCI_STATE_OFF); + + max_phys_off_afflvl = psci_find_max_phys_off_afflvl(start_afflvl, + end_afflvl, + mpidr_nodes); + assert(max_phys_off_afflvl != PSCI_INVALID_DATA); + + /* Stash the highest affinity level that will enter the OFF state. */ + psci_set_max_phys_off_afflvl(max_phys_off_afflvl); + + /* Perform generic, architecture and platform specific handling */ + psci_call_off_handlers(mpidr_nodes, + start_afflvl, + end_afflvl); + + /* + * Invalidate the entry for the highest affinity level stashed earlier. + * This ensures that any reads of this variable outside the power + * up/down sequences return PSCI_INVALID_DATA. + */ + psci_set_max_phys_off_afflvl(PSCI_INVALID_DATA); + +exit: + /* + * Release the locks corresponding to each affinity level in the + * reverse order to which they were acquired. + */ + psci_release_afflvl_locks(start_afflvl, + end_afflvl, + mpidr_nodes); + + /* + * Check if all actions needed to safely power down this cpu have + * successfully completed. Enter a wfi loop which will allow the + * power controller to physically power down this cpu. + */ + if (rc == PSCI_E_SUCCESS) + psci_power_down_wfi(); + + return rc; +} diff --git a/services/std_svc/psci1.0/psci_afflvl_on.c b/services/std_svc/psci1.0/psci_afflvl_on.c new file mode 100644 index 0000000..0dbd0e0 --- /dev/null +++ b/services/std_svc/psci1.0/psci_afflvl_on.c @@ -0,0 +1,405 @@ +/* + * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE.
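On the wfi exit path above: psci_power_down_wfi() never returns. In TF it is an assembly routine, but its behaviour is captured by this C-flavoured sketch (dsbsy()/wfi() are the arch_helpers.h wrappers):

void __dead2 psci_power_down_wfi(void)
{
	dsbsy();	/* drain all outstanding writes first */

	/* Loop in wfi until the power controller physically removes power;
	 * a spurious wake-up must not fall through into other code. */
	for (;;)
		wfi();
}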
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "psci_private.h" + +typedef int (*afflvl_on_handler_t)(unsigned long target_cpu, + aff_map_node_t *node); + +/******************************************************************************* + * This function checks whether a cpu which has been requested to be turned on + * is OFF to begin with. + ******************************************************************************/ +static int cpu_on_validate_state(unsigned int psci_state) +{ + if (psci_state == PSCI_STATE_ON || psci_state == PSCI_STATE_SUSPEND) + return PSCI_E_ALREADY_ON; + + if (psci_state == PSCI_STATE_ON_PENDING) + return PSCI_E_ON_PENDING; + + assert(psci_state == PSCI_STATE_OFF); + return PSCI_E_SUCCESS; +} + +/******************************************************************************* + * Handler routine to turn a cpu on. It takes care of any generic, architectural + * or platform specific setup required. + * TODO: Split this code across separate handlers for each type of setup? + ******************************************************************************/ +static int psci_afflvl0_on(unsigned long target_cpu, + aff_map_node_t *cpu_node) +{ + unsigned long psci_entrypoint; + + /* Sanity check to safeguard against data corruption */ + assert(cpu_node->level == MPIDR_AFFLVL0); + + /* Set the secure world (EL3) re-entry point after BL1 */ + psci_entrypoint = (unsigned long) psci_aff_on_finish_entry; + + /* + * Plat. management: Give the platform the current state + * of the target cpu to allow it to perform the necessary + * steps to power on. + */ + return psci_plat_pm_ops->affinst_on(target_cpu, + psci_entrypoint, + cpu_node->level, + psci_get_phys_state(cpu_node)); +} + +/******************************************************************************* + * Handler routine to turn a cluster on. It takes care of any generic, arch. + * or platform specific setup required. + * TODO: Split this code across separate handlers for each type of setup? + ******************************************************************************/ +static int psci_afflvl1_on(unsigned long target_cpu, + aff_map_node_t *cluster_node) +{ + unsigned long psci_entrypoint; + + assert(cluster_node->level == MPIDR_AFFLVL1); + + /* + * There is no generic and arch. specific cluster + * management required + */ + + /* State management: Is not required while turning a cluster on */ + + /* + * Plat. management: Give the platform the current state + * of the target cpu to allow it to perform the necessary + * steps to power on. + */ + psci_entrypoint = (unsigned long) psci_aff_on_finish_entry; + return psci_plat_pm_ops->affinst_on(target_cpu, + psci_entrypoint, + cluster_node->level, + psci_get_phys_state(cluster_node)); +} + +/******************************************************************************* + * Handler routine to turn a cluster of clusters on. It takes care of any + * generic, arch. or platform specific setup required. + * TODO: Split this code across separate handlers for each type of setup? + ******************************************************************************/ +static int psci_afflvl2_on(unsigned long target_cpu, + aff_map_node_t *system_node) +{ + unsigned long psci_entrypoint; + + /* Cannot go beyond affinity level 2 in this psci implementation */ + assert(system_node->level == MPIDR_AFFLVL2); + + /* + * There is no generic and arch.
specific system management + * required + */ + + /* State management: Is not required while turning a system on */ + + /* + * Plat. management: Give the platform the current state + * of the target cpu to allow it to perform the necessary + * steps to power on. + */ + psci_entrypoint = (unsigned long) psci_aff_on_finish_entry; + return psci_plat_pm_ops->affinst_on(target_cpu, + psci_entrypoint, + system_node->level, + psci_get_phys_state(system_node)); +} + +/* Private data structure to make these handlers accessible through indexing */ +static const afflvl_on_handler_t psci_afflvl_on_handlers[] = { + psci_afflvl0_on, + psci_afflvl1_on, + psci_afflvl2_on, +}; + +/******************************************************************************* + * This function takes an array of pointers to affinity instance nodes in the + * topology tree and calls the on handler for the corresponding affinity + * levels + ******************************************************************************/ +static int psci_call_on_handlers(aff_map_node_t *target_cpu_nodes[], + int start_afflvl, + int end_afflvl, + unsigned long target_cpu) +{ + int rc = PSCI_E_INVALID_PARAMS, level; + aff_map_node_t *node; + + for (level = end_afflvl; level >= start_afflvl; level--) { + node = target_cpu_nodes[level]; + if (node == NULL) + continue; + + /* + * TODO: In case of an error should there be a way + * of undoing what we might have set up at higher + * affinity levels? + */ + rc = psci_afflvl_on_handlers[level](target_cpu, + node); + if (rc != PSCI_E_SUCCESS) + break; + } + + return rc; +} + +/******************************************************************************* + * Generic handler which is called to physically power on a cpu identified by + * its mpidr. It traverses through all the affinity levels performing generic, + * architectural, platform setup and state management e.g. for a cpu that is + * to be powered on, it will ensure that enough information is stashed for it + * to resume execution in the non-secure security state. + * + * The state of all the relevant affinity levels is changed after calling the + * affinity level specific handlers as their actions would depend upon the state + * the affinity level is currently in. + * + * The affinity level specific handlers are called in descending order i.e. from + * the highest to the lowest affinity level implemented by the platform because + * to turn on affinity level X it is necessary to turn on affinity level X + 1 + * first. + ******************************************************************************/ +int psci_afflvl_on(unsigned long target_cpu, + entry_point_info_t *ep, + int start_afflvl, + int end_afflvl) +{ + int rc; + mpidr_aff_map_nodes_t target_cpu_nodes; + + /* + * This function must only be called on platforms where the + * CPU_ON platform hooks have been implemented. + */ + assert(psci_plat_pm_ops->affinst_on && + psci_plat_pm_ops->affinst_on_finish); + + /* + * Collect the pointers to the nodes in the topology tree for + * each affinity instance in the mpidr. If this function does + * not return successfully then either the mpidr or the affinity + * levels are incorrect. + */ + rc = psci_get_aff_map_nodes(target_cpu, + start_afflvl, + end_afflvl, + target_cpu_nodes); + assert(rc == PSCI_E_SUCCESS); + + /* + * This function acquires the lock corresponding to each affinity + * level so that by the time all locks are taken, the system topology + * is snapshotted and state management can be done safely.
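cpu_on_validate_state() above works on the value returned by psci_get_state(). The state byte keeps the presence bit (PSCI_AFF_PRESENT) below PSCI_STATE_SHIFT and the state field above it; a sketch of the accessor pair, assuming the aff_map_node_t layout from psci_private.h:

unsigned int psci_get_state(aff_map_node_t *node)
{
	/* Strip the presence bit, return only the state field */
	return (node->state >> PSCI_STATE_SHIFT) & PSCI_STATE_MASK;
}

void psci_set_state(aff_map_node_t *node, unsigned int state)
{
	/* Preserve the presence bit, replace only the state field */
	node->state &= ~(PSCI_STATE_MASK << PSCI_STATE_SHIFT);
	node->state |= (state & PSCI_STATE_MASK) << PSCI_STATE_SHIFT;
}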
+ */ + psci_acquire_afflvl_locks(start_afflvl, + end_afflvl, + target_cpu_nodes); + + /* + * Generic management: Ensure that the cpu is off to be + * turned on. + */ + rc = cpu_on_validate_state(psci_get_state( + target_cpu_nodes[MPIDR_AFFLVL0])); + if (rc != PSCI_E_SUCCESS) + goto exit; + + /* + * Call the cpu on handler registered by the Secure Payload Dispatcher + * to let it do any bookkeeping. If the handler encounters an error, it's + * expected to assert from within. + */ + if (psci_spd_pm && psci_spd_pm->svc_on) + psci_spd_pm->svc_on(target_cpu); + + /* + * This function updates the state of each affinity instance + * corresponding to the mpidr in the range of affinity levels + * specified. + */ + psci_do_afflvl_state_mgmt(start_afflvl, + end_afflvl, + target_cpu_nodes, + PSCI_STATE_ON_PENDING); + + /* Perform generic, architecture and platform specific handling. */ + rc = psci_call_on_handlers(target_cpu_nodes, + start_afflvl, + end_afflvl, + target_cpu); + + assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL); + + if (rc == PSCI_E_SUCCESS) + /* Store the re-entry information for the non-secure world. */ + cm_init_context(target_cpu, ep); + else + /* Restore the state on error. */ + psci_do_afflvl_state_mgmt(start_afflvl, + end_afflvl, + target_cpu_nodes, + PSCI_STATE_OFF); +exit: + /* + * This loop releases the lock corresponding to each affinity level + * in the reverse order to which they were acquired. + */ + psci_release_afflvl_locks(start_afflvl, + end_afflvl, + target_cpu_nodes); + + return rc; +} + +/******************************************************************************* + * The following functions finish an earlier affinity power on request. They + * are called by the common finisher routine in psci_common.c. + ******************************************************************************/ +static void psci_afflvl0_on_finish(aff_map_node_t *cpu_node) +{ + unsigned int plat_state, state; + + assert(cpu_node->level == MPIDR_AFFLVL0); + + /* Ensure we have been explicitly woken up by another cpu */ + state = psci_get_state(cpu_node); + assert(state == PSCI_STATE_ON_PENDING); + + /* + * Plat. management: Perform the platform specific actions + * for this cpu e.g. enabling the gic or zeroing the mailbox + * register. The actual state of this cpu has already been + * changed. + */ + + /* Get the physical state of this cpu */ + plat_state = get_phys_state(state); + psci_plat_pm_ops->affinst_on_finish(cpu_node->level, + plat_state); + + /* + * Arch. management: Enable data cache and manage stack memory + */ + psci_do_pwrup_cache_maintenance(); + + /* + * All the platform specific actions for turning this cpu + * on have completed. Perform enough arch. initialization + * to run in the non-secure address space. + */ + bl31_arch_setup(); + + /* + * Call the cpu on finish handler registered by the Secure Payload + * Dispatcher to let it do any bookkeeping. If the handler encounters an + * error, it's expected to assert from within. + */ + if (psci_spd_pm && psci_spd_pm->svc_on_finish) + psci_spd_pm->svc_on_finish(0); + + /* + * Generic management: Now we just need to retrieve the + * information that we had stashed away during the cpu_on + * call to set this cpu on its way. + */ + cm_prepare_el3_exit(NON_SECURE); + + /* Clean caches before re-entering normal world */ + dcsw_op_louis(DCCSW); +} + +static void psci_afflvl1_on_finish(aff_map_node_t *cluster_node) +{ + unsigned int plat_state; + + assert(cluster_node->level == MPIDR_AFFLVL1); + + /* + * Plat.
management: Perform the platform specific actions + * as per the old state of the cluster e.g. enabling + * coherency at the interconnect depends upon the state with + * which this cluster was powered up. If anything goes wrong + * then assert as there is no way to recover from this + * situation. + */ + plat_state = psci_get_phys_state(cluster_node); + psci_plat_pm_ops->affinst_on_finish(cluster_node->level, + plat_state); +} + + +static void psci_afflvl2_on_finish(aff_map_node_t *system_node) +{ + unsigned int plat_state; + + /* Cannot go beyond this affinity level */ + assert(system_node->level == MPIDR_AFFLVL2); + + /* + * Currently, there are no architectural actions to perform + * at the system level. + */ + + /* + * Plat. management: Perform the platform specific actions + * as per the old state of the cluster e.g. enabling + * coherency at the interconnect depends upon the state with + * which this cluster was powered up. If anything goes wrong + * then assert as there is no way to recover from this + * situation. + */ + plat_state = psci_get_phys_state(system_node); + psci_plat_pm_ops->affinst_on_finish(system_node->level, + plat_state); +} + +const afflvl_power_on_finisher_t psci_afflvl_on_finishers[] = { + psci_afflvl0_on_finish, + psci_afflvl1_on_finish, + psci_afflvl2_on_finish, +}; diff --git a/services/std_svc/psci1.0/psci_afflvl_suspend.c b/services/std_svc/psci1.0/psci_afflvl_suspend.c new file mode 100644 index 0000000..76e8c90 --- /dev/null +++ b/services/std_svc/psci1.0/psci_afflvl_suspend.c @@ -0,0 +1,469 @@ +/* + * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
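The psci_afflvl_on_finishers[] table that closes psci_afflvl_on.c above is consumed by the common wake-up path in psci_common.c. A simplified sketch of that dispatch (locking shown, the max-phys-off bookkeeping from the off/suspend paths elided; names follow the declarations used elsewhere in this patch):

void psci_afflvl_power_on_finish(int start_afflvl,
				 int end_afflvl,
				 const afflvl_power_on_finisher_t *pon_handlers)
{
	mpidr_aff_map_nodes_t mpidr_nodes;
	int rc, level;

	/* Re-discover and lock this cpu's nodes in the topology tree */
	rc = psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
				    start_afflvl, end_afflvl, mpidr_nodes);
	assert(rc == PSCI_E_SUCCESS);
	psci_acquire_afflvl_locks(start_afflvl, end_afflvl, mpidr_nodes);

	/* Run each implemented level's finisher, highest level first */
	for (level = end_afflvl; level >= start_afflvl; level--) {
		if (mpidr_nodes[level] == NULL)
			continue;
		pon_handlers[level](mpidr_nodes[level]);
	}

	/* Mark the whole path ON again and drop the locks in reverse */
	psci_do_afflvl_state_mgmt(start_afflvl, end_afflvl, mpidr_nodes,
				  PSCI_STATE_ON);
	psci_release_afflvl_locks(start_afflvl, end_afflvl, mpidr_nodes);
}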
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "psci_private.h" + +typedef void (*afflvl_suspend_handler_t)(aff_map_node_t *node); + +/******************************************************************************* + * This function saves the power state parameter passed in the current PSCI + * cpu_suspend call in the per-cpu data array. + ******************************************************************************/ +void psci_set_suspend_power_state(unsigned int power_state) +{ + set_cpu_data(psci_svc_cpu_data.power_state, power_state); + flush_cpu_data(psci_svc_cpu_data.power_state); +} + +/******************************************************************************* + * This function gets the affinity level till which the current cpu could be + * powered down during a cpu_suspend call. Returns PSCI_INVALID_DATA if the + * power state is invalid. + ******************************************************************************/ +int psci_get_suspend_afflvl(void) +{ + unsigned int power_state; + + power_state = get_cpu_data(psci_svc_cpu_data.power_state); + + return ((power_state == PSCI_INVALID_DATA) ? + power_state : psci_get_pstate_afflvl(power_state)); +} + +/******************************************************************************* + * This function gets the state id of the current cpu from the power state + * parameter saved in the per-cpu data array. Returns PSCI_INVALID_DATA if the + * power state saved is invalid. + ******************************************************************************/ +int psci_get_suspend_stateid(void) +{ + unsigned int power_state; + + power_state = get_cpu_data(psci_svc_cpu_data.power_state); + + return ((power_state == PSCI_INVALID_DATA) ? + power_state : psci_get_pstate_id(power_state)); +} + +/******************************************************************************* + * This function gets the state id of the cpu specified by the 'mpidr' parameter + * from the power state parameter saved in the per-cpu data array. Returns + * PSCI_INVALID_DATA if the power state saved is invalid. + ******************************************************************************/ +int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr) +{ + unsigned int power_state; + + power_state = get_cpu_data_by_mpidr(mpidr, + psci_svc_cpu_data.power_state); + + return ((power_state == PSCI_INVALID_DATA) ? + power_state : psci_get_pstate_id(power_state)); +} + +/******************************************************************************* + * The next three functions implement a handler for each supported affinity + * level which is called when that affinity level is about to be suspended. + ******************************************************************************/ +static void psci_afflvl0_suspend(aff_map_node_t *cpu_node) +{ + unsigned long psci_entrypoint; + + /* Sanity check to safeguard against data corruption */ + assert(cpu_node->level == MPIDR_AFFLVL0); + + /* Set the secure world (EL3) re-entry point after BL1 */ + psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry; + + /* + * Arch. management. Perform the necessary steps to flush all + * cpu caches. + */ + psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL0); + + /* + * Plat. management: Allow the platform to perform the + * necessary actions to turn off this cpu e.g. set the + * platform defined mailbox with the psci entrypoint, + * program the power controller etc. 
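The three suspend-state accessors above all return the PSCI_INVALID_DATA sentinel (-1) when no CPU_SUSPEND is in flight on the cpu, so callers must check for it before decoding. An illustrative guard (the function name is hypothetical):

static int example_target_suspend_afflvl(void)
{
	int afflvl = psci_get_suspend_afflvl();

	/* No power state stashed: this cpu never entered cpu_suspend */
	if (afflvl == PSCI_INVALID_DATA)
		return PSCI_E_INVALID_PARAMS;

	return afflvl;	/* 0 = cpu, 1 = cluster, 2 = system */
}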
+ */ + psci_plat_pm_ops->affinst_suspend(psci_entrypoint, + cpu_node->level, + psci_get_phys_state(cpu_node)); +} + +static void psci_afflvl1_suspend(aff_map_node_t *cluster_node) +{ + unsigned int plat_state; + unsigned long psci_entrypoint; + + /* Sanity check the cluster level */ + assert(cluster_node->level == MPIDR_AFFLVL1); + + /* + * Arch. management: Flush all levels of caches to PoC if the + * cluster is to be shutdown. + */ + psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL1); + + /* + * Plat. Management. Allow the platform to do its cluster specific + * bookeeping e.g. turn off interconnect coherency, program the power + * controller etc. Sending the psci entrypoint is currently redundant + * beyond affinity level 0 but one never knows what a platform might + * do. Also it allows us to keep the platform handler prototype the + * same. + */ + plat_state = psci_get_phys_state(cluster_node); + psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry; + psci_plat_pm_ops->affinst_suspend(psci_entrypoint, + cluster_node->level, + plat_state); +} + + +static void psci_afflvl2_suspend(aff_map_node_t *system_node) +{ + unsigned int plat_state; + unsigned long psci_entrypoint; + + /* Cannot go beyond this */ + assert(system_node->level == MPIDR_AFFLVL2); + + /* + * Keep the physical state of the system handy to decide what + * action needs to be taken + */ + plat_state = psci_get_phys_state(system_node); + + /* + * Arch. management: Flush all levels of caches to PoC if the + * system is to be shutdown. + */ + psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL2); + + /* + * Plat. Management : Allow the platform to do its bookeeping + * at this affinity level + */ + + /* + * Sending the psci entrypoint is currently redundant + * beyond affinity level 0 but one never knows what a + * platform might do. Also it allows us to keep the + * platform handler prototype the same. + */ + plat_state = psci_get_phys_state(system_node); + psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry; + psci_plat_pm_ops->affinst_suspend(psci_entrypoint, + system_node->level, + plat_state); +} + +static const afflvl_suspend_handler_t psci_afflvl_suspend_handlers[] = { + psci_afflvl0_suspend, + psci_afflvl1_suspend, + psci_afflvl2_suspend, +}; + +/******************************************************************************* + * This function takes an array of pointers to affinity instance nodes in the + * topology tree and calls the suspend handler for the corresponding affinity + * levels + ******************************************************************************/ +static void psci_call_suspend_handlers(aff_map_node_t *mpidr_nodes[], + int start_afflvl, + int end_afflvl) +{ + int level; + aff_map_node_t *node; + + for (level = start_afflvl; level <= end_afflvl; level++) { + node = mpidr_nodes[level]; + if (node == NULL) + continue; + + psci_afflvl_suspend_handlers[level](node); + } +} + +/******************************************************************************* + * Top level handler which is called when a cpu wants to suspend its execution. + * It is assumed that along with turning the cpu off, higher affinity levels + * until the target affinity level will be turned off as well. It traverses + * through all the affinity levels performing generic, architectural, platform + * setup and state management e.g. for a cluster that's to be suspended, it will + * call the platform specific code which will disable coherency at the + * interconnect level if the cpu is the last in the cluster. 
For a cpu it could + * mean programming the power controller etc. + * + * The state of all the relevant affinity levels is changed prior to calling the + * affinity level specific handlers as their actions would depend upon the state + * the affinity level is about to enter. + * + * The affinity level specific handlers are called in ascending order i.e. from + * the lowest to the highest affinity level implemented by the platform because + * to turn off affinity level X it is necessary to turn off affinity level X - 1 + * first. + * + * All the required parameter checks are performed at the beginning and after + * the state transition has been done, no further error is expected and it + * is not possible to undo any of the actions taken beyond that point. + ******************************************************************************/ +void psci_afflvl_suspend(entry_point_info_t *ep, + int start_afflvl, + int end_afflvl) +{ + int skip_wfi = 0; + mpidr_aff_map_nodes_t mpidr_nodes; + unsigned int max_phys_off_afflvl; + + /* + * This function must only be called on platforms where the + * CPU_SUSPEND platform hooks have been implemented. + */ + assert(psci_plat_pm_ops->affinst_suspend && + psci_plat_pm_ops->affinst_suspend_finish); + + /* + * Collect the pointers to the nodes in the topology tree for + * each affinity instance in the mpidr. If this function does + * not return successfully then either the mpidr or the affinity + * levels are incorrect. Either way, this is an internal TF error + * therefore assert. + */ + if (psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK, + start_afflvl, end_afflvl, mpidr_nodes) != PSCI_E_SUCCESS) + assert(0); + + /* + * This function acquires the lock corresponding to each affinity + * level so that by the time all locks are taken, the system topology + * is snapshotted and state management can be done safely. + */ + psci_acquire_afflvl_locks(start_afflvl, + end_afflvl, + mpidr_nodes); + + /* + * We check if there are any pending interrupts after the delay + * introduced by lock contention to increase the chances of early + * detection that a wake-up interrupt has fired. + */ + if (read_isr_el1()) { + skip_wfi = 1; + goto exit; + } + + /* + * Call the cpu suspend handler registered by the Secure Payload + * Dispatcher to let it do any bookkeeping. If the handler encounters an + * error, it's expected to assert from within. + */ + if (psci_spd_pm && psci_spd_pm->svc_suspend) + psci_spd_pm->svc_suspend(0); + + /* + * This function updates the state of each affinity instance + * corresponding to the mpidr in the range of affinity levels + * specified. + */ + psci_do_afflvl_state_mgmt(start_afflvl, + end_afflvl, + mpidr_nodes, + PSCI_STATE_SUSPEND); + + max_phys_off_afflvl = psci_find_max_phys_off_afflvl(start_afflvl, + end_afflvl, + mpidr_nodes); + assert(max_phys_off_afflvl != PSCI_INVALID_DATA); + + /* Stash the highest affinity level that will be turned off */ + psci_set_max_phys_off_afflvl(max_phys_off_afflvl); + + /* + * Store the re-entry information for the non-secure world. + */ + cm_init_context(read_mpidr_el1(), ep); + + /* Perform generic, architecture and platform specific handling */ + psci_call_suspend_handlers(mpidr_nodes, + start_afflvl, + end_afflvl); + + /* + * Invalidate the entry for the highest affinity level stashed earlier. + * This ensures that any reads of this variable outside the power + * up/down sequences return PSCI_INVALID_DATA.
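The stash-then-invalidate pattern above exists for readers such as the powerdown cache maintenance. A consumer sketch, loosely modelled on psci_do_pwrdown_cache_maintenance() (the function name here is illustrative; dcsw_op_all()/dcsw_op_louis() are the set/way cache helpers):

static void example_pwrdown_cache_flush(void)
{
	uint32_t max_off_afflvl = psci_get_max_phys_off_afflvl();

	/* Reads outside a power up/down sequence would see the sentinel */
	assert(max_off_afflvl != PSCI_INVALID_DATA);

	if (max_off_afflvl >= MPIDR_AFFLVL1)
		dcsw_op_all(DCCISW);	/* cluster or higher going down */
	else
		dcsw_op_louis(DCCISW);	/* only this cpu going down */
}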
+ */ + psci_set_max_phys_off_afflvl(PSCI_INVALID_DATA); + +exit: + /* + * Release the locks corresponding to each affinity level in the + * reverse order to which they were acquired. + */ + psci_release_afflvl_locks(start_afflvl, + end_afflvl, + mpidr_nodes); + if (!skip_wfi) + psci_power_down_wfi(); +} + +/******************************************************************************* + * The following functions finish an earlier affinity suspend request. They + * are called by the common finisher routine in psci_common.c. + ******************************************************************************/ +static void psci_afflvl0_suspend_finish(aff_map_node_t *cpu_node) +{ + unsigned int plat_state, state; + int32_t suspend_level; + uint64_t counter_freq; + + assert(cpu_node->level == MPIDR_AFFLVL0); + + /* Ensure we have been woken up from a suspended state */ + state = psci_get_state(cpu_node); + assert(state == PSCI_STATE_SUSPEND); + + /* + * Plat. management: Perform the platform specific actions + * before we change the state of the cpu e.g. enabling the + * gic or zeroing the mailbox register. If anything goes + * wrong then assert as there is no way to recover from this + * situation. + */ + + /* Get the physical state of this cpu */ + plat_state = get_phys_state(state); + psci_plat_pm_ops->affinst_suspend_finish(cpu_node->level, + plat_state); + + /* + * Arch. management: Enable the data cache, manage stack memory and + * restore the stashed EL3 architectural context from the 'cpu_context' + * structure for this cpu. + */ + psci_do_pwrup_cache_maintenance(); + + /* Re-init the cntfrq_el0 register */ + counter_freq = plat_get_syscnt_freq(); + write_cntfrq_el0(counter_freq); + + /* + * Call the cpu suspend finish handler registered by the Secure Payload + * Dispatcher to let it do any bookeeping. If the handler encounters an + * error, it's expected to assert within + */ + if (psci_spd_pm && psci_spd_pm->svc_suspend) { + suspend_level = psci_get_suspend_afflvl(); + assert (suspend_level != PSCI_INVALID_DATA); + psci_spd_pm->svc_suspend_finish(suspend_level); + } + + /* Invalidate the suspend context for the node */ + psci_set_suspend_power_state(PSCI_INVALID_DATA); + + /* + * Generic management: Now we just need to retrieve the + * information that we had stashed away during the suspend + * call to set this cpu on its way. + */ + cm_prepare_el3_exit(NON_SECURE); + + /* Clean caches before re-entering normal world */ + dcsw_op_louis(DCCSW); +} + +static void psci_afflvl1_suspend_finish(aff_map_node_t *cluster_node) +{ + unsigned int plat_state; + + assert(cluster_node->level == MPIDR_AFFLVL1); + + /* + * Plat. management: Perform the platform specific actions + * as per the old state of the cluster e.g. enabling + * coherency at the interconnect depends upon the state with + * which this cluster was powered up. If anything goes wrong + * then assert as there is no way to recover from this + * situation. + */ + + /* Get the physical state of this cpu */ + plat_state = psci_get_phys_state(cluster_node); + psci_plat_pm_ops->affinst_suspend_finish(cluster_node->level, + plat_state); +} + + +static void psci_afflvl2_suspend_finish(aff_map_node_t *system_node) +{ + unsigned int plat_state; + + /* Cannot go beyond this affinity level */ + assert(system_node->level == MPIDR_AFFLVL2); + + /* + * Currently, there are no architectural actions to perform + * at the system level. + */ + + /* + * Plat. management: Perform the platform specific actions + * as per the old state of the cluster e.g. 
enabling
+	 * coherency at the interconnect depends upon the state with
+	 * which this system was powered up. If anything goes wrong
+	 * then assert as there is no way to recover from this
+	 * situation.
+	 */
+
+	/* Get the physical state of the system */
+	plat_state = psci_get_phys_state(system_node);
+	psci_plat_pm_ops->affinst_suspend_finish(system_node->level,
+						 plat_state);
+}
+
+const afflvl_power_on_finisher_t psci_afflvl_suspend_finishers[] = {
+	psci_afflvl0_suspend_finish,
+	psci_afflvl1_suspend_finish,
+	psci_afflvl2_suspend_finish,
+};
diff --git a/services/std_svc/psci1.0/psci_common.c b/services/std_svc/psci1.0/psci_common.c
new file mode 100644
index 0000000..1b74ff2
--- /dev/null
+++ b/services/std_svc/psci1.0/psci_common.c
@@ -0,0 +1,648 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "psci_private.h"
+
+/*
+ * SPD power management operations, expected to be supplied by the registered
+ * SPD on successful SP initialization
+ */
+const spd_pm_ops_t *psci_spd_pm;
+
+/*******************************************************************************
+ * Grand array that holds the platform's topology information for state
+ * management of affinity instances. Each node (aff_map_node) in the array
+ * corresponds to an affinity instance e.g. cluster, cpu within an mpidr
+ ******************************************************************************/
+aff_map_node_t psci_aff_map[PSCI_NUM_AFFS]
+#if USE_COHERENT_MEM
+__attribute__ ((section("tzfw_coherent_mem")))
+#endif
+;
+
+/*******************************************************************************
+ * Pointer to functions exported by the platform to complete power mgmt.
ops
+ ******************************************************************************/
+const plat_pm_ops_t *psci_plat_pm_ops;
+
+/*******************************************************************************
+ * Check that the maximum affinity level supported by the platform makes sense
+ * ****************************************************************************/
+CASSERT(PLATFORM_MAX_AFFLVL <= MPIDR_MAX_AFFLVL && \
+	PLATFORM_MAX_AFFLVL >= MPIDR_AFFLVL0, \
+	assert_platform_max_afflvl_check);
+
+/*******************************************************************************
+ * This function is passed an array of pointers to affinity level nodes in the
+ * topology tree for an mpidr. It iterates through the nodes to find the highest
+ * affinity level which is marked as physically powered off.
+ ******************************************************************************/
+uint32_t psci_find_max_phys_off_afflvl(uint32_t start_afflvl,
+				       uint32_t end_afflvl,
+				       aff_map_node_t *mpidr_nodes[])
+{
+	uint32_t max_afflvl = PSCI_INVALID_DATA;
+
+	for (; start_afflvl <= end_afflvl; start_afflvl++) {
+		if (mpidr_nodes[start_afflvl] == NULL)
+			continue;
+
+		if (psci_get_phys_state(mpidr_nodes[start_afflvl]) ==
+		    PSCI_STATE_OFF)
+			max_afflvl = start_afflvl;
+	}
+
+	return max_afflvl;
+}
+
+/*******************************************************************************
+ * This function verifies that all the other cores in the system have been
+ * turned OFF and the current CPU is the last running CPU in the system.
+ * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false)
+ * otherwise.
+ ******************************************************************************/
+unsigned int psci_is_last_on_cpu(void)
+{
+	unsigned long mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;
+	unsigned int i;
+
+	for (i = psci_aff_limits[MPIDR_AFFLVL0].min;
+	     i <= psci_aff_limits[MPIDR_AFFLVL0].max; i++) {
+
+		assert(psci_aff_map[i].level == MPIDR_AFFLVL0);
+
+		if (!(psci_aff_map[i].state & PSCI_AFF_PRESENT))
+			continue;
+
+		if (psci_aff_map[i].mpidr == mpidr) {
+			assert(psci_get_state(&psci_aff_map[i])
+					== PSCI_STATE_ON);
+			continue;
+		}
+
+		if (psci_get_state(&psci_aff_map[i]) != PSCI_STATE_OFF)
+			return 0;
+	}
+
+	return 1;
+}
+
+/*******************************************************************************
+ * This function saves the highest affinity level which is in OFF state. The
+ * affinity instance with which the level is associated is determined by the
+ * caller.
+ ******************************************************************************/
+void psci_set_max_phys_off_afflvl(uint32_t afflvl)
+{
+	set_cpu_data(psci_svc_cpu_data.max_phys_off_afflvl, afflvl);
+
+	/*
+	 * Ensure that the saved value is flushed to main memory and any
+	 * speculatively pre-fetched stale copies are invalidated from the
+	 * caches of other cpus in the same coherency domain. This ensures that
+	 * the value can be safely read irrespective of the state of the data
+	 * cache.
+	 */
+	flush_cpu_data(psci_svc_cpu_data.max_phys_off_afflvl);
+}
+
+/*******************************************************************************
+ * This function reads the saved highest affinity level which is in OFF
+ * state. The affinity instance with which the level is associated is determined
+ * by the caller.
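+ *
+ * For illustration only (not part of this patch), the setter and getter
+ * are used as a pair around a power down sequence, e.g.:
+ *
+ *   psci_set_max_phys_off_afflvl(MPIDR_AFFLVL1);
+ *   ...power down actions...
+ *   afflvl = psci_get_max_phys_off_afflvl();	/* == MPIDR_AFFLVL1 */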
+ ******************************************************************************/ +uint32_t psci_get_max_phys_off_afflvl(void) +{ + /* + * Ensure that the last update of this value in this cpu's cache is + * flushed to main memory and any speculatively pre-fetched stale copies + * are invalidated from the caches of other cpus in the same coherency + * domain. This ensures that the value is always read from the main + * memory when it was written before the data cache was enabled. + */ + flush_cpu_data(psci_svc_cpu_data.max_phys_off_afflvl); + return get_cpu_data(psci_svc_cpu_data.max_phys_off_afflvl); +} + +/******************************************************************************* + * Routine to return the maximum affinity level to traverse to after a cpu has + * been physically powered up. It is expected to be called immediately after + * reset from assembler code. + ******************************************************************************/ +int get_power_on_target_afflvl(void) +{ + int afflvl; + +#if DEBUG + unsigned int state; + aff_map_node_t *node; + + /* Retrieve our node from the topology tree */ + node = psci_get_aff_map_node(read_mpidr_el1() & MPIDR_AFFINITY_MASK, + MPIDR_AFFLVL0); + assert(node); + + /* + * Sanity check the state of the cpu. It should be either suspend or "on + * pending" + */ + state = psci_get_state(node); + assert(state == PSCI_STATE_SUSPEND || state == PSCI_STATE_ON_PENDING); +#endif + + /* + * Assume that this cpu was suspended and retrieve its target affinity + * level. If it is invalid then it could only have been turned off + * earlier. PLATFORM_MAX_AFFLVL will be the highest affinity level a + * cpu can be turned off to. + */ + afflvl = psci_get_suspend_afflvl(); + if (afflvl == PSCI_INVALID_DATA) + afflvl = PLATFORM_MAX_AFFLVL; + return afflvl; +} + +/******************************************************************************* + * Simple routine to set the id of an affinity instance at a given level in the + * mpidr. + ******************************************************************************/ +unsigned long mpidr_set_aff_inst(unsigned long mpidr, + unsigned char aff_inst, + int aff_lvl) +{ + unsigned long aff_shift; + + assert(aff_lvl <= MPIDR_AFFLVL3); + + /* + * Decide the number of bits to shift by depending upon + * the affinity level + */ + aff_shift = get_afflvl_shift(aff_lvl); + + /* Clear the existing affinity instance & set the new one*/ + mpidr &= ~(((unsigned long)MPIDR_AFFLVL_MASK) << aff_shift); + mpidr |= ((unsigned long)aff_inst) << aff_shift; + + return mpidr; +} + +/******************************************************************************* + * This function sanity checks a range of affinity levels. + ******************************************************************************/ +int psci_check_afflvl_range(int start_afflvl, int end_afflvl) +{ + /* Sanity check the parameters passed */ + if (end_afflvl > PLATFORM_MAX_AFFLVL) + return PSCI_E_INVALID_PARAMS; + + if (start_afflvl < MPIDR_AFFLVL0) + return PSCI_E_INVALID_PARAMS; + + if (end_afflvl < start_afflvl) + return PSCI_E_INVALID_PARAMS; + + return PSCI_E_SUCCESS; +} + +/******************************************************************************* + * This function is passed an array of pointers to affinity level nodes in the + * topology tree for an mpidr and the state which each node should transition + * to. It updates the state of each node between the specified affinity levels. 
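+ *
+ * For illustration only (not part of this patch), marking a cpu and its
+ * cluster as suspended could look like:
+ *
+ *   psci_do_afflvl_state_mgmt(MPIDR_AFFLVL0, MPIDR_AFFLVL1,
+ *			       mpidr_nodes, PSCI_STATE_SUSPEND);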
+ ******************************************************************************/ +void psci_do_afflvl_state_mgmt(uint32_t start_afflvl, + uint32_t end_afflvl, + aff_map_node_t *mpidr_nodes[], + uint32_t state) +{ + uint32_t level; + + for (level = start_afflvl; level <= end_afflvl; level++) { + if (mpidr_nodes[level] == NULL) + continue; + psci_set_state(mpidr_nodes[level], state); + } +} + +/******************************************************************************* + * This function is passed an array of pointers to affinity level nodes in the + * topology tree for an mpidr. It picks up locks for each affinity level bottom + * up in the range specified. + ******************************************************************************/ +void psci_acquire_afflvl_locks(int start_afflvl, + int end_afflvl, + aff_map_node_t *mpidr_nodes[]) +{ + int level; + + for (level = start_afflvl; level <= end_afflvl; level++) { + if (mpidr_nodes[level] == NULL) + continue; + + psci_lock_get(mpidr_nodes[level]); + } +} + +/******************************************************************************* + * This function is passed an array of pointers to affinity level nodes in the + * topology tree for an mpidr. It releases the lock for each affinity level top + * down in the range specified. + ******************************************************************************/ +void psci_release_afflvl_locks(int start_afflvl, + int end_afflvl, + aff_map_node_t *mpidr_nodes[]) +{ + int level; + + for (level = end_afflvl; level >= start_afflvl; level--) { + if (mpidr_nodes[level] == NULL) + continue; + + psci_lock_release(mpidr_nodes[level]); + } +} + +/******************************************************************************* + * Simple routine to determine whether an affinity instance at a given level + * in an mpidr exists or not. + ******************************************************************************/ +int psci_validate_mpidr(unsigned long mpidr, int level) +{ + aff_map_node_t *node; + + node = psci_get_aff_map_node(mpidr, level); + if (node && (node->state & PSCI_AFF_PRESENT)) + return PSCI_E_SUCCESS; + else + return PSCI_E_INVALID_PARAMS; +} + +/******************************************************************************* + * This function determines the full entrypoint information for the requested + * PSCI entrypoint on power on/resume and returns it. + ******************************************************************************/ +int psci_get_ns_ep_info(entry_point_info_t *ep, + uint64_t entrypoint, uint64_t context_id) +{ + uint32_t ep_attr, mode, sctlr, daif, ee; + uint32_t ns_scr_el3 = read_scr_el3(); + uint32_t ns_sctlr_el1 = read_sctlr_el1(); + + sctlr = ns_scr_el3 & SCR_HCE_BIT ? read_sctlr_el2() : ns_sctlr_el1; + ee = 0; + + ep_attr = NON_SECURE | EP_ST_DISABLE; + if (sctlr & SCTLR_EE_BIT) { + ep_attr |= EP_EE_BIG; + ee = 1; + } + SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr); + + ep->pc = entrypoint; + memset(&ep->args, 0, sizeof(ep->args)); + ep->args.arg0 = context_id; + + /* + * Figure out whether the cpu enters the non-secure address space + * in aarch32 or aarch64 + */ + if (ns_scr_el3 & SCR_RW_BIT) { + + /* + * Check whether a Thumb entry point has been provided for an + * aarch64 EL + */ + if (entrypoint & 0x1) + return PSCI_E_INVALID_PARAMS; + + mode = ns_scr_el3 & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1; + + ep->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); + } else { + + mode = ns_scr_el3 & SCR_HCE_BIT ? 
MODE32_hyp : MODE32_svc;
+
+		/*
+		 * TODO: Choose async. exception bits if HYP mode is not
+		 * implemented according to the values of SCR.{AW, FW} bits
+		 */
+		daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;
+
+		ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif);
+	}
+
+	return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * This function takes a pointer to an affinity node in the topology tree and
+ * returns its state. State of a non-leaf node needs to be calculated.
+ ******************************************************************************/
+unsigned short psci_get_state(aff_map_node_t *node)
+{
+#if !USE_COHERENT_MEM
+	flush_dcache_range((uint64_t) node, sizeof(*node));
+#endif
+
+	assert(node->level >= MPIDR_AFFLVL0 && node->level <= MPIDR_MAX_AFFLVL);
+
+	/* A cpu node just contains the state which can be directly returned */
+	if (node->level == MPIDR_AFFLVL0)
+		return (node->state >> PSCI_STATE_SHIFT) & PSCI_STATE_MASK;
+
+	/*
+	 * For an affinity level higher than a cpu, the state has to be
+	 * calculated. It depends upon the value of the reference count
+	 * which is managed by each node at the next lower affinity level
+	 * e.g. for a cluster, each cpu increments/decrements the reference
+	 * count. If the reference count is 0 then the affinity level is
+	 * OFF else ON.
+	 */
+	if (node->ref_count)
+		return PSCI_STATE_ON;
+	else
+		return PSCI_STATE_OFF;
+}
+
+/*******************************************************************************
+ * This function takes a pointer to an affinity node in the topology tree and
+ * a target state. State of a non-leaf node needs to be converted to a reference
+ * count. State of a leaf node can be set directly.
+ ******************************************************************************/
+void psci_set_state(aff_map_node_t *node, unsigned short state)
+{
+	assert(node->level >= MPIDR_AFFLVL0 && node->level <= MPIDR_MAX_AFFLVL);
+
+	/*
+	 * For an affinity level higher than a cpu, the state is used
+	 * to decide whether the reference count is incremented or
+	 * decremented. Entry into the ON_PENDING state does not have
+	 * any effect.
+	 */
+	if (node->level > MPIDR_AFFLVL0) {
+		switch (state) {
+		case PSCI_STATE_ON:
+			node->ref_count++;
+			break;
+		case PSCI_STATE_OFF:
+		case PSCI_STATE_SUSPEND:
+			node->ref_count--;
+			break;
+		case PSCI_STATE_ON_PENDING:
+			/*
+			 * An affinity level higher than a cpu will not undergo
+			 * a state change when it is about to be turned on
+			 */
+			return;
+		default:
+			assert(0);
+		}
+	} else {
+		node->state &= ~(PSCI_STATE_MASK << PSCI_STATE_SHIFT);
+		node->state |= (state & PSCI_STATE_MASK) << PSCI_STATE_SHIFT;
+	}
+
+#if !USE_COHERENT_MEM
+	flush_dcache_range((uint64_t) node, sizeof(*node));
+#endif
+}
+
+/*******************************************************************************
+ * An affinity level could be on, on_pending, suspended or off. These are the
+ * logical states it can be in. Physically, it is either off or on. When it is
+ * in the state on_pending then it is about to be turned on. It is not possible
+ * to tell whether that has actually happened or not. So we err on the side of
+ * caution & treat the affinity level as being turned off.
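+ *
+ * For illustration only (not part of this patch), the logical to physical
+ * mapping implemented below via get_phys_state() works out to:
+ *
+ *   ON         -> ON
+ *   ON_PENDING -> OFF (conservative, as described above)
+ *   SUSPEND    -> OFF
+ *   OFF        -> OFF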
+ ******************************************************************************/
+unsigned short psci_get_phys_state(aff_map_node_t *node)
+{
+	unsigned int state;
+
+	state = psci_get_state(node);
+	return get_phys_state(state);
+}
+
+/*******************************************************************************
+ * This function takes an array of pointers to affinity instance nodes in the
+ * topology tree and calls the physical power on handler for the corresponding
+ * affinity levels
+ ******************************************************************************/
+static void psci_call_power_on_handlers(aff_map_node_t *mpidr_nodes[],
+					int start_afflvl,
+					int end_afflvl,
+					afflvl_power_on_finisher_t *pon_handlers)
+{
+	int level;
+	aff_map_node_t *node;
+
+	for (level = end_afflvl; level >= start_afflvl; level--) {
+		node = mpidr_nodes[level];
+		if (node == NULL)
+			continue;
+
+		/*
+		 * If any trouble is encountered while powering up an
+		 * affinity instance, there is no recovery path, so the
+		 * handlers are expected to assert on failure.
+		 */
+		pon_handlers[level](node);
+	}
+}
+
+/*******************************************************************************
+ * Generic handler which is called when a cpu is physically powered on. It
+ * traverses through all the affinity levels performing generic, architectural,
+ * platform setup and state management e.g. for a cluster that's been powered
+ * on, it will call the platform specific code which will enable coherency at
+ * the interconnect level. For a cpu it could mean turning on the MMU etc.
+ *
+ * The state of all the relevant affinity levels is changed after calling the
+ * affinity level specific handlers as their actions would depend upon the state
+ * the affinity level is exiting from.
+ *
+ * The affinity level specific handlers are called in descending order i.e. from
+ * the highest to the lowest affinity level implemented by the platform because
+ * to turn on affinity level X it is necessary to turn on affinity level X + 1
+ * first.
+ ******************************************************************************/
+void psci_afflvl_power_on_finish(int start_afflvl,
+				 int end_afflvl,
+				 afflvl_power_on_finisher_t *pon_handlers)
+{
+	mpidr_aff_map_nodes_t mpidr_nodes;
+	int rc;
+	unsigned int max_phys_off_afflvl;
+
+
+	/*
+	 * Collect the pointers to the nodes in the topology tree for
+	 * each affinity instance in the mpidr. If this function does
+	 * not return successfully then either the mpidr or the affinity
+	 * levels are incorrect. Either case is an irrecoverable error.
+	 */
+	rc = psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
+				    start_afflvl,
+				    end_afflvl,
+				    mpidr_nodes);
+	if (rc != PSCI_E_SUCCESS)
+		panic();
+
+	/*
+	 * This function acquires the lock corresponding to each affinity
+	 * level so that by the time all locks are taken, the system topology
+	 * has been snapshotted and state management can be done safely.
+	 */
+	psci_acquire_afflvl_locks(start_afflvl,
+				  end_afflvl,
+				  mpidr_nodes);
+
+	max_phys_off_afflvl = psci_find_max_phys_off_afflvl(start_afflvl,
+							    end_afflvl,
+							    mpidr_nodes);
+	assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
+
+	/*
+	 * Stash the highest affinity level that will come out of the OFF or
+	 * SUSPEND states.
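+	 *
+	 * For illustration only (not part of this patch): after a cluster
+	 * wide suspend, max_phys_off_afflvl here would be MPIDR_AFFLVL1 and
+	 * the finishers below would also perform cluster level restore work.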
+ */ + psci_set_max_phys_off_afflvl(max_phys_off_afflvl); + + /* Perform generic, architecture and platform specific handling */ + psci_call_power_on_handlers(mpidr_nodes, + start_afflvl, + end_afflvl, + pon_handlers); + + /* + * This function updates the state of each affinity instance + * corresponding to the mpidr in the range of affinity levels + * specified. + */ + psci_do_afflvl_state_mgmt(start_afflvl, + end_afflvl, + mpidr_nodes, + PSCI_STATE_ON); + + /* + * Invalidate the entry for the highest affinity level stashed earlier. + * This ensures that any reads of this variable outside the power + * up/down sequences return PSCI_INVALID_DATA + */ + psci_set_max_phys_off_afflvl(PSCI_INVALID_DATA); + + /* + * This loop releases the lock corresponding to each affinity level + * in the reverse order to which they were acquired. + */ + psci_release_afflvl_locks(start_afflvl, + end_afflvl, + mpidr_nodes); +} + +/******************************************************************************* + * This function initializes the set of hooks that PSCI invokes as part of power + * management operation. The power management hooks are expected to be provided + * by the SPD, after it finishes all its initialization + ******************************************************************************/ +void psci_register_spd_pm_hook(const spd_pm_ops_t *pm) +{ + assert(pm); + psci_spd_pm = pm; + + if (pm->svc_migrate) + psci_caps |= define_psci_cap(PSCI_MIG_AARCH64); + + if (pm->svc_migrate_info) + psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) + | define_psci_cap(PSCI_MIG_INFO_TYPE); +} + +/******************************************************************************* + * This function invokes the migrate info hook in the spd_pm_ops. It performs + * the necessary return value validation. If the Secure Payload is UP and + * migrate capable, it returns the mpidr of the CPU on which the Secure payload + * is resident through the mpidr parameter. Else the value of the parameter on + * return is undefined. 
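+ *
+ * For illustration only (not part of this patch), a typical caller checks
+ * the returned migrate type before trusting the mpidr:
+ *
+ *   uint64_t resident;
+ *   if (psci_spd_migrate_info(&resident) == PSCI_TOS_UP_MIG_CAP)
+ *	   ...'resident' identifies the cpu hosting the Secure OS...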
+ ******************************************************************************/ +int psci_spd_migrate_info(uint64_t *mpidr) +{ + int rc; + + if (!psci_spd_pm || !psci_spd_pm->svc_migrate_info) + return PSCI_E_NOT_SUPPORTED; + + rc = psci_spd_pm->svc_migrate_info(mpidr); + + assert(rc == PSCI_TOS_UP_MIG_CAP || rc == PSCI_TOS_NOT_UP_MIG_CAP \ + || rc == PSCI_TOS_NOT_PRESENT_MP || rc == PSCI_E_NOT_SUPPORTED); + + return rc; +} + + +/******************************************************************************* + * This function prints the state of all affinity instances present in the + * system + ******************************************************************************/ +void psci_print_affinity_map(void) +{ +#if LOG_LEVEL >= LOG_LEVEL_INFO + aff_map_node_t *node; + unsigned int idx; + /* This array maps to the PSCI_STATE_X definitions in psci.h */ + static const char *psci_state_str[] = { + "ON", + "OFF", + "ON_PENDING", + "SUSPEND" + }; + + INFO("PSCI Affinity Map:\n"); + for (idx = 0; idx < PSCI_NUM_AFFS ; idx++) { + node = &psci_aff_map[idx]; + if (!(node->state & PSCI_AFF_PRESENT)) { + continue; + } + INFO(" AffInst: Level %u, MPID 0x%lx, State %s\n", + node->level, node->mpidr, + psci_state_str[psci_get_state(node)]); + } +#endif +} diff --git a/services/std_svc/psci1.0/psci_entry.S b/services/std_svc/psci1.0/psci_entry.S new file mode 100644 index 0000000..050f6c6 --- /dev/null +++ b/services/std_svc/psci1.0/psci_entry.S @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include + + .globl psci_aff_on_finish_entry + .globl psci_aff_suspend_finish_entry + .globl psci_power_down_wfi + + /* ----------------------------------------------------- + * This cpu has been physically powered up. Depending + * upon whether it was resumed from suspend or simply + * turned on, call the common power on finisher with + * the handlers (chosen depending upon original state). 
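+ * For illustration only (not part of this patch): x23 is a callee
+ * saved register carrying the chosen finisher table, e.g.
+ *   adr x23, psci_afflvl_on_finishers
+ * before falling through to the common finish path.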
+ * -----------------------------------------------------
+ */
+func psci_aff_on_finish_entry
+	adr	x23, psci_afflvl_on_finishers
+	b	psci_aff_common_finish_entry
+
+psci_aff_suspend_finish_entry:
+	adr	x23, psci_afflvl_suspend_finishers
+
+psci_aff_common_finish_entry:
+	/*
+	 * On the warm boot path, most of the EL3 initialisations performed by
+	 * 'el3_entrypoint_common' must be skipped:
+	 *
+	 * - Only when the platform bypasses the BL1/BL3-1 entrypoint by
+	 *   programming the reset address do we need to set the CPU endianness.
+	 *   In other cases, we assume this has been taken care of by the
+	 *   entrypoint code.
+	 *
+	 * - No need to determine the type of boot, we know it is a warm boot.
+	 *
+	 * - Do not try to distinguish between primary and secondary CPUs, this
+	 *   notion only exists for a cold boot.
+	 *
+	 * - No need to initialise the memory or the C runtime environment,
+	 *   it has been done once and for all on the cold boot path.
+	 */
+	el3_entrypoint_common					\
+		_set_endian=PROGRAMMABLE_RESET_ADDRESS		\
+		_warm_boot_mailbox=0				\
+		_secondary_cold_boot=0				\
+		_init_memory=0					\
+		_init_c_runtime=0				\
+		_exception_vectors=runtime_exceptions
+
+	/* --------------------------------------------
+	 * Enable the MMU with the DCache disabled. It
+	 * is safe to use stacks allocated in normal
+	 * memory as a result. All memory accesses are
+	 * marked nGnRnE when the MMU is disabled. So
+	 * all the stack writes will make it to memory.
+	 * All memory accesses are marked Non-cacheable
+	 * when the MMU is enabled but D$ is disabled.
+	 * So used stack memory is guaranteed to be
+	 * visible immediately after the MMU is enabled.
+	 * Enabling the DCache at the same time as the
+	 * MMU can lead to speculatively fetched and
+	 * possibly stale stack memory being read from
+	 * other caches. This can lead to coherency
+	 * issues.
+	 * --------------------------------------------
+	 */
+	mov	x0, #DISABLE_DCACHE
+	bl	bl31_plat_enable_mmu
+
+	/* ---------------------------------------------
+	 * Call the finishers starting from affinity
+	 * level 0.
+	 * ---------------------------------------------
+	 */
+	bl	get_power_on_target_afflvl
+	mov	x2, x23
+	mov	x1, x0
+	mov	x0, #MPIDR_AFFLVL0
+	bl	psci_afflvl_power_on_finish
+
+	b	el3_exit
+endfunc psci_aff_on_finish_entry
+
+	/* --------------------------------------------
+	 * This function is called to indicate to the
+	 * power controller that it is safe to power
+	 * down this cpu. It should not exit the wfi
+	 * and will be released from reset upon power
+	 * up. 'wfi_spill' is used to catch erroneous
+	 * exits from wfi.
+	 * --------------------------------------------
+	 */
+func psci_power_down_wfi
+	dsb	sy		// ensure write buffer empty
+	wfi
+wfi_spill:
+	b	wfi_spill
+endfunc psci_power_down_wfi
+
diff --git a/services/std_svc/psci1.0/psci_helpers.S b/services/std_svc/psci1.0/psci_helpers.S
new file mode 100644
index 0000000..1d99158
--- /dev/null
+++ b/services/std_svc/psci1.0/psci_helpers.S
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+	.globl	psci_do_pwrdown_cache_maintenance
+	.globl	psci_do_pwrup_cache_maintenance
+
+/* -----------------------------------------------------------------------
+ * void psci_do_pwrdown_cache_maintenance(uint32_t affinity_level);
+ *
+ * This function performs cache maintenance if the specified affinity
+ * level is equal to the level of the highest affinity instance which
+ * will be/is physically powered off. The levels of cache affected are
+ * determined by the affinity level which is passed as the argument i.e.
+ * level 0 results in a flush of the L1 cache. Both the L1 and L2 caches
+ * are flushed for a higher affinity level.
+ *
+ * Additionally, this function also ensures that stack memory is correctly
+ * flushed out to avoid coherency issues due to a change in its memory
+ * attributes after the data cache is disabled.
+ * -----------------------------------------------------------------------
+ */
+func psci_do_pwrdown_cache_maintenance
+	stp	x29, x30, [sp,#-16]!
+	stp	x19, x20, [sp,#-16]!
+
+	mov	x19, x0
+	bl	psci_get_max_phys_off_afflvl
+#if ASM_ASSERTION
+	cmp	x0, #PSCI_INVALID_DATA
+	ASM_ASSERT(ne)
+#endif
+	cmp	x0, x19
+	b.ne	1f
+
+	/* ---------------------------------------------
+	 * Determine how many levels of cache will be
+	 * subject to cache maintenance. Affinity level
+	 * 0 implies that only the cpu is being powered
+	 * down. Only the L1 data cache needs to be
+	 * flushed to the PoU in this case. For a higher
+	 * affinity level we are assuming that a flush
+	 * of L1 data and L2 unified cache is enough.
+	 * This information should be provided by the
+	 * platform.
+	 * ---------------------------------------------
+	 */
+	cmp	x0, #MPIDR_AFFLVL0
+	b.eq	do_core_pwr_dwn
+	bl	prepare_cluster_pwr_dwn
+	b	do_stack_maintenance
+
+do_core_pwr_dwn:
+	bl	prepare_core_pwr_dwn
+
+	/* ---------------------------------------------
+	 * Do stack maintenance by flushing the used
+	 * stack to the main memory and invalidating the
+	 * remainder.
+	 * ---------------------------------------------
+	 */
+do_stack_maintenance:
+	mrs	x0, mpidr_el1
+	bl	platform_get_stack
+
+	/* ---------------------------------------------
+	 * Calculate and store the size of the used
+	 * stack memory in x1.
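+	 * For illustration only (not part of this patch): x0 holds the
+	 * stack top returned by platform_get_stack, so the used size is
+	 * x1 = x0 - sp, and flush_dcache_range(sp, x1) below cleans
+	 * exactly the live portion of the stack.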
+ * --------------------------------------------- + */ + mov x19, x0 + mov x1, sp + sub x1, x0, x1 + mov x0, sp + bl flush_dcache_range + + /* --------------------------------------------- + * Calculate and store the size of the unused + * stack memory in x1. Calculate and store the + * stack base address in x0. + * --------------------------------------------- + */ + sub x0, x19, #PLATFORM_STACK_SIZE + sub x1, sp, x0 + bl inv_dcache_range + +1: + ldp x19, x20, [sp], #16 + ldp x29, x30, [sp], #16 + ret +endfunc psci_do_pwrdown_cache_maintenance + + +/* ----------------------------------------------------------------------- + * void psci_do_pwrup_cache_maintenance(void); + * + * This function performs cache maintenance after this cpu is powered up. + * Currently, this involves managing the used stack memory before turning + * on the data cache. + * ----------------------------------------------------------------------- + */ +func psci_do_pwrup_cache_maintenance + stp x29, x30, [sp,#-16]! + + /* --------------------------------------------- + * Ensure any inflight stack writes have made it + * to main memory. + * --------------------------------------------- + */ + dmb st + + /* --------------------------------------------- + * Calculate and store the size of the used + * stack memory in x1. Calculate and store the + * stack base address in x0. + * --------------------------------------------- + */ + mrs x0, mpidr_el1 + bl platform_get_stack + mov x1, sp + sub x1, x0, x1 + mov x0, sp + bl inv_dcache_range + + /* --------------------------------------------- + * Enable the data cache. + * --------------------------------------------- + */ + mrs x0, sctlr_el3 + orr x0, x0, #SCTLR_C_BIT + msr sctlr_el3, x0 + isb + + ldp x29, x30, [sp], #16 + ret +endfunc psci_do_pwrup_cache_maintenance diff --git a/services/std_svc/psci1.0/psci_main.c b/services/std_svc/psci1.0/psci_main.c new file mode 100644 index 0000000..b389287 --- /dev/null +++ b/services/std_svc/psci1.0/psci_main.c @@ -0,0 +1,463 @@ +/* + * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "psci_private.h"
+
+/*******************************************************************************
+ * PSCI frontend api for servicing SMCs. Described in the PSCI spec.
+ ******************************************************************************/
+int psci_cpu_on(unsigned long target_cpu,
+		unsigned long entrypoint,
+		unsigned long context_id)
+
+{
+	int rc;
+	unsigned int start_afflvl, end_afflvl;
+	entry_point_info_t ep;
+
+	/* Determine if the cpu exists or not */
+	rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0);
+	if (rc != PSCI_E_SUCCESS) {
+		return PSCI_E_INVALID_PARAMS;
+	}
+
+	/* Validate the entrypoint using platform pm_ops */
+	if (psci_plat_pm_ops->validate_ns_entrypoint) {
+		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
+		if (rc != PSCI_E_SUCCESS) {
+			assert(rc == PSCI_E_INVALID_PARAMS);
+			return PSCI_E_INVALID_PARAMS;
+		}
+	}
+
+	/*
+	 * Verify and derive the re-entry information for
+	 * the non-secure world from the non-secure state from
+	 * where this call originated.
+	 */
+	rc = psci_get_ns_ep_info(&ep, entrypoint, context_id);
+	if (rc != PSCI_E_SUCCESS)
+		return rc;
+
+
+	/*
+	 * To turn this cpu on, specify which affinity
+	 * levels need to be turned on
+	 */
+	start_afflvl = MPIDR_AFFLVL0;
+	end_afflvl = PLATFORM_MAX_AFFLVL;
+	rc = psci_afflvl_on(target_cpu,
+			    &ep,
+			    start_afflvl,
+			    end_afflvl);
+
+	return rc;
+}
+
+unsigned int psci_version(void)
+{
+	return PSCI_MAJOR_VER | PSCI_MINOR_VER;
+}
+
+int psci_cpu_suspend(unsigned int power_state,
+		     unsigned long entrypoint,
+		     unsigned long context_id)
+{
+	int rc;
+	unsigned int target_afflvl, pstate_type;
+	entry_point_info_t ep;
+
+	/* Check SBZ bits in power state are zero */
+	if (psci_validate_power_state(power_state))
+		return PSCI_E_INVALID_PARAMS;
+
+	/* Sanity check the requested state */
+	target_afflvl = psci_get_pstate_afflvl(power_state);
+	if (target_afflvl > PLATFORM_MAX_AFFLVL)
+		return PSCI_E_INVALID_PARAMS;
+
+	/* Validate the power_state using platform pm_ops */
+	if (psci_plat_pm_ops->validate_power_state) {
+		rc = psci_plat_pm_ops->validate_power_state(power_state);
+		if (rc != PSCI_E_SUCCESS) {
+			assert(rc == PSCI_E_INVALID_PARAMS);
+			return PSCI_E_INVALID_PARAMS;
+		}
+	}
+
+	/* Validate the entrypoint using platform pm_ops */
+	if (psci_plat_pm_ops->validate_ns_entrypoint) {
+		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
+		if (rc != PSCI_E_SUCCESS) {
+			assert(rc == PSCI_E_INVALID_PARAMS);
+			return PSCI_E_INVALID_PARAMS;
+		}
+	}
+
+	/* Determine the 'state type' in the 'power_state' parameter */
+	pstate_type = psci_get_pstate_type(power_state);
+
+	/*
+	 * Ensure that we have a platform specific handler for entering
+	 * a standby state.
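+	 *
+	 * For illustration only (not part of this patch), a caller would
+	 * request standby with a power_state built along the lines of:
+	 *   psci_make_powerstate(0, PSTATE_TYPE_STANDBY, MPIDR_AFFLVL0)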
+	 */
+	if (pstate_type == PSTATE_TYPE_STANDBY) {
+		if (!psci_plat_pm_ops->affinst_standby)
+			return PSCI_E_INVALID_PARAMS;
+
+		psci_plat_pm_ops->affinst_standby(power_state);
+		return PSCI_E_SUCCESS;
+	}
+
+	/*
+	 * Verify and derive the re-entry information for
+	 * the non-secure world from the non-secure state from
+	 * where this call originated.
+	 */
+	rc = psci_get_ns_ep_info(&ep, entrypoint, context_id);
+	if (rc != PSCI_E_SUCCESS)
+		return rc;
+
+	/* Save PSCI power state parameter for the core in suspend context */
+	psci_set_suspend_power_state(power_state);
+
+	/*
+	 * Do what is needed to enter the power down state. Upon success,
+	 * enter the final wfi which will power down this CPU.
+	 */
+	psci_afflvl_suspend(&ep,
+			    MPIDR_AFFLVL0,
+			    target_afflvl);
+
+	/* Reset PSCI power state parameter for the core. */
+	psci_set_suspend_power_state(PSCI_INVALID_DATA);
+	return PSCI_E_SUCCESS;
+}
+
+int psci_system_suspend(unsigned long entrypoint,
+			unsigned long context_id)
+{
+	int rc;
+	unsigned int power_state;
+	entry_point_info_t ep;
+
+	/* Validate the entrypoint using platform pm_ops */
+	if (psci_plat_pm_ops->validate_ns_entrypoint) {
+		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
+		if (rc != PSCI_E_SUCCESS) {
+			assert(rc == PSCI_E_INVALID_PARAMS);
+			return PSCI_E_INVALID_PARAMS;
+		}
+	}
+
+	/* Check if the current CPU is the last ON CPU in the system */
+	if (!psci_is_last_on_cpu())
+		return PSCI_E_DENIED;
+
+	/*
+	 * Verify and derive the re-entry information for
+	 * the non-secure world from the non-secure state from
+	 * where this call originated.
+	 */
+	rc = psci_get_ns_ep_info(&ep, entrypoint, context_id);
+	if (rc != PSCI_E_SUCCESS)
+		return rc;
+
+	/*
+	 * Assert that the required pm_ops hook is implemented to ensure that
+	 * the capability detected during psci_setup() is valid.
+	 */
+	assert(psci_plat_pm_ops->get_sys_suspend_power_state);
+
+	/*
+	 * Query the platform for the power_state required for system suspend
+	 */
+	power_state = psci_plat_pm_ops->get_sys_suspend_power_state();
+
+	/* Save PSCI power state parameter for the core in suspend context */
+	psci_set_suspend_power_state(power_state);
+
+	/*
+	 * Do what is needed to enter the power down state. Upon success,
+	 * enter the final wfi which will power down this cpu.
+	 */
+	psci_afflvl_suspend(&ep,
+			    MPIDR_AFFLVL0,
+			    PLATFORM_MAX_AFFLVL);
+
+	/* Reset PSCI power state parameter for the core. */
+	psci_set_suspend_power_state(PSCI_INVALID_DATA);
+	return PSCI_E_SUCCESS;
+}
+
+int psci_cpu_off(void)
+{
+	int rc;
+	int target_afflvl = PLATFORM_MAX_AFFLVL;
+
+	/*
+	 * Traverse from the highest to the lowest affinity level. When the
+	 * lowest affinity level is hit, all the locks are acquired. State
+	 * management is done immediately, followed by cpu, cluster, ...,
+	 * target_afflvl specific actions as this function unwinds back.
+	 */
+	rc = psci_afflvl_off(MPIDR_AFFLVL0, target_afflvl);
+
+	/*
+	 * The only error cpu_off can return is E_DENIED. So check if that's
+	 * indeed the case.
+	 */
+	assert(rc == PSCI_E_DENIED);
+
+	return rc;
+}
+
+int psci_affinity_info(unsigned long target_affinity,
+		       unsigned int lowest_affinity_level)
+{
+	int rc = PSCI_E_INVALID_PARAMS;
+	unsigned int aff_state;
+	aff_map_node_t *node;
+
+	if (lowest_affinity_level > PLATFORM_MAX_AFFLVL)
+		return rc;
+
+	node = psci_get_aff_map_node(target_affinity, lowest_affinity_level);
+	if (node && (node->state & PSCI_AFF_PRESENT)) {
+
+		/*
+		 * TODO: For affinity levels higher than 0, i.e. above the
+		 * cpu level, the state will always be either ON or OFF.
Need to investigate
+		 * how critical it is to support ON_PENDING here.
+		 */
+		aff_state = psci_get_state(node);
+
+		/* A suspended cpu is available & on for the OS */
+		if (aff_state == PSCI_STATE_SUSPEND) {
+			aff_state = PSCI_STATE_ON;
+		}
+
+		rc = aff_state;
+	}
+
+	return rc;
+}
+
+int psci_migrate(unsigned long target_cpu)
+{
+	int rc;
+	unsigned long resident_cpu_mpidr;
+
+	rc = psci_spd_migrate_info(&resident_cpu_mpidr);
+	if (rc != PSCI_TOS_UP_MIG_CAP)
+		return (rc == PSCI_TOS_NOT_UP_MIG_CAP) ?
+			  PSCI_E_DENIED : PSCI_E_NOT_SUPPORTED;
+
+	/*
+	 * Migrate should only be invoked on the CPU where
+	 * the Secure OS is resident.
+	 */
+	if (resident_cpu_mpidr != read_mpidr_el1())
+		return PSCI_E_NOT_PRESENT;
+
+	/* Check the validity of the specified target cpu */
+	rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0);
+	if (rc != PSCI_E_SUCCESS)
+		return PSCI_E_INVALID_PARAMS;
+
+	assert(psci_spd_pm && psci_spd_pm->svc_migrate);
+
+	rc = psci_spd_pm->svc_migrate(read_mpidr_el1(), target_cpu);
+	assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
+
+	return rc;
+}
+
+int psci_migrate_info_type(void)
+{
+	unsigned long resident_cpu_mpidr;
+
+	return psci_spd_migrate_info(&resident_cpu_mpidr);
+}
+
+long psci_migrate_info_up_cpu(void)
+{
+	unsigned long resident_cpu_mpidr;
+	int rc;
+
+	/*
+	 * Return value of this depends upon what
+	 * psci_spd_migrate_info() returns.
+	 */
+	rc = psci_spd_migrate_info(&resident_cpu_mpidr);
+	if (rc != PSCI_TOS_NOT_UP_MIG_CAP && rc != PSCI_TOS_UP_MIG_CAP)
+		return PSCI_E_INVALID_PARAMS;
+
+	return resident_cpu_mpidr;
+}
+
+int psci_features(unsigned int psci_fid)
+{
+	uint32_t local_caps = psci_caps;
+
+	/* Check if it is a 64 bit function */
+	if (((psci_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64)
+		local_caps &= PSCI_CAP_64BIT_MASK;
+
+	/* Check for invalid fid */
+	if (!(is_std_svc_call(psci_fid) && is_valid_fast_smc(psci_fid)
+			&& is_psci_fid(psci_fid)))
+		return PSCI_E_NOT_SUPPORTED;
+
+
+	/* Check if the psci fid is supported or not */
+	if (!(local_caps & define_psci_cap(psci_fid)))
+		return PSCI_E_NOT_SUPPORTED;
+
+	/* Format the feature flags */
+	if (psci_fid == PSCI_CPU_SUSPEND_AARCH32 ||
+	    psci_fid == PSCI_CPU_SUSPEND_AARCH64) {
+		/*
+		 * The trusted firmware uses the original power state format
+		 * and does not support OS Initiated Mode.
+		 */
+		return (FF_PSTATE_ORIG << FF_PSTATE_SHIFT) |
+			((!FF_SUPPORTS_OS_INIT_MODE) << FF_MODE_SUPPORT_SHIFT);
+	}
+
+	/* Return 0 for all other fid's */
+	return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * PSCI top level handler for servicing SMCs.
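+ *
+ * For illustration only (not part of this patch), a normal world caller
+ * reaches this handler via an SMC, e.g. for CPU_ON:
+ *
+ *   x0 = PSCI_CPU_ON_AARCH64, x1 = target mpidr, x2 = entrypoint,
+ *   x3 = context id, followed by 'smc #0'.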
+ ******************************************************************************/ +uint64_t psci_smc_handler(uint32_t smc_fid, + uint64_t x1, + uint64_t x2, + uint64_t x3, + uint64_t x4, + void *cookie, + void *handle, + uint64_t flags) +{ + if (is_caller_secure(flags)) + SMC_RET1(handle, SMC_UNK); + + /* Check the fid against the capabilities */ + if (!(psci_caps & define_psci_cap(smc_fid))) + SMC_RET1(handle, SMC_UNK); + + if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) { + /* 32-bit PSCI function, clear top parameter bits */ + + x1 = (uint32_t)x1; + x2 = (uint32_t)x2; + x3 = (uint32_t)x3; + + switch (smc_fid) { + case PSCI_VERSION: + SMC_RET1(handle, psci_version()); + + case PSCI_CPU_OFF: + SMC_RET1(handle, psci_cpu_off()); + + case PSCI_CPU_SUSPEND_AARCH32: + SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3)); + + case PSCI_CPU_ON_AARCH32: + SMC_RET1(handle, psci_cpu_on(x1, x2, x3)); + + case PSCI_AFFINITY_INFO_AARCH32: + SMC_RET1(handle, psci_affinity_info(x1, x2)); + + case PSCI_MIG_AARCH32: + SMC_RET1(handle, psci_migrate(x1)); + + case PSCI_MIG_INFO_TYPE: + SMC_RET1(handle, psci_migrate_info_type()); + + case PSCI_MIG_INFO_UP_CPU_AARCH32: + SMC_RET1(handle, psci_migrate_info_up_cpu()); + + case PSCI_SYSTEM_SUSPEND_AARCH32: + SMC_RET1(handle, psci_system_suspend(x1, x2)); + + case PSCI_SYSTEM_OFF: + psci_system_off(); + /* We should never return from psci_system_off() */ + + case PSCI_SYSTEM_RESET: + psci_system_reset(); + /* We should never return from psci_system_reset() */ + + case PSCI_FEATURES: + SMC_RET1(handle, psci_features(x1)); + + default: + break; + } + } else { + /* 64-bit PSCI function */ + + switch (smc_fid) { + case PSCI_CPU_SUSPEND_AARCH64: + SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3)); + + case PSCI_CPU_ON_AARCH64: + SMC_RET1(handle, psci_cpu_on(x1, x2, x3)); + + case PSCI_AFFINITY_INFO_AARCH64: + SMC_RET1(handle, psci_affinity_info(x1, x2)); + + case PSCI_MIG_AARCH64: + SMC_RET1(handle, psci_migrate(x1)); + + case PSCI_MIG_INFO_UP_CPU_AARCH64: + SMC_RET1(handle, psci_migrate_info_up_cpu()); + + case PSCI_SYSTEM_SUSPEND_AARCH64: + SMC_RET1(handle, psci_system_suspend(x1, x2)); + + default: + break; + } + } + + WARN("Unimplemented PSCI Call: 0x%x \n", smc_fid); + SMC_RET1(handle, SMC_UNK); +} diff --git a/services/std_svc/psci1.0/psci_private.h b/services/std_svc/psci1.0/psci_private.h new file mode 100644 index 0000000..2955de7 --- /dev/null +++ b/services/std_svc/psci1.0/psci_private.h @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PSCI_PRIVATE_H__
+#define __PSCI_PRIVATE_H__
+
+#include
+#include
+#include
+#include
+
+/*
+ * The following helper macros abstract the interface to the Bakery
+ * Lock API.
+ */
+#if USE_COHERENT_MEM
+#define psci_lock_init(aff_map, idx)	bakery_lock_init(&(aff_map)[(idx)].lock)
+#define psci_lock_get(node)		bakery_lock_get(&((node)->lock))
+#define psci_lock_release(node)		bakery_lock_release(&((node)->lock))
+#else
+#define psci_lock_init(aff_map, idx)	((aff_map)[(idx)].aff_map_index = (idx))
+#define psci_lock_get(node)		bakery_lock_get((node)->aff_map_index,	  \
+						CPU_DATA_PSCI_LOCK_OFFSET)
+#define psci_lock_release(node)		bakery_lock_release((node)->aff_map_index,\
+						CPU_DATA_PSCI_LOCK_OFFSET)
+#endif
+
+/*
+ * The PSCI capabilities which are provided by the generic code and do not
+ * depend on the platform or SPD capabilities.
+ */
+#define PSCI_GENERIC_CAP	\
+			(define_psci_cap(PSCI_VERSION) |		\
+			define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
+			define_psci_cap(PSCI_FEATURES))
+
+/*
+ * The PSCI capabilities mask for 64 bit functions.
+ */
+#define PSCI_CAP_64BIT_MASK	\
+			(define_psci_cap(PSCI_CPU_SUSPEND_AARCH64) |	\
+			define_psci_cap(PSCI_CPU_ON_AARCH64) |		\
+			define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
+			define_psci_cap(PSCI_MIG_AARCH64) |		\
+			define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) |	\
+			define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64))
+
+
+/*******************************************************************************
+ * The following two data structures hold the topology tree which in turn tracks
+ * the state of all the affinity instances supported by the platform.
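+ *
+ * For illustration only (not part of this patch), a hypothetical system
+ * with two clusters of two cpus each would be tracked with:
+ *
+ *   2 nodes at MPIDR_AFFLVL1 (one per cluster, ref_count of ON cpus)
+ *   4 nodes at MPIDR_AFFLVL0 (one per cpu, holding the cpu state)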
+ ******************************************************************************/ +typedef struct aff_map_node { + unsigned long mpidr; + unsigned char ref_count; + unsigned char state; + unsigned char level; +#if USE_COHERENT_MEM + bakery_lock_t lock; +#else + /* For indexing the bakery_info array in per CPU data */ + unsigned char aff_map_index; +#endif +} aff_map_node_t; + +typedef struct aff_limits_node { + int min; + int max; +} aff_limits_node_t; + +typedef aff_map_node_t (*mpidr_aff_map_nodes_t[MPIDR_MAX_AFFLVL + 1]); +typedef void (*afflvl_power_on_finisher_t)(aff_map_node_t *); + +/******************************************************************************* + * Data prototypes + ******************************************************************************/ +extern const plat_pm_ops_t *psci_plat_pm_ops; +extern aff_map_node_t psci_aff_map[PSCI_NUM_AFFS]; +extern aff_limits_node_t psci_aff_limits[MPIDR_MAX_AFFLVL + 1]; +extern uint32_t psci_caps; + +/******************************************************************************* + * SPD's power management hooks registered with PSCI + ******************************************************************************/ +extern const spd_pm_ops_t *psci_spd_pm; + +/******************************************************************************* + * Function prototypes + ******************************************************************************/ +/* Private exported functions from psci_common.c */ +unsigned short psci_get_state(aff_map_node_t *node); +unsigned short psci_get_phys_state(aff_map_node_t *node); +void psci_set_state(aff_map_node_t *node, unsigned short state); +unsigned long mpidr_set_aff_inst(unsigned long, unsigned char, int); +int psci_validate_mpidr(unsigned long, int); +int get_power_on_target_afflvl(void); +void psci_afflvl_power_on_finish(int, + int, + afflvl_power_on_finisher_t *); +int psci_get_ns_ep_info(entry_point_info_t *ep, + uint64_t entrypoint, uint64_t context_id); +int psci_check_afflvl_range(int start_afflvl, int end_afflvl); +void psci_do_afflvl_state_mgmt(uint32_t start_afflvl, + uint32_t end_afflvl, + aff_map_node_t *mpidr_nodes[], + uint32_t state); +void psci_acquire_afflvl_locks(int start_afflvl, + int end_afflvl, + aff_map_node_t *mpidr_nodes[]); +void psci_release_afflvl_locks(int start_afflvl, + int end_afflvl, + mpidr_aff_map_nodes_t mpidr_nodes); +void psci_print_affinity_map(void); +void psci_set_max_phys_off_afflvl(uint32_t afflvl); +uint32_t psci_find_max_phys_off_afflvl(uint32_t start_afflvl, + uint32_t end_afflvl, + aff_map_node_t *mpidr_nodes[]); +unsigned int psci_is_last_on_cpu(void); +int psci_spd_migrate_info(uint64_t *mpidr); + +/* Private exported functions from psci_setup.c */ +int psci_get_aff_map_nodes(unsigned long mpidr, + int start_afflvl, + int end_afflvl, + aff_map_node_t *mpidr_nodes[]); +aff_map_node_t *psci_get_aff_map_node(unsigned long, int); + +/* Private exported functions from psci_affinity_on.c */ +int psci_afflvl_on(unsigned long target_cpu, + entry_point_info_t *ep, + int start_afflvl, + int end_afflvl); + +/* Private exported functions from psci_affinity_off.c */ +int psci_afflvl_off(int, int); + +/* Private exported functions from psci_affinity_suspend.c */ +void psci_afflvl_suspend(entry_point_info_t *ep, + int start_afflvl, + int end_afflvl); + +unsigned int psci_afflvl_suspend_finish(int, int); +void psci_set_suspend_power_state(unsigned int power_state); + +/* Private exported functions from psci_helpers.S */ +void psci_do_pwrdown_cache_maintenance(uint32_t 
affinity_level);
+void psci_do_pwrup_cache_maintenance(void);
+
+/* Private exported functions from psci_system_off.c */
+void __dead2 psci_system_off(void);
+void __dead2 psci_system_reset(void);
+
+#endif /* __PSCI_PRIVATE_H__ */
diff --git a/services/std_svc/psci1.0/psci_setup.c b/services/std_svc/psci1.0/psci_setup.c
new file mode 100644
index 0000000..01b559c
--- /dev/null
+++ b/services/std_svc/psci1.0/psci_setup.c
@@ -0,0 +1,400 @@
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "psci_private.h"
+
+/*******************************************************************************
+ * Per cpu non-secure contexts used to program the architectural state prior
+ * to returning to the normal world.
+ * TODO: Use the memory allocator to set aside memory for the contexts instead
+ * of relying on platform defined constants. Using PSCI_NUM_AFFS would be
+ * overkill.
+ ******************************************************************************/
+static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * In a system, a certain number of affinity instances are present at an
+ * affinity level. The cumulative number of instances across all levels is
+ * stored in 'psci_aff_map'. The topology tree has been flattened into this
+ * array. To retrieve nodes, information about the extents of each affinity
+ * level i.e. start index and end index needs to be present. 'psci_aff_limits'
+ * stores this information.
+ ******************************************************************************/
+aff_limits_node_t psci_aff_limits[MPIDR_MAX_AFFLVL + 1];
+
+/******************************************************************************
+ * Define the psci capability variable.
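+ *
+ * For illustration only (not part of this patch): psci_caps is seeded
+ * from PSCI_GENERIC_CAP during setup and extended as hooks are
+ * registered, e.g. psci_caps |= define_psci_cap(PSCI_CPU_ON_AARCH64);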
+uint32_t psci_caps;
+
+
+/*******************************************************************************
+ * Routines for retrieving the node corresponding to an affinity level instance
+ * in the mpidr. The first one uses binary search to find the node corresponding
+ * to the mpidr (key) at a particular affinity level. The second routine decides
+ * the extents of the binary search at each affinity level.
+ ******************************************************************************/
+static int psci_aff_map_get_idx(unsigned long key,
+				int min_idx,
+				int max_idx)
+{
+	int mid;
+
+	/*
+	 * Terminating condition: If the max and min indices have crossed paths
+	 * during the binary search then the key has not been found.
+	 */
+	if (max_idx < min_idx)
+		return PSCI_E_INVALID_PARAMS;
+
+	/*
+	 * Make sure we are within array limits.
+	 */
+	assert(min_idx >= 0 && max_idx < PSCI_NUM_AFFS);
+
+	/*
+	 * Bisect the array around 'mid' and then recurse into the array chunk
+	 * where the key is likely to be found. The mpidrs in each node in the
+	 * 'psci_aff_map' for a given affinity level are stored in an ascending
+	 * order which makes the binary search possible.
+	 */
+	mid = min_idx + ((max_idx - min_idx) >> 1);	/* Divide by 2 */
+
+	if (psci_aff_map[mid].mpidr > key)
+		return psci_aff_map_get_idx(key, min_idx, mid - 1);
+	else if (psci_aff_map[mid].mpidr < key)
+		return psci_aff_map_get_idx(key, mid + 1, max_idx);
+	else
+		return mid;
+}
+
+aff_map_node_t *psci_get_aff_map_node(unsigned long mpidr, int aff_lvl)
+{
+	int rc;
+
+	if (aff_lvl > PLATFORM_MAX_AFFLVL)
+		return NULL;
+
+	/* Right shift the mpidr to the required affinity level */
+	mpidr = mpidr_mask_lower_afflvls(mpidr, aff_lvl);
+
+	rc = psci_aff_map_get_idx(mpidr,
+				  psci_aff_limits[aff_lvl].min,
+				  psci_aff_limits[aff_lvl].max);
+	if (rc >= 0)
+		return &psci_aff_map[rc];
+	else
+		return NULL;
+}
+
+/*******************************************************************************
+ * This function populates an array with nodes corresponding to a given range of
+ * affinity levels in an mpidr. It returns successfully only when the affinity
+ * levels are correct and the mpidr is valid, i.e. no affinity level is absent
+ * from the topology tree and the affinity instance at level 0 is not absent.
+ ******************************************************************************/
+int psci_get_aff_map_nodes(unsigned long mpidr,
+			   int start_afflvl,
+			   int end_afflvl,
+			   aff_map_node_t *mpidr_nodes[])
+{
+	int rc, level;
+	aff_map_node_t *node;
+
+	rc = psci_check_afflvl_range(start_afflvl, end_afflvl);
+	if (rc != PSCI_E_SUCCESS)
+		return rc;
+
+	for (level = start_afflvl; level <= end_afflvl; level++) {
+
+		/*
+		 * Grab the node for each affinity level. No affinity level
+		 * can be missing as that would mean that the topology tree
+		 * is corrupted.
+		 */
+		node = psci_get_aff_map_node(mpidr, level);
+		if (node == NULL) {
+			rc = PSCI_E_INVALID_PARAMS;
+			break;
+		}
+
+		/*
+		 * Skip absent affinity levels unless it's affinity level 0.
+		 * An absent cpu means that the mpidr is invalid. Save the
+		 * pointer to the node for the present affinity level.
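+		 * For instance, with the two-cluster, four-cpus-per-cluster
+		 * topology described later in this file, an mpidr whose cpu
+		 * (level 0) instance is reported absent by the platform makes
+		 * this function fail with PSCI_E_INVALID_PARAMS, whereas an
+		 * absent cluster level instance is simply recorded as a NULL
+		 * entry in 'mpidr_nodes'.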
+		 */
+		if (!(node->state & PSCI_AFF_PRESENT)) {
+			if (level == MPIDR_AFFLVL0) {
+				rc = PSCI_E_INVALID_PARAMS;
+				break;
+			}
+
+			mpidr_nodes[level] = NULL;
+		} else
+			mpidr_nodes[level] = node;
+	}
+
+	return rc;
+}
+
+/*******************************************************************************
+ * Function which initializes the 'aff_map_node' corresponding to an affinity
+ * level instance. Each node has a unique mpidr, level and bakery lock. The data
+ * field is opaque and holds affinity level specific data e.g. for affinity
+ * level 0 it contains the index into arrays that hold the secure/non-secure
+ * state for a cpu that's been turned on/off.
+ ******************************************************************************/
+static void psci_init_aff_map_node(unsigned long mpidr,
+				   int level,
+				   unsigned int idx)
+{
+	unsigned char state;
+	uint32_t linear_id;
+
+	psci_aff_map[idx].mpidr = mpidr;
+	psci_aff_map[idx].level = level;
+	psci_lock_init(psci_aff_map, idx);
+
+	/*
+	 * If an affinity instance is present then mark it as OFF to begin
+	 * with.
+	 */
+	state = plat_get_aff_state(level, mpidr);
+	psci_aff_map[idx].state = state;
+
+	if (level == MPIDR_AFFLVL0) {
+
+		/*
+		 * Mark the cpu as OFF. Higher affinity level reference counts
+		 * have already been memset to 0.
+		 */
+		if (state & PSCI_AFF_PRESENT)
+			psci_set_state(&psci_aff_map[idx], PSCI_STATE_OFF);
+
+		/*
+		 * Associate a non-secure context with this affinity
+		 * instance through the context management library.
+		 */
+		linear_id = platform_get_core_pos(mpidr);
+		assert(linear_id < PLATFORM_CORE_COUNT);
+
+		/* Invalidate the suspend context for the node */
+		set_cpu_data_by_index(linear_id,
+				      psci_svc_cpu_data.power_state,
+				      PSCI_INVALID_DATA);
+
+		/*
+		 * There is no state associated with the current execution
+		 * context so ensure that any reads of the highest affinity
+		 * level in a powered down state return PSCI_INVALID_DATA.
+		 */
+		set_cpu_data_by_index(linear_id,
+				      psci_svc_cpu_data.max_phys_off_afflvl,
+				      PSCI_INVALID_DATA);
+
+		flush_cpu_data_by_index(linear_id, psci_svc_cpu_data);
+
+		cm_set_context_by_mpidr(mpidr,
+					(void *) &psci_ns_context[linear_id],
+					NON_SECURE);
+	}
+
+	return;
+}
+
+/*******************************************************************************
+ * Core routine used by the Breadth-First-Search algorithm to populate the
+ * affinity tree. Each level in the tree corresponds to an affinity level. This
+ * routine's aim is to traverse to the target affinity level and populate nodes
+ * in the 'psci_aff_map' for all the siblings at that level. It uses the current
+ * affinity level to keep track of how many levels from the root of the tree
+ * have been traversed. If the current affinity level != target affinity level,
+ * then the platform is asked to return the number of children that each
+ * affinity instance has at the current affinity level. Traversal is then done
+ * for each child at the next lower level i.e. current affinity level - 1.
+ *
+ * CAUTION: This routine assumes that affinity instance ids are allocated in a
+ * monotonically increasing manner at each affinity level in an mpidr starting
+ * from 0. If the platform breaks this assumption then this code will have to
+ * be reworked accordingly.
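+ *
+ * For example, a cluster with four cpus must report them at level 0 as
+ * affinity instance ids 0, 1, 2 and 3. A platform that numbered them 0, 1,
+ * 4 and 5 would leave holes that this traversal cannot handle.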
+ ******************************************************************************/
+static unsigned int psci_init_aff_map(unsigned long mpidr,
+				      unsigned int affmap_idx,
+				      int cur_afflvl,
+				      int tgt_afflvl)
+{
+	unsigned int ctr, aff_count;
+
+	assert(cur_afflvl >= tgt_afflvl);
+
+	/*
+	 * Find the number of siblings at the current affinity level and
+	 * assert if there are none, because that means we have been invoked
+	 * with an invalid mpidr.
+	 */
+	aff_count = plat_get_aff_count(cur_afflvl, mpidr);
+	assert(aff_count);
+
+	if (tgt_afflvl < cur_afflvl) {
+		for (ctr = 0; ctr < aff_count; ctr++) {
+			mpidr = mpidr_set_aff_inst(mpidr, ctr, cur_afflvl);
+			affmap_idx = psci_init_aff_map(mpidr,
+						       affmap_idx,
+						       cur_afflvl - 1,
+						       tgt_afflvl);
+		}
+	} else {
+		for (ctr = 0; ctr < aff_count; ctr++, affmap_idx++) {
+			mpidr = mpidr_set_aff_inst(mpidr, ctr, cur_afflvl);
+			psci_init_aff_map_node(mpidr, cur_afflvl, affmap_idx);
+		}
+
+		/* affmap_idx is 1 greater than the max index of cur_afflvl */
+		psci_aff_limits[cur_afflvl].max = affmap_idx - 1;
+	}
+
+	return affmap_idx;
+}
+
+/*******************************************************************************
+ * This function initializes the topology tree by querying the platform. To do
+ * so, its helper routines implement a Breadth-First-Search. At each affinity
+ * level the platform conveys the number of affinity instances that exist i.e.
+ * the affinity count. The algorithm populates the psci_aff_map recursively
+ * using this information. On a platform that implements two clusters of 4 cpus
+ * each, the populated 'psci_aff_map' array would look like this:
+ *
+ *            <- cpus cluster0 -><- cpus cluster1 ->
+ * ---------------------------------------------------
+ * | 0  | 1  | 0  | 1  | 2  | 3  | 0  | 1  | 2  | 3  |
+ * ---------------------------------------------------
+ *          ^                                        ^
+ * cluster _|                                  cpu __|
+ * limit                                       limit
+ *
+ * The first 2 entries are the cluster nodes. The next 4 entries are the cpus
+ * within cluster 0. The last 4 entries are the cpus within cluster 1.
+ * The 'psci_aff_limits' array contains the max and min index of each affinity
+ * level within the 'psci_aff_map' array. This allows restricting the search
+ * for a node at an affinity level to the indices in the limits array.
+ ******************************************************************************/
+int32_t psci_setup(void)
+{
+	unsigned long mpidr = read_mpidr();
+	int afflvl, affmap_idx, max_afflvl;
+	aff_map_node_t *node;
+
+	psci_plat_pm_ops = NULL;
+
+	/* Find out the maximum affinity level that the platform implements */
+	max_afflvl = PLATFORM_MAX_AFFLVL;
+	assert(max_afflvl <= MPIDR_MAX_AFFLVL);
+
+	/*
+	 * This call traverses the topology tree with help from the platform and
+	 * populates the affinity map using a breadth-first-search recursively.
+	 * We assume that the platform allocates affinity instance ids from 0
+	 * onwards at each affinity level in the mpidr. FIRST_MPIDR = 0.0.0.0
+	 */
+	affmap_idx = 0;
+	for (afflvl = max_afflvl; afflvl >= MPIDR_AFFLVL0; afflvl--) {
+		affmap_idx = psci_init_aff_map(FIRST_MPIDR,
+					       affmap_idx,
+					       max_afflvl,
+					       afflvl);
+	}
+
+#if !USE_COHERENT_MEM
+	/*
+	 * The psci_aff_map only needs flushing when it's not allocated in
+	 * coherent memory.
+	 */
+	flush_dcache_range((uint64_t) &psci_aff_map, sizeof(psci_aff_map));
+#endif
+
+	/*
+	 * Set the bounds for the affinity counts of each level in the map.
+	 * Also flush out the entire array so that it's visible to subsequent
+	 * power management operations.
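+	 * For the two-cluster, four-cpus-per-cluster example above, the loop
+	 * below yields psci_aff_limits[1] = {0, 1} for the cluster nodes and
+	 * psci_aff_limits[0] = {2, 9} for the cpu nodes; the top level 'min'
+	 * stays 0 from static initialization.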
+	 * The 'psci_aff_limits' array is allocated in normal memory. It will
+	 * be accessed when the mmu is off e.g. after reset. Hence it needs to
+	 * be flushed.
+	 */
+	for (afflvl = MPIDR_AFFLVL0; afflvl < max_afflvl; afflvl++) {
+		psci_aff_limits[afflvl].min =
+			psci_aff_limits[afflvl + 1].max + 1;
+	}
+
+	flush_dcache_range((unsigned long) psci_aff_limits,
+			   sizeof(psci_aff_limits));
+
+	/*
+	 * Mark the affinity instances in our mpidr as ON. No need to lock as
+	 * this is the primary cpu.
+	 */
+	mpidr &= MPIDR_AFFINITY_MASK;
+	for (afflvl = MPIDR_AFFLVL0; afflvl <= max_afflvl; afflvl++) {
+
+		node = psci_get_aff_map_node(mpidr, afflvl);
+		assert(node);
+
+		/* Mark each present node as ON. */
+		if (node->state & PSCI_AFF_PRESENT)
+			psci_set_state(node, PSCI_STATE_ON);
+	}
+
+	platform_setup_pm(&psci_plat_pm_ops);
+	assert(psci_plat_pm_ops);
+
+	/* Initialize the psci capability */
+	psci_caps = PSCI_GENERIC_CAP;
+
+	if (psci_plat_pm_ops->affinst_off)
+		psci_caps |= define_psci_cap(PSCI_CPU_OFF);
+	if (psci_plat_pm_ops->affinst_on && psci_plat_pm_ops->affinst_on_finish)
+		psci_caps |= define_psci_cap(PSCI_CPU_ON_AARCH64);
+	if (psci_plat_pm_ops->affinst_suspend &&
+	    psci_plat_pm_ops->affinst_suspend_finish) {
+		psci_caps |= define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
+		if (psci_plat_pm_ops->get_sys_suspend_power_state)
+			psci_caps |= define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
+	}
+	if (psci_plat_pm_ops->system_off)
+		psci_caps |= define_psci_cap(PSCI_SYSTEM_OFF);
+	if (psci_plat_pm_ops->system_reset)
+		psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET);
+
+	return 0;
+}
diff --git a/services/std_svc/psci1.0/psci_system_off.c b/services/std_svc/psci1.0/psci_system_off.c
new file mode 100644
index 0000000..970d4bb
--- /dev/null
+++ b/services/std_svc/psci1.0/psci_system_off.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stddef.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <console.h>
+#include <platform.h>
+#include "psci_private.h"
+
+void psci_system_off(void)
+{
+	psci_print_affinity_map();
+
+	assert(psci_plat_pm_ops->system_off);
+
+	/* Notify the Secure Payload Dispatcher */
+	if (psci_spd_pm && psci_spd_pm->svc_system_off) {
+		psci_spd_pm->svc_system_off();
+	}
+
+	/* Call the platform specific hook */
+	psci_plat_pm_ops->system_off();
+
+	/* This function does not return. We should never get here */
+}
+
+void psci_system_reset(void)
+{
+	psci_print_affinity_map();
+
+	assert(psci_plat_pm_ops->system_reset);
+
+	/* Notify the Secure Payload Dispatcher */
+	if (psci_spd_pm && psci_spd_pm->svc_system_reset) {
+		psci_spd_pm->svc_system_reset();
+	}
+
+	/* Call the platform specific hook */
+	psci_plat_pm_ops->system_reset();
+
+	/* This function does not return. We should never get here */
+}
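+
+/*
+ * Illustrative sketch only: the PSCI SMC dispatcher (not part of this hunk)
+ * is what routes the SYSTEM_OFF and SYSTEM_RESET function ids from psci.h
+ * to the hooks above, along the lines of:
+ *
+ *	case PSCI_SYSTEM_OFF:
+ *		psci_system_off();	(does not return)
+ *	case PSCI_SYSTEM_RESET:
+ *		psci_system_reset();	(does not return)
+ */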