diff --git a/Makefile b/Makefile index c1e9617..2120cb3 100644 --- a/Makefile +++ b/Makefile @@ -169,10 +169,6 @@ include ${PLAT_MAKEFILE_FULL} -# Disable the Platform Compatibility layer till the new PSCI framework is -# introduced. -ENABLE_PLAT_COMPAT := 0 - # If the platform has not defined ENABLE_PLAT_COMPAT, then enable it by default ifndef ENABLE_PLAT_COMPAT ENABLE_PLAT_COMPAT := 1 diff --git a/bl31/bl31.mk b/bl31/bl31.mk index 4c25a60..04e1542 100644 --- a/bl31/bl31.mk +++ b/bl31/bl31.mk @@ -1,5 +1,5 @@ # -# Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. +# Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: @@ -42,9 +42,9 @@ lib/cpus/aarch64/cpu_helpers.S \ lib/locks/exclusive/spinlock.S \ services/std_svc/std_svc_setup.c \ - services/std_svc/psci/psci_afflvl_off.c \ - services/std_svc/psci/psci_afflvl_on.c \ - services/std_svc/psci/psci_afflvl_suspend.c \ + services/std_svc/psci/psci_off.c \ + services/std_svc/psci/psci_on.c \ + services/std_svc/psci/psci_suspend.c \ services/std_svc/psci/psci_common.c \ services/std_svc/psci/psci_entry.S \ services/std_svc/psci/psci_helpers.S \ diff --git a/include/bl31/services/psci.h b/include/bl31/services/psci.h index dd1891c..c9b3f8d 100644 --- a/include/bl31/services/psci.h +++ b/include/bl31/services/psci.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -32,17 +32,33 @@ #define __PSCI_H__ #include -#include /* for PLATFORM_NUM_AFFS */ +#include /* for PLAT_NUM_PWR_DOMAINS */ +#if ENABLE_PLAT_COMPAT +#include +#endif /******************************************************************************* - * Number of affinity instances whose state this psci imp. can track + * Number of power domains whose state this psci imp. can track ******************************************************************************/ -#ifdef PLATFORM_NUM_AFFS -#define PSCI_NUM_AFFS PLATFORM_NUM_AFFS +#ifdef PLAT_NUM_PWR_DOMAINS +#define PSCI_NUM_PWR_DOMAINS PLAT_NUM_PWR_DOMAINS #else -#define PSCI_NUM_AFFS (2 * PLATFORM_CORE_COUNT) +#define PSCI_NUM_PWR_DOMAINS (2 * PLATFORM_CORE_COUNT) #endif +#define PSCI_NUM_NON_CPU_PWR_DOMAINS (PSCI_NUM_PWR_DOMAINS - \ + PLATFORM_CORE_COUNT) + +/* This is the power level corresponding to a CPU */ +#define PSCI_CPU_PWR_LVL 0 + +/* + * The maximum power level supported by PSCI. Since PSCI CPU_SUSPEND + * uses the old power_state parameter format which has 2 bits to specify the + * power level, this constant is defined to be 3. 
+ */ +#define PSCI_MAX_PWR_LVL 3 + /******************************************************************************* * Defines for runtime services func ids ******************************************************************************/ @@ -84,27 +100,35 @@ * PSCI CPU_SUSPEND 'power_state' parameter specific defines ******************************************************************************/ #define PSTATE_ID_SHIFT 0 -#define PSTATE_TYPE_SHIFT 16 -#define PSTATE_AFF_LVL_SHIFT 24 +#if PSCI_EXTENDED_STATE_ID +#define PSTATE_VALID_MASK 0xB0000000 +#define PSTATE_TYPE_SHIFT 30 +#define PSTATE_ID_MASK 0xfffffff +#else +#define PSTATE_VALID_MASK 0xFCFE0000 +#define PSTATE_TYPE_SHIFT 16 +#define PSTATE_PWR_LVL_SHIFT 24 #define PSTATE_ID_MASK 0xffff -#define PSTATE_TYPE_MASK 0x1 -#define PSTATE_AFF_LVL_MASK 0x3 -#define PSTATE_VALID_MASK 0xFCFE0000 +#define PSTATE_PWR_LVL_MASK 0x3 + +#define psci_get_pstate_pwrlvl(pstate) (((pstate) >> PSTATE_PWR_LVL_SHIFT) & \ + PSTATE_PWR_LVL_MASK) +#define psci_make_powerstate(state_id, type, pwrlvl) \ + (((state_id) & PSTATE_ID_MASK) << PSTATE_ID_SHIFT) |\ + (((type) & PSTATE_TYPE_MASK) << PSTATE_TYPE_SHIFT) |\ + (((pwrlvl) & PSTATE_PWR_LVL_MASK) << PSTATE_PWR_LVL_SHIFT) +#endif /* PSCI_EXTENDED_STATE_ID */ #define PSTATE_TYPE_STANDBY 0x0 #define PSTATE_TYPE_POWERDOWN 0x1 +#define PSTATE_TYPE_MASK 0x1 #define psci_get_pstate_id(pstate) (((pstate) >> PSTATE_ID_SHIFT) & \ PSTATE_ID_MASK) #define psci_get_pstate_type(pstate) (((pstate) >> PSTATE_TYPE_SHIFT) & \ PSTATE_TYPE_MASK) -#define psci_get_pstate_afflvl(pstate) (((pstate) >> PSTATE_AFF_LVL_SHIFT) & \ - PSTATE_AFF_LVL_MASK) -#define psci_make_powerstate(state_id, type, afflvl) \ - (((state_id) & PSTATE_ID_MASK) << PSTATE_ID_SHIFT) |\ - (((type) & PSTATE_TYPE_MASK) << PSTATE_TYPE_SHIFT) |\ - (((afflvl) & PSTATE_AFF_LVL_MASK) << PSTATE_AFF_LVL_SHIFT) +#define psci_check_power_state(pstate) ((pstate) & PSTATE_VALID_MASK) 
/******************************************************************************* * PSCI CPU_FEATURES feature flag specific defines @@ -113,6 +137,11 @@ #define FF_PSTATE_SHIFT 1 #define FF_PSTATE_ORIG 0 #define FF_PSTATE_EXTENDED 1 +#if PSCI_EXTENDED_STATE_ID +#define FF_PSTATE FF_PSTATE_EXTENDED +#else +#define FF_PSTATE FF_PSTATE_ORIG +#endif /* Features flags for CPU SUSPEND OS Initiated mode support. Bits [0:0] */ #define FF_MODE_SUPPORT_SHIFT 0 @@ -137,32 +166,72 @@ #define PSCI_E_NOT_PRESENT -7 #define PSCI_E_DISABLED -8 -/******************************************************************************* - * PSCI affinity state related constants. An affinity instance could be present - * or absent physically to cater for asymmetric topologies. If present then it - * could in one of the 4 further defined states. - ******************************************************************************/ -#define PSCI_STATE_SHIFT 1 -#define PSCI_STATE_MASK 0xff - -#define PSCI_AFF_ABSENT 0x0 -#define PSCI_AFF_PRESENT 0x1 -#define PSCI_STATE_ON 0x0 -#define PSCI_STATE_OFF 0x1 -#define PSCI_STATE_ON_PENDING 0x2 -#define PSCI_STATE_SUSPEND 0x3 - -#define PSCI_INVALID_DATA -1 - -#define get_phys_state(x) (x != PSCI_STATE_ON ? \ - PSCI_STATE_OFF : PSCI_STATE_ON) - -#define psci_validate_power_state(pstate) (pstate & PSTATE_VALID_MASK) - +#define PSCI_INVALID_MPIDR ~(0ULL) #ifndef __ASSEMBLY__ #include +#include + +/* + * These are the states reported by the PSCI_AFFINITY_INFO API for the specified + * CPU. The definitions of these states can be found in Section 5.7.1 in the + * PSCI specification (ARM DEN 0022C). + */ +typedef enum { + AFF_STATE_ON = 0, + AFF_STATE_OFF = 1, + AFF_STATE_ON_PENDING = 2 +} aff_info_state_t; + +/* + * Macro to represent invalid affinity level within PSCI. + */ +#define PSCI_INVALID_DATA -1 + +/* + * Type for representing the local power state at a particular level. 
+ */ +typedef uint8_t plat_local_state_t; + +/* The local state macro used to represent RUN state. */ +#define PSCI_LOCAL_STATE_RUN 0 + +/* + * Macro to test whether the plat_local_state is RUN state + */ +#define is_local_state_run(plat_local_state) \ + ((plat_local_state) == PSCI_LOCAL_STATE_RUN) + +/* + * Macro to test whether the plat_local_state is RETENTION state + */ +#define is_local_state_retn(plat_local_state) \ + (((plat_local_state) > PSCI_LOCAL_STATE_RUN) && \ + ((plat_local_state) <= PLAT_MAX_RET_STATE)) + +/* + * Macro to test whether the plat_local_state is OFF state + */ +#define is_local_state_off(plat_local_state) \ + (((plat_local_state) > PLAT_MAX_RET_STATE) && \ + ((plat_local_state) <= PLAT_MAX_OFF_STATE)) + +/***************************************************************************** + * This data structure defines the representation of the power state parameter + * for its exchange between the generic PSCI code and the platform port. For + * example, it is used by the platform port to specify the requested power + * states during a power management operation. It is used by the generic code to + * inform the platform about the target power states that each level should + * enter. + ****************************************************************************/ +typedef struct psci_power_state { + /* + * The pwr_domain_state[] stores the local power state at each level + * for the CPU. + */ + plat_local_state_t pwr_domain_state[PLAT_MAX_PWR_LVL + 1]; +} psci_power_state_t; /******************************************************************************* * Structure used to store per-cpu information relevant to the PSCI service. @@ -170,11 +239,17 @@ * this information will not reside on a cache line shared with another cpu. 
******************************************************************************/ typedef struct psci_cpu_data { - uint32_t power_state; - uint32_t max_phys_off_afflvl; /* Highest affinity level in physically - powered off state */ + /* State as seen by PSCI Affinity Info API */ + aff_info_state_t aff_info_state; + /* + * Highest power level which takes part in a power management + * operation. + */ + int8_t target_pwrlvl; + /* The local power state of this CPU */ + plat_local_state_t local_state; #if !USE_COHERENT_MEM - bakery_info_t pcpu_bakery_info[PSCI_NUM_AFFS]; + bakery_info_t pcpu_bakery_info[PSCI_NUM_NON_CPU_PWR_DOMAINS]; #endif } psci_cpu_data_t; @@ -182,25 +257,22 @@ * Structure populated by platform specific code to export routines which * perform common low level pm functions ******************************************************************************/ -typedef struct plat_pm_ops { - void (*affinst_standby)(unsigned int power_state); - int (*affinst_on)(unsigned long mpidr, - unsigned long sec_entrypoint, - unsigned int afflvl, - unsigned int state); - void (*affinst_off)(unsigned int afflvl, unsigned int state); - void (*affinst_suspend)(unsigned long sec_entrypoint, - unsigned int afflvl, - unsigned int state); - void (*affinst_on_finish)(unsigned int afflvl, unsigned int state); - void (*affinst_suspend_finish)(unsigned int afflvl, - unsigned int state); +typedef struct plat_psci_ops { + void (*cpu_standby)(plat_local_state_t cpu_state); + int (*pwr_domain_on)(u_register_t mpidr); + void (*pwr_domain_off)(const psci_power_state_t *target_state); + void (*pwr_domain_suspend)(const psci_power_state_t *target_state); + void (*pwr_domain_on_finish)(const psci_power_state_t *target_state); + void (*pwr_domain_suspend_finish)( + const psci_power_state_t *target_state); void (*system_off)(void) __dead2; void (*system_reset)(void) __dead2; - int (*validate_power_state)(unsigned int power_state); + int (*validate_power_state)(unsigned int power_state, + 
psci_power_state_t *req_state); int (*validate_ns_entrypoint)(unsigned long ns_entrypoint); - unsigned int (*get_sys_suspend_power_state)(void); -} plat_pm_ops_t; + void (*get_sys_suspend_power_state)( + psci_power_state_t *req_state); +} plat_psci_ops_t; /******************************************************************************* * Optional structure populated by the Secure Payload Dispatcher to be given a @@ -232,13 +304,8 @@ unsigned long, unsigned long); void __dead2 psci_power_down_wfi(void); -void psci_aff_on_finish_entry(void); -void psci_aff_suspend_finish_entry(void); +void psci_entrypoint(void); void psci_register_spd_pm_hook(const spd_pm_ops_t *); -int psci_get_suspend_stateid_by_mpidr(unsigned long); -int psci_get_suspend_stateid(void); -int psci_get_suspend_afflvl(void); -uint32_t psci_get_max_phys_off_afflvl(void); uint64_t psci_smc_handler(uint32_t smc_fid, uint64_t x1, @@ -252,8 +319,6 @@ /* PSCI setup function */ int32_t psci_setup(void); - #endif /*__ASSEMBLY__*/ - #endif /* __PSCI_H__ */ diff --git a/include/bl31/services/psci1.0/psci.h b/include/bl31/services/psci1.0/psci.h deleted file mode 100644 index 9361187..0000000 --- a/include/bl31/services/psci1.0/psci.h +++ /dev/null @@ -1,324 +0,0 @@ -/* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. 
- * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __PSCI_H__ -#define __PSCI_H__ - -#include -#include /* for PLAT_NUM_PWR_DOMAINS */ -#if ENABLE_PLAT_COMPAT -#include -#endif - -/******************************************************************************* - * Number of power domains whose state this psci imp. can track - ******************************************************************************/ -#ifdef PLAT_NUM_PWR_DOMAINS -#define PSCI_NUM_PWR_DOMAINS PLAT_NUM_PWR_DOMAINS -#else -#define PSCI_NUM_PWR_DOMAINS (2 * PLATFORM_CORE_COUNT) -#endif - -#define PSCI_NUM_NON_CPU_PWR_DOMAINS (PSCI_NUM_PWR_DOMAINS - \ - PLATFORM_CORE_COUNT) - -/* This is the power level corresponding to a CPU */ -#define PSCI_CPU_PWR_LVL 0 - -/* - * The maximum power level supported by PSCI. Since PSCI CPU_SUSPEND - * uses the old power_state parameter format which has 2 bits to specify the - * power level, this constant is defined to be 3. 
- */ -#define PSCI_MAX_PWR_LVL 3 - -/******************************************************************************* - * Defines for runtime services func ids - ******************************************************************************/ -#define PSCI_VERSION 0x84000000 -#define PSCI_CPU_SUSPEND_AARCH32 0x84000001 -#define PSCI_CPU_SUSPEND_AARCH64 0xc4000001 -#define PSCI_CPU_OFF 0x84000002 -#define PSCI_CPU_ON_AARCH32 0x84000003 -#define PSCI_CPU_ON_AARCH64 0xc4000003 -#define PSCI_AFFINITY_INFO_AARCH32 0x84000004 -#define PSCI_AFFINITY_INFO_AARCH64 0xc4000004 -#define PSCI_MIG_AARCH32 0x84000005 -#define PSCI_MIG_AARCH64 0xc4000005 -#define PSCI_MIG_INFO_TYPE 0x84000006 -#define PSCI_MIG_INFO_UP_CPU_AARCH32 0x84000007 -#define PSCI_MIG_INFO_UP_CPU_AARCH64 0xc4000007 -#define PSCI_SYSTEM_OFF 0x84000008 -#define PSCI_SYSTEM_RESET 0x84000009 -#define PSCI_FEATURES 0x8400000A -#define PSCI_SYSTEM_SUSPEND_AARCH32 0x8400000E -#define PSCI_SYSTEM_SUSPEND_AARCH64 0xc400000E - -/* Macro to help build the psci capabilities bitfield */ -#define define_psci_cap(x) (1 << (x & 0x1f)) - -/* - * Number of PSCI calls (above) implemented - */ -#define PSCI_NUM_CALLS 18 - -/******************************************************************************* - * PSCI Migrate and friends - ******************************************************************************/ -#define PSCI_TOS_UP_MIG_CAP 0 -#define PSCI_TOS_NOT_UP_MIG_CAP 1 -#define PSCI_TOS_NOT_PRESENT_MP 2 - -/******************************************************************************* - * PSCI CPU_SUSPEND 'power_state' parameter specific defines - ******************************************************************************/ -#define PSTATE_ID_SHIFT 0 - -#if PSCI_EXTENDED_STATE_ID -#define PSTATE_VALID_MASK 0xB0000000 -#define PSTATE_TYPE_SHIFT 30 -#define PSTATE_ID_MASK 0xfffffff -#else -#define PSTATE_VALID_MASK 0xFCFE0000 -#define PSTATE_TYPE_SHIFT 16 -#define PSTATE_PWR_LVL_SHIFT 24 -#define PSTATE_ID_MASK 0xffff 
-#define PSTATE_PWR_LVL_MASK 0x3 - -#define psci_get_pstate_pwrlvl(pstate) (((pstate) >> PSTATE_PWR_LVL_SHIFT) & \ - PSTATE_PWR_LVL_MASK) -#define psci_make_powerstate(state_id, type, pwrlvl) \ - (((state_id) & PSTATE_ID_MASK) << PSTATE_ID_SHIFT) |\ - (((type) & PSTATE_TYPE_MASK) << PSTATE_TYPE_SHIFT) |\ - (((pwrlvl) & PSTATE_PWR_LVL_MASK) << PSTATE_PWR_LVL_SHIFT) -#endif /* __PSCI_EXTENDED_STATE_ID__ */ - -#define PSTATE_TYPE_STANDBY 0x0 -#define PSTATE_TYPE_POWERDOWN 0x1 -#define PSTATE_TYPE_MASK 0x1 - -#define psci_get_pstate_id(pstate) (((pstate) >> PSTATE_ID_SHIFT) & \ - PSTATE_ID_MASK) -#define psci_get_pstate_type(pstate) (((pstate) >> PSTATE_TYPE_SHIFT) & \ - PSTATE_TYPE_MASK) -#define psci_check_power_state(pstate) ((pstate) & PSTATE_VALID_MASK) - -/******************************************************************************* - * PSCI CPU_FEATURES feature flag specific defines - ******************************************************************************/ -/* Features flags for CPU SUSPEND power state parameter format. Bits [1:1] */ -#define FF_PSTATE_SHIFT 1 -#define FF_PSTATE_ORIG 0 -#define FF_PSTATE_EXTENDED 1 -#if PSCI_EXTENDED_STATE_ID -#define FF_PSTATE FF_PSTATE_EXTENDED -#else -#define FF_PSTATE FF_PSTATE_ORIG -#endif - -/* Features flags for CPU SUSPEND OS Initiated mode support. 
Bits [0:0] */ -#define FF_MODE_SUPPORT_SHIFT 0 -#define FF_SUPPORTS_OS_INIT_MODE 1 - -/******************************************************************************* - * PSCI version - ******************************************************************************/ -#define PSCI_MAJOR_VER (1 << 16) -#define PSCI_MINOR_VER 0x0 - -/******************************************************************************* - * PSCI error codes - ******************************************************************************/ -#define PSCI_E_SUCCESS 0 -#define PSCI_E_NOT_SUPPORTED -1 -#define PSCI_E_INVALID_PARAMS -2 -#define PSCI_E_DENIED -3 -#define PSCI_E_ALREADY_ON -4 -#define PSCI_E_ON_PENDING -5 -#define PSCI_E_INTERN_FAIL -6 -#define PSCI_E_NOT_PRESENT -7 -#define PSCI_E_DISABLED -8 - -#define PSCI_INVALID_MPIDR ~(0ULL) - -#ifndef __ASSEMBLY__ - -#include -#include - -/* - * These are the states reported by the PSCI_AFFINITY_INFO API for the specified - * CPU. The definitions of these states can be found in Section 5.7.1 in the - * PSCI specification (ARM DEN 0022C). - */ -typedef enum aff_info_state { - AFF_STATE_ON = 0, - AFF_STATE_OFF = 1, - AFF_STATE_ON_PENDING = 2 -} aff_info_state_t; - -/* - * Macro to represent invalid affinity level within PSCI. - */ -#define PSCI_INVALID_DATA -1 - -/* - * Type for representing the local power state at a particular level. - */ -typedef uint8_t plat_local_state_t; - -/* The local state macro used to represent RUN state. 
*/ -#define PSCI_LOCAL_STATE_RUN 0 - -/* - * Macro to test whether the plat_local_state is RUN state - */ -#define is_local_state_run(plat_local_state) \ - ((plat_local_state) == PSCI_LOCAL_STATE_RUN) - -/* - * Macro to test whether the plat_local_state is RETENTION state - */ -#define is_local_state_retn(plat_local_state) \ - (((plat_local_state) > PSCI_LOCAL_STATE_RUN) && \ - ((plat_local_state) <= PLAT_MAX_RET_STATE)) - -/* - * Macro to test whether the plat_local_state is OFF state - */ -#define is_local_state_off(plat_local_state) \ - (((plat_local_state) > PLAT_MAX_RET_STATE) && \ - ((plat_local_state) <= PLAT_MAX_OFF_STATE)) - -/***************************************************************************** - * This data structure defines the representation of the power state parameter - * for its exchange between the generic PSCI code and the platform port. For - * example, it is used by the platform port to specify the requested power - * states during a power management operation. It is used by the generic code - * to inform the platform about the target power states that each level - * should enter. - ****************************************************************************/ -typedef struct psci_power_state { - /* - * The pwr_domain_state[] stores the local power state at each level - * for the CPU. - */ - plat_local_state_t pwr_domain_state[PLAT_MAX_PWR_LVL + 1]; -} psci_power_state_t; - -/******************************************************************************* - * Structure used to store per-cpu information relevant to the PSCI service. - * It is populated in the per-cpu data array. In return we get a guarantee that - * this information will not reside on a cache line shared with another cpu. 
- ******************************************************************************/ -typedef struct psci_cpu_data { - /* State as seen by PSCI Affinity Info API */ - aff_info_state_t aff_info_state; - /* - * Highest power level which takes part in a power management - * operation. - */ - int8_t target_pwrlvl; - /* The local power state of this CPU */ - plat_local_state_t local_state; -#if !USE_COHERENT_MEM - bakery_info_t pcpu_bakery_info[PSCI_NUM_NON_CPU_PWR_DOMAINS]; -#endif -} psci_cpu_data_t; - -/******************************************************************************* - * Structure populated by platform specific code to export routines which - * perform common low level pm functions - ******************************************************************************/ -typedef struct plat_psci_ops { - void (*cpu_standby)(plat_local_state_t cpu_state); - int (*pwr_domain_on)(u_register_t mpidr); - void (*pwr_domain_off)(const psci_power_state_t *target_state); - void (*pwr_domain_suspend)(const psci_power_state_t *target_state); - void (*pwr_domain_on_finish)(const psci_power_state_t *target_state); - void (*pwr_domain_suspend_finish)( - const psci_power_state_t *target_state); - void (*system_off)(void) __dead2; - void (*system_reset)(void) __dead2; - int (*validate_power_state)(unsigned int power_state, - psci_power_state_t *req_state); - int (*validate_ns_entrypoint)(unsigned long ns_entrypoint); - void (*get_sys_suspend_power_state)( - psci_power_state_t *req_state); -} plat_psci_ops_t; - -/******************************************************************************* - * Optional structure populated by the Secure Payload Dispatcher to be given a - * chance to perform any bookkeeping before PSCI executes a power mgmt. - * operation. It also allows PSCI to determine certain properties of the SP e.g. - * migrate capability etc. 
- ******************************************************************************/ -typedef struct spd_pm_ops { - void (*svc_on)(uint64_t target_cpu); - int32_t (*svc_off)(uint64_t __unused); - void (*svc_suspend)(uint64_t __unused); - void (*svc_on_finish)(uint64_t __unused); - void (*svc_suspend_finish)(uint64_t suspend_level); - int32_t (*svc_migrate)(uint64_t from_cpu, uint64_t to_cpu); - int32_t (*svc_migrate_info)(uint64_t *resident_cpu); - void (*svc_system_off)(void); - void (*svc_system_reset)(void); -} spd_pm_ops_t; - -/******************************************************************************* - * Function & Data prototypes - ******************************************************************************/ -unsigned int psci_version(void); -int psci_affinity_info(unsigned long, unsigned int); -int psci_migrate(unsigned long); -int psci_migrate_info_type(void); -long psci_migrate_info_up_cpu(void); -int psci_cpu_on(unsigned long, - unsigned long, - unsigned long); -void __dead2 psci_power_down_wfi(void); -void psci_entrypoint(void); -void psci_register_spd_pm_hook(const spd_pm_ops_t *); - -uint64_t psci_smc_handler(uint32_t smc_fid, - uint64_t x1, - uint64_t x2, - uint64_t x3, - uint64_t x4, - void *cookie, - void *handle, - uint64_t flags); - -/* PSCI setup function */ -int32_t psci_setup(void); - -#endif /*__ASSEMBLY__*/ - -#endif /* __PSCI_H__ */ diff --git a/include/bl31/services/psci1.0/psci_compat.h b/include/bl31/services/psci1.0/psci_compat.h deleted file mode 100644 index cc80ae3..0000000 --- a/include/bl31/services/psci1.0/psci_compat.h +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. 
- * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __PSCI_COMPAT_H__ -#define __PSCI_COMPAT_H__ - -#include -#include - -#ifndef __ASSEMBLY__ -/* - * The below declarations are to enable compatibility for the platform ports - * using the old platform interface and psci helpers. - */ -#define PLAT_MAX_PWR_LVL PLATFORM_MAX_AFFLVL -#define PLAT_NUM_PWR_DOMAINS PLATFORM_NUM_AFFS - -/******************************************************************************* - * PSCI affinity related constants. An affinity instance could - * be present or absent physically to cater for asymmetric topologies. 
- ******************************************************************************/ -#define PSCI_AFF_ABSENT 0x0 -#define PSCI_AFF_PRESENT 0x1 - -#define PSCI_STATE_ON 0x0 -#define PSCI_STATE_OFF 0x1 -#define PSCI_STATE_ON_PENDING 0x2 -#define PSCI_STATE_SUSPEND 0x3 - -/* - * Using the compatibility platform interfaces means that the local states - * used in psci_power_state_t need to only convey whether its power down - * or standby state. The onus is on the platform port to do the right thing - * including the state coordination in case multiple power down states are - * involved. Hence if we assume 3 generic states viz, run, standby and - * power down, we can assign 1 and 2 to standby and power down respectively. - */ -#define PLAT_MAX_RET_STATE 1 -#define PLAT_MAX_OFF_STATE 2 - - -#define psci_get_pstate_afflvl(pstate) psci_get_pstate_pwrlvl(pstate) - -/* - * This array stores the 'power_state' requests of each CPU during - * CPU_SUSPEND and SYSTEM_SUSPEND which will be populated by the - * compatibility layer when appropriate platform hooks are invoked. 
- */ -extern unsigned int psci_power_state_compat[PLATFORM_CORE_COUNT]; - -/******************************************************************************* - * Structure populated by platform specific code to export routines which - * perform common low level pm functions - ******************************************************************************/ -typedef struct plat_pm_ops { - void (*affinst_standby)(unsigned int power_state); - int (*affinst_on)(unsigned long mpidr, - unsigned long sec_entrypoint, - unsigned int afflvl, - unsigned int state); - void (*affinst_off)(unsigned int afflvl, unsigned int state); - void (*affinst_suspend)(unsigned long sec_entrypoint, - unsigned int afflvl, - unsigned int state); - void (*affinst_on_finish)(unsigned int afflvl, unsigned int state); - void (*affinst_suspend_finish)(unsigned int afflvl, - unsigned int state); - void (*system_off)(void) __dead2; - void (*system_reset)(void) __dead2; - int (*validate_power_state)(unsigned int power_state); - int (*validate_ns_entrypoint)(unsigned long ns_entrypoint); - unsigned int (*get_sys_suspend_power_state)(void); -} plat_pm_ops_t; - -/******************************************************************************* - * Function & Data prototypes to enable compatibility for older platform ports - ******************************************************************************/ -int psci_get_suspend_stateid_by_mpidr(unsigned long); -int psci_get_suspend_stateid(void); -int psci_get_suspend_powerstate(void); -unsigned int psci_get_max_phys_off_afflvl(void); -int psci_get_suspend_afflvl(void); - -#endif /* ____ASSEMBLY__ */ -#endif /* __PSCI_COMPAT_H__ */ diff --git a/include/bl31/services/psci_compat.h b/include/bl31/services/psci_compat.h new file mode 100644 index 0000000..cc80ae3 --- /dev/null +++ b/include/bl31/services/psci_compat.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __PSCI_COMPAT_H__ +#define __PSCI_COMPAT_H__ + +#include +#include + +#ifndef __ASSEMBLY__ +/* + * The below declarations are to enable compatibility for the platform ports + * using the old platform interface and psci helpers. + */ +#define PLAT_MAX_PWR_LVL PLATFORM_MAX_AFFLVL +#define PLAT_NUM_PWR_DOMAINS PLATFORM_NUM_AFFS + +/******************************************************************************* + * PSCI affinity related constants. 
An affinity instance could + * be present or absent physically to cater for asymmetric topologies. + ******************************************************************************/ +#define PSCI_AFF_ABSENT 0x0 +#define PSCI_AFF_PRESENT 0x1 + +#define PSCI_STATE_ON 0x0 +#define PSCI_STATE_OFF 0x1 +#define PSCI_STATE_ON_PENDING 0x2 +#define PSCI_STATE_SUSPEND 0x3 + +/* + * Using the compatibility platform interfaces means that the local states + * used in psci_power_state_t need to only convey whether its power down + * or standby state. The onus is on the platform port to do the right thing + * including the state coordination in case multiple power down states are + * involved. Hence if we assume 3 generic states viz, run, standby and + * power down, we can assign 1 and 2 to standby and power down respectively. + */ +#define PLAT_MAX_RET_STATE 1 +#define PLAT_MAX_OFF_STATE 2 + + +#define psci_get_pstate_afflvl(pstate) psci_get_pstate_pwrlvl(pstate) + +/* + * This array stores the 'power_state' requests of each CPU during + * CPU_SUSPEND and SYSTEM_SUSPEND which will be populated by the + * compatibility layer when appropriate platform hooks are invoked. 
+ */ +extern unsigned int psci_power_state_compat[PLATFORM_CORE_COUNT]; + +/******************************************************************************* + * Structure populated by platform specific code to export routines which + * perform common low level pm functions + ******************************************************************************/ +typedef struct plat_pm_ops { + void (*affinst_standby)(unsigned int power_state); + int (*affinst_on)(unsigned long mpidr, + unsigned long sec_entrypoint, + unsigned int afflvl, + unsigned int state); + void (*affinst_off)(unsigned int afflvl, unsigned int state); + void (*affinst_suspend)(unsigned long sec_entrypoint, + unsigned int afflvl, + unsigned int state); + void (*affinst_on_finish)(unsigned int afflvl, unsigned int state); + void (*affinst_suspend_finish)(unsigned int afflvl, + unsigned int state); + void (*system_off)(void) __dead2; + void (*system_reset)(void) __dead2; + int (*validate_power_state)(unsigned int power_state); + int (*validate_ns_entrypoint)(unsigned long ns_entrypoint); + unsigned int (*get_sys_suspend_power_state)(void); +} plat_pm_ops_t; + +/******************************************************************************* + * Function & Data prototypes to enable compatibility for older platform ports + ******************************************************************************/ +int psci_get_suspend_stateid_by_mpidr(unsigned long); +int psci_get_suspend_stateid(void); +int psci_get_suspend_powerstate(void); +unsigned int psci_get_max_phys_off_afflvl(void); +int psci_get_suspend_afflvl(void); + +#endif /* ____ASSEMBLY__ */ +#endif /* __PSCI_COMPAT_H__ */ diff --git a/include/common/asm_macros.S b/include/common/asm_macros.S index 902127e..f959eb4 100644 --- a/include/common/asm_macros.S +++ b/include/common/asm_macros.S @@ -115,6 +115,7 @@ .space ((\_count) * (\_size)), 0 .endm +#if ENABLE_PLAT_COMPAT /* * This macro calculates the base address of an MP stack using the * 
platform_get_core_pos() index, the name of the stack storage and @@ -129,6 +130,7 @@ mov x1, #\_size madd x0, x0, x1, x2 .endm +#endif /* * This macro calculates the base address of the current CPU's MP stack diff --git a/include/plat/common/platform.h b/include/plat/common/platform.h index 469d46b..f054cd0 100644 --- a/include/plat/common/platform.h +++ b/include/plat/common/platform.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -31,13 +31,14 @@ #ifndef __PLATFORM_H__ #define __PLATFORM_H__ +#include #include +#include /******************************************************************************* * Forward declarations ******************************************************************************/ -struct plat_pm_ops; struct meminfo; struct image_info; struct entry_point_info; @@ -59,6 +60,8 @@ uintptr_t *dev_handle, uintptr_t *image_spec); unsigned long plat_get_ns_image_entrypoint(void); +unsigned int plat_my_core_pos(void); +int plat_core_pos_by_mpidr(u_register_t mpidr); /******************************************************************************* * Mandatory interrupt management functions @@ -74,8 +77,7 @@ /******************************************************************************* * Optional common functions (may be overridden) ******************************************************************************/ -unsigned int platform_get_core_pos(unsigned long mpidr); -unsigned long platform_get_stack(unsigned long mpidr); +unsigned long plat_get_my_stack(void); void plat_report_exception(unsigned long); int plat_crash_console_init(void); int plat_crash_console_putc(int c); @@ -181,9 +183,16 @@ 
/******************************************************************************* * Mandatory PSCI functions (BL3-1) ******************************************************************************/ -int platform_setup_pm(const struct plat_pm_ops **); -unsigned int plat_get_aff_count(unsigned int, unsigned long); -unsigned int plat_get_aff_state(unsigned int, unsigned long); +int plat_setup_psci_ops(uintptr_t sec_entrypoint, + const struct plat_psci_ops **); +const unsigned char *plat_get_power_domain_tree_desc(void); + +/******************************************************************************* + * Optional PSCI functions (BL3-1). + ******************************************************************************/ +plat_local_state_t plat_get_target_pwr_state(unsigned int lvl, + const plat_local_state_t *states, + unsigned int ncpu); /******************************************************************************* * Optional BL3-1 functions (may be overridden) @@ -201,4 +210,24 @@ int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len, unsigned int *flags); +#if ENABLE_PLAT_COMPAT +/* + * The below declarations are to enable compatibility for the platform ports + * using the old platform interface. 
+ */ + +/******************************************************************************* + * Optional common functions (may be overridden) + ******************************************************************************/ +unsigned int platform_get_core_pos(unsigned long mpidr); + +/******************************************************************************* + * Mandatory PSCI Compatibility functions (BL3-1) + ******************************************************************************/ +int platform_setup_pm(const plat_pm_ops_t **); + +unsigned int plat_get_aff_count(unsigned int, unsigned long); +unsigned int plat_get_aff_state(unsigned int, unsigned long); +#endif /* __ENABLE_PLAT_COMPAT__ */ + #endif /* __PLATFORM_H__ */ diff --git a/include/plat/common/psci1.0/platform.h b/include/plat/common/psci1.0/platform.h deleted file mode 100644 index f054cd0..0000000 --- a/include/plat/common/psci1.0/platform.h +++ /dev/null @@ -1,233 +0,0 @@ -/* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __PLATFORM_H__ -#define __PLATFORM_H__ - -#include -#include -#include - - -/******************************************************************************* - * Forward declarations - ******************************************************************************/ -struct meminfo; -struct image_info; -struct entry_point_info; -struct bl31_params; - -/******************************************************************************* - * plat_get_rotpk_info() flags - ******************************************************************************/ -#define ROTPK_IS_HASH (1 << 0) - -/******************************************************************************* - * Function declarations - ******************************************************************************/ -/******************************************************************************* - * Mandatory common functions - ******************************************************************************/ -uint64_t plat_get_syscnt_freq(void); -int plat_get_image_source(unsigned int image_id, - uintptr_t *dev_handle, - uintptr_t *image_spec); -unsigned long plat_get_ns_image_entrypoint(void); -unsigned int plat_my_core_pos(void); -int plat_core_pos_by_mpidr(u_register_t mpidr); - -/******************************************************************************* - * Mandatory interrupt management functions - 
******************************************************************************/ -uint32_t plat_ic_get_pending_interrupt_id(void); -uint32_t plat_ic_get_pending_interrupt_type(void); -uint32_t plat_ic_acknowledge_interrupt(void); -uint32_t plat_ic_get_interrupt_type(uint32_t id); -void plat_ic_end_of_interrupt(uint32_t id); -uint32_t plat_interrupt_type_to_line(uint32_t type, - uint32_t security_state); - -/******************************************************************************* - * Optional common functions (may be overridden) - ******************************************************************************/ -unsigned long plat_get_my_stack(void); -void plat_report_exception(unsigned long); -int plat_crash_console_init(void); -int plat_crash_console_putc(int c); - -/******************************************************************************* - * Mandatory BL1 functions - ******************************************************************************/ -void bl1_early_platform_setup(void); -void bl1_plat_arch_setup(void); -void bl1_platform_setup(void); -struct meminfo *bl1_plat_sec_mem_layout(void); - -/* - * This function allows the platform to change the entrypoint information for - * BL2, after BL1 has loaded BL2 into memory but before BL2 is executed. 
- */ -void bl1_plat_set_bl2_ep_info(struct image_info *image, - struct entry_point_info *ep); - -/******************************************************************************* - * Optional BL1 functions (may be overridden) - ******************************************************************************/ -void bl1_init_bl2_mem_layout(const struct meminfo *bl1_mem_layout, - struct meminfo *bl2_mem_layout); - -/******************************************************************************* - * Mandatory BL2 functions - ******************************************************************************/ -void bl2_early_platform_setup(struct meminfo *mem_layout); -void bl2_plat_arch_setup(void); -void bl2_platform_setup(void); -struct meminfo *bl2_plat_sec_mem_layout(void); - -/* - * This function returns a pointer to the shared memory that the platform has - * kept aside to pass trusted firmware related information that BL3-1 - * could need - */ -struct bl31_params *bl2_plat_get_bl31_params(void); - -/* - * This function returns a pointer to the shared memory that the platform - * has kept to point to entry point information of BL31 to BL2 - */ -struct entry_point_info *bl2_plat_get_bl31_ep_info(void); - -/* - * This function flushes to main memory all the params that are - * passed to BL3-1 - */ -void bl2_plat_flush_bl31_params(void); - -/* - * The next 2 functions allow the platform to change the entrypoint information - * for the mandatory 3rd level BL images, BL3-1 and BL3-3. This is done after - * BL2 has loaded those images into memory but before BL3-1 is executed. 
- */ -void bl2_plat_set_bl31_ep_info(struct image_info *image, - struct entry_point_info *ep); - -void bl2_plat_set_bl33_ep_info(struct image_info *image, - struct entry_point_info *ep); - -/* Gets the memory layout for BL3-3 */ -void bl2_plat_get_bl33_meminfo(struct meminfo *mem_info); - -/******************************************************************************* - * Conditionally mandatory BL2 functions: must be implemented if BL3-0 image - * is supported - ******************************************************************************/ -/* Gets the memory layout for BL3-0 */ -void bl2_plat_get_bl30_meminfo(struct meminfo *mem_info); - -/* - * This function is called after loading BL3-0 image and it is used to perform - * any platform-specific actions required to handle the SCP firmware. - */ -int bl2_plat_handle_bl30(struct image_info *bl30_image_info); - -/******************************************************************************* - * Conditionally mandatory BL2 functions: must be implemented if BL3-2 image - * is supported - ******************************************************************************/ -void bl2_plat_set_bl32_ep_info(struct image_info *image, - struct entry_point_info *ep); - -/* Gets the memory layout for BL3-2 */ -void bl2_plat_get_bl32_meminfo(struct meminfo *mem_info); - -/******************************************************************************* - * Optional BL2 functions (may be overridden) - ******************************************************************************/ - -/******************************************************************************* - * Mandatory BL3-1 functions - ******************************************************************************/ -void bl31_early_platform_setup(struct bl31_params *from_bl2, - void *plat_params_from_bl2); -void bl31_plat_arch_setup(void); -void bl31_platform_setup(void); -struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type); - 
-/******************************************************************************* - * Mandatory PSCI functions (BL3-1) - ******************************************************************************/ -int plat_setup_psci_ops(uintptr_t sec_entrypoint, - const struct plat_psci_ops **); -const unsigned char *plat_get_power_domain_tree_desc(void); - -/******************************************************************************* - * Optional PSCI functions (BL3-1). - ******************************************************************************/ -plat_local_state_t plat_get_target_pwr_state(unsigned int lvl, - const plat_local_state_t *states, - unsigned int ncpu); - -/******************************************************************************* - * Optional BL3-1 functions (may be overridden) - ******************************************************************************/ -void bl31_plat_enable_mmu(uint32_t flags); - -/******************************************************************************* - * Optional BL3-2 functions (may be overridden) - ******************************************************************************/ -void bl32_plat_enable_mmu(uint32_t flags); - -/******************************************************************************* - * Trusted Board Boot functions - ******************************************************************************/ -int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len, - unsigned int *flags); - -#if ENABLE_PLAT_COMPAT -/* - * The below declarations are to enable compatibility for the platform ports - * using the old platform interface. 
- */ - -/******************************************************************************* - * Optional common functions (may be overridden) - ******************************************************************************/ -unsigned int platform_get_core_pos(unsigned long mpidr); - -/******************************************************************************* - * Mandatory PSCI Compatibility functions (BL3-1) - ******************************************************************************/ -int platform_setup_pm(const plat_pm_ops_t **); - -unsigned int plat_get_aff_count(unsigned int, unsigned long); -unsigned int plat_get_aff_state(unsigned int, unsigned long); -#endif /* __ENABLE_PLAT_COMPAT__ */ - -#endif /* __PLATFORM_H__ */ diff --git a/plat/common/aarch64/platform_helpers.S b/plat/common/aarch64/platform_helpers.S index c117449..b88603c 100644 --- a/plat/common/aarch64/platform_helpers.S +++ b/plat/common/aarch64/platform_helpers.S @@ -32,9 +32,6 @@ #include #include - - .weak platform_get_core_pos - .weak platform_check_mpidr .weak plat_report_exception .weak plat_crash_console_init .weak plat_crash_console_putc @@ -42,29 +39,6 @@ .weak plat_disable_acp /* ----------------------------------------------------- - * int platform_get_core_pos(int mpidr); - * With this function: CorePos = (ClusterId * 4) + - * CoreId - * ----------------------------------------------------- - */ -func platform_get_core_pos - and x1, x0, #MPIDR_CPU_MASK - and x0, x0, #MPIDR_CLUSTER_MASK - add x0, x1, x0, LSR #6 - ret -endfunc platform_get_core_pos - - /* ----------------------------------------------------- - * Placeholder function which should be redefined by - * each platform. - * ----------------------------------------------------- - */ -func platform_check_mpidr - mov x0, xzr - ret -endfunc platform_check_mpidr - - /* ----------------------------------------------------- * Placeholder function which should be redefined by * each platform. 
* ----------------------------------------------------- diff --git a/plat/common/aarch64/platform_mp_stack.S b/plat/common/aarch64/platform_mp_stack.S index b1f7b6d..6cfa069 100644 --- a/plat/common/aarch64/platform_mp_stack.S +++ b/plat/common/aarch64/platform_mp_stack.S @@ -37,9 +37,9 @@ #if ENABLE_PLAT_COMPAT .globl plat_get_my_stack .globl plat_set_my_stack -#else .weak platform_get_stack .weak platform_set_stack +#else .weak plat_get_my_stack .weak plat_set_my_stack #endif /*__ENABLE_PLAT_COMPAT__*/ @@ -80,7 +80,6 @@ b platform_set_stack endfunc plat_set_my_stack -#else /* ----------------------------------------------------- * unsigned long platform_get_stack (unsigned long mpidr) * @@ -108,6 +107,8 @@ ret x9 endfunc platform_set_stack +#else + /* ----------------------------------------------------- * unsigned long plat_get_my_stack () * diff --git a/services/std_svc/psci/psci_afflvl_off.c b/services/std_svc/psci/psci_afflvl_off.c deleted file mode 100644 index 7eb9688..0000000 --- a/services/std_svc/psci/psci_afflvl_off.c +++ /dev/null @@ -1,248 +0,0 @@ -/* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include "psci_private.h" - -typedef void (*afflvl_off_handler_t)(aff_map_node_t *node); - -/******************************************************************************* - * The next three functions implement a handler for each supported affinity - * level which is called when that affinity level is turned off. - ******************************************************************************/ -static void psci_afflvl0_off(aff_map_node_t *cpu_node) -{ - assert(cpu_node->level == MPIDR_AFFLVL0); - - /* - * Arch. management. Perform the necessary steps to flush all - * cpu caches. - */ - psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL0); - - /* - * Plat. management: Perform platform specific actions to turn this - * cpu off e.g. exit cpu coherency, program the power controller etc. - */ - psci_plat_pm_ops->affinst_off(cpu_node->level, - psci_get_phys_state(cpu_node)); -} - -static void psci_afflvl1_off(aff_map_node_t *cluster_node) -{ - /* Sanity check the cluster level */ - assert(cluster_node->level == MPIDR_AFFLVL1); - - /* - * Arch. Management. Flush all levels of caches to PoC if - * the cluster is to be shutdown. 
- */ - psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL1); - - /* - * Plat. Management. Allow the platform to do its cluster - * specific bookeeping e.g. turn off interconnect coherency, - * program the power controller etc. - */ - psci_plat_pm_ops->affinst_off(cluster_node->level, - psci_get_phys_state(cluster_node)); -} - -static void psci_afflvl2_off(aff_map_node_t *system_node) -{ - /* Cannot go beyond this level */ - assert(system_node->level == MPIDR_AFFLVL2); - - /* - * Keep the physical state of the system handy to decide what - * action needs to be taken - */ - - /* - * Arch. Management. Flush all levels of caches to PoC if - * the system is to be shutdown. - */ - psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL2); - - /* - * Plat. Management : Allow the platform to do its bookeeping - * at this affinity level - */ - psci_plat_pm_ops->affinst_off(system_node->level, - psci_get_phys_state(system_node)); -} - -static const afflvl_off_handler_t psci_afflvl_off_handlers[] = { - psci_afflvl0_off, - psci_afflvl1_off, - psci_afflvl2_off, -}; - -/******************************************************************************* - * This function takes an array of pointers to affinity instance nodes in the - * topology tree and calls the off handler for the corresponding affinity - * levels - ******************************************************************************/ -static void psci_call_off_handlers(aff_map_node_t *mpidr_nodes[], - int start_afflvl, - int end_afflvl) -{ - int level; - aff_map_node_t *node; - - for (level = start_afflvl; level <= end_afflvl; level++) { - node = mpidr_nodes[level]; - if (node == NULL) - continue; - - psci_afflvl_off_handlers[level](node); - } -} - -/******************************************************************************* - * Top level handler which is called when a cpu wants to power itself down. - * It's assumed that along with turning the cpu off, higher affinity levels will - * be turned off as far as possible. 
It traverses through all the affinity - * levels performing generic, architectural, platform setup and state management - * e.g. for a cluster that's to be powered off, it will call the platform - * specific code which will disable coherency at the interconnect level if the - * cpu is the last in the cluster. For a cpu it could mean programming the power - * the power controller etc. - * - * The state of all the relevant affinity levels is changed prior to calling the - * affinity level specific handlers as their actions would depend upon the state - * the affinity level is about to enter. - * - * The affinity level specific handlers are called in ascending order i.e. from - * the lowest to the highest affinity level implemented by the platform because - * to turn off affinity level X it is neccesary to turn off affinity level X - 1 - * first. - ******************************************************************************/ -int psci_afflvl_off(int start_afflvl, - int end_afflvl) -{ - int rc; - mpidr_aff_map_nodes_t mpidr_nodes; - unsigned int max_phys_off_afflvl; - - /* - * This function must only be called on platforms where the - * CPU_OFF platform hooks have been implemented. - */ - assert(psci_plat_pm_ops->affinst_off); - - /* - * Collect the pointers to the nodes in the topology tree for - * each affinity instance in the mpidr. If this function does - * not return successfully then either the mpidr or the affinity - * levels are incorrect. Either way, this an internal TF error - * therefore assert. - */ - rc = psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK, - start_afflvl, - end_afflvl, - mpidr_nodes); - assert(rc == PSCI_E_SUCCESS); - - /* - * This function acquires the lock corresponding to each affinity - * level so that by the time all locks are taken, the system topology - * is snapshot and state management can be done safely. 
- */ - psci_acquire_afflvl_locks(start_afflvl, - end_afflvl, - mpidr_nodes); - - - /* - * Call the cpu off handler registered by the Secure Payload Dispatcher - * to let it do any bookkeeping. Assume that the SPD always reports an - * E_DENIED error if SP refuse to power down - */ - if (psci_spd_pm && psci_spd_pm->svc_off) { - rc = psci_spd_pm->svc_off(0); - if (rc) - goto exit; - } - - /* - * This function updates the state of each affinity instance - * corresponding to the mpidr in the range of affinity levels - * specified. - */ - psci_do_afflvl_state_mgmt(start_afflvl, - end_afflvl, - mpidr_nodes, - PSCI_STATE_OFF); - - max_phys_off_afflvl = psci_find_max_phys_off_afflvl(start_afflvl, - end_afflvl, - mpidr_nodes); - assert(max_phys_off_afflvl != PSCI_INVALID_DATA); - - /* Stash the highest affinity level that will enter the OFF state. */ - psci_set_max_phys_off_afflvl(max_phys_off_afflvl); - - /* Perform generic, architecture and platform specific handling */ - psci_call_off_handlers(mpidr_nodes, - start_afflvl, - end_afflvl); - - /* - * Invalidate the entry for the highest affinity level stashed earlier. - * This ensures that any reads of this variable outside the power - * up/down sequences return PSCI_INVALID_DATA. - * - */ - psci_set_max_phys_off_afflvl(PSCI_INVALID_DATA); - -exit: - /* - * Release the locks corresponding to each affinity level in the - * reverse order to which they were acquired. - */ - psci_release_afflvl_locks(start_afflvl, - end_afflvl, - mpidr_nodes); - - /* - * Check if all actions needed to safely power down this cpu have - * successfully completed. Enter a wfi loop which will allow the - * power controller to physically power down this cpu. 
- */ - if (rc == PSCI_E_SUCCESS) - psci_power_down_wfi(); - - return rc; -} diff --git a/services/std_svc/psci/psci_afflvl_on.c b/services/std_svc/psci/psci_afflvl_on.c deleted file mode 100644 index 0dbd0e0..0000000 --- a/services/std_svc/psci/psci_afflvl_on.c +++ /dev/null @@ -1,405 +0,0 @@ -/* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "psci_private.h" - -typedef int (*afflvl_on_handler_t)(unsigned long target_cpu, - aff_map_node_t *node); - -/******************************************************************************* - * This function checks whether a cpu which has been requested to be turned on - * is OFF to begin with. - ******************************************************************************/ -static int cpu_on_validate_state(unsigned int psci_state) -{ - if (psci_state == PSCI_STATE_ON || psci_state == PSCI_STATE_SUSPEND) - return PSCI_E_ALREADY_ON; - - if (psci_state == PSCI_STATE_ON_PENDING) - return PSCI_E_ON_PENDING; - - assert(psci_state == PSCI_STATE_OFF); - return PSCI_E_SUCCESS; -} - -/******************************************************************************* - * Handler routine to turn a cpu on. It takes care of any generic, architectural - * or platform specific setup required. - * TODO: Split this code across separate handlers for each type of setup? - ******************************************************************************/ -static int psci_afflvl0_on(unsigned long target_cpu, - aff_map_node_t *cpu_node) -{ - unsigned long psci_entrypoint; - - /* Sanity check to safeguard against data corruption */ - assert(cpu_node->level == MPIDR_AFFLVL0); - - /* Set the secure world (EL3) re-entry point after BL1 */ - psci_entrypoint = (unsigned long) psci_aff_on_finish_entry; - - /* - * Plat. management: Give the platform the current state - * of the target cpu to allow it to perform the necessary - * steps to power on. - */ - return psci_plat_pm_ops->affinst_on(target_cpu, - psci_entrypoint, - cpu_node->level, - psci_get_phys_state(cpu_node)); -} - -/******************************************************************************* - * Handler routine to turn a cluster on. It takes care or any generic, arch. - * or platform specific setup required. 
- * TODO: Split this code across separate handlers for each type of setup? - ******************************************************************************/ -static int psci_afflvl1_on(unsigned long target_cpu, - aff_map_node_t *cluster_node) -{ - unsigned long psci_entrypoint; - - assert(cluster_node->level == MPIDR_AFFLVL1); - - /* - * There is no generic and arch. specific cluster - * management required - */ - - /* State management: Is not required while turning a cluster on */ - - /* - * Plat. management: Give the platform the current state - * of the target cpu to allow it to perform the necessary - * steps to power on. - */ - psci_entrypoint = (unsigned long) psci_aff_on_finish_entry; - return psci_plat_pm_ops->affinst_on(target_cpu, - psci_entrypoint, - cluster_node->level, - psci_get_phys_state(cluster_node)); -} - -/******************************************************************************* - * Handler routine to turn a cluster of clusters on. It takes care or any - * generic, arch. or platform specific setup required. - * TODO: Split this code across separate handlers for each type of setup? - ******************************************************************************/ -static int psci_afflvl2_on(unsigned long target_cpu, - aff_map_node_t *system_node) -{ - unsigned long psci_entrypoint; - - /* Cannot go beyond affinity level 2 in this psci imp. */ - assert(system_node->level == MPIDR_AFFLVL2); - - /* - * There is no generic and arch. specific system management - * required - */ - - /* State management: Is not required while turning a system on */ - - /* - * Plat. management: Give the platform the current state - * of the target cpu to allow it to perform the necessary - * steps to power on. 
- */ - psci_entrypoint = (unsigned long) psci_aff_on_finish_entry; - return psci_plat_pm_ops->affinst_on(target_cpu, - psci_entrypoint, - system_node->level, - psci_get_phys_state(system_node)); -} - -/* Private data structure to make this handlers accessible through indexing */ -static const afflvl_on_handler_t psci_afflvl_on_handlers[] = { - psci_afflvl0_on, - psci_afflvl1_on, - psci_afflvl2_on, -}; - -/******************************************************************************* - * This function takes an array of pointers to affinity instance nodes in the - * topology tree and calls the on handler for the corresponding affinity - * levels - ******************************************************************************/ -static int psci_call_on_handlers(aff_map_node_t *target_cpu_nodes[], - int start_afflvl, - int end_afflvl, - unsigned long target_cpu) -{ - int rc = PSCI_E_INVALID_PARAMS, level; - aff_map_node_t *node; - - for (level = end_afflvl; level >= start_afflvl; level--) { - node = target_cpu_nodes[level]; - if (node == NULL) - continue; - - /* - * TODO: In case of an error should there be a way - * of undoing what we might have setup at higher - * affinity levels. - */ - rc = psci_afflvl_on_handlers[level](target_cpu, - node); - if (rc != PSCI_E_SUCCESS) - break; - } - - return rc; -} - -/******************************************************************************* - * Generic handler which is called to physically power on a cpu identified by - * its mpidr. It traverses through all the affinity levels performing generic, - * architectural, platform setup and state management e.g. for a cpu that is - * to be powered on, it will ensure that enough information is stashed for it - * to resume execution in the non-secure security state. - * - * The state of all the relevant affinity levels is changed after calling the - * affinity level specific handlers as their actions would depend upon the state - * the affinity level is currently in. 
- * - * The affinity level specific handlers are called in descending order i.e. from - * the highest to the lowest affinity level implemented by the platform because - * to turn on affinity level X it is necessary to turn on affinity level X + 1 - * first. - ******************************************************************************/ -int psci_afflvl_on(unsigned long target_cpu, - entry_point_info_t *ep, - int start_afflvl, - int end_afflvl) -{ - int rc; - mpidr_aff_map_nodes_t target_cpu_nodes; - - /* - * This function must only be called on platforms where the - * CPU_ON platform hooks have been implemented. - */ - assert(psci_plat_pm_ops->affinst_on && - psci_plat_pm_ops->affinst_on_finish); - - /* - * Collect the pointers to the nodes in the topology tree for - * each affinity instance in the mpidr. If this function does - * not return successfully then either the mpidr or the affinity - * levels are incorrect. - */ - rc = psci_get_aff_map_nodes(target_cpu, - start_afflvl, - end_afflvl, - target_cpu_nodes); - assert(rc == PSCI_E_SUCCESS); - - /* - * This function acquires the lock corresponding to each affinity - * level so that by the time all locks are taken, the system topology - * is snapshot and state management can be done safely. - */ - psci_acquire_afflvl_locks(start_afflvl, - end_afflvl, - target_cpu_nodes); - - /* - * Generic management: Ensure that the cpu is off to be - * turned on. - */ - rc = cpu_on_validate_state(psci_get_state( - target_cpu_nodes[MPIDR_AFFLVL0])); - if (rc != PSCI_E_SUCCESS) - goto exit; - - /* - * Call the cpu on handler registered by the Secure Payload Dispatcher - * to let it do any bookeeping. If the handler encounters an error, it's - * expected to assert within - */ - if (psci_spd_pm && psci_spd_pm->svc_on) - psci_spd_pm->svc_on(target_cpu); - - /* - * This function updates the state of each affinity instance - * corresponding to the mpidr in the range of affinity levels - * specified. 
- */ - psci_do_afflvl_state_mgmt(start_afflvl, - end_afflvl, - target_cpu_nodes, - PSCI_STATE_ON_PENDING); - - /* Perform generic, architecture and platform specific handling. */ - rc = psci_call_on_handlers(target_cpu_nodes, - start_afflvl, - end_afflvl, - target_cpu); - - assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL); - - if (rc == PSCI_E_SUCCESS) - /* Store the re-entry information for the non-secure world. */ - cm_init_context(target_cpu, ep); - else - /* Restore the state on error. */ - psci_do_afflvl_state_mgmt(start_afflvl, - end_afflvl, - target_cpu_nodes, - PSCI_STATE_OFF); -exit: - /* - * This loop releases the lock corresponding to each affinity level - * in the reverse order to which they were acquired. - */ - psci_release_afflvl_locks(start_afflvl, - end_afflvl, - target_cpu_nodes); - - return rc; -} - -/******************************************************************************* - * The following functions finish an earlier affinity power on request. They - * are called by the common finisher routine in psci_common.c. - ******************************************************************************/ -static void psci_afflvl0_on_finish(aff_map_node_t *cpu_node) -{ - unsigned int plat_state, state; - - assert(cpu_node->level == MPIDR_AFFLVL0); - - /* Ensure we have been explicitly woken up by another cpu */ - state = psci_get_state(cpu_node); - assert(state == PSCI_STATE_ON_PENDING); - - /* - * Plat. management: Perform the platform specific actions - * for this cpu e.g. enabling the gic or zeroing the mailbox - * register. The actual state of this cpu has already been - * changed. - */ - - /* Get the physical state of this cpu */ - plat_state = get_phys_state(state); - psci_plat_pm_ops->affinst_on_finish(cpu_node->level, - plat_state); - - /* - * Arch. management: Enable data cache and manage stack memory - */ - psci_do_pwrup_cache_maintenance(); - - /* - * All the platform specific actions for turning this cpu - * on have completed. 
Perform enough arch.initialization - * to run in the non-secure address space. - */ - bl31_arch_setup(); - - /* - * Call the cpu on finish handler registered by the Secure Payload - * Dispatcher to let it do any bookeeping. If the handler encounters an - * error, it's expected to assert within - */ - if (psci_spd_pm && psci_spd_pm->svc_on_finish) - psci_spd_pm->svc_on_finish(0); - - /* - * Generic management: Now we just need to retrieve the - * information that we had stashed away during the cpu_on - * call to set this cpu on its way. - */ - cm_prepare_el3_exit(NON_SECURE); - - /* Clean caches before re-entering normal world */ - dcsw_op_louis(DCCSW); -} - -static void psci_afflvl1_on_finish(aff_map_node_t *cluster_node) -{ - unsigned int plat_state; - - assert(cluster_node->level == MPIDR_AFFLVL1); - - /* - * Plat. management: Perform the platform specific actions - * as per the old state of the cluster e.g. enabling - * coherency at the interconnect depends upon the state with - * which this cluster was powered up. If anything goes wrong - * then assert as there is no way to recover from this - * situation. - */ - plat_state = psci_get_phys_state(cluster_node); - psci_plat_pm_ops->affinst_on_finish(cluster_node->level, - plat_state); -} - - -static void psci_afflvl2_on_finish(aff_map_node_t *system_node) -{ - unsigned int plat_state; - - /* Cannot go beyond this affinity level */ - assert(system_node->level == MPIDR_AFFLVL2); - - /* - * Currently, there are no architectural actions to perform - * at the system level. - */ - - /* - * Plat. management: Perform the platform specific actions - * as per the old state of the cluster e.g. enabling - * coherency at the interconnect depends upon the state with - * which this cluster was powered up. If anything goes wrong - * then assert as there is no way to recover from this - * situation. 
- */ - plat_state = psci_get_phys_state(system_node); - psci_plat_pm_ops->affinst_on_finish(system_node->level, - plat_state); -} - -const afflvl_power_on_finisher_t psci_afflvl_on_finishers[] = { - psci_afflvl0_on_finish, - psci_afflvl1_on_finish, - psci_afflvl2_on_finish, -}; diff --git a/services/std_svc/psci/psci_afflvl_suspend.c b/services/std_svc/psci/psci_afflvl_suspend.c deleted file mode 100644 index 76e8c90..0000000 --- a/services/std_svc/psci/psci_afflvl_suspend.c +++ /dev/null @@ -1,469 +0,0 @@ -/* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "psci_private.h" - -typedef void (*afflvl_suspend_handler_t)(aff_map_node_t *node); - -/******************************************************************************* - * This function saves the power state parameter passed in the current PSCI - * cpu_suspend call in the per-cpu data array. - ******************************************************************************/ -void psci_set_suspend_power_state(unsigned int power_state) -{ - set_cpu_data(psci_svc_cpu_data.power_state, power_state); - flush_cpu_data(psci_svc_cpu_data.power_state); -} - -/******************************************************************************* - * This function gets the affinity level till which the current cpu could be - * powered down during a cpu_suspend call. Returns PSCI_INVALID_DATA if the - * power state is invalid. - ******************************************************************************/ -int psci_get_suspend_afflvl(void) -{ - unsigned int power_state; - - power_state = get_cpu_data(psci_svc_cpu_data.power_state); - - return ((power_state == PSCI_INVALID_DATA) ? 
- power_state : psci_get_pstate_afflvl(power_state)); -} - -/******************************************************************************* - * This function gets the state id of the current cpu from the power state - * parameter saved in the per-cpu data array. Returns PSCI_INVALID_DATA if the - * power state saved is invalid. - ******************************************************************************/ -int psci_get_suspend_stateid(void) -{ - unsigned int power_state; - - power_state = get_cpu_data(psci_svc_cpu_data.power_state); - - return ((power_state == PSCI_INVALID_DATA) ? - power_state : psci_get_pstate_id(power_state)); -} - -/******************************************************************************* - * This function gets the state id of the cpu specified by the 'mpidr' parameter - * from the power state parameter saved in the per-cpu data array. Returns - * PSCI_INVALID_DATA if the power state saved is invalid. - ******************************************************************************/ -int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr) -{ - unsigned int power_state; - - power_state = get_cpu_data_by_mpidr(mpidr, - psci_svc_cpu_data.power_state); - - return ((power_state == PSCI_INVALID_DATA) ? - power_state : psci_get_pstate_id(power_state)); -} - -/******************************************************************************* - * The next three functions implement a handler for each supported affinity - * level which is called when that affinity level is about to be suspended. - ******************************************************************************/ -static void psci_afflvl0_suspend(aff_map_node_t *cpu_node) -{ - unsigned long psci_entrypoint; - - /* Sanity check to safeguard against data corruption */ - assert(cpu_node->level == MPIDR_AFFLVL0); - - /* Set the secure world (EL3) re-entry point after BL1 */ - psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry; - - /* - * Arch. management. 
Perform the necessary steps to flush all - * cpu caches. - */ - psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL0); - - /* - * Plat. management: Allow the platform to perform the - * necessary actions to turn off this cpu e.g. set the - * platform defined mailbox with the psci entrypoint, - * program the power controller etc. - */ - psci_plat_pm_ops->affinst_suspend(psci_entrypoint, - cpu_node->level, - psci_get_phys_state(cpu_node)); -} - -static void psci_afflvl1_suspend(aff_map_node_t *cluster_node) -{ - unsigned int plat_state; - unsigned long psci_entrypoint; - - /* Sanity check the cluster level */ - assert(cluster_node->level == MPIDR_AFFLVL1); - - /* - * Arch. management: Flush all levels of caches to PoC if the - * cluster is to be shutdown. - */ - psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL1); - - /* - * Plat. Management. Allow the platform to do its cluster specific - * bookeeping e.g. turn off interconnect coherency, program the power - * controller etc. Sending the psci entrypoint is currently redundant - * beyond affinity level 0 but one never knows what a platform might - * do. Also it allows us to keep the platform handler prototype the - * same. - */ - plat_state = psci_get_phys_state(cluster_node); - psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry; - psci_plat_pm_ops->affinst_suspend(psci_entrypoint, - cluster_node->level, - plat_state); -} - - -static void psci_afflvl2_suspend(aff_map_node_t *system_node) -{ - unsigned int plat_state; - unsigned long psci_entrypoint; - - /* Cannot go beyond this */ - assert(system_node->level == MPIDR_AFFLVL2); - - /* - * Keep the physical state of the system handy to decide what - * action needs to be taken - */ - plat_state = psci_get_phys_state(system_node); - - /* - * Arch. management: Flush all levels of caches to PoC if the - * system is to be shutdown. - */ - psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL2); - - /* - * Plat. 
Management : Allow the platform to do its bookeeping - * at this affinity level - */ - - /* - * Sending the psci entrypoint is currently redundant - * beyond affinity level 0 but one never knows what a - * platform might do. Also it allows us to keep the - * platform handler prototype the same. - */ - plat_state = psci_get_phys_state(system_node); - psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry; - psci_plat_pm_ops->affinst_suspend(psci_entrypoint, - system_node->level, - plat_state); -} - -static const afflvl_suspend_handler_t psci_afflvl_suspend_handlers[] = { - psci_afflvl0_suspend, - psci_afflvl1_suspend, - psci_afflvl2_suspend, -}; - -/******************************************************************************* - * This function takes an array of pointers to affinity instance nodes in the - * topology tree and calls the suspend handler for the corresponding affinity - * levels - ******************************************************************************/ -static void psci_call_suspend_handlers(aff_map_node_t *mpidr_nodes[], - int start_afflvl, - int end_afflvl) -{ - int level; - aff_map_node_t *node; - - for (level = start_afflvl; level <= end_afflvl; level++) { - node = mpidr_nodes[level]; - if (node == NULL) - continue; - - psci_afflvl_suspend_handlers[level](node); - } -} - -/******************************************************************************* - * Top level handler which is called when a cpu wants to suspend its execution. - * It is assumed that along with turning the cpu off, higher affinity levels - * until the target affinity level will be turned off as well. It traverses - * through all the affinity levels performing generic, architectural, platform - * setup and state management e.g. for a cluster that's to be suspended, it will - * call the platform specific code which will disable coherency at the - * interconnect level if the cpu is the last in the cluster. 
For a cpu it could - * mean programming the power controller etc. - * - * The state of all the relevant affinity levels is changed prior to calling the - * affinity level specific handlers as their actions would depend upon the state - * the affinity level is about to enter. - * - * The affinity level specific handlers are called in ascending order i.e. from - * the lowest to the highest affinity level implemented by the platform because - * to turn off affinity level X it is neccesary to turn off affinity level X - 1 - * first. - * - * All the required parameter checks are performed at the beginning and after - * the state transition has been done, no further error is expected and it - * is not possible to undo any of the actions taken beyond that point. - ******************************************************************************/ -void psci_afflvl_suspend(entry_point_info_t *ep, - int start_afflvl, - int end_afflvl) -{ - int skip_wfi = 0; - mpidr_aff_map_nodes_t mpidr_nodes; - unsigned int max_phys_off_afflvl; - - /* - * This function must only be called on platforms where the - * CPU_SUSPEND platform hooks have been implemented. - */ - assert(psci_plat_pm_ops->affinst_suspend && - psci_plat_pm_ops->affinst_suspend_finish); - - /* - * Collect the pointers to the nodes in the topology tree for - * each affinity instance in the mpidr. If this function does - * not return successfully then either the mpidr or the affinity - * levels are incorrect. Either way, this an internal TF error - * therefore assert. - */ - if (psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK, - start_afflvl, end_afflvl, mpidr_nodes) != PSCI_E_SUCCESS) - assert(0); - - /* - * This function acquires the lock corresponding to each affinity - * level so that by the time all locks are taken, the system topology - * is snapshot and state management can be done safely. 
- */ - psci_acquire_afflvl_locks(start_afflvl, - end_afflvl, - mpidr_nodes); - - /* - * We check if there are any pending interrupts after the delay - * introduced by lock contention to increase the chances of early - * detection that a wake-up interrupt has fired. - */ - if (read_isr_el1()) { - skip_wfi = 1; - goto exit; - } - - /* - * Call the cpu suspend handler registered by the Secure Payload - * Dispatcher to let it do any bookeeping. If the handler encounters an - * error, it's expected to assert within - */ - if (psci_spd_pm && psci_spd_pm->svc_suspend) - psci_spd_pm->svc_suspend(0); - - /* - * This function updates the state of each affinity instance - * corresponding to the mpidr in the range of affinity levels - * specified. - */ - psci_do_afflvl_state_mgmt(start_afflvl, - end_afflvl, - mpidr_nodes, - PSCI_STATE_SUSPEND); - - max_phys_off_afflvl = psci_find_max_phys_off_afflvl(start_afflvl, - end_afflvl, - mpidr_nodes); - assert(max_phys_off_afflvl != PSCI_INVALID_DATA); - - /* Stash the highest affinity level that will be turned off */ - psci_set_max_phys_off_afflvl(max_phys_off_afflvl); - - /* - * Store the re-entry information for the non-secure world. - */ - cm_init_context(read_mpidr_el1(), ep); - - /* Perform generic, architecture and platform specific handling */ - psci_call_suspend_handlers(mpidr_nodes, - start_afflvl, - end_afflvl); - - /* - * Invalidate the entry for the highest affinity level stashed earlier. - * This ensures that any reads of this variable outside the power - * up/down sequences return PSCI_INVALID_DATA. - */ - psci_set_max_phys_off_afflvl(PSCI_INVALID_DATA); - -exit: - /* - * Release the locks corresponding to each affinity level in the - * reverse order to which they were acquired. 
- */ - psci_release_afflvl_locks(start_afflvl, - end_afflvl, - mpidr_nodes); - if (!skip_wfi) - psci_power_down_wfi(); -} - -/******************************************************************************* - * The following functions finish an earlier affinity suspend request. They - * are called by the common finisher routine in psci_common.c. - ******************************************************************************/ -static void psci_afflvl0_suspend_finish(aff_map_node_t *cpu_node) -{ - unsigned int plat_state, state; - int32_t suspend_level; - uint64_t counter_freq; - - assert(cpu_node->level == MPIDR_AFFLVL0); - - /* Ensure we have been woken up from a suspended state */ - state = psci_get_state(cpu_node); - assert(state == PSCI_STATE_SUSPEND); - - /* - * Plat. management: Perform the platform specific actions - * before we change the state of the cpu e.g. enabling the - * gic or zeroing the mailbox register. If anything goes - * wrong then assert as there is no way to recover from this - * situation. - */ - - /* Get the physical state of this cpu */ - plat_state = get_phys_state(state); - psci_plat_pm_ops->affinst_suspend_finish(cpu_node->level, - plat_state); - - /* - * Arch. management: Enable the data cache, manage stack memory and - * restore the stashed EL3 architectural context from the 'cpu_context' - * structure for this cpu. - */ - psci_do_pwrup_cache_maintenance(); - - /* Re-init the cntfrq_el0 register */ - counter_freq = plat_get_syscnt_freq(); - write_cntfrq_el0(counter_freq); - - /* - * Call the cpu suspend finish handler registered by the Secure Payload - * Dispatcher to let it do any bookeeping. 
If the handler encounters an - * error, it's expected to assert within - */ - if (psci_spd_pm && psci_spd_pm->svc_suspend) { - suspend_level = psci_get_suspend_afflvl(); - assert (suspend_level != PSCI_INVALID_DATA); - psci_spd_pm->svc_suspend_finish(suspend_level); - } - - /* Invalidate the suspend context for the node */ - psci_set_suspend_power_state(PSCI_INVALID_DATA); - - /* - * Generic management: Now we just need to retrieve the - * information that we had stashed away during the suspend - * call to set this cpu on its way. - */ - cm_prepare_el3_exit(NON_SECURE); - - /* Clean caches before re-entering normal world */ - dcsw_op_louis(DCCSW); -} - -static void psci_afflvl1_suspend_finish(aff_map_node_t *cluster_node) -{ - unsigned int plat_state; - - assert(cluster_node->level == MPIDR_AFFLVL1); - - /* - * Plat. management: Perform the platform specific actions - * as per the old state of the cluster e.g. enabling - * coherency at the interconnect depends upon the state with - * which this cluster was powered up. If anything goes wrong - * then assert as there is no way to recover from this - * situation. - */ - - /* Get the physical state of this cpu */ - plat_state = psci_get_phys_state(cluster_node); - psci_plat_pm_ops->affinst_suspend_finish(cluster_node->level, - plat_state); -} - - -static void psci_afflvl2_suspend_finish(aff_map_node_t *system_node) -{ - unsigned int plat_state; - - /* Cannot go beyond this affinity level */ - assert(system_node->level == MPIDR_AFFLVL2); - - /* - * Currently, there are no architectural actions to perform - * at the system level. - */ - - /* - * Plat. management: Perform the platform specific actions - * as per the old state of the cluster e.g. enabling - * coherency at the interconnect depends upon the state with - * which this cluster was powered up. If anything goes wrong - * then assert as there is no way to recover from this - * situation. 
- */ - - /* Get the physical state of the system */ - plat_state = psci_get_phys_state(system_node); - psci_plat_pm_ops->affinst_suspend_finish(system_node->level, - plat_state); -} - -const afflvl_power_on_finisher_t psci_afflvl_suspend_finishers[] = { - psci_afflvl0_suspend_finish, - psci_afflvl1_suspend_finish, - psci_afflvl2_suspend_finish, -}; diff --git a/services/std_svc/psci/psci_common.c b/services/std_svc/psci/psci_common.c index 1b74ff2..7f1a5fd 100644 --- a/services/std_svc/psci/psci_common.c +++ b/services/std_svc/psci/psci_common.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -45,50 +45,120 @@ */ const spd_pm_ops_t *psci_spd_pm; +/* + * PSCI requested local power state map. This array is used to store the local + * power states requested by a CPU for power levels from level 1 to + * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power + * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a + * CPU are the same. + * + * During state coordination, the platform is passed an array containing the + * local states requested for a particular non cpu power domain by each cpu + * within the domain. + * + * TODO: Dense packing of the requested states will cause cache thrashing + * when multiple power domains write to it. If we allocate the requested + * states at each power level in a cache-line aligned per-domain memory, + * the cache thrashing can be avoided. 
+ */ +static plat_local_state_t + psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT]; + + /******************************************************************************* - * Grand array that holds the platform's topology information for state - * management of affinity instances. Each node (aff_map_node) in the array - * corresponds to an affinity instance e.g. cluster, cpu within an mpidr + * Arrays that hold the platform's power domain tree information for state + * management of power domains. + * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain + * which is an ancestor of a CPU power domain. + * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain ******************************************************************************/ -aff_map_node_t psci_aff_map[PSCI_NUM_AFFS] +non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS] #if USE_COHERENT_MEM __attribute__ ((section("tzfw_coherent_mem"))) #endif ; +cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT]; + /******************************************************************************* * Pointer to functions exported by the platform to complete power mgmt. 
ops ******************************************************************************/ -const plat_pm_ops_t *psci_plat_pm_ops; +const plat_psci_ops_t *psci_plat_pm_ops; -/******************************************************************************* - * Check that the maximum affinity level supported by the platform makes sense - * ****************************************************************************/ -CASSERT(PLATFORM_MAX_AFFLVL <= MPIDR_MAX_AFFLVL && \ - PLATFORM_MAX_AFFLVL >= MPIDR_AFFLVL0, \ - assert_platform_max_afflvl_check); +/****************************************************************************** + * Check that the maximum power level supported by the platform makes sense + *****************************************************************************/ +CASSERT(PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL && \ + PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL, \ + assert_platform_max_pwrlvl_check); -/******************************************************************************* - * This function is passed an array of pointers to affinity level nodes in the - * topology tree for an mpidr. It iterates through the nodes to find the highest - * affinity level which is marked as physically powered off. - ******************************************************************************/ -uint32_t psci_find_max_phys_off_afflvl(uint32_t start_afflvl, - uint32_t end_afflvl, - aff_map_node_t *mpidr_nodes[]) +/* + * The plat_local_state used by the platform is one of these types: RUN, + * RETENTION and OFF. The platform can define further sub-states for each type + * apart from RUN. This categorization is done to verify the sanity of the + * psci_power_state passed by the platform and to print debug information. The + * categorization is done on the basis of the following conditions: + * + * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN. + * + * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is + * STATE_TYPE_RETN. + * + * 3. 
If (plat_local_state > PLAT_MAX_RET_STATE), then the category is + * STATE_TYPE_OFF. + */ +typedef enum plat_local_state_type { + STATE_TYPE_RUN = 0, + STATE_TYPE_RETN, + STATE_TYPE_OFF +} plat_local_state_type_t; + +/* The macro used to categorize plat_local_state. */ +#define find_local_state_type(plat_local_state) \ + ((plat_local_state) ? ((plat_local_state > PLAT_MAX_RET_STATE) \ + ? STATE_TYPE_OFF : STATE_TYPE_RETN) \ + : STATE_TYPE_RUN) + +/****************************************************************************** + * Check that the maximum retention level supported by the platform is less + * than the maximum off level. + *****************************************************************************/ +CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE, \ + assert_platform_max_off_and_retn_state_check); + +/****************************************************************************** + * This function ensures that the power state parameter in a CPU_SUSPEND request + * is valid. If so, it returns the requested states for each power level. + *****************************************************************************/ +int psci_validate_power_state(unsigned int power_state, + psci_power_state_t *state_info) { - uint32_t max_afflvl = PSCI_INVALID_DATA; + /* Check SBZ bits in power state are zero */ + if (psci_check_power_state(power_state)) + return PSCI_E_INVALID_PARAMS; - for (; start_afflvl <= end_afflvl; start_afflvl++) { - if (mpidr_nodes[start_afflvl] == NULL) - continue; + assert(psci_plat_pm_ops->validate_power_state); - if (psci_get_phys_state(mpidr_nodes[start_afflvl]) == - PSCI_STATE_OFF) - max_afflvl = start_afflvl; - } + /* Validate the power_state using platform pm_ops */ + return psci_plat_pm_ops->validate_power_state(power_state, state_info); +} - return max_afflvl; +/****************************************************************************** + * This function retrieves the `psci_power_state_t` for system suspend from + * the platform. 
+ *****************************************************************************/ +void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info) +{ + /* + * Assert that the required pm_ops hook is implemented to ensure that + * the capability detected during psci_setup() is valid. + */ + assert(psci_plat_pm_ops->get_sys_suspend_power_state); + + /* + * Query the platform for the power_state required for system suspend + */ + psci_plat_pm_ops->get_sys_suspend_power_state(state_info); } /******************************************************************************* @@ -99,24 +169,15 @@ ******************************************************************************/ unsigned int psci_is_last_on_cpu(void) { - unsigned long mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK; - unsigned int i; + unsigned int cpu_idx, my_idx = plat_my_core_pos(); - for (i = psci_aff_limits[MPIDR_AFFLVL0].min; - i <= psci_aff_limits[MPIDR_AFFLVL0].max; i++) { - - assert(psci_aff_map[i].level == MPIDR_AFFLVL0); - - if (!(psci_aff_map[i].state & PSCI_AFF_PRESENT)) - continue; - - if (psci_aff_map[i].mpidr == mpidr) { - assert(psci_get_state(&psci_aff_map[i]) - == PSCI_STATE_ON); + for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) { + if (cpu_idx == my_idx) { + assert(psci_get_aff_info_state() == AFF_STATE_ON); continue; } - if (psci_get_state(&psci_aff_map[i]) != PSCI_STATE_OFF) + if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF) return 0; } @@ -124,193 +185,404 @@ } /******************************************************************************* - * This function saves the highest affinity level which is in OFF state. The - * affinity instance with which the level is associated is determined by the - * caller. 
- ******************************************************************************/ -void psci_set_max_phys_off_afflvl(uint32_t afflvl) -{ - set_cpu_data(psci_svc_cpu_data.max_phys_off_afflvl, afflvl); - - /* - * Ensure that the saved value is flushed to main memory and any - * speculatively pre-fetched stale copies are invalidated from the - * caches of other cpus in the same coherency domain. This ensures that - * the value can be safely read irrespective of the state of the data - * cache. - */ - flush_cpu_data(psci_svc_cpu_data.max_phys_off_afflvl); -} - -/******************************************************************************* - * This function reads the saved highest affinity level which is in OFF - * state. The affinity instance with which the level is associated is determined - * by the caller. - ******************************************************************************/ -uint32_t psci_get_max_phys_off_afflvl(void) -{ - /* - * Ensure that the last update of this value in this cpu's cache is - * flushed to main memory and any speculatively pre-fetched stale copies - * are invalidated from the caches of other cpus in the same coherency - * domain. This ensures that the value is always read from the main - * memory when it was written before the data cache was enabled. - */ - flush_cpu_data(psci_svc_cpu_data.max_phys_off_afflvl); - return get_cpu_data(psci_svc_cpu_data.max_phys_off_afflvl); -} - -/******************************************************************************* - * Routine to return the maximum affinity level to traverse to after a cpu has + * Routine to return the maximum power level to traverse to after a cpu has * been physically powered up. It is expected to be called immediately after * reset from assembler code. 
******************************************************************************/ -int get_power_on_target_afflvl(void) +static int get_power_on_target_pwrlvl(void) { - int afflvl; - -#if DEBUG - unsigned int state; - aff_map_node_t *node; - - /* Retrieve our node from the topology tree */ - node = psci_get_aff_map_node(read_mpidr_el1() & MPIDR_AFFINITY_MASK, - MPIDR_AFFLVL0); - assert(node); + int pwrlvl; /* - * Sanity check the state of the cpu. It should be either suspend or "on - * pending" - */ - state = psci_get_state(node); - assert(state == PSCI_STATE_SUSPEND || state == PSCI_STATE_ON_PENDING); -#endif - - /* - * Assume that this cpu was suspended and retrieve its target affinity + * Assume that this cpu was suspended and retrieve its target power * level. If it is invalid then it could only have been turned off - * earlier. PLATFORM_MAX_AFFLVL will be the highest affinity level a + * earlier. PLAT_MAX_PWR_LVL will be the highest power level a * cpu can be turned off to. */ - afflvl = psci_get_suspend_afflvl(); - if (afflvl == PSCI_INVALID_DATA) - afflvl = PLATFORM_MAX_AFFLVL; - return afflvl; + pwrlvl = psci_get_suspend_pwrlvl(); + if (pwrlvl == PSCI_INVALID_DATA) + pwrlvl = PLAT_MAX_PWR_LVL; + return pwrlvl; } -/******************************************************************************* - * Simple routine to set the id of an affinity instance at a given level in the - * mpidr. - ******************************************************************************/ -unsigned long mpidr_set_aff_inst(unsigned long mpidr, - unsigned char aff_inst, - int aff_lvl) +/****************************************************************************** + * Helper function to update the requested local power state array. This array + * does not store the requested state for the CPU power level. Hence an + * assertion is added to prevent us from accessing the wrong index. 
+ *****************************************************************************/ +static void psci_set_req_local_pwr_state(unsigned int pwrlvl, + unsigned int cpu_idx, + plat_local_state_t req_pwr_state) { - unsigned long aff_shift; + assert(pwrlvl > PSCI_CPU_PWR_LVL); + psci_req_local_pwr_states[pwrlvl - 1][cpu_idx] = req_pwr_state; +} - assert(aff_lvl <= MPIDR_AFFLVL3); +/****************************************************************************** + * This function initializes the psci_req_local_pwr_states. + *****************************************************************************/ +void psci_init_req_local_pwr_states(void) +{ + /* Initialize the requested state of all non CPU power domains as OFF */ + memset(&psci_req_local_pwr_states, PLAT_MAX_OFF_STATE, + sizeof(psci_req_local_pwr_states)); +} + +/****************************************************************************** + * Helper function to return a reference to an array containing the local power + * states requested by each cpu for a power domain at 'pwrlvl'. The size of the + * array will be the number of cpu power domains of which this power domain is + * an ancestor. These requested states will be used to determine a suitable + * target state for this power domain during psci state coordination. An + * assertion is added to prevent us from accessing the CPU power level. + *****************************************************************************/ +static plat_local_state_t *psci_get_req_local_pwr_states(int pwrlvl, + int cpu_idx) +{ + assert(pwrlvl > PSCI_CPU_PWR_LVL); + + return &psci_req_local_pwr_states[pwrlvl - 1][cpu_idx]; +} + +/****************************************************************************** + * Helper function to return the current local power state of each power domain + * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This + * function will be called after a cpu is powered on to find the local state + * each power domain has emerged from. 
+ *****************************************************************************/
+static void psci_get_target_local_pwr_states(uint32_t end_pwrlvl,
+ psci_power_state_t *target_state)
+{
+ int lvl;
+ unsigned int parent_idx;
+ plat_local_state_t *pd_state = target_state->pwr_domain_state;
+
+ pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
+ parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
+
+ /* Copy the local power state from node to state_info */
+ for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+#if !USE_COHERENT_MEM
+ /*
+ * If using normal memory for psci_non_cpu_pd_nodes, we need
+ * to flush before reading the local power state as another
+ * cpu in the same power domain could have updated it and this
+ * code runs before caches are enabled.
+ */
+ flush_dcache_range(
+ (uint64_t)&psci_non_cpu_pd_nodes[parent_idx],
+ sizeof(psci_non_cpu_pd_nodes[parent_idx]));
+#endif
+ pd_state[lvl] = psci_non_cpu_pd_nodes[parent_idx].local_state;
+ parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+ }
+
+ /* Set the higher levels to RUN */
+ for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
+ target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
+}
+
+/******************************************************************************
+ * Helper function to set the target local power state that each power domain
+ * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
+ * enter. This function will be called after coordination of requested power
+ * states has been done for each power level. 
+ *****************************************************************************/ +static void psci_set_target_local_pwr_states(uint32_t end_pwrlvl, + const psci_power_state_t *target_state) +{ + int lvl; + unsigned int parent_idx; + const plat_local_state_t *pd_state = target_state->pwr_domain_state; + + psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]); /* - * Decide the number of bits to shift by depending upon - * the affinity level + * Need to flush as local_state will be accessed with Data Cache + * disabled during power on */ - aff_shift = get_afflvl_shift(aff_lvl); + flush_cpu_data(psci_svc_cpu_data.local_state); - /* Clear the existing affinity instance & set the new one*/ - mpidr &= ~(((unsigned long)MPIDR_AFFLVL_MASK) << aff_shift); - mpidr |= ((unsigned long)aff_inst) << aff_shift; + parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node; - return mpidr; + /* Copy the local_state from state_info */ + for (lvl = 1; lvl <= end_pwrlvl; lvl++) { + psci_non_cpu_pd_nodes[parent_idx].local_state = pd_state[lvl]; +#if !USE_COHERENT_MEM + flush_dcache_range( + (uint64_t)&psci_non_cpu_pd_nodes[parent_idx], + sizeof(psci_non_cpu_pd_nodes[parent_idx])); +#endif + parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node; + } } + /******************************************************************************* - * This function sanity checks a range of affinity levels. + * PSCI helper function to get the parent nodes corresponding to a cpu_index. 
******************************************************************************/ -int psci_check_afflvl_range(int start_afflvl, int end_afflvl) +void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx, + int end_lvl, + unsigned int node_index[]) { - /* Sanity check the parameters passed */ - if (end_afflvl > PLATFORM_MAX_AFFLVL) + unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node; + int i; + + for (i = PSCI_CPU_PWR_LVL + 1; i <= end_lvl; i++) { + *node_index++ = parent_node; + parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node; + } +} + +/****************************************************************************** + * This function is invoked post CPU power up and initialization. It sets the + * affinity info state, target power state and requested power state for the + * current CPU and all its ancestor power domains to RUN. + *****************************************************************************/ +void psci_set_pwr_domains_to_run(uint32_t end_pwrlvl) +{ + int lvl; + unsigned int parent_idx, cpu_idx = plat_my_core_pos(); + parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node; + + /* Reset the local_state to RUN for the non cpu power domains. 
*/ + for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) { + psci_non_cpu_pd_nodes[parent_idx].local_state = + PSCI_LOCAL_STATE_RUN; +#if !USE_COHERENT_MEM + flush_dcache_range( + (uint64_t)&psci_non_cpu_pd_nodes[parent_idx], + sizeof(psci_non_cpu_pd_nodes[parent_idx])); +#endif + psci_set_req_local_pwr_state(lvl, + cpu_idx, + PSCI_LOCAL_STATE_RUN); + parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node; + } + + /* Set the affinity info state to ON */ + psci_set_aff_info_state(AFF_STATE_ON); + + psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN); + flush_cpu_data(psci_svc_cpu_data); +} + +/****************************************************************************** + * This function is passed the local power states requested for each power + * domain (state_info) between the current CPU domain and its ancestors until + * the target power level (end_pwrlvl). It updates the array of requested power + * states with this information. + * + * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it + * retrieves the states requested by all the cpus of which the power domain at + * that level is an ancestor. It passes this information to the platform to + * coordinate and return the target power state. If the target state for a level + * is RUN then subsequent levels are not considered. At the CPU level, state + * coordination is not required. Hence, the requested and the target states are + * the same. + * + * The 'state_info' is updated with the target state for each level between the + * CPU and the 'end_pwrlvl' and returned to the caller. + * + * This function will only be invoked with data cache enabled and while + * powering down a core. 
+ *****************************************************************************/
+void psci_do_state_coordination(int end_pwrlvl, psci_power_state_t *state_info)
+{
+ unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
+ unsigned int start_idx, ncpus;
+ plat_local_state_t target_state, *req_states;
+
+ parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
+
+ /* For level 0, the requested state will be equivalent
+ to target state */
+ for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+
+ /* First update the requested power state */
+ psci_set_req_local_pwr_state(lvl, cpu_idx,
+ state_info->pwr_domain_state[lvl]);
+
+ /* Get the requested power states for this power level */
+ start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
+ req_states = psci_get_req_local_pwr_states(lvl, start_idx);
+
+ /*
+ * Let the platform coordinate amongst the requested states at
+ * this power level and return the target local power state.
+ */
+ ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
+ target_state = plat_get_target_pwr_state(lvl,
+ req_states,
+ ncpus);
+
+ state_info->pwr_domain_state[lvl] = target_state;
+
+ /* Break early if the negotiated target power state is RUN */
+ if (is_local_state_run(state_info->pwr_domain_state[lvl]))
+ break;
+
+ parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+ }
+
+ /*
+ * This is for cases when we break out of the above loop early because
+ * the target power state is RUN at a power level < end_pwrlvl.
+ * We update the requested power state from state_info and then
+ * set the target state as RUN. 
+ */ + for (lvl = lvl + 1; lvl <= end_pwrlvl; lvl++) { + psci_set_req_local_pwr_state(lvl, cpu_idx, + state_info->pwr_domain_state[lvl]); + state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN; + + } + + /* Update the target state in the power domain nodes */ + psci_set_target_local_pwr_states(end_pwrlvl, state_info); +} + +/****************************************************************************** + * This function validates a suspend request by making sure that if a standby + * state is requested then no power level is turned off and the highest power + * level is placed in a standby/retention state. + * + * It also ensures that the state level X will enter is not shallower than the + * state level X + 1 will enter. + * + * This validation will be enabled only for DEBUG builds as the platform is + * expected to perform these validations as well. + *****************************************************************************/ +int psci_validate_suspend_req(const psci_power_state_t *state_info, + unsigned int is_power_down_state) +{ + unsigned int max_off_lvl, target_lvl, max_retn_lvl; + plat_local_state_t state; + plat_local_state_type_t req_state_type, deepest_state_type; + int i; + + /* Find the target suspend power level */ + target_lvl = psci_find_target_suspend_lvl(state_info); + if (target_lvl == PSCI_INVALID_DATA) return PSCI_E_INVALID_PARAMS; - if (start_afflvl < MPIDR_AFFLVL0) - return PSCI_E_INVALID_PARAMS; + /* All power domain levels are in a RUN state to begin with */ + deepest_state_type = STATE_TYPE_RUN; - if (end_afflvl < start_afflvl) + for (i = target_lvl; i >= PSCI_CPU_PWR_LVL; i--) { + state = state_info->pwr_domain_state[i]; + req_state_type = find_local_state_type(state); + + /* + * While traversing from the highest power level to the lowest, + * the state requested for lower levels has to be the same or + * deeper i.e. equal to or greater than the state at the higher + * levels. 
If this condition is true, then the requested state + * becomes the deepest state encountered so far. + */ + if (req_state_type < deepest_state_type) + return PSCI_E_INVALID_PARAMS; + deepest_state_type = req_state_type; + } + + /* Find the highest off power level */ + max_off_lvl = psci_find_max_off_lvl(state_info); + + /* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */ + max_retn_lvl = PSCI_INVALID_DATA; + if (target_lvl != max_off_lvl) + max_retn_lvl = target_lvl; + + /* + * If this is not a request for a power down state then max off level + * has to be invalid and max retention level has to be a valid power + * level. + */ + if (!is_power_down_state && (max_off_lvl != PSCI_INVALID_DATA || + max_retn_lvl == PSCI_INVALID_DATA)) return PSCI_E_INVALID_PARAMS; return PSCI_E_SUCCESS; } -/******************************************************************************* - * This function is passed an array of pointers to affinity level nodes in the - * topology tree for an mpidr and the state which each node should transition - * to. It updates the state of each node between the specified affinity levels. 
- ******************************************************************************/
-void psci_do_afflvl_state_mgmt(uint32_t start_afflvl,
- uint32_t end_afflvl,
- aff_map_node_t *mpidr_nodes[],
- uint32_t state)
+/******************************************************************************
+ * This function finds the highest power level which will be powered down
+ * amongst all the power levels specified in the 'state_info' structure
+ *****************************************************************************/
+unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
 {
- uint32_t level;
+ int i;
 
- for (level = start_afflvl; level <= end_afflvl; level++) {
- if (mpidr_nodes[level] == NULL)
- continue;
- psci_set_state(mpidr_nodes[level], state);
+ for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
+ if (is_local_state_off(state_info->pwr_domain_state[i]))
+ return i;
 }
+
+ return PSCI_INVALID_DATA;
+}
+
+/******************************************************************************
+ * This function finds the level of the highest power domain which will be
+ * placed in a low power state during a suspend operation.
+ *****************************************************************************/
+unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
+{
+ int i;
+
+ for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
+ if (!is_local_state_run(state_info->pwr_domain_state[i]))
+ return i;
+ }
+
+ return PSCI_INVALID_DATA;
 }
 
 /*******************************************************************************
- * This function is passed an array of pointers to affinity level nodes in the
- * topology tree for an mpidr. It picks up locks for each affinity level bottom
- * up in the range specified.
+ * This function is passed a cpu_index and the highest level in the topology
+ * tree that the operation should be applied to. It picks up locks in order of
+ * increasing power domain level in the range specified. 
******************************************************************************/ -void psci_acquire_afflvl_locks(int start_afflvl, - int end_afflvl, - aff_map_node_t *mpidr_nodes[]) +void psci_acquire_pwr_domain_locks(int end_pwrlvl, unsigned int cpu_idx) { + unsigned int parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node; int level; - for (level = start_afflvl; level <= end_afflvl; level++) { - if (mpidr_nodes[level] == NULL) - continue; - - psci_lock_get(mpidr_nodes[level]); + /* No locking required for level 0. Hence start locking from level 1 */ + for (level = PSCI_CPU_PWR_LVL + 1; level <= end_pwrlvl; level++) { + psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]); + parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node; } } /******************************************************************************* - * This function is passed an array of pointers to affinity level nodes in the - * topology tree for an mpidr. It releases the lock for each affinity level top - * down in the range specified. + * This function is passed a cpu_index and the highest level in the topology + * tree that the operation should be applied to. It releases the locks in order + * of decreasing power domain level in the range specified. ******************************************************************************/ -void psci_release_afflvl_locks(int start_afflvl, - int end_afflvl, - aff_map_node_t *mpidr_nodes[]) +void psci_release_pwr_domain_locks(int end_pwrlvl, unsigned int cpu_idx) { + unsigned int parent_idx, parent_nodes[PLAT_MAX_PWR_LVL] = {0}; int level; - for (level = end_afflvl; level >= start_afflvl; level--) { - if (mpidr_nodes[level] == NULL) - continue; + /* Get the parent nodes */ + psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes); - psci_lock_release(mpidr_nodes[level]); + /* Unlock top down. No unlocking required for level 0. 
*/ + for (level = end_pwrlvl; level >= PSCI_CPU_PWR_LVL + 1; level--) { + parent_idx = parent_nodes[level - 1]; + psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]); } } /******************************************************************************* - * Simple routine to determine whether an affinity instance at a given level - * in an mpidr exists or not. + * Simple routine to determine whether a mpidr is valid or not. ******************************************************************************/ -int psci_validate_mpidr(unsigned long mpidr, int level) +int psci_validate_mpidr(unsigned long mpidr) { - aff_map_node_t *node; - - node = psci_get_aff_map_node(mpidr, level); - if (node && (node->state & PSCI_AFF_PRESENT)) - return PSCI_E_SUCCESS; - else + if (plat_core_pos_by_mpidr(mpidr) < 0) return PSCI_E_INVALID_PARAMS; + + return PSCI_E_SUCCESS; } /******************************************************************************* @@ -371,209 +643,74 @@ } /******************************************************************************* - * This function takes a pointer to an affinity node in the topology tree and - * returns its state. State of a non-leaf node needs to be calculated. - ******************************************************************************/ -unsigned short psci_get_state(aff_map_node_t *node) -{ -#if !USE_COHERENT_MEM - flush_dcache_range((uint64_t) node, sizeof(*node)); -#endif - - assert(node->level >= MPIDR_AFFLVL0 && node->level <= MPIDR_MAX_AFFLVL); - - /* A cpu node just contains the state which can be directly returned */ - if (node->level == MPIDR_AFFLVL0) - return (node->state >> PSCI_STATE_SHIFT) & PSCI_STATE_MASK; - - /* - * For an affinity level higher than a cpu, the state has to be - * calculated. It depends upon the value of the reference count - * which is managed by each node at the next lower affinity level - * e.g. for a cluster, each cpu increments/decrements the reference - * count. 
If the reference count is 0 then the affinity level is - * OFF else ON. - */ - if (node->ref_count) - return PSCI_STATE_ON; - else - return PSCI_STATE_OFF; -} - -/******************************************************************************* - * This function takes a pointer to an affinity node in the topology tree and - * a target state. State of a non-leaf node needs to be converted to a reference - * count. State of a leaf node can be set directly. - ******************************************************************************/ -void psci_set_state(aff_map_node_t *node, unsigned short state) -{ - assert(node->level >= MPIDR_AFFLVL0 && node->level <= MPIDR_MAX_AFFLVL); - - /* - * For an affinity level higher than a cpu, the state is used - * to decide whether the reference count is incremented or - * decremented. Entry into the ON_PENDING state does not have - * effect. - */ - if (node->level > MPIDR_AFFLVL0) { - switch (state) { - case PSCI_STATE_ON: - node->ref_count++; - break; - case PSCI_STATE_OFF: - case PSCI_STATE_SUSPEND: - node->ref_count--; - break; - case PSCI_STATE_ON_PENDING: - /* - * An affinity level higher than a cpu will not undergo - * a state change when it is about to be turned on - */ - return; - default: - assert(0); - } - } else { - node->state &= ~(PSCI_STATE_MASK << PSCI_STATE_SHIFT); - node->state |= (state & PSCI_STATE_MASK) << PSCI_STATE_SHIFT; - } - -#if !USE_COHERENT_MEM - flush_dcache_range((uint64_t) node, sizeof(*node)); -#endif -} - -/******************************************************************************* - * An affinity level could be on, on_pending, suspended or off. These are the - * logical states it can be in. Physically either it is off or on. When it is in - * the state on_pending then it is about to be turned on. It is not possible to - * tell whether that's actually happenned or not. So we err on the side of - * caution & treat the affinity level as being turned off. 
- ******************************************************************************/ -unsigned short psci_get_phys_state(aff_map_node_t *node) -{ - unsigned int state; - - state = psci_get_state(node); - return get_phys_state(state); -} - -/******************************************************************************* - * This function takes an array of pointers to affinity instance nodes in the - * topology tree and calls the physical power on handler for the corresponding - * affinity levels - ******************************************************************************/ -static void psci_call_power_on_handlers(aff_map_node_t *mpidr_nodes[], - int start_afflvl, - int end_afflvl, - afflvl_power_on_finisher_t *pon_handlers) -{ - int level; - aff_map_node_t *node; - - for (level = end_afflvl; level >= start_afflvl; level--) { - node = mpidr_nodes[level]; - if (node == NULL) - continue; - - /* - * If we run into any trouble while powering up an - * affinity instance, then there is no recovery path - * so simply return an error and let the caller take - * care of the situation. - */ - pon_handlers[level](node); - } -} - -/******************************************************************************* * Generic handler which is called when a cpu is physically powered on. It - * traverses through all the affinity levels performing generic, architectural, - * platform setup and state management e.g. for a cluster that's been powered - * on, it will call the platform specific code which will enable coherency at - * the interconnect level. For a cpu it could mean turning on the MMU etc. - * - * The state of all the relevant affinity levels is changed after calling the - * affinity level specific handlers as their actions would depend upon the state - * the affinity level is exiting from. - * - * The affinity level specific handlers are called in descending order i.e. 
from - * the highest to the lowest affinity level implemented by the platform because - * to turn on affinity level X it is neccesary to turn on affinity level X + 1 - * first. + * traverses the node information and finds the highest power level powered + * off and performs generic, architectural, platform setup and state management + * to power on that power level and power levels below it. + * e.g. For a cpu that's been powered on, it will call the platform specific + * code to enable the gic cpu interface and for a cluster it will enable + * coherency at the interconnect level in addition to gic cpu interface. ******************************************************************************/ -void psci_afflvl_power_on_finish(int start_afflvl, - int end_afflvl, - afflvl_power_on_finisher_t *pon_handlers) +void psci_power_up_finish(void) { - mpidr_aff_map_nodes_t mpidr_nodes; - int rc; - unsigned int max_phys_off_afflvl; - + unsigned int cpu_idx = plat_my_core_pos(); + psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} }; + int end_pwrlvl; /* - * Collect the pointers to the nodes in the topology tree for - * each affinity instance in the mpidr. If this function does - * not return successfully then either the mpidr or the affinity - * levels are incorrect. Either case is an irrecoverable error. + * Verify that we have been explicitly turned ON or resumed from + * suspend. */ - rc = psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK, - start_afflvl, - end_afflvl, - mpidr_nodes); - if (rc != PSCI_E_SUCCESS) + if (psci_get_aff_info_state() == AFF_STATE_OFF) { + ERROR("Unexpected affinity info state"); panic(); + } /* - * This function acquires the lock corresponding to each affinity - * level so that by the time all locks are taken, the system topology - * is snapshot and state management can be done safely. + * Get the maximum power domain level to traverse to after this cpu + * has been physically powered up. 
*/ - psci_acquire_afflvl_locks(start_afflvl, - end_afflvl, - mpidr_nodes); - - max_phys_off_afflvl = psci_find_max_phys_off_afflvl(start_afflvl, - end_afflvl, - mpidr_nodes); - assert(max_phys_off_afflvl != PSCI_INVALID_DATA); + end_pwrlvl = get_power_on_target_pwrlvl(); /* - * Stash the highest affinity level that will come out of the OFF or - * SUSPEND states. + * This function acquires the lock corresponding to each power level so + * that by the time all locks are taken, the system topology is snapshot + * and state management can be done safely. */ - psci_set_max_phys_off_afflvl(max_phys_off_afflvl); + psci_acquire_pwr_domain_locks(end_pwrlvl, + cpu_idx); - /* Perform generic, architecture and platform specific handling */ - psci_call_power_on_handlers(mpidr_nodes, - start_afflvl, - end_afflvl, - pon_handlers); + psci_get_target_local_pwr_states(end_pwrlvl, &state_info); /* - * This function updates the state of each affinity instance - * corresponding to the mpidr in the range of affinity levels - * specified. + * This CPU could be resuming from suspend or it could have just been + * turned on. To distinguish between these 2 cases, we examine the + * affinity state of the CPU: + * - If the affinity state is ON_PENDING then it has just been + * turned on. + * - Else it is resuming from suspend. + * + * Depending on the type of warm reset identified, choose the right set + * of power management handler and perform the generic, architecture + * and platform specific handling. */ - psci_do_afflvl_state_mgmt(start_afflvl, - end_afflvl, - mpidr_nodes, - PSCI_STATE_ON); + if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING) + psci_cpu_on_finish(cpu_idx, &state_info); + else + psci_cpu_suspend_finish(cpu_idx, &state_info); /* - * Invalidate the entry for the highest affinity level stashed earlier. 
- * This ensures that any reads of this variable outside the power - * up/down sequences return PSCI_INVALID_DATA + * Set the requested and target state of this CPU and all the higher + * power domains which are ancestors of this CPU to run. */ - psci_set_max_phys_off_afflvl(PSCI_INVALID_DATA); + psci_set_pwr_domains_to_run(end_pwrlvl); /* - * This loop releases the lock corresponding to each affinity level + * This loop releases the lock corresponding to each power level * in the reverse order to which they were acquired. */ - psci_release_afflvl_locks(start_afflvl, - end_afflvl, - mpidr_nodes); + psci_release_pwr_domain_locks(end_pwrlvl, + cpu_idx); } /******************************************************************************* @@ -618,31 +755,123 @@ /******************************************************************************* - * This function prints the state of all affinity instances present in the + * This function prints the state of all power domains present in the * system ******************************************************************************/ -void psci_print_affinity_map(void) +void psci_print_power_domain_map(void) { #if LOG_LEVEL >= LOG_LEVEL_INFO - aff_map_node_t *node; unsigned int idx; + plat_local_state_t state; + plat_local_state_type_t state_type; + /* This array maps to the PSCI_STATE_X definitions in psci.h */ - static const char *psci_state_str[] = { + static const char *psci_state_type_str[] = { "ON", + "RETENTION", "OFF", - "ON_PENDING", - "SUSPEND" }; - INFO("PSCI Affinity Map:\n"); - for (idx = 0; idx < PSCI_NUM_AFFS ; idx++) { - node = &psci_aff_map[idx]; - if (!(node->state & PSCI_AFF_PRESENT)) { - continue; - } - INFO(" AffInst: Level %u, MPID 0x%lx, State %s\n", - node->level, node->mpidr, - psci_state_str[psci_get_state(node)]); + INFO("PSCI Power Domain Map:\n"); + for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - PLATFORM_CORE_COUNT); + idx++) { + state_type = find_local_state_type( + psci_non_cpu_pd_nodes[idx].local_state); 
+ INFO(" Domain Node : Level %u, parent_node %d," + " State %s (0x%x)\n", + psci_non_cpu_pd_nodes[idx].level, + psci_non_cpu_pd_nodes[idx].parent_node, + psci_state_type_str[state_type], + psci_non_cpu_pd_nodes[idx].local_state); + } + + for (idx = 0; idx < PLATFORM_CORE_COUNT; idx++) { + state = psci_get_cpu_local_state_by_idx(idx); + state_type = find_local_state_type(state); + INFO(" CPU Node : MPID 0x%lx, parent_node %d," + " State %s (0x%x)\n", + psci_cpu_pd_nodes[idx].mpidr, + psci_cpu_pd_nodes[idx].parent_node, + psci_state_type_str[state_type], + psci_get_cpu_local_state_by_idx(idx)); } #endif } + +#if ENABLE_PLAT_COMPAT +/******************************************************************************* + * PSCI Compatibility helper function to return the 'power_state' parameter of + * the PSCI CPU SUSPEND request for the current CPU. Returns PSCI_INVALID_DATA + * if not invoked within CPU_SUSPEND for the current CPU. + ******************************************************************************/ +int psci_get_suspend_powerstate(void) +{ + /* Sanity check to verify that CPU is within CPU_SUSPEND */ + if (psci_get_aff_info_state() == AFF_STATE_ON && + !is_local_state_run(psci_get_cpu_local_state())) + return psci_power_state_compat[plat_my_core_pos()]; + + return PSCI_INVALID_DATA; +} + +/******************************************************************************* + * PSCI Compatibility helper function to return the state id of the current + * cpu encoded in the 'power_state' parameter. Returns PSCI_INVALID_DATA + * if not invoked within CPU_SUSPEND for the current CPU. 
+ ******************************************************************************/ +int psci_get_suspend_stateid(void) +{ + unsigned int power_state; + power_state = psci_get_suspend_powerstate(); + if (power_state != PSCI_INVALID_DATA) + return psci_get_pstate_id(power_state); + + return PSCI_INVALID_DATA; +} + +/******************************************************************************* + * PSCI Compatibility helper function to return the state id encoded in the + * 'power_state' parameter of the CPU specified by 'mpidr'. Returns + * PSCI_INVALID_DATA if the CPU is not in CPU_SUSPEND. + ******************************************************************************/ +int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr) +{ + int cpu_idx = plat_core_pos_by_mpidr(mpidr); + + if (cpu_idx == -1) + return PSCI_INVALID_DATA; + + /* Sanity check to verify that the CPU is in CPU_SUSPEND */ + if (psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_ON && + !is_local_state_run(psci_get_cpu_local_state_by_idx(cpu_idx))) + return psci_get_pstate_id(psci_power_state_compat[cpu_idx]); + + return PSCI_INVALID_DATA; +} + +/******************************************************************************* + * This function returns highest affinity level which is in OFF + * state. The affinity instance with which the level is associated is + * determined by the caller. + ******************************************************************************/ +unsigned int psci_get_max_phys_off_afflvl(void) +{ + psci_power_state_t state_info; + + memset(&state_info, 0, sizeof(state_info)); + psci_get_target_local_pwr_states(PLAT_MAX_PWR_LVL, &state_info); + + return psci_find_target_suspend_lvl(&state_info); +} + +/******************************************************************************* + * PSCI Compatibility helper function to return target affinity level requested + * for the CPU_SUSPEND. 
This function assumes affinity levels correspond to + * power domain levels on the platform. + ******************************************************************************/ +int psci_get_suspend_afflvl(void) +{ + return psci_get_suspend_pwrlvl(); +} + +#endif diff --git a/services/std_svc/psci/psci_entry.S b/services/std_svc/psci/psci_entry.S index 050f6c6..73c3377 100644 --- a/services/std_svc/psci/psci_entry.S +++ b/services/std_svc/psci/psci_entry.S @@ -34,25 +34,16 @@ #include #include - .globl psci_aff_on_finish_entry - .globl psci_aff_suspend_finish_entry + .globl psci_entrypoint .globl psci_power_down_wfi - /* ----------------------------------------------------- - * This cpu has been physically powered up. Depending - * upon whether it was resumed from suspend or simply - * turned on, call the common power on finisher with - * the handlers (chosen depending upon original state). - * ----------------------------------------------------- + /* -------------------------------------------------------------------- + * This CPU has been physically powered up. It is either resuming from + * suspend or has simply been turned on. In both cases, call the power + * on finisher. + * -------------------------------------------------------------------- */ -func psci_aff_on_finish_entry - adr x23, psci_afflvl_on_finishers - b psci_aff_common_finish_entry - -psci_aff_suspend_finish_entry: - adr x23, psci_afflvl_suspend_finishers - -psci_aff_common_finish_entry: +func psci_entrypoint /* * On the warm boot path, most of the EL3 initialisations performed by * 'el3_entrypoint_common' must be skipped: @@ -98,19 +89,10 @@ mov x0, #DISABLE_DCACHE bl bl31_plat_enable_mmu - /* --------------------------------------------- - * Call the finishers starting from affinity - * level 0. 
- * --------------------------------------------- - */ - bl get_power_on_target_afflvl - mov x2, x23 - mov x1, x0 - mov x0, #MPIDR_AFFLVL0 - bl psci_afflvl_power_on_finish + bl psci_power_up_finish b el3_exit -endfunc psci_aff_on_finish_entry +endfunc psci_entrypoint /* -------------------------------------------- * This function is called to indicate to the diff --git a/services/std_svc/psci/psci_helpers.S b/services/std_svc/psci/psci_helpers.S index 1d99158..bbfa5d5 100644 --- a/services/std_svc/psci/psci_helpers.S +++ b/services/std_svc/psci/psci_helpers.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -28,7 +28,6 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include #include #include #include @@ -38,14 +37,13 @@ .globl psci_do_pwrup_cache_maintenance /* ----------------------------------------------------------------------- - * void psci_do_pwrdown_cache_maintenance(uint32_t affinity level); + * void psci_do_pwrdown_cache_maintenance(uint32_t power level); * - * This function performs cache maintenance if the specified affinity - * level is the equal to the level of the highest affinity instance which - * will be/is physically powered off. The levels of cache affected are - * determined by the affinity level which is passed as the argument i.e. - * level 0 results in a flush of the L1 cache. Both the L1 and L2 caches - * are flushed for a higher affinity level. + * This function performs cache maintenance for the specified power + * level. The levels of cache affected are determined by the power + * level which is passed as the argument i.e. level 0 results + * in a flush of the L1 cache. Both the L1 and L2 caches are flushed + * for a higher power level. 
* * Additionally, this function also ensures that stack memory is correctly * flushed out to avoid coherency issues due to a change in its memory @@ -56,28 +54,19 @@ stp x29, x30, [sp,#-16]! stp x19, x20, [sp,#-16]! - mov x19, x0 - bl psci_get_max_phys_off_afflvl -#if ASM_ASSERTION - cmp x0, #PSCI_INVALID_DATA - ASM_ASSERT(ne) -#endif - cmp x0, x19 - b.ne 1f - /* --------------------------------------------- * Determine to how many levels of cache will be - * subject to cache maintenance. Affinity level + * subject to cache maintenance. Power level * 0 implies that only the cpu is being powered * down. Only the L1 data cache needs to be * flushed to the PoU in this case. For a higher - * affinity level we are assuming that a flush + * power level we are assuming that a flush * of L1 data and L2 unified cache is enough. * This information should be provided by the * platform. * --------------------------------------------- */ - cmp x0, #MPIDR_AFFLVL0 + cmp x0, #PSCI_CPU_PWR_LVL b.eq do_core_pwr_dwn bl prepare_cluster_pwr_dwn b do_stack_maintenance @@ -92,8 +81,7 @@ * --------------------------------------------- */ do_stack_maintenance: - mrs x0, mpidr_el1 - bl platform_get_stack + bl plat_get_my_stack /* --------------------------------------------- * Calculate and store the size of the used @@ -116,7 +104,6 @@ sub x1, sp, x0 bl inv_dcache_range -1: ldp x19, x20, [sp], #16 ldp x29, x30, [sp], #16 ret @@ -147,8 +134,7 @@ * stack base address in x0. * --------------------------------------------- */ - mrs x0, mpidr_el1 - bl platform_get_stack + bl plat_get_my_stack mov x1, sp sub x1, x0, x1 mov x0, sp diff --git a/services/std_svc/psci/psci_main.c b/services/std_svc/psci/psci_main.c index b389287..f024291 100644 --- a/services/std_svc/psci/psci_main.c +++ b/services/std_svc/psci/psci_main.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -35,6 +35,7 @@ #include #include #include +#include #include "psci_private.h" /******************************************************************************* @@ -46,14 +47,13 @@ { int rc; - unsigned int start_afflvl, end_afflvl; + unsigned int end_pwrlvl; entry_point_info_t ep; /* Determine if the cpu exists of not */ - rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0); - if (rc != PSCI_E_SUCCESS) { + rc = psci_validate_mpidr(target_cpu); + if (rc != PSCI_E_SUCCESS) return PSCI_E_INVALID_PARAMS; - } /* Validate the entrypoint using platform pm_ops */ if (psci_plat_pm_ops->validate_ns_entrypoint) { @@ -73,18 +73,14 @@ if (rc != PSCI_E_SUCCESS) return rc; - /* - * To turn this cpu on, specify which affinity + * To turn this cpu on, specify which power * levels need to be turned on */ - start_afflvl = MPIDR_AFFLVL0; - end_afflvl = PLATFORM_MAX_AFFLVL; - rc = psci_afflvl_on(target_cpu, + end_pwrlvl = PLAT_MAX_PWR_LVL; + rc = psci_cpu_on_start(target_cpu, &ep, - start_afflvl, - end_afflvl); - + end_pwrlvl); return rc; } @@ -98,73 +94,82 @@ unsigned long context_id) { int rc; - unsigned int target_afflvl, pstate_type; + unsigned int target_pwrlvl, is_power_down_state; entry_point_info_t ep; + psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} }; + plat_local_state_t cpu_pd_state; - /* Check SBZ bits in power state are zero */ - if (psci_validate_power_state(power_state)) - return PSCI_E_INVALID_PARAMS; - - /* Sanity check the requested state */ - target_afflvl = psci_get_pstate_afflvl(power_state); - if (target_afflvl > PLATFORM_MAX_AFFLVL) - return PSCI_E_INVALID_PARAMS; - - /* Validate the power_state using platform pm_ops */ - if (psci_plat_pm_ops->validate_power_state) { - rc = psci_plat_pm_ops->validate_power_state(power_state); - if (rc != PSCI_E_SUCCESS) { - assert(rc == PSCI_E_INVALID_PARAMS); - return 
PSCI_E_INVALID_PARAMS; - } + /* Validate the power_state parameter */ + rc = psci_validate_power_state(power_state, &state_info); + if (rc != PSCI_E_SUCCESS) { + assert(rc == PSCI_E_INVALID_PARAMS); + return rc; } - /* Validate the entrypoint using platform pm_ops */ - if (psci_plat_pm_ops->validate_ns_entrypoint) { - rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint); - if (rc != PSCI_E_SUCCESS) { - assert(rc == PSCI_E_INVALID_PARAMS); - return PSCI_E_INVALID_PARAMS; - } - } - - /* Determine the 'state type' in the 'power_state' parameter */ - pstate_type = psci_get_pstate_type(power_state); - /* - * Ensure that we have a platform specific handler for entering - * a standby state. + * Get the value of the state type bit from the power state parameter. */ - if (pstate_type == PSTATE_TYPE_STANDBY) { - if (!psci_plat_pm_ops->affinst_standby) + is_power_down_state = psci_get_pstate_type(power_state); + + /* Sanity check the requested suspend levels */ + assert (psci_validate_suspend_req(&state_info, is_power_down_state) + == PSCI_E_SUCCESS); + + target_pwrlvl = psci_find_target_suspend_lvl(&state_info); + + /* Fast path for CPU standby.*/ + if (is_cpu_standby_req(is_power_down_state, target_pwrlvl)) { + if (!psci_plat_pm_ops->cpu_standby) return PSCI_E_INVALID_PARAMS; - psci_plat_pm_ops->affinst_standby(power_state); + /* + * Set the state of the CPU power domain to the platform + * specific retention state and enter the standby state. + */ + cpu_pd_state = state_info.pwr_domain_state[PSCI_CPU_PWR_LVL]; + psci_set_cpu_local_state(cpu_pd_state); + psci_plat_pm_ops->cpu_standby(cpu_pd_state); + + /* Upon exit from standby, set the state back to RUN. */ + psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN); + return PSCI_E_SUCCESS; } /* - * Verify and derive the re-entry information for - * the non-secure world from the non-secure state from - * where this call originated. 
+ * If a power down state has been requested, we need to verify entry + * point and program entry information. */ - rc = psci_get_ns_ep_info(&ep, entrypoint, context_id); - if (rc != PSCI_E_SUCCESS) - return rc; + if (is_power_down_state) { + if (psci_plat_pm_ops->validate_ns_entrypoint) { + rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint); + if (rc != PSCI_E_SUCCESS) { + assert(rc == PSCI_E_INVALID_PARAMS); + return rc; + } + } - /* Save PSCI power state parameter for the core in suspend context */ - psci_set_suspend_power_state(power_state); + /* + * Verify and derive the re-entry information for + * the non-secure world from the non-secure state from + * where this call originated. + */ + rc = psci_get_ns_ep_info(&ep, entrypoint, context_id); + if (rc != PSCI_E_SUCCESS) + return rc; + } /* * Do what is needed to enter the power down state. Upon success, - * enter the final wfi which will power down this CPU. + * enter the final wfi which will power down this CPU. This function + * might return if the power down was abandoned for any reason, e.g. + * arrival of an interrupt */ - psci_afflvl_suspend(&ep, - MPIDR_AFFLVL0, - target_afflvl); + psci_cpu_suspend_start(&ep, + target_pwrlvl, + &state_info, + is_power_down_state); - /* Reset PSCI power state parameter for the core. */ - psci_set_suspend_power_state(PSCI_INVALID_DATA); return PSCI_E_SUCCESS; } @@ -172,7 +177,7 @@ unsigned long context_id) { int rc; - unsigned int power_state; + psci_power_state_t state_info; entry_point_info_t ep; /* Validate the entrypoint using platform pm_ops */ @@ -180,7 +185,7 @@ rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint); if (rc != PSCI_E_SUCCESS) { assert(rc == PSCI_E_INVALID_PARAMS); - return PSCI_E_INVALID_PARAMS; + return rc; } } @@ -197,45 +202,39 @@ if (rc != PSCI_E_SUCCESS) return rc; - /* - * Assert that the required pm_ops hook is implemented to ensure that - * the capability detected during psci_setup() is valid. 
- */ - assert(psci_plat_pm_ops->get_sys_suspend_power_state); + /* Query the psci_power_state for system suspend */ + psci_query_sys_suspend_pwrstate(&state_info); + + /* Ensure that the psci_power_state makes sense */ + assert(psci_find_target_suspend_lvl(&state_info) == PLAT_MAX_PWR_LVL); + assert(psci_validate_suspend_req(&state_info, PSTATE_TYPE_POWERDOWN) + == PSCI_E_SUCCESS); + assert(is_local_state_off(state_info.pwr_domain_state[PLAT_MAX_PWR_LVL])); /* - * Query the platform for the power_state required for system suspend + * Do what is needed to enter the system suspend state. This function + * might return if the power down was abandoned for any reason, e.g. + * arrival of an interrupt */ - power_state = psci_plat_pm_ops->get_sys_suspend_power_state(); + psci_cpu_suspend_start(&ep, + PLAT_MAX_PWR_LVL, + &state_info, + PSTATE_TYPE_POWERDOWN); - /* Save PSCI power state parameter for the core in suspend context */ - psci_set_suspend_power_state(power_state); - - /* - * Do what is needed to enter the power down state. Upon success, - * enter the final wfi which will power down this cpu. - */ - psci_afflvl_suspend(&ep, - MPIDR_AFFLVL0, - PLATFORM_MAX_AFFLVL); - - /* Reset PSCI power state parameter for the core. */ - psci_set_suspend_power_state(PSCI_INVALID_DATA); return PSCI_E_SUCCESS; } int psci_cpu_off(void) { int rc; - int target_afflvl = PLATFORM_MAX_AFFLVL; + int target_pwrlvl = PLAT_MAX_PWR_LVL; /* - * Traverse from the highest to the lowest affinity level. When the - * lowest affinity level is hit, all the locks are acquired. State - * management is done immediately followed by cpu, cluster ... - * ..target_afflvl specific actions as this function unwinds back. + * Do what is needed to power off this CPU and possible higher power + * levels if it able to do so. Upon success, enter the final wfi + * which will power down this CPU. 
*/ - rc = psci_afflvl_off(MPIDR_AFFLVL0, target_afflvl); + rc = psci_do_cpu_off(target_pwrlvl); /* * The only error cpu_off can return is E_DENIED. So check if that's @@ -249,32 +248,18 @@ int psci_affinity_info(unsigned long target_affinity, unsigned int lowest_affinity_level) { - int rc = PSCI_E_INVALID_PARAMS; - unsigned int aff_state; - aff_map_node_t *node; + unsigned int target_idx; - if (lowest_affinity_level > PLATFORM_MAX_AFFLVL) - return rc; + /* We dont support level higher than PSCI_CPU_PWR_LVL */ + if (lowest_affinity_level > PSCI_CPU_PWR_LVL) + return PSCI_E_INVALID_PARAMS; - node = psci_get_aff_map_node(target_affinity, lowest_affinity_level); - if (node && (node->state & PSCI_AFF_PRESENT)) { + /* Calculate the cpu index of the target */ + target_idx = plat_core_pos_by_mpidr(target_affinity); + if (target_idx == -1) + return PSCI_E_INVALID_PARAMS; - /* - * TODO: For affinity levels higher than 0 i.e. cpu, the - * state will always be either ON or OFF. Need to investigate - * how critical is it to support ON_PENDING here. - */ - aff_state = psci_get_state(node); - - /* A suspended cpu is available & on for the OS */ - if (aff_state == PSCI_STATE_SUSPEND) { - aff_state = PSCI_STATE_ON; - } - - rc = aff_state; - } - - return rc; + return psci_get_aff_info_state_by_idx(target_idx); } int psci_migrate(unsigned long target_cpu) @@ -295,7 +280,7 @@ return PSCI_E_NOT_PRESENT; /* Check the validity of the specified target cpu */ - rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0); + rc = psci_validate_mpidr(target_cpu); if (rc != PSCI_E_SUCCESS) return PSCI_E_INVALID_PARAMS; @@ -352,10 +337,9 @@ if (psci_fid == PSCI_CPU_SUSPEND_AARCH32 || psci_fid == PSCI_CPU_SUSPEND_AARCH64) { /* - * The trusted firmware uses the original power state format - * and does not support OS Initiated Mode. + * The trusted firmware does not support OS Initiated Mode. 
*/ - return (FF_PSTATE_ORIG << FF_PSTATE_SHIFT) | + return (FF_PSTATE << FF_PSTATE_SHIFT) | ((!FF_SUPPORTS_OS_INIT_MODE) << FF_MODE_SUPPORT_SHIFT); } diff --git a/services/std_svc/psci/psci_off.c b/services/std_svc/psci/psci_off.c new file mode 100644 index 0000000..28fa52c --- /dev/null +++ b/services/std_svc/psci/psci_off.c @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include "psci_private.h" + +/****************************************************************************** + * Construct the psci_power_state to request power OFF at all power levels. + ******************************************************************************/ +static void psci_set_power_off_state(psci_power_state_t *state_info) +{ + int lvl; + + for (lvl = PSCI_CPU_PWR_LVL; lvl <= PLAT_MAX_PWR_LVL; lvl++) + state_info->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE; +} + +/****************************************************************************** + * Top level handler which is called when a cpu wants to power itself down. + * It's assumed that along with turning the cpu power domain off, power + * domains at higher levels will be turned off as far as possible. It finds + * the highest level where a domain has to be powered off by traversing the + * node information and then performs generic, architectural, platform setup + * and state management required to turn OFF that power domain and domains + * below it. e.g. For a cpu that's to be powered OFF, it could mean programming + * the power controller whereas for a cluster that's to be powered off, it will + * call the platform specific code which will disable coherency at the + * interconnect level if the cpu is the last in the cluster and also the + * program the power controller. + ******************************************************************************/ +int psci_do_cpu_off(int end_pwrlvl) +{ + int rc, idx = plat_my_core_pos(); + psci_power_state_t state_info; + + /* + * This function must only be called on platforms where the + * CPU_OFF platform hooks have been implemented. + */ + assert(psci_plat_pm_ops->pwr_domain_off); + + /* + * This function acquires the lock corresponding to each power + * level so that by the time all locks are taken, the system topology + * is snapshot and state management can be done safely. 
+ */ + psci_acquire_pwr_domain_locks(end_pwrlvl, + idx); + + /* + * Call the cpu off handler registered by the Secure Payload Dispatcher + * to let it do any bookkeeping. Assume that the SPD always reports an + * E_DENIED error if the SP refuses to power down + */ + if (psci_spd_pm && psci_spd_pm->svc_off) { + rc = psci_spd_pm->svc_off(0); + if (rc) + goto exit; + } + + /* Construct the psci_power_state for CPU_OFF */ + psci_set_power_off_state(&state_info); + + /* + * This function is passed the requested state info and + * it returns the negotiated state info for each power level up to + * the end level specified. + */ + psci_do_state_coordination(end_pwrlvl, &state_info); + + /* + * Arch. management. Perform the necessary steps to flush all + * cpu caches. + */ + psci_do_pwrdown_cache_maintenance(psci_find_max_off_lvl(&state_info)); + + /* + * Plat. management: Perform platform specific actions to turn this + * cpu off e.g. exit cpu coherency, program the power controller etc. + */ + psci_plat_pm_ops->pwr_domain_off(&state_info); + +exit: + /* + * Release the locks corresponding to each power level in the + * reverse order to which they were acquired. + */ + psci_release_pwr_domain_locks(end_pwrlvl, + idx); + + /* + * Set the affinity info state to OFF. This writes directly to main + * memory as caches are disabled, so cache maintenance is required + * to ensure that later cached reads of aff_info_state return + * AFF_STATE_OFF. + */ + flush_cpu_data(psci_svc_cpu_data.aff_info_state); + psci_set_aff_info_state(AFF_STATE_OFF); + inv_cpu_data(psci_svc_cpu_data.aff_info_state); + + /* + * Check if all actions needed to safely power down this cpu have + * successfully completed. Enter a wfi loop which will allow the + * power controller to physically power down this cpu. 
+ */ + if (rc == PSCI_E_SUCCESS) + psci_power_down_wfi(); + + return rc; +} diff --git a/services/std_svc/psci/psci_on.c b/services/std_svc/psci/psci_on.c new file mode 100644 index 0000000..d68198f --- /dev/null +++ b/services/std_svc/psci/psci_on.c @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "psci_private.h" + +/******************************************************************************* + * This function checks whether a cpu which has been requested to be turned on + * is OFF to begin with. + ******************************************************************************/ +static int cpu_on_validate_state(aff_info_state_t aff_state) +{ + if (aff_state == AFF_STATE_ON) + return PSCI_E_ALREADY_ON; + + if (aff_state == AFF_STATE_ON_PENDING) + return PSCI_E_ON_PENDING; + + assert(aff_state == AFF_STATE_OFF); + return PSCI_E_SUCCESS; +} + +/******************************************************************************* + * This function sets the aff_info_state in the per-cpu data of the CPU + * specified by cpu_idx + ******************************************************************************/ +static void psci_set_aff_info_state_by_idx(unsigned int cpu_idx, + aff_info_state_t aff_state) +{ + + set_cpu_data_by_index(cpu_idx, + psci_svc_cpu_data.aff_info_state, + aff_state); + + /* + * Flush aff_info_state as it will be accessed with caches turned OFF. + */ + flush_cpu_data_by_index(cpu_idx, psci_svc_cpu_data.aff_info_state); +} + +/******************************************************************************* + * Generic handler which is called to physically power on a cpu identified by + * its mpidr. It performs the generic, architectural, platform setup and state + * management to power on the target cpu e.g. it will ensure that + * enough information is stashed for it to resume execution in the non-secure + * security state. + * + * The state of all the relevant power domains are changed after calling the + * platform handler as it can return error. 
+ ******************************************************************************/ +int psci_cpu_on_start(unsigned long target_cpu, + entry_point_info_t *ep, + int end_pwrlvl) +{ + int rc; + unsigned int target_idx = plat_core_pos_by_mpidr(target_cpu); + + /* + * This function must only be called on platforms where the + * CPU_ON platform hooks have been implemented. + */ + assert(psci_plat_pm_ops->pwr_domain_on && + psci_plat_pm_ops->pwr_domain_on_finish); + + /* Protect against multiple CPUs trying to turn ON the same target CPU */ + psci_spin_lock_cpu(target_idx); + + /* + * Generic management: Ensure that the cpu is off to be + * turned on. + */ + rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx)); + if (rc != PSCI_E_SUCCESS) + goto exit; + + /* + * Call the cpu on handler registered by the Secure Payload Dispatcher + * to let it do any bookkeeping. If the handler encounters an error, it's + * expected to assert within + */ + if (psci_spd_pm && psci_spd_pm->svc_on) + psci_spd_pm->svc_on(target_cpu); + + /* + * Set the Affinity info state of the target cpu to ON_PENDING. + */ + psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING); + + /* + * Perform generic, architecture and platform specific handling. + */ + /* + * Plat. management: Give the platform the current state + * of the target cpu to allow it to perform the necessary + * steps to power on. + */ + rc = psci_plat_pm_ops->pwr_domain_on((u_register_t)target_cpu); + assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL); + + if (rc == PSCI_E_SUCCESS) + /* Store the re-entry information for the non-secure world. */ + cm_init_context_by_index(target_idx, ep); + else + /* Restore the state on error. */ + psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF); + +exit: + psci_spin_unlock_cpu(target_idx); + return rc; +} + +/******************************************************************************* + * The following functions finish an earlier power on request. 
They + * are called by the common finisher routine in psci_common.c. The `state_info` + * is the psci_power_state from which this CPU has woken up. + ******************************************************************************/ +void psci_cpu_on_finish(unsigned int cpu_idx, + psci_power_state_t *state_info) +{ + /* + * Plat. management: Perform the platform specific actions + * for this cpu e.g. enabling the gic or zeroing the mailbox + * register. The actual state of this cpu has already been + * changed. + */ + psci_plat_pm_ops->pwr_domain_on_finish(state_info); + + /* + * Arch. management: Enable data cache and manage stack memory + */ + psci_do_pwrup_cache_maintenance(); + + /* + * All the platform specific actions for turning this cpu + * on have completed. Perform enough arch.initialization + * to run in the non-secure address space. + */ + bl31_arch_setup(); + + /* + * Lock the CPU spin lock to make sure that the context initialization + * is done. Since the lock is only used in this function to create + * a synchronization point with cpu_on_start(), it can be released + * immediately. + */ + psci_spin_lock_cpu(cpu_idx); + psci_spin_unlock_cpu(cpu_idx); + + /* Ensure we have been explicitly woken up by another cpu */ + assert(psci_get_aff_info_state() == AFF_STATE_ON_PENDING); + + /* + * Call the cpu on finish handler registered by the Secure Payload + * Dispatcher to let it do any bookkeeping. If the handler encounters an + * error, it's expected to assert within + */ + if (psci_spd_pm && psci_spd_pm->svc_on_finish) + psci_spd_pm->svc_on_finish(0); + + /* Populate the mpidr field within the cpu node array */ + /* This needs to be done only once */ + psci_cpu_pd_nodes[cpu_idx].mpidr = read_mpidr() & MPIDR_AFFINITY_MASK; + + /* + * Generic management: Now we just need to retrieve the + * information that we had stashed away during the cpu_on + * call to set this cpu on its way. 
+ */ + cm_prepare_el3_exit(NON_SECURE); + + /* Clean caches before re-entering normal world */ + dcsw_op_louis(DCCSW); +} diff --git a/services/std_svc/psci/psci_private.h b/services/std_svc/psci/psci_private.h index 2955de7..e2e32c7 100644 --- a/services/std_svc/psci/psci_private.h +++ b/services/std_svc/psci/psci_private.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -34,22 +34,30 @@ #include #include #include +#include #include +#include /* * The following helper macros abstract the interface to the Bakery * Lock API. */ #if USE_COHERENT_MEM -#define psci_lock_init(aff_map, idx) bakery_lock_init(&(aff_map)[(idx)].lock) -#define psci_lock_get(node) bakery_lock_get(&((node)->lock)) -#define psci_lock_release(node) bakery_lock_release(&((node)->lock)) +#define psci_lock_init(non_cpu_pd_node, idx) \ + bakery_lock_init(&(non_cpu_pd_node)[(idx)].lock) +#define psci_lock_get(non_cpu_pd_node) \ + bakery_lock_get(&((non_cpu_pd_node)->lock)) +#define psci_lock_release(non_cpu_pd_node) \ + bakery_lock_release(&((non_cpu_pd_node)->lock)) #else -#define psci_lock_init(aff_map, idx) ((aff_map)[(idx)].aff_map_index = (idx)) -#define psci_lock_get(node) bakery_lock_get((node)->aff_map_index, \ - CPU_DATA_PSCI_LOCK_OFFSET) -#define psci_lock_release(node) bakery_lock_release((node)->aff_map_index,\ - CPU_DATA_PSCI_LOCK_OFFSET) +#define psci_lock_init(non_cpu_pd_node, idx) \ + ((non_cpu_pd_node)[(idx)].lock_index = (idx)) +#define psci_lock_get(non_cpu_pd_node) \ + bakery_lock_get((non_cpu_pd_node)->lock_index, \ + CPU_DATA_PSCI_LOCK_OFFSET) +#define psci_lock_release(non_cpu_pd_node) \ + bakery_lock_release((non_cpu_pd_node)->lock_index, \ + CPU_DATA_PSCI_LOCK_OFFSET) #endif /* @@ -72,38 +80,98 @@ 
define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) | \ define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64)) +/* + * Helper macros to get/set the fields of PSCI per-cpu data. + */ +#define psci_set_aff_info_state(aff_state) \ + set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state) +#define psci_get_aff_info_state() \ + get_cpu_data(psci_svc_cpu_data.aff_info_state) +#define psci_get_aff_info_state_by_idx(idx) \ + get_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state) +#define psci_get_suspend_pwrlvl() \ + get_cpu_data(psci_svc_cpu_data.target_pwrlvl) +#define psci_set_suspend_pwrlvl(target_lvl) \ + set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl) +#define psci_set_cpu_local_state(state) \ + set_cpu_data(psci_svc_cpu_data.local_state, state) +#define psci_get_cpu_local_state() \ + get_cpu_data(psci_svc_cpu_data.local_state) +#define psci_get_cpu_local_state_by_idx(idx) \ + get_cpu_data_by_index(idx, psci_svc_cpu_data.local_state) + +/* + * Helper macros for the CPU level spinlocks + */ +#define psci_spin_lock_cpu(idx) spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock) +#define psci_spin_unlock_cpu(idx) spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock) + +/* Helper macro to identify a CPU standby request in PSCI Suspend call */ +#define is_cpu_standby_req(is_power_down_state, retn_lvl) \ + (((!(is_power_down_state)) && ((retn_lvl) == 0)) ? 1 : 0) /******************************************************************************* - * The following two data structures hold the topology tree which in turn tracks - * the state of the all the affinity instances supported by the platform. + * The following two data structures implement the power domain tree. The tree + * is used to track the state of all the nodes i.e. power domain instances + * described by the platform. The tree consists of nodes that describe CPU power + * domains i.e. leaf nodes and all other power domains which are parents of a + * CPU power domain i.e. non-leaf nodes. 
 ******************************************************************************/
-typedef struct aff_map_node {
- unsigned long mpidr;
- unsigned char ref_count;
- unsigned char state;
+typedef struct non_cpu_pwr_domain_node {
+ /*
+ * Index of the first CPU power domain node level 0 which has this node
+ * as its parent.
+ */
+ unsigned int cpu_start_idx;
+
+ /*
+ * Number of CPU power domains which are siblings of the domain indexed
+ * by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
+ * -> cpu_start_idx + ncpus' have this node as their parent.
+ */
+ unsigned int ncpus;
+
+ /*
+ * Index of the parent power domain node.
+ * TODO: Figure out whether using a pointer is more efficient.
+ */
+ unsigned int parent_node;
+
+ plat_local_state_t local_state;
+ unsigned char level;
 #if USE_COHERENT_MEM
 bakery_lock_t lock;
 #else
 /* For indexing the bakery_info array in per CPU data */
- unsigned char aff_map_index;
+ unsigned char lock_index;
 #endif
-} aff_map_node_t;
+} non_cpu_pd_node_t;

-typedef struct aff_limits_node {
- int min;
- int max;
-} aff_limits_node_t;
+typedef struct cpu_pwr_domain_node {
+ unsigned long mpidr;

-typedef aff_map_node_t (*mpidr_aff_map_nodes_t[MPIDR_MAX_AFFLVL + 1]);
-typedef void (*afflvl_power_on_finisher_t)(aff_map_node_t *);
+ /*
+ * Index of the parent power domain node.
+ * TODO: Figure out whether using a pointer is more efficient.
+ */
+ unsigned int parent_node;
+
+ /*
+ * A CPU power domain does not require state coordination like its
+ * parent power domains. Hence this node does not include a bakery
+ * lock. A spinlock is required by the CPU_ON handler to prevent a race
+ * when multiple CPUs try to turn ON the same target CPU.
+ */ + spinlock_t cpu_lock; +} cpu_pd_node_t; /******************************************************************************* * Data prototypes ******************************************************************************/ -extern const plat_pm_ops_t *psci_plat_pm_ops; -extern aff_map_node_t psci_aff_map[PSCI_NUM_AFFS]; -extern aff_limits_node_t psci_aff_limits[MPIDR_MAX_AFFLVL + 1]; +extern const plat_psci_ops_t *psci_plat_pm_ops; +extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]; +extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT]; extern uint32_t psci_caps; /******************************************************************************* @@ -115,62 +183,54 @@ * Function prototypes ******************************************************************************/ /* Private exported functions from psci_common.c */ -unsigned short psci_get_state(aff_map_node_t *node); -unsigned short psci_get_phys_state(aff_map_node_t *node); -void psci_set_state(aff_map_node_t *node, unsigned short state); -unsigned long mpidr_set_aff_inst(unsigned long, unsigned char, int); -int psci_validate_mpidr(unsigned long, int); -int get_power_on_target_afflvl(void); -void psci_afflvl_power_on_finish(int, - int, - afflvl_power_on_finisher_t *); +int psci_validate_power_state(unsigned int power_state, + psci_power_state_t *state_info); +void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info); +int psci_validate_mpidr(unsigned long mpidr); +void psci_init_req_local_pwr_states(void); +void psci_power_up_finish(void); int psci_get_ns_ep_info(entry_point_info_t *ep, uint64_t entrypoint, uint64_t context_id); -int psci_check_afflvl_range(int start_afflvl, int end_afflvl); -void psci_do_afflvl_state_mgmt(uint32_t start_afflvl, - uint32_t end_afflvl, - aff_map_node_t *mpidr_nodes[], - uint32_t state); -void psci_acquire_afflvl_locks(int start_afflvl, - int end_afflvl, - aff_map_node_t *mpidr_nodes[]); -void psci_release_afflvl_locks(int 
start_afflvl, - int end_afflvl, - mpidr_aff_map_nodes_t mpidr_nodes); -void psci_print_affinity_map(void); -void psci_set_max_phys_off_afflvl(uint32_t afflvl); -uint32_t psci_find_max_phys_off_afflvl(uint32_t start_afflvl, - uint32_t end_afflvl, - aff_map_node_t *mpidr_nodes[]); +void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx, + int end_lvl, + unsigned int node_index[]); +void psci_do_state_coordination(int end_pwrlvl, + psci_power_state_t *state_info); +void psci_acquire_pwr_domain_locks(int end_pwrlvl, + unsigned int cpu_idx); +void psci_release_pwr_domain_locks(int end_pwrlvl, + unsigned int cpu_idx); +int psci_validate_suspend_req(const psci_power_state_t *state_info, + unsigned int is_power_down_state_req); +unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info); +unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info); +void psci_set_pwr_domains_to_run(uint32_t end_pwrlvl); +void psci_print_power_domain_map(void); unsigned int psci_is_last_on_cpu(void); int psci_spd_migrate_info(uint64_t *mpidr); -/* Private exported functions from psci_setup.c */ -int psci_get_aff_map_nodes(unsigned long mpidr, - int start_afflvl, - int end_afflvl, - aff_map_node_t *mpidr_nodes[]); -aff_map_node_t *psci_get_aff_map_node(unsigned long, int); +/* Private exported functions from psci_on.c */ +int psci_cpu_on_start(unsigned long target_cpu, + entry_point_info_t *ep, + int end_pwrlvl); -/* Private exported functions from psci_affinity_on.c */ -int psci_afflvl_on(unsigned long target_cpu, - entry_point_info_t *ep, - int start_afflvl, - int end_afflvl); +void psci_cpu_on_finish(unsigned int cpu_idx, + psci_power_state_t *state_info); -/* Private exported functions from psci_affinity_off.c */ -int psci_afflvl_off(int, int); +/* Private exported functions from psci_cpu_off.c */ +int psci_do_cpu_off(int end_pwrlvl); -/* Private exported functions from psci_affinity_suspend.c */ -void psci_afflvl_suspend(entry_point_info_t *ep, - 
int start_afflvl, - int end_afflvl); +/* Private exported functions from psci_pwrlvl_suspend.c */ +void psci_cpu_suspend_start(entry_point_info_t *ep, + int end_pwrlvl, + psci_power_state_t *state_info, + unsigned int is_power_down_state_req); -unsigned int psci_afflvl_suspend_finish(int, int); -void psci_set_suspend_power_state(unsigned int power_state); +void psci_cpu_suspend_finish(unsigned int cpu_idx, + psci_power_state_t *state_info); /* Private exported functions from psci_helpers.S */ -void psci_do_pwrdown_cache_maintenance(uint32_t affinity_level); +void psci_do_pwrdown_cache_maintenance(uint32_t pwr_level); void psci_do_pwrup_cache_maintenance(void); /* Private exported functions from psci_system_off.c */ diff --git a/services/std_svc/psci/psci_setup.c b/services/std_svc/psci/psci_setup.c index 01b559c..ce4da95 100644 --- a/services/std_svc/psci/psci_setup.c +++ b/services/std_svc/psci/psci_setup.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -42,351 +42,223 @@ * Per cpu non-secure contexts used to program the architectural state prior * return to the normal world. * TODO: Use the memory allocator to set aside memory for the contexts instead - * of relying on platform defined constants. Using PSCI_NUM_AFFS will be an - * overkill. + * of relying on platform defined constants. ******************************************************************************/ static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT]; -/******************************************************************************* - * In a system, a certain number of affinity instances are present at an - * affinity level. The cumulative number of instances across all levels are - * stored in 'psci_aff_map'. 
The topology tree has been flattenned into this - * array. To retrieve nodes, information about the extents of each affinity - * level i.e. start index and end index needs to be present. 'psci_aff_limits' - * stores this information. - ******************************************************************************/ -aff_limits_node_t psci_aff_limits[MPIDR_MAX_AFFLVL + 1]; - /****************************************************************************** * Define the psci capability variable. *****************************************************************************/ uint32_t psci_caps; - /******************************************************************************* - * Routines for retrieving the node corresponding to an affinity level instance - * in the mpidr. The first one uses binary search to find the node corresponding - * to the mpidr (key) at a particular affinity level. The second routine decides - * extents of the binary search at each affinity level. + * Function which initializes the 'psci_non_cpu_pd_nodes' or the + * 'psci_cpu_pd_nodes' corresponding to the power level. ******************************************************************************/ -static int psci_aff_map_get_idx(unsigned long key, - int min_idx, - int max_idx) +static void psci_init_pwr_domain_node(int node_idx, int parent_idx, int level) { - int mid; + if (level > PSCI_CPU_PWR_LVL) { + psci_non_cpu_pd_nodes[node_idx].level = level; + psci_lock_init(psci_non_cpu_pd_nodes, node_idx); + psci_non_cpu_pd_nodes[node_idx].parent_node = parent_idx; + psci_non_cpu_pd_nodes[node_idx].local_state = + PLAT_MAX_OFF_STATE; + } else { + psci_cpu_data_t *svc_cpu_data; - /* - * Terminating condition: If the max and min indices have crossed paths - * during the binary search then the key has not been found. - */ - if (max_idx < min_idx) - return PSCI_E_INVALID_PARAMS; + psci_cpu_pd_nodes[node_idx].parent_node = parent_idx; - /* - * Make sure we are within array limits. 
- */ - assert(min_idx >= 0 && max_idx < PSCI_NUM_AFFS); + /* Initialize with an invalid mpidr */ + psci_cpu_pd_nodes[node_idx].mpidr = PSCI_INVALID_MPIDR; - /* - * Bisect the array around 'mid' and then recurse into the array chunk - * where the key is likely to be found. The mpidrs in each node in the - * 'psci_aff_map' for a given affinity level are stored in an ascending - * order which makes the binary search possible. - */ - mid = min_idx + ((max_idx - min_idx) >> 1); /* Divide by 2 */ + svc_cpu_data = + &(_cpu_data_by_index(node_idx)->psci_svc_cpu_data); - if (psci_aff_map[mid].mpidr > key) - return psci_aff_map_get_idx(key, min_idx, mid - 1); - else if (psci_aff_map[mid].mpidr < key) - return psci_aff_map_get_idx(key, mid + 1, max_idx); - else - return mid; -} + /* Set the Affinity Info for the cores as OFF */ + svc_cpu_data->aff_info_state = AFF_STATE_OFF; -aff_map_node_t *psci_get_aff_map_node(unsigned long mpidr, int aff_lvl) -{ - int rc; + /* Invalidate the suspend level for the cpu */ + svc_cpu_data->target_pwrlvl = PSCI_INVALID_DATA; - if (aff_lvl > PLATFORM_MAX_AFFLVL) - return NULL; + /* Set the power state to OFF state */ + svc_cpu_data->local_state = PLAT_MAX_OFF_STATE; - /* Right shift the mpidr to the required affinity level */ - mpidr = mpidr_mask_lower_afflvls(mpidr, aff_lvl); + flush_dcache_range((uint64_t)svc_cpu_data, + sizeof(*svc_cpu_data)); - rc = psci_aff_map_get_idx(mpidr, - psci_aff_limits[aff_lvl].min, - psci_aff_limits[aff_lvl].max); - if (rc >= 0) - return &psci_aff_map[rc]; - else - return NULL; -} - -/******************************************************************************* - * This function populates an array with nodes corresponding to a given range of - * affinity levels in an mpidr. It returns successfully only when the affinity - * levels are correct, the mpidr is valid i.e. no affinity level is absent from - * the topology tree & the affinity instance at level 0 is not absent. 
- ******************************************************************************/ -int psci_get_aff_map_nodes(unsigned long mpidr, - int start_afflvl, - int end_afflvl, - aff_map_node_t *mpidr_nodes[]) -{ - int rc = PSCI_E_INVALID_PARAMS, level; - aff_map_node_t *node; - - rc = psci_check_afflvl_range(start_afflvl, end_afflvl); - if (rc != PSCI_E_SUCCESS) - return rc; - - for (level = start_afflvl; level <= end_afflvl; level++) { - - /* - * Grab the node for each affinity level. No affinity level - * can be missing as that would mean that the topology tree - * is corrupted. - */ - node = psci_get_aff_map_node(mpidr, level); - if (node == NULL) { - rc = PSCI_E_INVALID_PARAMS; - break; - } - - /* - * Skip absent affinity levels unless it's afffinity level 0. - * An absent cpu means that the mpidr is invalid. Save the - * pointer to the node for the present affinity level - */ - if (!(node->state & PSCI_AFF_PRESENT)) { - if (level == MPIDR_AFFLVL0) { - rc = PSCI_E_INVALID_PARAMS; - break; - } - - mpidr_nodes[level] = NULL; - } else - mpidr_nodes[level] = node; - } - - return rc; -} - -/******************************************************************************* - * Function which initializes the 'aff_map_node' corresponding to an affinity - * level instance. Each node has a unique mpidr, level and bakery lock. The data - * field is opaque and holds affinity level specific data e.g. for affinity - * level 0 it contains the index into arrays that hold the secure/non-secure - * state for a cpu that's been turned on/off - ******************************************************************************/ -static void psci_init_aff_map_node(unsigned long mpidr, - int level, - unsigned int idx) -{ - unsigned char state; - uint32_t linear_id; - psci_aff_map[idx].mpidr = mpidr; - psci_aff_map[idx].level = level; - psci_lock_init(psci_aff_map, idx); - - /* - * If an affinity instance is present then mark it as OFF to begin with. 
- */ - state = plat_get_aff_state(level, mpidr); - psci_aff_map[idx].state = state; - - if (level == MPIDR_AFFLVL0) { - - /* - * Mark the cpu as OFF. Higher affinity level reference counts - * have already been memset to 0 - */ - if (state & PSCI_AFF_PRESENT) - psci_set_state(&psci_aff_map[idx], PSCI_STATE_OFF); - - /* - * Associate a non-secure context with this affinity - * instance through the context management library. - */ - linear_id = platform_get_core_pos(mpidr); - assert(linear_id < PLATFORM_CORE_COUNT); - - /* Invalidate the suspend context for the node */ - set_cpu_data_by_index(linear_id, - psci_svc_cpu_data.power_state, - PSCI_INVALID_DATA); - - /* - * There is no state associated with the current execution - * context so ensure that any reads of the highest affinity - * level in a powered down state return PSCI_INVALID_DATA. - */ - set_cpu_data_by_index(linear_id, - psci_svc_cpu_data.max_phys_off_afflvl, - PSCI_INVALID_DATA); - - flush_cpu_data_by_index(linear_id, psci_svc_cpu_data); - - cm_set_context_by_mpidr(mpidr, - (void *) &psci_ns_context[linear_id], + cm_set_context_by_index(node_idx, + (void *) &psci_ns_context[node_idx], NON_SECURE); } - - return; } /******************************************************************************* - * Core routine used by the Breadth-First-Search algorithm to populate the - * affinity tree. Each level in the tree corresponds to an affinity level. This - * routine's aim is to traverse to the target affinity level and populate nodes - * in the 'psci_aff_map' for all the siblings at that level. It uses the current - * affinity level to keep track of how many levels from the root of the tree - * have been traversed. If the current affinity level != target affinity level, - * then the platform is asked to return the number of children that each - * affinity instance has at the current affinity level. Traversal is then done - * for each child at the next lower level i.e. current affinity level - 1. 
- *
- * CAUTION: This routine assumes that affinity instance ids are allocated in a
- * monotonically increasing manner at each affinity level in a mpidr starting
- * from 0. If the platform breaks this assumption then this code will have to
- * be reworked accordingly.
- ******************************************************************************/
-static unsigned int psci_init_aff_map(unsigned long mpidr,
- unsigned int affmap_idx,
- int cur_afflvl,
- int tgt_afflvl)
+ * This function updates the cpu_start_idx and ncpus fields for each node in
+ * psci_non_cpu_pd_nodes[]. It does so by comparing the parent nodes of each of
+ * the CPUs and checks whether they match with the parent of the previous
+ * CPU. The basic assumption for this work is that children of the same parent
+ * are allocated adjacent indices. The platform should ensure this through
+ * proper mapping of the CPUs to indices via plat_core_pos_by_mpidr() and
+ * plat_my_core_pos() APIs.
+ *******************************************************************************/
+static void psci_update_pwrlvl_limits(void)
 {
- unsigned int ctr, aff_count;
+ int cpu_idx, j;
+ unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0};
+ unsigned int temp_index[PLAT_MAX_PWR_LVL];

- assert(cur_afflvl >= tgt_afflvl);
+ for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
+ psci_get_parent_pwr_domain_nodes(cpu_idx,
+ PLAT_MAX_PWR_LVL,
+ temp_index);
+ for (j = PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
+ if (temp_index[j] != nodes_idx[j]) {
+ nodes_idx[j] = temp_index[j];
+ psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx
+ = cpu_idx;
+ }
+ psci_non_cpu_pd_nodes[nodes_idx[j]].ncpus++;
+ }
+ }
+}
+
+/*******************************************************************************
+ * Core routine to populate the power domain tree. The tree descriptor passed by
+ * the platform is populated breadth-first and the first entry in the map
+ * informs the number of root power domains.
The parent nodes of the root nodes + * will point to an invalid entry(-1). + ******************************************************************************/ +static void populate_power_domain_tree(const unsigned char *topology) +{ + unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl; + unsigned int node_index = 0, parent_node_index = 0, num_children; + int level = PLAT_MAX_PWR_LVL; /* - * Find the number of siblings at the current affinity level & - * assert if there are none 'cause then we have been invoked with - * an invalid mpidr. + * For each level the inputs are: + * - number of nodes at this level in plat_array i.e. num_nodes_at_level + * This is the sum of values of nodes at the parent level. + * - Index of first entry at this level in the plat_array i.e. + * parent_node_index. + * - Index of first free entry in psci_non_cpu_pd_nodes[] or + * psci_cpu_pd_nodes[] i.e. node_index depending upon the level. */ - aff_count = plat_get_aff_count(cur_afflvl, mpidr); - assert(aff_count); + while (level >= PSCI_CPU_PWR_LVL) { + num_nodes_at_next_lvl = 0; + /* + * For each entry (parent node) at this level in the plat_array: + * - Find the number of children + * - Allocate a node in a power domain array for each child + * - Set the parent of the child to the parent_node_index - 1 + * - Increment parent_node_index to point to the next parent + * - Accumulate the number of children at next level. 
+ */ + for (i = 0; i < num_nodes_at_lvl; i++) { + assert(parent_node_index <= + PSCI_NUM_NON_CPU_PWR_DOMAINS); + num_children = topology[parent_node_index]; - if (tgt_afflvl < cur_afflvl) { - for (ctr = 0; ctr < aff_count; ctr++) { - mpidr = mpidr_set_aff_inst(mpidr, ctr, cur_afflvl); - affmap_idx = psci_init_aff_map(mpidr, - affmap_idx, - cur_afflvl - 1, - tgt_afflvl); - } - } else { - for (ctr = 0; ctr < aff_count; ctr++, affmap_idx++) { - mpidr = mpidr_set_aff_inst(mpidr, ctr, cur_afflvl); - psci_init_aff_map_node(mpidr, cur_afflvl, affmap_idx); + for (j = node_index; + j < node_index + num_children; j++) + psci_init_pwr_domain_node(j, + parent_node_index - 1, + level); + + node_index = j; + num_nodes_at_next_lvl += num_children; + parent_node_index++; } - /* affmap_idx is 1 greater than the max index of cur_afflvl */ - psci_aff_limits[cur_afflvl].max = affmap_idx - 1; + num_nodes_at_lvl = num_nodes_at_next_lvl; + level--; + + /* Reset the index for the cpu power domain array */ + if (level == PSCI_CPU_PWR_LVL) + node_index = 0; } - return affmap_idx; + /* Validate the sanity of array exported by the platform */ + assert(j == PLATFORM_CORE_COUNT); + +#if !USE_COHERENT_MEM + /* Flush the non CPU power domain data to memory */ + flush_dcache_range((uint64_t) &psci_non_cpu_pd_nodes, + sizeof(psci_non_cpu_pd_nodes)); +#endif } /******************************************************************************* - * This function initializes the topology tree by querying the platform. To do - * so, it's helper routines implement a Breadth-First-Search. At each affinity - * level the platform conveys the number of affinity instances that exist i.e. - * the affinity count. The algorithm populates the psci_aff_map recursively - * using this information. On a platform that implements two clusters of 4 cpus - * each, the populated aff_map_array would look like this: + * This function initializes the power domain topology tree by querying the + * platform. 
The power domain nodes higher than the CPU are populated in the + * array psci_non_cpu_pd_nodes[] and the CPU power domains are populated in + * psci_cpu_pd_nodes[]. The platform exports its static topology map through the + * populate_power_domain_topology_tree() API. The algorithm populates the + * psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this + * topology map. On a platform that implements two clusters of 2 cpus each, and + * supporting 3 domain levels, the populated psci_non_cpu_pd_nodes would look + * like this: * - * <- cpus cluster0 -><- cpus cluster1 -> * --------------------------------------------------- - * | 0 | 1 | 0 | 1 | 2 | 3 | 0 | 1 | 2 | 3 | + * | system node | cluster 0 node | cluster 1 node | * --------------------------------------------------- - * ^ ^ - * cluster __| cpu __| - * limit limit * - * The first 2 entries are of the cluster nodes. The next 4 entries are of cpus - * within cluster 0. The last 4 entries are of cpus within cluster 1. - * The 'psci_aff_limits' array contains the max & min index of each affinity - * level within the 'psci_aff_map' array. This allows restricting search of a - * node at an affinity level between the indices in the limits array. 
+ * And populated psci_cpu_pd_nodes would look like this : + * <- cpus cluster0 -><- cpus cluster1 -> + * ------------------------------------------------ + * | CPU 0 | CPU 1 | CPU 2 | CPU 3 | + * ------------------------------------------------ ******************************************************************************/ int32_t psci_setup(void) { - unsigned long mpidr = read_mpidr(); - int afflvl, affmap_idx, max_afflvl; - aff_map_node_t *node; + const unsigned char *topology_tree; - psci_plat_pm_ops = NULL; + /* Query the topology map from the platform */ + topology_tree = plat_get_power_domain_tree_desc(); - /* Find out the maximum affinity level that the platform implements */ - max_afflvl = PLATFORM_MAX_AFFLVL; - assert(max_afflvl <= MPIDR_MAX_AFFLVL); + /* Populate the power domain arrays using the platform topology map */ + populate_power_domain_tree(topology_tree); - /* - * This call traverses the topology tree with help from the platform and - * populates the affinity map using a breadth-first-search recursively. - * We assume that the platform allocates affinity instance ids from 0 - * onwards at each affinity level in the mpidr. FIRST_MPIDR = 0.0.0.0 - */ - affmap_idx = 0; - for (afflvl = max_afflvl; afflvl >= MPIDR_AFFLVL0; afflvl--) { - affmap_idx = psci_init_aff_map(FIRST_MPIDR, - affmap_idx, - max_afflvl, - afflvl); - } + /* Update the CPU limits for each node in psci_non_cpu_pd_nodes */ + psci_update_pwrlvl_limits(); + + /* Populate the mpidr field of cpu node for this CPU */ + psci_cpu_pd_nodes[plat_my_core_pos()].mpidr = + read_mpidr() & MPIDR_AFFINITY_MASK; #if !USE_COHERENT_MEM /* - * The psci_aff_map only needs flushing when it's not allocated in + * The psci_non_cpu_pd_nodes only needs flushing when it's not allocated in * coherent memory. 
*/ - flush_dcache_range((uint64_t) &psci_aff_map, sizeof(psci_aff_map)); + flush_dcache_range((uint64_t) &psci_non_cpu_pd_nodes, + sizeof(psci_non_cpu_pd_nodes)); #endif - /* - * Set the bounds for the affinity counts of each level in the map. Also - * flush out the entire array so that it's visible to subsequent power - * management operations. The 'psci_aff_limits' array is allocated in - * normal memory. It will be accessed when the mmu is off e.g. after - * reset. Hence it needs to be flushed. - */ - for (afflvl = MPIDR_AFFLVL0; afflvl < max_afflvl; afflvl++) { - psci_aff_limits[afflvl].min = - psci_aff_limits[afflvl + 1].max + 1; - } + flush_dcache_range((uint64_t) &psci_cpu_pd_nodes, + sizeof(psci_cpu_pd_nodes)); - flush_dcache_range((unsigned long) psci_aff_limits, - sizeof(psci_aff_limits)); + psci_init_req_local_pwr_states(); /* - * Mark the affinity instances in our mpidr as ON. No need to lock as - * this is the primary cpu. + * Set the requested and target state of this CPU and all the higher + * power domain levels for this CPU to run. */ - mpidr &= MPIDR_AFFINITY_MASK; - for (afflvl = MPIDR_AFFLVL0; afflvl <= max_afflvl; afflvl++) { + psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL); - node = psci_get_aff_map_node(mpidr, afflvl); - assert(node); - - /* Mark each present node as ON. 
*/ - if (node->state & PSCI_AFF_PRESENT) - psci_set_state(node, PSCI_STATE_ON); - } - - platform_setup_pm(&psci_plat_pm_ops); + plat_setup_psci_ops((uintptr_t)psci_entrypoint, + &psci_plat_pm_ops); assert(psci_plat_pm_ops); /* Initialize the psci capability */ psci_caps = PSCI_GENERIC_CAP; - if (psci_plat_pm_ops->affinst_off) + if (psci_plat_pm_ops->pwr_domain_off) psci_caps |= define_psci_cap(PSCI_CPU_OFF); - if (psci_plat_pm_ops->affinst_on && psci_plat_pm_ops->affinst_on_finish) + if (psci_plat_pm_ops->pwr_domain_on && + psci_plat_pm_ops->pwr_domain_on_finish) psci_caps |= define_psci_cap(PSCI_CPU_ON_AARCH64); - if (psci_plat_pm_ops->affinst_suspend && - psci_plat_pm_ops->affinst_suspend_finish) { + if (psci_plat_pm_ops->pwr_domain_suspend && + psci_plat_pm_ops->pwr_domain_suspend_finish) { psci_caps |= define_psci_cap(PSCI_CPU_SUSPEND_AARCH64); if (psci_plat_pm_ops->get_sys_suspend_power_state) psci_caps |= define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64); diff --git a/services/std_svc/psci/psci_suspend.c b/services/std_svc/psci/psci_suspend.c new file mode 100644 index 0000000..71e4778 --- /dev/null +++ b/services/std_svc/psci/psci_suspend.c @@ -0,0 +1,265 @@ +/* + * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "psci_private.h" + +/******************************************************************************* + * This function does generic and platform specific operations after a wake-up + * from standby/retention states at multiple power levels. + ******************************************************************************/ +static void psci_suspend_to_standby_finisher(unsigned int cpu_idx, + psci_power_state_t *state_info, + unsigned int end_pwrlvl) +{ + psci_acquire_pwr_domain_locks(end_pwrlvl, + cpu_idx); + + /* + * Plat. management: Allow the platform to do operations + * on waking up from retention. + */ + psci_plat_pm_ops->pwr_domain_suspend_finish(state_info); + + /* + * Set the requested and target state of this CPU and all the higher + * power domain levels for this CPU to run. 
+ */ + psci_set_pwr_domains_to_run(end_pwrlvl); + + psci_release_pwr_domain_locks(end_pwrlvl, + cpu_idx); +} + +/******************************************************************************* + * This function does generic and platform specific suspend to power down + * operations. + ******************************************************************************/ +static void psci_suspend_to_pwrdown_start(int end_pwrlvl, + entry_point_info_t *ep, + psci_power_state_t *state_info) +{ + /* Save PSCI target power level for the suspend finisher handler */ + psci_set_suspend_pwrlvl(end_pwrlvl); + + /* + * Flush the target power level as it will be accessed on power up with + * Data cache disabled. + */ + flush_cpu_data(psci_svc_cpu_data.target_pwrlvl); + + /* + * Call the cpu suspend handler registered by the Secure Payload + * Dispatcher to let it do any book-keeping. If the handler encounters an + * error, it's expected to assert within + */ + if (psci_spd_pm && psci_spd_pm->svc_suspend) + psci_spd_pm->svc_suspend(0); + + /* + * Store the re-entry information for the non-secure world. + */ + cm_init_my_context(ep); + + /* + * Arch. management. Perform the necessary steps to flush all + * cpu caches. Currently we assume that the power level correspond + * the cache level. + * TODO : Introduce a mechanism to query the cache level to flush + * and the cpu-ops power down to perform from the platform. + */ + psci_do_pwrdown_cache_maintenance(psci_find_max_off_lvl(state_info)); +} + +/******************************************************************************* + * Top level handler which is called when a cpu wants to suspend its execution. + * It is assumed that along with suspending the cpu power domain, power domains + * at higher levels until the target power level will be suspended as well. It + * coordinates with the platform to negotiate the target state for each of + * the power domain level till the target power domain level. 
It then performs + * generic, architectural, platform setup and state management required to + * suspend that power domain level and power domain levels below it. + * e.g. For a cpu that's to be suspended, it could mean programming the + * power controller whereas for a cluster that's to be suspended, it will call + * the platform specific code which will disable coherency at the interconnect + * level if the cpu is the last in the cluster and also the program the power + * controller. + * + * All the required parameter checks are performed at the beginning and after + * the state transition has been done, no further error is expected and it is + * not possible to undo any of the actions taken beyond that point. + ******************************************************************************/ +void psci_cpu_suspend_start(entry_point_info_t *ep, + int end_pwrlvl, + psci_power_state_t *state_info, + unsigned int is_power_down_state) +{ + int skip_wfi = 0; + unsigned int idx = plat_my_core_pos(); + + /* + * This function must only be called on platforms where the + * CPU_SUSPEND platform hooks have been implemented. + */ + assert(psci_plat_pm_ops->pwr_domain_suspend && + psci_plat_pm_ops->pwr_domain_suspend_finish); + + /* + * This function acquires the lock corresponding to each power + * level so that by the time all locks are taken, the system topology + * is snapshot and state management can be done safely. + */ + psci_acquire_pwr_domain_locks(end_pwrlvl, + idx); + + /* + * We check if there are any pending interrupts after the delay + * introduced by lock contention to increase the chances of early + * detection that a wake-up interrupt has fired. + */ + if (read_isr_el1()) { + skip_wfi = 1; + goto exit; + } + + /* + * This function is passed the requested state info and + * it returns the negotiated state info for each power level upto + * the end level specified. 
+ */ + psci_do_state_coordination(end_pwrlvl, state_info); + + if (is_power_down_state) + psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info); + + /* + * Plat. management: Allow the platform to perform the + * necessary actions to turn off this cpu e.g. set the + * platform defined mailbox with the psci entrypoint, + * program the power controller etc. + */ + psci_plat_pm_ops->pwr_domain_suspend(state_info); + +exit: + /* + * Release the locks corresponding to each power level in the + * reverse order to which they were acquired. + */ + psci_release_pwr_domain_locks(end_pwrlvl, + idx); + if (skip_wfi) + return; + + if (is_power_down_state) + psci_power_down_wfi(); + + /* + * We will reach here if only retention/standby states have been + * requested at multiple power levels. This means that the cpu + * context will be preserved. + */ + wfi(); + + /* + * After we wake up from context retaining suspend, call the + * context retaining suspend finisher. + */ + psci_suspend_to_standby_finisher(idx, state_info, end_pwrlvl); +} + +/******************************************************************************* + * The following functions finish an earlier suspend request. They + * are called by the common finisher routine in psci_common.c. The `state_info` + * is the psci_power_state from which this CPU has woken up from. + ******************************************************************************/ +void psci_cpu_suspend_finish(unsigned int cpu_idx, + psci_power_state_t *state_info) +{ + int32_t suspend_level; + uint64_t counter_freq; + + /* Ensure we have been woken up from a suspended state */ + assert(psci_get_aff_info_state() == AFF_STATE_ON && is_local_state_off(\ + state_info->pwr_domain_state[PSCI_CPU_PWR_LVL])); + + /* + * Plat. management: Perform the platform specific actions + * before we change the state of the cpu e.g. enabling the + * gic or zeroing the mailbox register. 
If anything goes + * wrong then assert as there is no way to recover from this + * situation. + */ + psci_plat_pm_ops->pwr_domain_suspend_finish(state_info); + + /* + * Arch. management: Enable the data cache, manage stack memory and + * restore the stashed EL3 architectural context from the 'cpu_context' + * structure for this cpu. + */ + psci_do_pwrup_cache_maintenance(); + + /* Re-init the cntfrq_el0 register */ + counter_freq = plat_get_syscnt_freq(); + write_cntfrq_el0(counter_freq); + + /* + * Call the cpu suspend finish handler registered by the Secure Payload + * Dispatcher to let it do any bookeeping. If the handler encounters an + * error, it's expected to assert within + */ + if (psci_spd_pm && psci_spd_pm->svc_suspend) { + suspend_level = psci_get_suspend_pwrlvl(); + assert (suspend_level != PSCI_INVALID_DATA); + psci_spd_pm->svc_suspend_finish(suspend_level); + } + + /* Invalidate the suspend level for the cpu */ + psci_set_suspend_pwrlvl(PSCI_INVALID_DATA); + + /* + * Generic management: Now we just need to retrieve the + * information that we had stashed away during the suspend + * call to set this cpu on its way. + */ + cm_prepare_el3_exit(NON_SECURE); + + /* Clean caches before re-entering normal world */ + dcsw_op_louis(DCCSW); +} diff --git a/services/std_svc/psci/psci_system_off.c b/services/std_svc/psci/psci_system_off.c index 970d4bb..28315d6 100644 --- a/services/std_svc/psci/psci_system_off.c +++ b/services/std_svc/psci/psci_system_off.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -37,7 +37,7 @@ void psci_system_off(void) { - psci_print_affinity_map(); + psci_print_power_domain_map(); assert(psci_plat_pm_ops->system_off); @@ -54,7 +54,7 @@ void psci_system_reset(void) { - psci_print_affinity_map(); + psci_print_power_domain_map(); assert(psci_plat_pm_ops->system_reset); diff --git a/services/std_svc/psci1.0/psci_common.c b/services/std_svc/psci1.0/psci_common.c deleted file mode 100644 index 7f1a5fd..0000000 --- a/services/std_svc/psci1.0/psci_common.c +++ /dev/null @@ -1,877 +0,0 @@ -/* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-#include <arch_helpers.h>
-#include <assert.h>
-#include <bl_common.h>
-#include <context.h>
-#include <context_mgmt.h>
-#include <debug.h>
-#include <platform.h>
-#include <string.h>
-#include "psci_private.h"
-
-/*
- * SPD power management operations, expected to be supplied by the registered
- * SPD on successful SP initialization
- */
-const spd_pm_ops_t *psci_spd_pm;
-
-/*
- * PSCI requested local power state map. This array is used to store the local
- * power states requested by a CPU for power levels from level 1 to
- * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power
- * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a
- * CPU are the same.
- *
- * During state coordination, the platform is passed an array containing the
- * local states requested for a particular non cpu power domain by each cpu
- * within the domain.
- *
- * TODO: Dense packing of the requested states will cause cache thrashing
- * when multiple power domains write to it. If we allocate the requested
- * states at each power level in a cache-line aligned per-domain memory,
- * the cache thrashing can be avoided.
- */
-static plat_local_state_t
-	psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT];
-
-
-/*******************************************************************************
- * Arrays that hold the platform's power domain tree information for state
- * management of power domains.
- * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain - * which is an ancestor of a CPU power domain. - * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain - ******************************************************************************/ -non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS] -#if USE_COHERENT_MEM -__attribute__ ((section("tzfw_coherent_mem"))) -#endif -; - -cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT]; - -/******************************************************************************* - * Pointer to functions exported by the platform to complete power mgmt. ops - ******************************************************************************/ -const plat_psci_ops_t *psci_plat_pm_ops; - -/****************************************************************************** - * Check that the maximum power level supported by the platform makes sense - *****************************************************************************/ -CASSERT(PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL && \ - PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL, \ - assert_platform_max_pwrlvl_check); - -/* - * The plat_local_state used by the platform is one of these types: RUN, - * RETENTION and OFF. The platform can define further sub-states for each type - * apart from RUN. This categorization is done to verify the sanity of the - * psci_power_state passed by the platform and to print debug information. The - * categorization is done on the basis of the following conditions: - * - * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN. - * - * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is - * STATE_TYPE_RETN. - * - * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is - * STATE_TYPE_OFF. - */ -typedef enum plat_local_state_type { - STATE_TYPE_RUN = 0, - STATE_TYPE_RETN, - STATE_TYPE_OFF -} plat_local_state_type_t; - -/* The macro used to categorize plat_local_state. 
*/ -#define find_local_state_type(plat_local_state) \ - ((plat_local_state) ? ((plat_local_state > PLAT_MAX_RET_STATE) \ - ? STATE_TYPE_OFF : STATE_TYPE_RETN) \ - : STATE_TYPE_RUN) - -/****************************************************************************** - * Check that the maximum retention level supported by the platform is less - * than the maximum off level. - *****************************************************************************/ -CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE, \ - assert_platform_max_off_and_retn_state_check); - -/****************************************************************************** - * This function ensures that the power state parameter in a CPU_SUSPEND request - * is valid. If so, it returns the requested states for each power level. - *****************************************************************************/ -int psci_validate_power_state(unsigned int power_state, - psci_power_state_t *state_info) -{ - /* Check SBZ bits in power state are zero */ - if (psci_check_power_state(power_state)) - return PSCI_E_INVALID_PARAMS; - - assert(psci_plat_pm_ops->validate_power_state); - - /* Validate the power_state using platform pm_ops */ - return psci_plat_pm_ops->validate_power_state(power_state, state_info); -} - -/****************************************************************************** - * This function retrieves the `psci_power_state_t` for system suspend from - * the platform. - *****************************************************************************/ -void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info) -{ - /* - * Assert that the required pm_ops hook is implemented to ensure that - * the capability detected during psci_setup() is valid. 
- */ - assert(psci_plat_pm_ops->get_sys_suspend_power_state); - - /* - * Query the platform for the power_state required for system suspend - */ - psci_plat_pm_ops->get_sys_suspend_power_state(state_info); -} - -/******************************************************************************* - * This function verifies that the all the other cores in the system have been - * turned OFF and the current CPU is the last running CPU in the system. - * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false) - * otherwise. - ******************************************************************************/ -unsigned int psci_is_last_on_cpu(void) -{ - unsigned int cpu_idx, my_idx = plat_my_core_pos(); - - for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) { - if (cpu_idx == my_idx) { - assert(psci_get_aff_info_state() == AFF_STATE_ON); - continue; - } - - if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF) - return 0; - } - - return 1; -} - -/******************************************************************************* - * Routine to return the maximum power level to traverse to after a cpu has - * been physically powered up. It is expected to be called immediately after - * reset from assembler code. - ******************************************************************************/ -static int get_power_on_target_pwrlvl(void) -{ - int pwrlvl; - - /* - * Assume that this cpu was suspended and retrieve its target power - * level. If it is invalid then it could only have been turned off - * earlier. PLAT_MAX_PWR_LVL will be the highest power level a - * cpu can be turned off to. - */ - pwrlvl = psci_get_suspend_pwrlvl(); - if (pwrlvl == PSCI_INVALID_DATA) - pwrlvl = PLAT_MAX_PWR_LVL; - return pwrlvl; -} - -/****************************************************************************** - * Helper function to update the requested local power state array. This array - * does not store the requested state for the CPU power level. 
Hence an - * assertion is added to prevent us from accessing the wrong index. - *****************************************************************************/ -static void psci_set_req_local_pwr_state(unsigned int pwrlvl, - unsigned int cpu_idx, - plat_local_state_t req_pwr_state) -{ - assert(pwrlvl > PSCI_CPU_PWR_LVL); - psci_req_local_pwr_states[pwrlvl - 1][cpu_idx] = req_pwr_state; -} - -/****************************************************************************** - * This function initializes the psci_req_local_pwr_states. - *****************************************************************************/ -void psci_init_req_local_pwr_states(void) -{ - /* Initialize the requested state of all non CPU power domains as OFF */ - memset(&psci_req_local_pwr_states, PLAT_MAX_OFF_STATE, - sizeof(psci_req_local_pwr_states)); -} - -/****************************************************************************** - * Helper function to return a reference to an array containing the local power - * states requested by each cpu for a power domain at 'pwrlvl'. The size of the - * array will be the number of cpu power domains of which this power domain is - * an ancestor. These requested states will be used to determine a suitable - * target state for this power domain during psci state coordination. An - * assertion is added to prevent us from accessing the CPU power level. - *****************************************************************************/ -static plat_local_state_t *psci_get_req_local_pwr_states(int pwrlvl, - int cpu_idx) -{ - assert(pwrlvl > PSCI_CPU_PWR_LVL); - - return &psci_req_local_pwr_states[pwrlvl - 1][cpu_idx]; -} - -/****************************************************************************** - * Helper function to return the current local power state of each power domain - * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. 
This - * function will be called after a cpu is powered on to find the local state - * each power domain has emerged from. - *****************************************************************************/ -static void psci_get_target_local_pwr_states(uint32_t end_pwrlvl, - psci_power_state_t *target_state) -{ - int lvl; - unsigned int parent_idx; - plat_local_state_t *pd_state = target_state->pwr_domain_state; - - pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state(); - parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node; - - /* Copy the local power state from node to state_info */ - for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) { -#if !USE_COHERENT_MEM - /* - * If using normal memory for psci_non_cpu_pd_nodes, we need - * to flush before reading the local power state as another - * cpu in the same power domain could have updated it and this - * code runs before caches are enabled. - */ - flush_dcache_range( - (uint64_t)&psci_non_cpu_pd_nodes[parent_idx], - sizeof(psci_non_cpu_pd_nodes[parent_idx])); -#endif - pd_state[lvl] = psci_non_cpu_pd_nodes[parent_idx].local_state; - parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node; - } - - /* Set the the higher levels to RUN */ - for (; lvl <= PLAT_MAX_PWR_LVL; lvl++) - target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN; -} - -/****************************************************************************** - * Helper function to set the target local power state that each power domain - * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will - * enter. This function will be called after coordination of requested power - * states has been done for each power level. 
- *****************************************************************************/ -static void psci_set_target_local_pwr_states(uint32_t end_pwrlvl, - const psci_power_state_t *target_state) -{ - int lvl; - unsigned int parent_idx; - const plat_local_state_t *pd_state = target_state->pwr_domain_state; - - psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]); - - /* - * Need to flush as local_state will be accessed with Data Cache - * disabled during power on - */ - flush_cpu_data(psci_svc_cpu_data.local_state); - - parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node; - - /* Copy the local_state from state_info */ - for (lvl = 1; lvl <= end_pwrlvl; lvl++) { - psci_non_cpu_pd_nodes[parent_idx].local_state = pd_state[lvl]; -#if !USE_COHERENT_MEM - flush_dcache_range( - (uint64_t)&psci_non_cpu_pd_nodes[parent_idx], - sizeof(psci_non_cpu_pd_nodes[parent_idx])); -#endif - parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node; - } -} - - -/******************************************************************************* - * PSCI helper function to get the parent nodes corresponding to a cpu_index. - ******************************************************************************/ -void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx, - int end_lvl, - unsigned int node_index[]) -{ - unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node; - int i; - - for (i = PSCI_CPU_PWR_LVL + 1; i <= end_lvl; i++) { - *node_index++ = parent_node; - parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node; - } -} - -/****************************************************************************** - * This function is invoked post CPU power up and initialization. It sets the - * affinity info state, target power state and requested power state for the - * current CPU and all its ancestor power domains to RUN. 
- *****************************************************************************/ -void psci_set_pwr_domains_to_run(uint32_t end_pwrlvl) -{ - int lvl; - unsigned int parent_idx, cpu_idx = plat_my_core_pos(); - parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node; - - /* Reset the local_state to RUN for the non cpu power domains. */ - for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) { - psci_non_cpu_pd_nodes[parent_idx].local_state = - PSCI_LOCAL_STATE_RUN; -#if !USE_COHERENT_MEM - flush_dcache_range( - (uint64_t)&psci_non_cpu_pd_nodes[parent_idx], - sizeof(psci_non_cpu_pd_nodes[parent_idx])); -#endif - psci_set_req_local_pwr_state(lvl, - cpu_idx, - PSCI_LOCAL_STATE_RUN); - parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node; - } - - /* Set the affinity info state to ON */ - psci_set_aff_info_state(AFF_STATE_ON); - - psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN); - flush_cpu_data(psci_svc_cpu_data); -} - -/****************************************************************************** - * This function is passed the local power states requested for each power - * domain (state_info) between the current CPU domain and its ancestors until - * the target power level (end_pwrlvl). It updates the array of requested power - * states with this information. - * - * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it - * retrieves the states requested by all the cpus of which the power domain at - * that level is an ancestor. It passes this information to the platform to - * coordinate and return the target power state. If the target state for a level - * is RUN then subsequent levels are not considered. At the CPU level, state - * coordination is not required. Hence, the requested and the target states are - * the same. - * - * The 'state_info' is updated with the target state for each level between the - * CPU and the 'end_pwrlvl' and returned to the caller. 
- * - * This function will only be invoked with data cache enabled and while - * powering down a core. - *****************************************************************************/ -void psci_do_state_coordination(int end_pwrlvl, psci_power_state_t *state_info) -{ - unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos(); - unsigned int start_idx, ncpus; - plat_local_state_t target_state, *req_states; - - parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node; - - /* For level 0, the requested state will be equivalent - to target state */ - for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) { - - /* First update the requested power state */ - psci_set_req_local_pwr_state(lvl, cpu_idx, - state_info->pwr_domain_state[lvl]); - - /* Get the requested power states for this power level */ - start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx; - req_states = psci_get_req_local_pwr_states(lvl, start_idx); - - /* - * Let the platform coordinate amongst the requested states at - * this power level and return the target local power state. - */ - ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus; - target_state = plat_get_target_pwr_state(lvl, - req_states, - ncpus); - - state_info->pwr_domain_state[lvl] = target_state; - - /* Break early if the negotiated target power state is RUN */ - if (is_local_state_run(state_info->pwr_domain_state[lvl])) - break; - - parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node; - } - - /* - * This is for cases when we break out of the above loop early because - * the target power state is RUN at a power level < end_pwlvl. - * We update the requested power state from state_info and then - * set the target state as RUN. 
- */ - for (lvl = lvl + 1; lvl <= end_pwrlvl; lvl++) { - psci_set_req_local_pwr_state(lvl, cpu_idx, - state_info->pwr_domain_state[lvl]); - state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN; - - } - - /* Update the target state in the power domain nodes */ - psci_set_target_local_pwr_states(end_pwrlvl, state_info); -} - -/****************************************************************************** - * This function validates a suspend request by making sure that if a standby - * state is requested then no power level is turned off and the highest power - * level is placed in a standby/retention state. - * - * It also ensures that the state level X will enter is not shallower than the - * state level X + 1 will enter. - * - * This validation will be enabled only for DEBUG builds as the platform is - * expected to perform these validations as well. - *****************************************************************************/ -int psci_validate_suspend_req(const psci_power_state_t *state_info, - unsigned int is_power_down_state) -{ - unsigned int max_off_lvl, target_lvl, max_retn_lvl; - plat_local_state_t state; - plat_local_state_type_t req_state_type, deepest_state_type; - int i; - - /* Find the target suspend power level */ - target_lvl = psci_find_target_suspend_lvl(state_info); - if (target_lvl == PSCI_INVALID_DATA) - return PSCI_E_INVALID_PARAMS; - - /* All power domain levels are in a RUN state to begin with */ - deepest_state_type = STATE_TYPE_RUN; - - for (i = target_lvl; i >= PSCI_CPU_PWR_LVL; i--) { - state = state_info->pwr_domain_state[i]; - req_state_type = find_local_state_type(state); - - /* - * While traversing from the highest power level to the lowest, - * the state requested for lower levels has to be the same or - * deeper i.e. equal to or greater than the state at the higher - * levels. If this condition is true, then the requested state - * becomes the deepest state encountered so far. 
- */ - if (req_state_type < deepest_state_type) - return PSCI_E_INVALID_PARAMS; - deepest_state_type = req_state_type; - } - - /* Find the highest off power level */ - max_off_lvl = psci_find_max_off_lvl(state_info); - - /* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */ - max_retn_lvl = PSCI_INVALID_DATA; - if (target_lvl != max_off_lvl) - max_retn_lvl = target_lvl; - - /* - * If this is not a request for a power down state then max off level - * has to be invalid and max retention level has to be a valid power - * level. - */ - if (!is_power_down_state && (max_off_lvl != PSCI_INVALID_DATA || - max_retn_lvl == PSCI_INVALID_DATA)) - return PSCI_E_INVALID_PARAMS; - - return PSCI_E_SUCCESS; -} - -/****************************************************************************** - * This function finds the highest power level which will be powered down - * amongst all the power levels specified in the 'state_info' structure - *****************************************************************************/ -unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info) -{ - int i; - - for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) { - if (is_local_state_off(state_info->pwr_domain_state[i])) - return i; - } - - return PSCI_INVALID_DATA; -} - -/****************************************************************************** - * This functions finds the level of the highest power domain which will be - * placed in a low power state during a suspend operation. 
- *****************************************************************************/ -unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info) -{ - int i; - - for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) { - if (!is_local_state_run(state_info->pwr_domain_state[i])) - return i; - } - - return PSCI_INVALID_DATA; -} - -/******************************************************************************* - * This function is passed a cpu_index and the highest level in the topology - * tree that the operation should be applied to. It picks up locks in order of - * increasing power domain level in the range specified. - ******************************************************************************/ -void psci_acquire_pwr_domain_locks(int end_pwrlvl, unsigned int cpu_idx) -{ - unsigned int parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node; - int level; - - /* No locking required for level 0. Hence start locking from level 1 */ - for (level = PSCI_CPU_PWR_LVL + 1; level <= end_pwrlvl; level++) { - psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]); - parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node; - } -} - -/******************************************************************************* - * This function is passed a cpu_index and the highest level in the topology - * tree that the operation should be applied to. It releases the locks in order - * of decreasing power domain level in the range specified. - ******************************************************************************/ -void psci_release_pwr_domain_locks(int end_pwrlvl, unsigned int cpu_idx) -{ - unsigned int parent_idx, parent_nodes[PLAT_MAX_PWR_LVL] = {0}; - int level; - - /* Get the parent nodes */ - psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes); - - /* Unlock top down. No unlocking required for level 0. 
*/ - for (level = end_pwrlvl; level >= PSCI_CPU_PWR_LVL + 1; level--) { - parent_idx = parent_nodes[level - 1]; - psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]); - } -} - -/******************************************************************************* - * Simple routine to determine whether a mpidr is valid or not. - ******************************************************************************/ -int psci_validate_mpidr(unsigned long mpidr) -{ - if (plat_core_pos_by_mpidr(mpidr) < 0) - return PSCI_E_INVALID_PARAMS; - - return PSCI_E_SUCCESS; -} - -/******************************************************************************* - * This function determines the full entrypoint information for the requested - * PSCI entrypoint on power on/resume and returns it. - ******************************************************************************/ -int psci_get_ns_ep_info(entry_point_info_t *ep, - uint64_t entrypoint, uint64_t context_id) -{ - uint32_t ep_attr, mode, sctlr, daif, ee; - uint32_t ns_scr_el3 = read_scr_el3(); - uint32_t ns_sctlr_el1 = read_sctlr_el1(); - - sctlr = ns_scr_el3 & SCR_HCE_BIT ? read_sctlr_el2() : ns_sctlr_el1; - ee = 0; - - ep_attr = NON_SECURE | EP_ST_DISABLE; - if (sctlr & SCTLR_EE_BIT) { - ep_attr |= EP_EE_BIG; - ee = 1; - } - SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr); - - ep->pc = entrypoint; - memset(&ep->args, 0, sizeof(ep->args)); - ep->args.arg0 = context_id; - - /* - * Figure out whether the cpu enters the non-secure address space - * in aarch32 or aarch64 - */ - if (ns_scr_el3 & SCR_RW_BIT) { - - /* - * Check whether a Thumb entry point has been provided for an - * aarch64 EL - */ - if (entrypoint & 0x1) - return PSCI_E_INVALID_PARAMS; - - mode = ns_scr_el3 & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1; - - ep->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); - } else { - - mode = ns_scr_el3 & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc; - - /* - * TODO: Choose async. 
exception bits if HYP mode is not - * implemented according to the values of SCR.{AW, FW} bits - */ - daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT; - - ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif); - } - - return PSCI_E_SUCCESS; -} - -/******************************************************************************* - * Generic handler which is called when a cpu is physically powered on. It - * traverses the node information and finds the highest power level powered - * off and performs generic, architectural, platform setup and state management - * to power on that power level and power levels below it. - * e.g. For a cpu that's been powered on, it will call the platform specific - * code to enable the gic cpu interface and for a cluster it will enable - * coherency at the interconnect level in addition to gic cpu interface. - ******************************************************************************/ -void psci_power_up_finish(void) -{ - unsigned int cpu_idx = plat_my_core_pos(); - psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} }; - int end_pwrlvl; - - /* - * Verify that we have been explicitly turned ON or resumed from - * suspend. - */ - if (psci_get_aff_info_state() == AFF_STATE_OFF) { - ERROR("Unexpected affinity info state"); - panic(); - } - - /* - * Get the maximum power domain level to traverse to after this cpu - * has been physically powered up. - */ - end_pwrlvl = get_power_on_target_pwrlvl(); - - /* - * This function acquires the lock corresponding to each power level so - * that by the time all locks are taken, the system topology is snapshot - * and state management can be done safely. - */ - psci_acquire_pwr_domain_locks(end_pwrlvl, - cpu_idx); - - psci_get_target_local_pwr_states(end_pwrlvl, &state_info); - - /* - * This CPU could be resuming from suspend or it could have just been - * turned on. 
To distinguish between these 2 cases, we examine the - * affinity state of the CPU: - * - If the affinity state is ON_PENDING then it has just been - * turned on. - * - Else it is resuming from suspend. - * - * Depending on the type of warm reset identified, choose the right set - * of power management handler and perform the generic, architecture - * and platform specific handling. - */ - if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING) - psci_cpu_on_finish(cpu_idx, &state_info); - else - psci_cpu_suspend_finish(cpu_idx, &state_info); - - /* - * Set the requested and target state of this CPU and all the higher - * power domains which are ancestors of this CPU to run. - */ - psci_set_pwr_domains_to_run(end_pwrlvl); - - /* - * This loop releases the lock corresponding to each power level - * in the reverse order to which they were acquired. - */ - psci_release_pwr_domain_locks(end_pwrlvl, - cpu_idx); -} - -/******************************************************************************* - * This function initializes the set of hooks that PSCI invokes as part of power - * management operation. The power management hooks are expected to be provided - * by the SPD, after it finishes all its initialization - ******************************************************************************/ -void psci_register_spd_pm_hook(const spd_pm_ops_t *pm) -{ - assert(pm); - psci_spd_pm = pm; - - if (pm->svc_migrate) - psci_caps |= define_psci_cap(PSCI_MIG_AARCH64); - - if (pm->svc_migrate_info) - psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) - | define_psci_cap(PSCI_MIG_INFO_TYPE); -} - -/******************************************************************************* - * This function invokes the migrate info hook in the spd_pm_ops. It performs - * the necessary return value validation. If the Secure Payload is UP and - * migrate capable, it returns the mpidr of the CPU on which the Secure payload - * is resident through the mpidr parameter. 
Else the value of the parameter on - * return is undefined. - ******************************************************************************/ -int psci_spd_migrate_info(uint64_t *mpidr) -{ - int rc; - - if (!psci_spd_pm || !psci_spd_pm->svc_migrate_info) - return PSCI_E_NOT_SUPPORTED; - - rc = psci_spd_pm->svc_migrate_info(mpidr); - - assert(rc == PSCI_TOS_UP_MIG_CAP || rc == PSCI_TOS_NOT_UP_MIG_CAP \ - || rc == PSCI_TOS_NOT_PRESENT_MP || rc == PSCI_E_NOT_SUPPORTED); - - return rc; -} - - -/******************************************************************************* - * This function prints the state of all power domains present in the - * system - ******************************************************************************/ -void psci_print_power_domain_map(void) -{ -#if LOG_LEVEL >= LOG_LEVEL_INFO - unsigned int idx; - plat_local_state_t state; - plat_local_state_type_t state_type; - - /* This array maps to the PSCI_STATE_X definitions in psci.h */ - static const char *psci_state_type_str[] = { - "ON", - "RETENTION", - "OFF", - }; - - INFO("PSCI Power Domain Map:\n"); - for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - PLATFORM_CORE_COUNT); - idx++) { - state_type = find_local_state_type( - psci_non_cpu_pd_nodes[idx].local_state); - INFO(" Domain Node : Level %u, parent_node %d," - " State %s (0x%x)\n", - psci_non_cpu_pd_nodes[idx].level, - psci_non_cpu_pd_nodes[idx].parent_node, - psci_state_type_str[state_type], - psci_non_cpu_pd_nodes[idx].local_state); - } - - for (idx = 0; idx < PLATFORM_CORE_COUNT; idx++) { - state = psci_get_cpu_local_state_by_idx(idx); - state_type = find_local_state_type(state); - INFO(" CPU Node : MPID 0x%lx, parent_node %d," - " State %s (0x%x)\n", - psci_cpu_pd_nodes[idx].mpidr, - psci_cpu_pd_nodes[idx].parent_node, - psci_state_type_str[state_type], - psci_get_cpu_local_state_by_idx(idx)); - } -#endif -} - -#if ENABLE_PLAT_COMPAT -/******************************************************************************* - * PSCI Compatibility 
helper function to return the 'power_state' parameter of - * the PSCI CPU SUSPEND request for the current CPU. Returns PSCI_INVALID_DATA - * if not invoked within CPU_SUSPEND for the current CPU. - ******************************************************************************/ -int psci_get_suspend_powerstate(void) -{ - /* Sanity check to verify that CPU is within CPU_SUSPEND */ - if (psci_get_aff_info_state() == AFF_STATE_ON && - !is_local_state_run(psci_get_cpu_local_state())) - return psci_power_state_compat[plat_my_core_pos()]; - - return PSCI_INVALID_DATA; -} - -/******************************************************************************* - * PSCI Compatibility helper function to return the state id of the current - * cpu encoded in the 'power_state' parameter. Returns PSCI_INVALID_DATA - * if not invoked within CPU_SUSPEND for the current CPU. - ******************************************************************************/ -int psci_get_suspend_stateid(void) -{ - unsigned int power_state; - power_state = psci_get_suspend_powerstate(); - if (power_state != PSCI_INVALID_DATA) - return psci_get_pstate_id(power_state); - - return PSCI_INVALID_DATA; -} - -/******************************************************************************* - * PSCI Compatibility helper function to return the state id encoded in the - * 'power_state' parameter of the CPU specified by 'mpidr'. Returns - * PSCI_INVALID_DATA if the CPU is not in CPU_SUSPEND. 
- ******************************************************************************/ -int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr) -{ - int cpu_idx = plat_core_pos_by_mpidr(mpidr); - - if (cpu_idx == -1) - return PSCI_INVALID_DATA; - - /* Sanity check to verify that the CPU is in CPU_SUSPEND */ - if (psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_ON && - !is_local_state_run(psci_get_cpu_local_state_by_idx(cpu_idx))) - return psci_get_pstate_id(psci_power_state_compat[cpu_idx]); - - return PSCI_INVALID_DATA; -} - -/******************************************************************************* - * This function returns highest affinity level which is in OFF - * state. The affinity instance with which the level is associated is - * determined by the caller. - ******************************************************************************/ -unsigned int psci_get_max_phys_off_afflvl(void) -{ - psci_power_state_t state_info; - - memset(&state_info, 0, sizeof(state_info)); - psci_get_target_local_pwr_states(PLAT_MAX_PWR_LVL, &state_info); - - return psci_find_target_suspend_lvl(&state_info); -} - -/******************************************************************************* - * PSCI Compatibility helper function to return target affinity level requested - * for the CPU_SUSPEND. This function assumes affinity levels correspond to - * power domain levels on the platform. - ******************************************************************************/ -int psci_get_suspend_afflvl(void) -{ - return psci_get_suspend_pwrlvl(); -} - -#endif diff --git a/services/std_svc/psci1.0/psci_entry.S b/services/std_svc/psci1.0/psci_entry.S deleted file mode 100644 index 73c3377..0000000 --- a/services/std_svc/psci1.0/psci_entry.S +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include - - .globl psci_entrypoint - .globl psci_power_down_wfi - - /* -------------------------------------------------------------------- - * This CPU has been physically powered up. It is either resuming from - * suspend or has simply been turned on. In both cases, call the power - * on finisher. 
- * -------------------------------------------------------------------- - */ -func psci_entrypoint - /* - * On the warm boot path, most of the EL3 initialisations performed by - * 'el3_entrypoint_common' must be skipped: - * - * - Only when the platform bypasses the BL1/BL3-1 entrypoint by - * programming the reset address do we need to set the CPU endianness. - * In other cases, we assume this has been taken care by the - * entrypoint code. - * - * - No need to determine the type of boot, we know it is a warm boot. - * - * - Do not try to distinguish between primary and secondary CPUs, this - * notion only exists for a cold boot. - * - * - No need to initialise the memory or the C runtime environment, - * it has been done once and for all on the cold boot path. - */ - el3_entrypoint_common \ - _set_endian=PROGRAMMABLE_RESET_ADDRESS \ - _warm_boot_mailbox=0 \ - _secondary_cold_boot=0 \ - _init_memory=0 \ - _init_c_runtime=0 \ - _exception_vectors=runtime_exceptions - - /* -------------------------------------------- - * Enable the MMU with the DCache disabled. It - * is safe to use stacks allocated in normal - * memory as a result. All memory accesses are - * marked nGnRnE when the MMU is disabled. So - * all the stack writes will make it to memory. - * All memory accesses are marked Non-cacheable - * when the MMU is enabled but D$ is disabled. - * So used stack memory is guaranteed to be - * visible immediately after the MMU is enabled - * Enabling the DCache at the same time as the - * MMU can lead to speculatively fetched and - * possibly stale stack memory being read from - * other caches. This can lead to coherency - * issues. - * -------------------------------------------- - */ - mov x0, #DISABLE_DCACHE - bl bl31_plat_enable_mmu - - bl psci_power_up_finish - - b el3_exit -endfunc psci_entrypoint - - /* -------------------------------------------- - * This function is called to indicate to the - * power controller that it is safe to power - * down this cpu. 
It should not exit the wfi - * and will be released from reset upon power - * up. 'wfi_spill' is used to catch erroneous - * exits from wfi. - * -------------------------------------------- - */ -func psci_power_down_wfi - dsb sy // ensure write buffer empty - wfi -wfi_spill: - b wfi_spill -endfunc psci_power_down_wfi - diff --git a/services/std_svc/psci1.0/psci_helpers.S b/services/std_svc/psci1.0/psci_helpers.S deleted file mode 100644 index bbfa5d5..0000000 --- a/services/std_svc/psci1.0/psci_helpers.S +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include - - .globl psci_do_pwrdown_cache_maintenance - .globl psci_do_pwrup_cache_maintenance - -/* ----------------------------------------------------------------------- - * void psci_do_pwrdown_cache_maintenance(uint32_t power level); - * - * This function performs cache maintenance for the specified power - * level. The levels of cache affected are determined by the power - * level which is passed as the argument i.e. level 0 results - * in a flush of the L1 cache. Both the L1 and L2 caches are flushed - * for a higher power level. - * - * Additionally, this function also ensures that stack memory is correctly - * flushed out to avoid coherency issues due to a change in its memory - * attributes after the data cache is disabled. - * ----------------------------------------------------------------------- - */ -func psci_do_pwrdown_cache_maintenance - stp x29, x30, [sp,#-16]! - stp x19, x20, [sp,#-16]! - - /* --------------------------------------------- - * Determine to how many levels of cache will be - * subject to cache maintenance. Power level - * 0 implies that only the cpu is being powered - * down. Only the L1 data cache needs to be - * flushed to the PoU in this case. For a higher - * power level we are assuming that a flush - * of L1 data and L2 unified cache is enough. - * This information should be provided by the - * platform. 
- * --------------------------------------------- - */ - cmp x0, #PSCI_CPU_PWR_LVL - b.eq do_core_pwr_dwn - bl prepare_cluster_pwr_dwn - b do_stack_maintenance - -do_core_pwr_dwn: - bl prepare_core_pwr_dwn - - /* --------------------------------------------- - * Do stack maintenance by flushing the used - * stack to the main memory and invalidating the - * remainder. - * --------------------------------------------- - */ -do_stack_maintenance: - bl plat_get_my_stack - - /* --------------------------------------------- - * Calculate and store the size of the used - * stack memory in x1. - * --------------------------------------------- - */ - mov x19, x0 - mov x1, sp - sub x1, x0, x1 - mov x0, sp - bl flush_dcache_range - - /* --------------------------------------------- - * Calculate and store the size of the unused - * stack memory in x1. Calculate and store the - * stack base address in x0. - * --------------------------------------------- - */ - sub x0, x19, #PLATFORM_STACK_SIZE - sub x1, sp, x0 - bl inv_dcache_range - - ldp x19, x20, [sp], #16 - ldp x29, x30, [sp], #16 - ret -endfunc psci_do_pwrdown_cache_maintenance - - -/* ----------------------------------------------------------------------- - * void psci_do_pwrup_cache_maintenance(void); - * - * This function performs cache maintenance after this cpu is powered up. - * Currently, this involves managing the used stack memory before turning - * on the data cache. - * ----------------------------------------------------------------------- - */ -func psci_do_pwrup_cache_maintenance - stp x29, x30, [sp,#-16]! - - /* --------------------------------------------- - * Ensure any inflight stack writes have made it - * to main memory. - * --------------------------------------------- - */ - dmb st - - /* --------------------------------------------- - * Calculate and store the size of the used - * stack memory in x1. Calculate and store the - * stack base address in x0. 
- * --------------------------------------------- - */ - bl plat_get_my_stack - mov x1, sp - sub x1, x0, x1 - mov x0, sp - bl inv_dcache_range - - /* --------------------------------------------- - * Enable the data cache. - * --------------------------------------------- - */ - mrs x0, sctlr_el3 - orr x0, x0, #SCTLR_C_BIT - msr sctlr_el3, x0 - isb - - ldp x29, x30, [sp], #16 - ret -endfunc psci_do_pwrup_cache_maintenance diff --git a/services/std_svc/psci1.0/psci_main.c b/services/std_svc/psci1.0/psci_main.c deleted file mode 100644 index f024291..0000000 --- a/services/std_svc/psci1.0/psci_main.c +++ /dev/null @@ -1,447 +0,0 @@ -/* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include "psci_private.h" - -/******************************************************************************* - * PSCI frontend api for servicing SMCs. Described in the PSCI spec. - ******************************************************************************/ -int psci_cpu_on(unsigned long target_cpu, - unsigned long entrypoint, - unsigned long context_id) - -{ - int rc; - unsigned int end_pwrlvl; - entry_point_info_t ep; - - /* Determine if the cpu exists of not */ - rc = psci_validate_mpidr(target_cpu); - if (rc != PSCI_E_SUCCESS) - return PSCI_E_INVALID_PARAMS; - - /* Validate the entrypoint using platform pm_ops */ - if (psci_plat_pm_ops->validate_ns_entrypoint) { - rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint); - if (rc != PSCI_E_SUCCESS) { - assert(rc == PSCI_E_INVALID_PARAMS); - return PSCI_E_INVALID_PARAMS; - } - } - - /* - * Verify and derive the re-entry information for - * the non-secure world from the non-secure state from - * where this call originated. 
- */ - rc = psci_get_ns_ep_info(&ep, entrypoint, context_id); - if (rc != PSCI_E_SUCCESS) - return rc; - - /* - * To turn this cpu on, specify which power - * levels need to be turned on - */ - end_pwrlvl = PLAT_MAX_PWR_LVL; - rc = psci_cpu_on_start(target_cpu, - &ep, - end_pwrlvl); - return rc; -} - -unsigned int psci_version(void) -{ - return PSCI_MAJOR_VER | PSCI_MINOR_VER; -} - -int psci_cpu_suspend(unsigned int power_state, - unsigned long entrypoint, - unsigned long context_id) -{ - int rc; - unsigned int target_pwrlvl, is_power_down_state; - entry_point_info_t ep; - psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} }; - plat_local_state_t cpu_pd_state; - - /* Validate the power_state parameter */ - rc = psci_validate_power_state(power_state, &state_info); - if (rc != PSCI_E_SUCCESS) { - assert(rc == PSCI_E_INVALID_PARAMS); - return rc; - } - - /* - * Get the value of the state type bit from the power state parameter. - */ - is_power_down_state = psci_get_pstate_type(power_state); - - /* Sanity check the requested suspend levels */ - assert (psci_validate_suspend_req(&state_info, is_power_down_state) - == PSCI_E_SUCCESS); - - target_pwrlvl = psci_find_target_suspend_lvl(&state_info); - - /* Fast path for CPU standby.*/ - if (is_cpu_standby_req(is_power_down_state, target_pwrlvl)) { - if (!psci_plat_pm_ops->cpu_standby) - return PSCI_E_INVALID_PARAMS; - - /* - * Set the state of the CPU power domain to the platform - * specific retention state and enter the standby state. - */ - cpu_pd_state = state_info.pwr_domain_state[PSCI_CPU_PWR_LVL]; - psci_set_cpu_local_state(cpu_pd_state); - psci_plat_pm_ops->cpu_standby(cpu_pd_state); - - /* Upon exit from standby, set the state back to RUN. */ - psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN); - - return PSCI_E_SUCCESS; - } - - /* - * If a power down state has been requested, we need to verify entry - * point and program entry information. 
- */ - if (is_power_down_state) { - if (psci_plat_pm_ops->validate_ns_entrypoint) { - rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint); - if (rc != PSCI_E_SUCCESS) { - assert(rc == PSCI_E_INVALID_PARAMS); - return rc; - } - } - - /* - * Verify and derive the re-entry information for - * the non-secure world from the non-secure state from - * where this call originated. - */ - rc = psci_get_ns_ep_info(&ep, entrypoint, context_id); - if (rc != PSCI_E_SUCCESS) - return rc; - } - - /* - * Do what is needed to enter the power down state. Upon success, - * enter the final wfi which will power down this CPU. This function - * might return if the power down was abandoned for any reason, e.g. - * arrival of an interrupt - */ - psci_cpu_suspend_start(&ep, - target_pwrlvl, - &state_info, - is_power_down_state); - - return PSCI_E_SUCCESS; -} - -int psci_system_suspend(unsigned long entrypoint, - unsigned long context_id) -{ - int rc; - psci_power_state_t state_info; - entry_point_info_t ep; - - /* Validate the entrypoint using platform pm_ops */ - if (psci_plat_pm_ops->validate_ns_entrypoint) { - rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint); - if (rc != PSCI_E_SUCCESS) { - assert(rc == PSCI_E_INVALID_PARAMS); - return rc; - } - } - - /* Check if the current CPU is the last ON CPU in the system */ - if (!psci_is_last_on_cpu()) - return PSCI_E_DENIED; - - /* - * Verify and derive the re-entry information for - * the non-secure world from the non-secure state from - * where this call originated. 
- */ - rc = psci_get_ns_ep_info(&ep, entrypoint, context_id); - if (rc != PSCI_E_SUCCESS) - return rc; - - /* Query the psci_power_state for system suspend */ - psci_query_sys_suspend_pwrstate(&state_info); - - /* Ensure that the psci_power_state makes sense */ - assert(psci_find_target_suspend_lvl(&state_info) == PLAT_MAX_PWR_LVL); - assert(psci_validate_suspend_req(&state_info, PSTATE_TYPE_POWERDOWN) - == PSCI_E_SUCCESS); - assert(is_local_state_off(state_info.pwr_domain_state[PLAT_MAX_PWR_LVL])); - - /* - * Do what is needed to enter the system suspend state. This function - * might return if the power down was abandoned for any reason, e.g. - * arrival of an interrupt - */ - psci_cpu_suspend_start(&ep, - PLAT_MAX_PWR_LVL, - &state_info, - PSTATE_TYPE_POWERDOWN); - - return PSCI_E_SUCCESS; -} - -int psci_cpu_off(void) -{ - int rc; - int target_pwrlvl = PLAT_MAX_PWR_LVL; - - /* - * Do what is needed to power off this CPU and possible higher power - * levels if it able to do so. Upon success, enter the final wfi - * which will power down this CPU. - */ - rc = psci_do_cpu_off(target_pwrlvl); - - /* - * The only error cpu_off can return is E_DENIED. So check if that's - * indeed the case. - */ - assert (rc == PSCI_E_DENIED); - - return rc; -} - -int psci_affinity_info(unsigned long target_affinity, - unsigned int lowest_affinity_level) -{ - unsigned int target_idx; - - /* We dont support level higher than PSCI_CPU_PWR_LVL */ - if (lowest_affinity_level > PSCI_CPU_PWR_LVL) - return PSCI_E_INVALID_PARAMS; - - /* Calculate the cpu index of the target */ - target_idx = plat_core_pos_by_mpidr(target_affinity); - if (target_idx == -1) - return PSCI_E_INVALID_PARAMS; - - return psci_get_aff_info_state_by_idx(target_idx); -} - -int psci_migrate(unsigned long target_cpu) -{ - int rc; - unsigned long resident_cpu_mpidr; - - rc = psci_spd_migrate_info(&resident_cpu_mpidr); - if (rc != PSCI_TOS_UP_MIG_CAP) - return (rc == PSCI_TOS_NOT_UP_MIG_CAP) ? 
- PSCI_E_DENIED : PSCI_E_NOT_SUPPORTED; - - /* - * Migrate should only be invoked on the CPU where - * the Secure OS is resident. - */ - if (resident_cpu_mpidr != read_mpidr_el1()) - return PSCI_E_NOT_PRESENT; - - /* Check the validity of the specified target cpu */ - rc = psci_validate_mpidr(target_cpu); - if (rc != PSCI_E_SUCCESS) - return PSCI_E_INVALID_PARAMS; - - assert(psci_spd_pm && psci_spd_pm->svc_migrate); - - rc = psci_spd_pm->svc_migrate(read_mpidr_el1(), target_cpu); - assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL); - - return rc; -} - -int psci_migrate_info_type(void) -{ - unsigned long resident_cpu_mpidr; - - return psci_spd_migrate_info(&resident_cpu_mpidr); -} - -long psci_migrate_info_up_cpu(void) -{ - unsigned long resident_cpu_mpidr; - int rc; - - /* - * Return value of this depends upon what - * psci_spd_migrate_info() returns. - */ - rc = psci_spd_migrate_info(&resident_cpu_mpidr); - if (rc != PSCI_TOS_NOT_UP_MIG_CAP && rc != PSCI_TOS_UP_MIG_CAP) - return PSCI_E_INVALID_PARAMS; - - return resident_cpu_mpidr; -} - -int psci_features(unsigned int psci_fid) -{ - uint32_t local_caps = psci_caps; - - /* Check if it is a 64 bit function */ - if (((psci_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64) - local_caps &= PSCI_CAP_64BIT_MASK; - - /* Check for invalid fid */ - if (!(is_std_svc_call(psci_fid) && is_valid_fast_smc(psci_fid) - && is_psci_fid(psci_fid))) - return PSCI_E_NOT_SUPPORTED; - - - /* Check if the psci fid is supported or not */ - if (!(local_caps & define_psci_cap(psci_fid))) - return PSCI_E_NOT_SUPPORTED; - - /* Format the feature flags */ - if (psci_fid == PSCI_CPU_SUSPEND_AARCH32 || - psci_fid == PSCI_CPU_SUSPEND_AARCH64) { - /* - * The trusted firmware does not support OS Initiated Mode. 
- */ - return (FF_PSTATE << FF_PSTATE_SHIFT) | - ((!FF_SUPPORTS_OS_INIT_MODE) << FF_MODE_SUPPORT_SHIFT); - } - - /* Return 0 for all other fid's */ - return PSCI_E_SUCCESS; -} - -/******************************************************************************* - * PSCI top level handler for servicing SMCs. - ******************************************************************************/ -uint64_t psci_smc_handler(uint32_t smc_fid, - uint64_t x1, - uint64_t x2, - uint64_t x3, - uint64_t x4, - void *cookie, - void *handle, - uint64_t flags) -{ - if (is_caller_secure(flags)) - SMC_RET1(handle, SMC_UNK); - - /* Check the fid against the capabilities */ - if (!(psci_caps & define_psci_cap(smc_fid))) - SMC_RET1(handle, SMC_UNK); - - if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) { - /* 32-bit PSCI function, clear top parameter bits */ - - x1 = (uint32_t)x1; - x2 = (uint32_t)x2; - x3 = (uint32_t)x3; - - switch (smc_fid) { - case PSCI_VERSION: - SMC_RET1(handle, psci_version()); - - case PSCI_CPU_OFF: - SMC_RET1(handle, psci_cpu_off()); - - case PSCI_CPU_SUSPEND_AARCH32: - SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3)); - - case PSCI_CPU_ON_AARCH32: - SMC_RET1(handle, psci_cpu_on(x1, x2, x3)); - - case PSCI_AFFINITY_INFO_AARCH32: - SMC_RET1(handle, psci_affinity_info(x1, x2)); - - case PSCI_MIG_AARCH32: - SMC_RET1(handle, psci_migrate(x1)); - - case PSCI_MIG_INFO_TYPE: - SMC_RET1(handle, psci_migrate_info_type()); - - case PSCI_MIG_INFO_UP_CPU_AARCH32: - SMC_RET1(handle, psci_migrate_info_up_cpu()); - - case PSCI_SYSTEM_SUSPEND_AARCH32: - SMC_RET1(handle, psci_system_suspend(x1, x2)); - - case PSCI_SYSTEM_OFF: - psci_system_off(); - /* We should never return from psci_system_off() */ - - case PSCI_SYSTEM_RESET: - psci_system_reset(); - /* We should never return from psci_system_reset() */ - - case PSCI_FEATURES: - SMC_RET1(handle, psci_features(x1)); - - default: - break; - } - } else { - /* 64-bit PSCI function */ - - switch (smc_fid) { - case 
PSCI_CPU_SUSPEND_AARCH64: - SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3)); - - case PSCI_CPU_ON_AARCH64: - SMC_RET1(handle, psci_cpu_on(x1, x2, x3)); - - case PSCI_AFFINITY_INFO_AARCH64: - SMC_RET1(handle, psci_affinity_info(x1, x2)); - - case PSCI_MIG_AARCH64: - SMC_RET1(handle, psci_migrate(x1)); - - case PSCI_MIG_INFO_UP_CPU_AARCH64: - SMC_RET1(handle, psci_migrate_info_up_cpu()); - - case PSCI_SYSTEM_SUSPEND_AARCH64: - SMC_RET1(handle, psci_system_suspend(x1, x2)); - - default: - break; - } - } - - WARN("Unimplemented PSCI Call: 0x%x \n", smc_fid); - SMC_RET1(handle, SMC_UNK); -} diff --git a/services/std_svc/psci1.0/psci_off.c b/services/std_svc/psci1.0/psci_off.c deleted file mode 100644 index 28fa52c..0000000 --- a/services/std_svc/psci1.0/psci_off.c +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include -#include "psci_private.h" - -/****************************************************************************** - * Construct the psci_power_state to request power OFF at all power levels. - ******************************************************************************/ -static void psci_set_power_off_state(psci_power_state_t *state_info) -{ - int lvl; - - for (lvl = PSCI_CPU_PWR_LVL; lvl <= PLAT_MAX_PWR_LVL; lvl++) - state_info->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE; -} - -/****************************************************************************** - * Top level handler which is called when a cpu wants to power itself down. - * It's assumed that along with turning the cpu power domain off, power - * domains at higher levels will be turned off as far as possible. It finds - * the highest level where a domain has to be powered off by traversing the - * node information and then performs generic, architectural, platform setup - * and state management required to turn OFF that power domain and domains - * below it. e.g. For a cpu that's to be powered OFF, it could mean programming - * the power controller whereas for a cluster that's to be powered off, it will - * call the platform specific code which will disable coherency at the - * interconnect level if the cpu is the last in the cluster and also the - * program the power controller. 
- ******************************************************************************/ -int psci_do_cpu_off(int end_pwrlvl) -{ - int rc, idx = plat_my_core_pos(); - psci_power_state_t state_info; - - /* - * This function must only be called on platforms where the - * CPU_OFF platform hooks have been implemented. - */ - assert(psci_plat_pm_ops->pwr_domain_off); - - /* - * This function acquires the lock corresponding to each power - * level so that by the time all locks are taken, the system topology - * is snapshot and state management can be done safely. - */ - psci_acquire_pwr_domain_locks(end_pwrlvl, - idx); - - /* - * Call the cpu off handler registered by the Secure Payload Dispatcher - * to let it do any bookkeeping. Assume that the SPD always reports an - * E_DENIED error if SP refuse to power down - */ - if (psci_spd_pm && psci_spd_pm->svc_off) { - rc = psci_spd_pm->svc_off(0); - if (rc) - goto exit; - } - - /* Construct the psci_power_state for CPU_OFF */ - psci_set_power_off_state(&state_info); - - /* - * This function is passed the requested state info and - * it returns the negotiated state info for each power level upto - * the end level specified. - */ - psci_do_state_coordination(end_pwrlvl, &state_info); - - /* - * Arch. management. Perform the necessary steps to flush all - * cpu caches. - */ - psci_do_pwrdown_cache_maintenance(psci_find_max_off_lvl(&state_info)); - - /* - * Plat. management: Perform platform specific actions to turn this - * cpu off e.g. exit cpu coherency, program the power controller etc. - */ - psci_plat_pm_ops->pwr_domain_off(&state_info); - -exit: - /* - * Release the locks corresponding to each power level in the - * reverse order to which they were acquired. - */ - psci_release_pwr_domain_locks(end_pwrlvl, - idx); - - /* - * Set the affinity info state to OFF. 
This writes directly to main - * memory as caches are disabled, so cache maintenance is required - * to ensure that later cached reads of aff_info_state return - * AFF_STATE_OFF. - */ - flush_cpu_data(psci_svc_cpu_data.aff_info_state); - psci_set_aff_info_state(AFF_STATE_OFF); - inv_cpu_data(psci_svc_cpu_data.aff_info_state); - - /* - * Check if all actions needed to safely power down this cpu have - * successfully completed. Enter a wfi loop which will allow the - * power controller to physically power down this cpu. - */ - if (rc == PSCI_E_SUCCESS) - psci_power_down_wfi(); - - return rc; -} diff --git a/services/std_svc/psci1.0/psci_on.c b/services/std_svc/psci1.0/psci_on.c deleted file mode 100644 index d68198f..0000000 --- a/services/std_svc/psci1.0/psci_on.c +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "psci_private.h" - -/******************************************************************************* - * This function checks whether a cpu which has been requested to be turned on - * is OFF to begin with. - ******************************************************************************/ -static int cpu_on_validate_state(aff_info_state_t aff_state) -{ - if (aff_state == AFF_STATE_ON) - return PSCI_E_ALREADY_ON; - - if (aff_state == AFF_STATE_ON_PENDING) - return PSCI_E_ON_PENDING; - - assert(aff_state == AFF_STATE_OFF); - return PSCI_E_SUCCESS; -} - -/******************************************************************************* - * This function sets the aff_info_state in the per-cpu data of the CPU - * specified by cpu_idx - ******************************************************************************/ -static void psci_set_aff_info_state_by_idx(unsigned int cpu_idx, - aff_info_state_t aff_state) -{ - - set_cpu_data_by_index(cpu_idx, - psci_svc_cpu_data.aff_info_state, - aff_state); - - /* - * Flush aff_info_state as it will be accessed with caches turned OFF. - */ - flush_cpu_data_by_index(cpu_idx, psci_svc_cpu_data.aff_info_state); -} - -/******************************************************************************* - * Generic handler which is called to physically power on a cpu identified by - * its mpidr. 
It performs the generic, architectural, platform setup and state - * management to power on the target cpu e.g. it will ensure that - * enough information is stashed for it to resume execution in the non-secure - * security state. - * - * The state of all the relevant power domains are changed after calling the - * platform handler as it can return error. - ******************************************************************************/ -int psci_cpu_on_start(unsigned long target_cpu, - entry_point_info_t *ep, - int end_pwrlvl) -{ - int rc; - unsigned int target_idx = plat_core_pos_by_mpidr(target_cpu); - - /* - * This function must only be called on platforms where the - * CPU_ON platform hooks have been implemented. - */ - assert(psci_plat_pm_ops->pwr_domain_on && - psci_plat_pm_ops->pwr_domain_on_finish); - - /* Protect against multiple CPUs trying to turn ON the same target CPU */ - psci_spin_lock_cpu(target_idx); - - /* - * Generic management: Ensure that the cpu is off to be - * turned on. - */ - rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx)); - if (rc != PSCI_E_SUCCESS) - goto exit; - - /* - * Call the cpu on handler registered by the Secure Payload Dispatcher - * to let it do any bookeeping. If the handler encounters an error, it's - * expected to assert within - */ - if (psci_spd_pm && psci_spd_pm->svc_on) - psci_spd_pm->svc_on(target_cpu); - - /* - * Set the Affinity info state of the target cpu to ON_PENDING. - */ - psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING); - - /* - * Perform generic, architecture and platform specific handling. - */ - /* - * Plat. management: Give the platform the current state - * of the target cpu to allow it to perform the necessary - * steps to power on. - */ - rc = psci_plat_pm_ops->pwr_domain_on((u_register_t)target_cpu); - assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL); - - if (rc == PSCI_E_SUCCESS) - /* Store the re-entry information for the non-secure world. 
*/ - cm_init_context_by_index(target_idx, ep); - else - /* Restore the state on error. */ - psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF); - -exit: - psci_spin_unlock_cpu(target_idx); - return rc; -} - -/******************************************************************************* - * The following function finish an earlier power on request. They - * are called by the common finisher routine in psci_common.c. The `state_info` - * is the psci_power_state from which this CPU has woken up from. - ******************************************************************************/ -void psci_cpu_on_finish(unsigned int cpu_idx, - psci_power_state_t *state_info) -{ - /* - * Plat. management: Perform the platform specific actions - * for this cpu e.g. enabling the gic or zeroing the mailbox - * register. The actual state of this cpu has already been - * changed. - */ - psci_plat_pm_ops->pwr_domain_on_finish(state_info); - - /* - * Arch. management: Enable data cache and manage stack memory - */ - psci_do_pwrup_cache_maintenance(); - - /* - * All the platform specific actions for turning this cpu - * on have completed. Perform enough arch.initialization - * to run in the non-secure address space. - */ - bl31_arch_setup(); - - /* - * Lock the CPU spin lock to make sure that the context initialization - * is done. Since the lock is only used in this function to create - * a synchronization point with cpu_on_start(), it can be released - * immediately. - */ - psci_spin_lock_cpu(cpu_idx); - psci_spin_unlock_cpu(cpu_idx); - - /* Ensure we have been explicitly woken up by another cpu */ - assert(psci_get_aff_info_state() == AFF_STATE_ON_PENDING); - - /* - * Call the cpu on finish handler registered by the Secure Payload - * Dispatcher to let it do any bookeeping. 
If the handler encounters an - * error, it's expected to assert within - */ - if (psci_spd_pm && psci_spd_pm->svc_on_finish) - psci_spd_pm->svc_on_finish(0); - - /* Populate the mpidr field within the cpu node array */ - /* This needs to be done only once */ - psci_cpu_pd_nodes[cpu_idx].mpidr = read_mpidr() & MPIDR_AFFINITY_MASK; - - /* - * Generic management: Now we just need to retrieve the - * information that we had stashed away during the cpu_on - * call to set this cpu on its way. - */ - cm_prepare_el3_exit(NON_SECURE); - - /* Clean caches before re-entering normal world */ - dcsw_op_louis(DCCSW); -} diff --git a/services/std_svc/psci1.0/psci_private.h b/services/std_svc/psci1.0/psci_private.h deleted file mode 100644 index e2e32c7..0000000 --- a/services/std_svc/psci1.0/psci_private.h +++ /dev/null @@ -1,240 +0,0 @@ -/* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __PSCI_PRIVATE_H__ -#define __PSCI_PRIVATE_H__ - -#include -#include -#include -#include -#include -#include - -/* - * The following helper macros abstract the interface to the Bakery - * Lock API. - */ -#if USE_COHERENT_MEM -#define psci_lock_init(non_cpu_pd_node, idx) \ - bakery_lock_init(&(non_cpu_pd_node)[(idx)].lock) -#define psci_lock_get(non_cpu_pd_node) \ - bakery_lock_get(&((non_cpu_pd_node)->lock)) -#define psci_lock_release(non_cpu_pd_node) \ - bakery_lock_release(&((non_cpu_pd_node)->lock)) -#else -#define psci_lock_init(non_cpu_pd_node, idx) \ - ((non_cpu_pd_node)[(idx)].lock_index = (idx)) -#define psci_lock_get(non_cpu_pd_node) \ - bakery_lock_get((non_cpu_pd_node)->lock_index, \ - CPU_DATA_PSCI_LOCK_OFFSET) -#define psci_lock_release(non_cpu_pd_node) \ - bakery_lock_release((non_cpu_pd_node)->lock_index, \ - CPU_DATA_PSCI_LOCK_OFFSET) -#endif - -/* - * The PSCI capability which are provided by the generic code but does not - * depend on the platform or spd capabilities. - */ -#define PSCI_GENERIC_CAP \ - (define_psci_cap(PSCI_VERSION) | \ - define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) | \ - define_psci_cap(PSCI_FEATURES)) - -/* - * The PSCI capabilities mask for 64 bit functions. 
- */ -#define PSCI_CAP_64BIT_MASK \ - (define_psci_cap(PSCI_CPU_SUSPEND_AARCH64) | \ - define_psci_cap(PSCI_CPU_ON_AARCH64) | \ - define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) | \ - define_psci_cap(PSCI_MIG_AARCH64) | \ - define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) | \ - define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64)) - -/* - * Helper macros to get/set the fields of PSCI per-cpu data. - */ -#define psci_set_aff_info_state(aff_state) \ - set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state) -#define psci_get_aff_info_state() \ - get_cpu_data(psci_svc_cpu_data.aff_info_state) -#define psci_get_aff_info_state_by_idx(idx) \ - get_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state) -#define psci_get_suspend_pwrlvl() \ - get_cpu_data(psci_svc_cpu_data.target_pwrlvl) -#define psci_set_suspend_pwrlvl(target_lvl) \ - set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl) -#define psci_set_cpu_local_state(state) \ - set_cpu_data(psci_svc_cpu_data.local_state, state) -#define psci_get_cpu_local_state() \ - get_cpu_data(psci_svc_cpu_data.local_state) -#define psci_get_cpu_local_state_by_idx(idx) \ - get_cpu_data_by_index(idx, psci_svc_cpu_data.local_state) - -/* - * Helper macros for the CPU level spinlocks - */ -#define psci_spin_lock_cpu(idx) spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock) -#define psci_spin_unlock_cpu(idx) spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock) - -/* Helper macro to identify a CPU standby request in PSCI Suspend call */ -#define is_cpu_standby_req(is_power_down_state, retn_lvl) \ - (((!(is_power_down_state)) && ((retn_lvl) == 0)) ? 1 : 0) - -/******************************************************************************* - * The following two data structures implement the power domain tree. The tree - * is used to track the state of all the nodes i.e. power domain instances - * described by the platform. The tree consists of nodes that describe CPU power - * domains i.e. 
leaf nodes and all other power domains which are parents of a - * CPU power domain i.e. non-leaf nodes. - ******************************************************************************/ -typedef struct non_cpu_pwr_domain_node { - /* - * Index of the first CPU power domain node level 0 which has this node - * as its parent. - */ - unsigned int cpu_start_idx; - - /* - * Number of CPU power domains which are siblings of the domain indexed - * by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx - * -> cpu_start_idx + ncpus' have this node as their parent. - */ - unsigned int ncpus; - - /* - * Index of the parent power domain node. - * TODO: Figure out whether to whether using pointer is more efficient. - */ - unsigned int parent_node; - - plat_local_state_t local_state; - - unsigned char level; -#if USE_COHERENT_MEM - bakery_lock_t lock; -#else - /* For indexing the bakery_info array in per CPU data */ - unsigned char lock_index; -#endif -} non_cpu_pd_node_t; - -typedef struct cpu_pwr_domain_node { - unsigned long mpidr; - - /* - * Index of the parent power domain node. - * TODO: Figure out whether to whether using pointer is more efficient. - */ - unsigned int parent_node; - - /* - * A CPU power domain does not require state coordination like its - * parent power domains. Hence this node does not include a bakery - * lock. A spinlock is required by the CPU_ON handler to prevent a race - * when multiple CPUs try to turn ON the same target CPU. 
- */ - spinlock_t cpu_lock; -} cpu_pd_node_t; - -/******************************************************************************* - * Data prototypes - ******************************************************************************/ -extern const plat_psci_ops_t *psci_plat_pm_ops; -extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]; -extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT]; -extern uint32_t psci_caps; - -/******************************************************************************* - * SPD's power management hooks registered with PSCI - ******************************************************************************/ -extern const spd_pm_ops_t *psci_spd_pm; - -/******************************************************************************* - * Function prototypes - ******************************************************************************/ -/* Private exported functions from psci_common.c */ -int psci_validate_power_state(unsigned int power_state, - psci_power_state_t *state_info); -void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info); -int psci_validate_mpidr(unsigned long mpidr); -void psci_init_req_local_pwr_states(void); -void psci_power_up_finish(void); -int psci_get_ns_ep_info(entry_point_info_t *ep, - uint64_t entrypoint, uint64_t context_id); -void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx, - int end_lvl, - unsigned int node_index[]); -void psci_do_state_coordination(int end_pwrlvl, - psci_power_state_t *state_info); -void psci_acquire_pwr_domain_locks(int end_pwrlvl, - unsigned int cpu_idx); -void psci_release_pwr_domain_locks(int end_pwrlvl, - unsigned int cpu_idx); -int psci_validate_suspend_req(const psci_power_state_t *state_info, - unsigned int is_power_down_state_req); -unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info); -unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info); -void psci_set_pwr_domains_to_run(uint32_t 
end_pwrlvl); -void psci_print_power_domain_map(void); -unsigned int psci_is_last_on_cpu(void); -int psci_spd_migrate_info(uint64_t *mpidr); - -/* Private exported functions from psci_on.c */ -int psci_cpu_on_start(unsigned long target_cpu, - entry_point_info_t *ep, - int end_pwrlvl); - -void psci_cpu_on_finish(unsigned int cpu_idx, - psci_power_state_t *state_info); - -/* Private exported functions from psci_cpu_off.c */ -int psci_do_cpu_off(int end_pwrlvl); - -/* Private exported functions from psci_pwrlvl_suspend.c */ -void psci_cpu_suspend_start(entry_point_info_t *ep, - int end_pwrlvl, - psci_power_state_t *state_info, - unsigned int is_power_down_state_req); - -void psci_cpu_suspend_finish(unsigned int cpu_idx, - psci_power_state_t *state_info); - -/* Private exported functions from psci_helpers.S */ -void psci_do_pwrdown_cache_maintenance(uint32_t pwr_level); -void psci_do_pwrup_cache_maintenance(void); - -/* Private exported functions from psci_system_off.c */ -void __dead2 psci_system_off(void); -void __dead2 psci_system_reset(void); - -#endif /* __PSCI_PRIVATE_H__ */ diff --git a/services/std_svc/psci1.0/psci_setup.c b/services/std_svc/psci1.0/psci_setup.c deleted file mode 100644 index ce4da95..0000000 --- a/services/std_svc/psci1.0/psci_setup.c +++ /dev/null @@ -1,272 +0,0 @@ -/* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. 
- * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include "psci_private.h" - -/******************************************************************************* - * Per cpu non-secure contexts used to program the architectural state prior - * return to the normal world. - * TODO: Use the memory allocator to set aside memory for the contexts instead - * of relying on platform defined constants. - ******************************************************************************/ -static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT]; - -/****************************************************************************** - * Define the psci capability variable. 
- *****************************************************************************/ -uint32_t psci_caps; - -/******************************************************************************* - * Function which initializes the 'psci_non_cpu_pd_nodes' or the - * 'psci_cpu_pd_nodes' corresponding to the power level. - ******************************************************************************/ -static void psci_init_pwr_domain_node(int node_idx, int parent_idx, int level) -{ - if (level > PSCI_CPU_PWR_LVL) { - psci_non_cpu_pd_nodes[node_idx].level = level; - psci_lock_init(psci_non_cpu_pd_nodes, node_idx); - psci_non_cpu_pd_nodes[node_idx].parent_node = parent_idx; - psci_non_cpu_pd_nodes[node_idx].local_state = - PLAT_MAX_OFF_STATE; - } else { - psci_cpu_data_t *svc_cpu_data; - - psci_cpu_pd_nodes[node_idx].parent_node = parent_idx; - - /* Initialize with an invalid mpidr */ - psci_cpu_pd_nodes[node_idx].mpidr = PSCI_INVALID_MPIDR; - - svc_cpu_data = - &(_cpu_data_by_index(node_idx)->psci_svc_cpu_data); - - /* Set the Affinity Info for the cores as OFF */ - svc_cpu_data->aff_info_state = AFF_STATE_OFF; - - /* Invalidate the suspend level for the cpu */ - svc_cpu_data->target_pwrlvl = PSCI_INVALID_DATA; - - /* Set the power state to OFF state */ - svc_cpu_data->local_state = PLAT_MAX_OFF_STATE; - - flush_dcache_range((uint64_t)svc_cpu_data, - sizeof(*svc_cpu_data)); - - cm_set_context_by_index(node_idx, - (void *) &psci_ns_context[node_idx], - NON_SECURE); - } -} - -/******************************************************************************* - * This functions updates cpu_start_idx and ncpus field for each of the node in - * psci_non_cpu_pd_nodes[]. It does so by comparing the parent nodes of each of - * the CPUs and check whether they match with the parent of the previous - * CPU. The basic assumption for this work is that children of the same parent - * are allocated adjacent indices. 
The platform should ensure this though proper - * mapping of the CPUs to indices via plat_core_pos_by_mpidr() and - * plat_my_core_pos() APIs. - *******************************************************************************/ -static void psci_update_pwrlvl_limits(void) -{ - int cpu_idx, j; - unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0}; - unsigned int temp_index[PLAT_MAX_PWR_LVL]; - - for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) { - psci_get_parent_pwr_domain_nodes(cpu_idx, - PLAT_MAX_PWR_LVL, - temp_index); - for (j = PLAT_MAX_PWR_LVL - 1; j >= 0; j--) { - if (temp_index[j] != nodes_idx[j]) { - nodes_idx[j] = temp_index[j]; - psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx - = cpu_idx; - } - psci_non_cpu_pd_nodes[nodes_idx[j]].ncpus++; - } - } -} - -/******************************************************************************* - * Core routine to populate the power domain tree. The tree descriptor passed by - * the platform is populated breadth-first and the first entry in the map - * informs the number of root power domains. The parent nodes of the root nodes - * will point to an invalid entry(-1). - ******************************************************************************/ -static void populate_power_domain_tree(const unsigned char *topology) -{ - unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl; - unsigned int node_index = 0, parent_node_index = 0, num_children; - int level = PLAT_MAX_PWR_LVL; - - /* - * For each level the inputs are: - * - number of nodes at this level in plat_array i.e. num_nodes_at_level - * This is the sum of values of nodes at the parent level. - * - Index of first entry at this level in the plat_array i.e. - * parent_node_index. - * - Index of first free entry in psci_non_cpu_pd_nodes[] or - * psci_cpu_pd_nodes[] i.e. node_index depending upon the level. 
- */ - while (level >= PSCI_CPU_PWR_LVL) { - num_nodes_at_next_lvl = 0; - /* - * For each entry (parent node) at this level in the plat_array: - * - Find the number of children - * - Allocate a node in a power domain array for each child - * - Set the parent of the child to the parent_node_index - 1 - * - Increment parent_node_index to point to the next parent - * - Accumulate the number of children at next level. - */ - for (i = 0; i < num_nodes_at_lvl; i++) { - assert(parent_node_index <= - PSCI_NUM_NON_CPU_PWR_DOMAINS); - num_children = topology[parent_node_index]; - - for (j = node_index; - j < node_index + num_children; j++) - psci_init_pwr_domain_node(j, - parent_node_index - 1, - level); - - node_index = j; - num_nodes_at_next_lvl += num_children; - parent_node_index++; - } - - num_nodes_at_lvl = num_nodes_at_next_lvl; - level--; - - /* Reset the index for the cpu power domain array */ - if (level == PSCI_CPU_PWR_LVL) - node_index = 0; - } - - /* Validate the sanity of array exported by the platform */ - assert(j == PLATFORM_CORE_COUNT); - -#if !USE_COHERENT_MEM - /* Flush the non CPU power domain data to memory */ - flush_dcache_range((uint64_t) &psci_non_cpu_pd_nodes, - sizeof(psci_non_cpu_pd_nodes)); -#endif -} - -/******************************************************************************* - * This function initializes the power domain topology tree by querying the - * platform. The power domain nodes higher than the CPU are populated in the - * array psci_non_cpu_pd_nodes[] and the CPU power domains are populated in - * psci_cpu_pd_nodes[]. The platform exports its static topology map through the - * populate_power_domain_topology_tree() API. The algorithm populates the - * psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this - * topology map. 
On a platform that implements two clusters of 2 cpus each, and - * supporting 3 domain levels, the populated psci_non_cpu_pd_nodes would look - * like this: - * - * --------------------------------------------------- - * | system node | cluster 0 node | cluster 1 node | - * --------------------------------------------------- - * - * And populated psci_cpu_pd_nodes would look like this : - * <- cpus cluster0 -><- cpus cluster1 -> - * ------------------------------------------------ - * | CPU 0 | CPU 1 | CPU 2 | CPU 3 | - * ------------------------------------------------ - ******************************************************************************/ -int32_t psci_setup(void) -{ - const unsigned char *topology_tree; - - /* Query the topology map from the platform */ - topology_tree = plat_get_power_domain_tree_desc(); - - /* Populate the power domain arrays using the platform topology map */ - populate_power_domain_tree(topology_tree); - - /* Update the CPU limits for each node in psci_non_cpu_pd_nodes */ - psci_update_pwrlvl_limits(); - - /* Populate the mpidr field of cpu node for this CPU */ - psci_cpu_pd_nodes[plat_my_core_pos()].mpidr = - read_mpidr() & MPIDR_AFFINITY_MASK; - -#if !USE_COHERENT_MEM - /* - * The psci_non_cpu_pd_nodes only needs flushing when it's not allocated in - * coherent memory. - */ - flush_dcache_range((uint64_t) &psci_non_cpu_pd_nodes, - sizeof(psci_non_cpu_pd_nodes)); -#endif - - flush_dcache_range((uint64_t) &psci_cpu_pd_nodes, - sizeof(psci_cpu_pd_nodes)); - - psci_init_req_local_pwr_states(); - - /* - * Set the requested and target state of this CPU and all the higher - * power domain levels for this CPU to run. 
- */ - psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL); - - plat_setup_psci_ops((uintptr_t)psci_entrypoint, - &psci_plat_pm_ops); - assert(psci_plat_pm_ops); - - /* Initialize the psci capability */ - psci_caps = PSCI_GENERIC_CAP; - - if (psci_plat_pm_ops->pwr_domain_off) - psci_caps |= define_psci_cap(PSCI_CPU_OFF); - if (psci_plat_pm_ops->pwr_domain_on && - psci_plat_pm_ops->pwr_domain_on_finish) - psci_caps |= define_psci_cap(PSCI_CPU_ON_AARCH64); - if (psci_plat_pm_ops->pwr_domain_suspend && - psci_plat_pm_ops->pwr_domain_suspend_finish) { - psci_caps |= define_psci_cap(PSCI_CPU_SUSPEND_AARCH64); - if (psci_plat_pm_ops->get_sys_suspend_power_state) - psci_caps |= define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64); - } - if (psci_plat_pm_ops->system_off) - psci_caps |= define_psci_cap(PSCI_SYSTEM_OFF); - if (psci_plat_pm_ops->system_reset) - psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET); - - return 0; -} diff --git a/services/std_svc/psci1.0/psci_suspend.c b/services/std_svc/psci1.0/psci_suspend.c deleted file mode 100644 index 71e4778..0000000 --- a/services/std_svc/psci1.0/psci_suspend.c +++ /dev/null @@ -1,265 +0,0 @@ -/* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "psci_private.h" - -/******************************************************************************* - * This function does generic and platform specific operations after a wake-up - * from standby/retention states at multiple power levels. - ******************************************************************************/ -static void psci_suspend_to_standby_finisher(unsigned int cpu_idx, - psci_power_state_t *state_info, - unsigned int end_pwrlvl) -{ - psci_acquire_pwr_domain_locks(end_pwrlvl, - cpu_idx); - - /* - * Plat. management: Allow the platform to do operations - * on waking up from retention. - */ - psci_plat_pm_ops->pwr_domain_suspend_finish(state_info); - - /* - * Set the requested and target state of this CPU and all the higher - * power domain levels for this CPU to run. 
- */ - psci_set_pwr_domains_to_run(end_pwrlvl); - - psci_release_pwr_domain_locks(end_pwrlvl, - cpu_idx); -} - -/******************************************************************************* - * This function does generic and platform specific suspend to power down - * operations. - ******************************************************************************/ -static void psci_suspend_to_pwrdown_start(int end_pwrlvl, - entry_point_info_t *ep, - psci_power_state_t *state_info) -{ - /* Save PSCI target power level for the suspend finisher handler */ - psci_set_suspend_pwrlvl(end_pwrlvl); - - /* - * Flush the target power level as it will be accessed on power up with - * Data cache disabled. - */ - flush_cpu_data(psci_svc_cpu_data.target_pwrlvl); - - /* - * Call the cpu suspend handler registered by the Secure Payload - * Dispatcher to let it do any book-keeping. If the handler encounters an - * error, it's expected to assert within - */ - if (psci_spd_pm && psci_spd_pm->svc_suspend) - psci_spd_pm->svc_suspend(0); - - /* - * Store the re-entry information for the non-secure world. - */ - cm_init_my_context(ep); - - /* - * Arch. management. Perform the necessary steps to flush all - * cpu caches. Currently we assume that the power level correspond - * the cache level. - * TODO : Introduce a mechanism to query the cache level to flush - * and the cpu-ops power down to perform from the platform. - */ - psci_do_pwrdown_cache_maintenance(psci_find_max_off_lvl(state_info)); -} - -/******************************************************************************* - * Top level handler which is called when a cpu wants to suspend its execution. - * It is assumed that along with suspending the cpu power domain, power domains - * at higher levels until the target power level will be suspended as well. It - * coordinates with the platform to negotiate the target state for each of - * the power domain level till the target power domain level. 
It then performs - * generic, architectural, platform setup and state management required to - * suspend that power domain level and power domain levels below it. - * e.g. For a cpu that's to be suspended, it could mean programming the - * power controller whereas for a cluster that's to be suspended, it will call - * the platform specific code which will disable coherency at the interconnect - * level if the cpu is the last in the cluster and also the program the power - * controller. - * - * All the required parameter checks are performed at the beginning and after - * the state transition has been done, no further error is expected and it is - * not possible to undo any of the actions taken beyond that point. - ******************************************************************************/ -void psci_cpu_suspend_start(entry_point_info_t *ep, - int end_pwrlvl, - psci_power_state_t *state_info, - unsigned int is_power_down_state) -{ - int skip_wfi = 0; - unsigned int idx = plat_my_core_pos(); - - /* - * This function must only be called on platforms where the - * CPU_SUSPEND platform hooks have been implemented. - */ - assert(psci_plat_pm_ops->pwr_domain_suspend && - psci_plat_pm_ops->pwr_domain_suspend_finish); - - /* - * This function acquires the lock corresponding to each power - * level so that by the time all locks are taken, the system topology - * is snapshot and state management can be done safely. - */ - psci_acquire_pwr_domain_locks(end_pwrlvl, - idx); - - /* - * We check if there are any pending interrupts after the delay - * introduced by lock contention to increase the chances of early - * detection that a wake-up interrupt has fired. - */ - if (read_isr_el1()) { - skip_wfi = 1; - goto exit; - } - - /* - * This function is passed the requested state info and - * it returns the negotiated state info for each power level upto - * the end level specified. 
- */ - psci_do_state_coordination(end_pwrlvl, state_info); - - if (is_power_down_state) - psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info); - - /* - * Plat. management: Allow the platform to perform the - * necessary actions to turn off this cpu e.g. set the - * platform defined mailbox with the psci entrypoint, - * program the power controller etc. - */ - psci_plat_pm_ops->pwr_domain_suspend(state_info); - -exit: - /* - * Release the locks corresponding to each power level in the - * reverse order to which they were acquired. - */ - psci_release_pwr_domain_locks(end_pwrlvl, - idx); - if (skip_wfi) - return; - - if (is_power_down_state) - psci_power_down_wfi(); - - /* - * We will reach here if only retention/standby states have been - * requested at multiple power levels. This means that the cpu - * context will be preserved. - */ - wfi(); - - /* - * After we wake up from context retaining suspend, call the - * context retaining suspend finisher. - */ - psci_suspend_to_standby_finisher(idx, state_info, end_pwrlvl); -} - -/******************************************************************************* - * The following functions finish an earlier suspend request. They - * are called by the common finisher routine in psci_common.c. The `state_info` - * is the psci_power_state from which this CPU has woken up from. - ******************************************************************************/ -void psci_cpu_suspend_finish(unsigned int cpu_idx, - psci_power_state_t *state_info) -{ - int32_t suspend_level; - uint64_t counter_freq; - - /* Ensure we have been woken up from a suspended state */ - assert(psci_get_aff_info_state() == AFF_STATE_ON && is_local_state_off(\ - state_info->pwr_domain_state[PSCI_CPU_PWR_LVL])); - - /* - * Plat. management: Perform the platform specific actions - * before we change the state of the cpu e.g. enabling the - * gic or zeroing the mailbox register. 
If anything goes - * wrong then assert as there is no way to recover from this - * situation. - */ - psci_plat_pm_ops->pwr_domain_suspend_finish(state_info); - - /* - * Arch. management: Enable the data cache, manage stack memory and - * restore the stashed EL3 architectural context from the 'cpu_context' - * structure for this cpu. - */ - psci_do_pwrup_cache_maintenance(); - - /* Re-init the cntfrq_el0 register */ - counter_freq = plat_get_syscnt_freq(); - write_cntfrq_el0(counter_freq); - - /* - * Call the cpu suspend finish handler registered by the Secure Payload - * Dispatcher to let it do any bookeeping. If the handler encounters an - * error, it's expected to assert within - */ - if (psci_spd_pm && psci_spd_pm->svc_suspend) { - suspend_level = psci_get_suspend_pwrlvl(); - assert (suspend_level != PSCI_INVALID_DATA); - psci_spd_pm->svc_suspend_finish(suspend_level); - } - - /* Invalidate the suspend level for the cpu */ - psci_set_suspend_pwrlvl(PSCI_INVALID_DATA); - - /* - * Generic management: Now we just need to retrieve the - * information that we had stashed away during the suspend - * call to set this cpu on its way. - */ - cm_prepare_el3_exit(NON_SECURE); - - /* Clean caches before re-entering normal world */ - dcsw_op_louis(DCCSW); -} diff --git a/services/std_svc/psci1.0/psci_system_off.c b/services/std_svc/psci1.0/psci_system_off.c deleted file mode 100644 index 28315d6..0000000 --- a/services/std_svc/psci1.0/psci_system_off.c +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. 
- * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include "psci_private.h" - -void psci_system_off(void) -{ - psci_print_power_domain_map(); - - assert(psci_plat_pm_ops->system_off); - - /* Notify the Secure Payload Dispatcher */ - if (psci_spd_pm && psci_spd_pm->svc_system_off) { - psci_spd_pm->svc_system_off(); - } - - /* Call the platform specific hook */ - psci_plat_pm_ops->system_off(); - - /* This function does not return. 
We should never get here */ -} - -void psci_system_reset(void) -{ - psci_print_power_domain_map(); - - assert(psci_plat_pm_ops->system_reset); - - /* Notify the Secure Payload Dispatcher */ - if (psci_spd_pm && psci_spd_pm->svc_system_reset) { - psci_spd_pm->svc_system_reset(); - } - - /* Call the platform specific hook */ - psci_plat_pm_ops->system_reset(); - - /* This function does not return. We should never get here */ -}