diff --git a/Makefile b/Makefile index 800312c..bb5098b 100644 --- a/Makefile +++ b/Makefile @@ -226,20 +226,24 @@ plat/common/aarch64/platform_helpers.S \ ${STDLIB_SRCS} -INCLUDES += -Iinclude/bl1 \ - -Iinclude/bl31 \ - -Iinclude/bl31/services \ - -Iinclude/common \ - -Iinclude/drivers \ - -Iinclude/drivers/arm \ - -Iinclude/drivers/auth \ - -Iinclude/drivers/io \ - -Iinclude/drivers/ti/uart \ - -Iinclude/lib \ - -Iinclude/lib/aarch64 \ - -Iinclude/lib/cpus/aarch64 \ - -Iinclude/plat/common \ - ${PLAT_INCLUDES} \ +INCLUDES += -Iinclude/bl1 \ + -Iinclude/bl31 \ + -Iinclude/common \ + -Iinclude/common/aarch64 \ + -Iinclude/drivers \ + -Iinclude/drivers/arm \ + -Iinclude/drivers/auth \ + -Iinclude/drivers/io \ + -Iinclude/drivers/ti/uart \ + -Iinclude/lib \ + -Iinclude/lib/aarch64 \ + -Iinclude/lib/cpus/aarch64 \ + -Iinclude/lib/el3_runtime \ + -Iinclude/lib/el3_runtime/aarch64 \ + -Iinclude/lib/psci \ + -Iinclude/plat/common \ + -Iinclude/services \ + ${PLAT_INCLUDES} \ ${SPD_INCLUDES} diff --git a/bl1/bl1.mk b/bl1/bl1.mk index 21e87c7..591e047 100644 --- a/bl1/bl1.mk +++ b/bl1/bl1.mk @@ -1,5 +1,5 @@ # -# Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. +# Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: @@ -33,9 +33,9 @@ bl1/aarch64/bl1_entrypoint.S \ bl1/aarch64/bl1_exceptions.S \ bl1/bl1_context_mgmt.c \ - common/aarch64/context.S \ - common/context_mgmt.c \ lib/cpus/aarch64/cpu_helpers.S \ + lib/el3_runtime/aarch64/context.S \ + lib/el3_runtime/aarch64/context_mgmt.c \ plat/common/plat_bl1_common.c ifeq (${TRUSTED_BOARD_BOOT},1) diff --git a/bl31/aarch64/bl31_arch_setup.c b/bl31/aarch64/bl31_arch_setup.c deleted file mode 100644 index 3deacba..0000000 --- a/bl31/aarch64/bl31_arch_setup.c +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-/*******************************************************************************
- * This duplicates what the primary cpu did after a cold boot in BL1. The same
- * needs to be done when a cpu is hotplugged in. This function could also over-
- * ride any EL3 setup done by BL1 as this code resides in rw memory.
- ******************************************************************************/
-void bl31_arch_setup(void)
-{
-	/* Program the counter frequency */
-	write_cntfrq_el0(plat_get_syscnt_freq2());
-
-	/* Initialize the cpu_ops pointer. */
-	init_cpu_ops();
-}
diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S
index 1c8eed9..4c3a515 100644
--- a/bl31/aarch64/bl31_entrypoint.S
+++ b/bl31/aarch64/bl31_entrypoint.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
@@ -31,9 +31,10 @@
 #include
 #include
 #include
+#include
 
 	.globl	bl31_entrypoint
-
+	.globl	bl31_warm_entrypoint
 
 /* -----------------------------------------------------
 * bl31_entrypoint() is the cold boot entrypoint,
@@ -132,3 +133,60 @@
 	b	el3_exit
 endfunc bl31_entrypoint
+
+	/* --------------------------------------------------------------------
+	 * This CPU has been physically powered up. It is either resuming from
+	 * suspend or has simply been turned on. In both cases, call the BL31
+	 * warm boot entrypoint.
+	 * --------------------------------------------------------------------
+	 */
+func bl31_warm_entrypoint
+	/*
+	 * On the warm boot path, most of the EL3 initialisations performed by
+	 * 'el3_entrypoint_common' must be skipped:
+	 *
+	 * - Only when the platform bypasses the BL1/BL31 entrypoint by
+	 *   programming the reset address do we need to set the CPU endianness.
+	 *   In other cases, we assume this has been taken care of by the
+	 *   entrypoint code.
+	 *
+	 * - No need to determine the type of boot, we know it is a warm boot.
+	 *
+	 * - Do not try to distinguish between primary and secondary CPUs, this
+	 *   notion only exists for a cold boot.
+	 *
+	 * - No need to initialise the memory or the C runtime environment,
+	 *   it has been done once and for all on the cold boot path.
+	 */
+	el3_entrypoint_common					\
+		_set_endian=PROGRAMMABLE_RESET_ADDRESS		\
+		_warm_boot_mailbox=0				\
+		_secondary_cold_boot=0				\
+		_init_memory=0					\
+		_init_c_runtime=0				\
+		_exception_vectors=runtime_exceptions
+
+	/* --------------------------------------------
+	 * Enable the MMU with the DCache disabled. It
+	 * is safe to use stacks allocated in normal
+	 * memory as a result. All memory accesses are
+	 * marked nGnRnE when the MMU is disabled. So
+	 * all the stack writes will make it to memory.
+	 * All memory accesses are marked Non-cacheable
+	 * when the MMU is enabled but D$ is disabled.
+	 * So used stack memory is guaranteed to be
+	 * visible immediately after the MMU is enabled.
+	 * Enabling the DCache at the same time as the
+	 * MMU can lead to speculatively fetched and
+	 * possibly stale stack memory being read from
+	 * other caches. This can lead to coherency
+	 * issues.
+ * -------------------------------------------- + */ + mov x0, #DISABLE_DCACHE + bl bl31_plat_enable_mmu + + bl psci_warmboot_entrypoint + + b el3_exit +endfunc bl31_warm_entrypoint diff --git a/bl31/aarch64/cpu_data.S b/bl31/aarch64/cpu_data.S deleted file mode 100644 index 0842825..0000000 --- a/bl31/aarch64/cpu_data.S +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include - -.globl init_cpu_data_ptr -.globl _cpu_data_by_index - -/* ----------------------------------------------------------------- - * void init_cpu_data_ptr(void) - * - * Initialise the TPIDR_EL3 register to refer to the cpu_data_t - * for the calling CPU. This must be called before cm_get_cpu_data() - * - * This can be called without a valid stack. It assumes that - * plat_my_core_pos() does not clobber register x10. - * clobbers: x0, x1, x10 - * ----------------------------------------------------------------- - */ -func init_cpu_data_ptr - mov x10, x30 - bl plat_my_core_pos - bl _cpu_data_by_index - msr tpidr_el3, x0 - ret x10 -endfunc init_cpu_data_ptr - -/* ----------------------------------------------------------------- - * cpu_data_t *_cpu_data_by_index(uint32_t cpu_index) - * - * Return the cpu_data structure for the CPU with given linear index - * - * This can be called without a valid stack. - * clobbers: x0, x1 - * ----------------------------------------------------------------- - */ -func _cpu_data_by_index - adr x1, percpu_data - add x0, x1, x0, LSL #CPU_DATA_LOG2SIZE - ret -endfunc _cpu_data_by_index diff --git a/bl31/bl31.mk b/bl31/bl31.mk index 8a7fccb..4de511b 100644 --- a/bl31/bl31.mk +++ b/bl31/bl31.mk @@ -28,45 +28,22 @@ # POSSIBILITY OF SUCH DAMAGE. 
# +include lib/psci/psci_lib.mk + BL31_SOURCES += bl31/bl31_main.c \ - bl31/cpu_data_array.c \ - bl31/runtime_svc.c \ bl31/interrupt_mgmt.c \ - bl31/aarch64/bl31_arch_setup.c \ bl31/aarch64/bl31_entrypoint.S \ - bl31/aarch64/cpu_data.S \ bl31/aarch64/runtime_exceptions.S \ bl31/aarch64/crash_reporting.S \ bl31/bl31_context_mgmt.c \ - common/aarch64/context.S \ - common/context_mgmt.c \ - lib/cpus/aarch64/cpu_helpers.S \ - lib/locks/exclusive/spinlock.S \ + common/runtime_svc.c \ services/std_svc/std_svc_setup.c \ - services/std_svc/psci/psci_off.c \ - services/std_svc/psci/psci_on.c \ - services/std_svc/psci/psci_suspend.c \ - services/std_svc/psci/psci_common.c \ - services/std_svc/psci/psci_entry.S \ - services/std_svc/psci/psci_helpers.S \ - services/std_svc/psci/psci_main.c \ - services/std_svc/psci/psci_setup.c \ - services/std_svc/psci/psci_system_off.c - -ifeq (${USE_COHERENT_MEM}, 1) -BL31_SOURCES += lib/locks/bakery/bakery_lock_coherent.c -else -BL31_SOURCES += lib/locks/bakery/bakery_lock_normal.c -endif + ${PSCI_LIB_SOURCES} ifeq (${ENABLE_PMF}, 1) BL31_SOURCES += lib/pmf/pmf_main.c endif -ifeq (${ENABLE_PSCI_STAT}, 1) -BL31_SOURCES += services/std_svc/psci/psci_stat.c -endif - BL31_LINKERFILE := bl31/bl31.ld.S # Flag used to indicate if Crash reporting via console should be included diff --git a/bl31/bl31_context_mgmt.c b/bl31/bl31_context_mgmt.c index ae24424..f8751c2 100644 --- a/bl31/bl31_context_mgmt.c +++ b/bl31/bl31_context_mgmt.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -30,6 +30,7 @@ #include #include +#include #include #include #include @@ -130,4 +131,4 @@ else cm_init_context_by_index(platform_get_core_pos(mpidr), ep); } -#endif \ No newline at end of file +#endif diff --git a/bl31/bl31_main.c b/bl31/bl31_main.c index 7f04d21..f95ef41 100644 --- a/bl31/bl31_main.c +++ b/bl31/bl31_main.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -59,6 +59,12 @@ void bl31_lib_init(void) { cm_init(); + + /* + * Initialize the PSCI library here. This also does EL3 architectural + * setup. + */ + psci_setup((uintptr_t)bl31_warm_entrypoint); } /******************************************************************************* @@ -74,9 +80,6 @@ NOTICE("BL31: %s\n", version_string); NOTICE("BL31: %s\n", build_message); - /* Perform remaining generic architectural setup from EL3 */ - bl31_arch_setup(); - /* Perform platform setup in BL31 */ bl31_platform_setup(); diff --git a/bl31/cpu_data_array.c b/bl31/cpu_data_array.c deleted file mode 100644 index 4cba118..0000000 --- a/bl31/cpu_data_array.c +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. 
- * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include - -/* The per_cpu_ptr_cache_t space allocation */ -cpu_data_t percpu_data[PLATFORM_CORE_COUNT]; diff --git a/bl31/runtime_svc.c b/bl31/runtime_svc.c deleted file mode 100644 index f011f11..0000000 --- a/bl31/runtime_svc.c +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include - -/******************************************************************************* - * The 'rt_svc_descs' array holds the runtime service descriptors exported by - * services by placing them in the 'rt_svc_descs' linker section. - * The 'rt_svc_descs_indices' array holds the index of a descriptor in the - * 'rt_svc_descs' array. 
When an SMC arrives, the OEN[29:24] bits and the call - * type[31] bit in the function id are combined to get an index into the - * 'rt_svc_descs_indices' array. This gives the index of the descriptor in the - * 'rt_svc_descs' array which contains the SMC handler. - ******************************************************************************/ -#define RT_SVC_DESCS_START ((uint64_t) (&__RT_SVC_DESCS_START__)) -#define RT_SVC_DESCS_END ((uint64_t) (&__RT_SVC_DESCS_END__)) -uint8_t rt_svc_descs_indices[MAX_RT_SVCS]; -static rt_svc_desc_t *rt_svc_descs; - -/******************************************************************************* - * Simple routine to sanity check a runtime service descriptor before using it - ******************************************************************************/ -static int32_t validate_rt_svc_desc(rt_svc_desc_t *desc) -{ - if (desc == NULL) - return -EINVAL; - - if (desc->start_oen > desc->end_oen) - return -EINVAL; - - if (desc->end_oen >= OEN_LIMIT) - return -EINVAL; - - if (desc->call_type != SMC_TYPE_FAST && desc->call_type != SMC_TYPE_STD) - return -EINVAL; - - /* A runtime service having no init or handle function doesn't make sense */ - if (desc->init == NULL && desc->handle == NULL) - return -EINVAL; - - return 0; -} - -/******************************************************************************* - * This function calls the initialisation routine in the descriptor exported by - * a runtime service. Once a descriptor has been validated, its start & end - * owning entity numbers and the call type are combined to form a unique oen. - * The unique oen is used as an index into the 'rt_svc_descs_indices' array. - * The index of the runtime service descriptor is stored at this index. - ******************************************************************************/ -void runtime_svc_init(void) -{ - int32_t rc = 0; - uint32_t index, start_idx, end_idx; - uint64_t rt_svc_descs_num; - - /* If no runtime services are implemented then simply bail out */ - rt_svc_descs_num = RT_SVC_DESCS_END - RT_SVC_DESCS_START; - rt_svc_descs_num /= sizeof(rt_svc_desc_t); - if (rt_svc_descs_num == 0) - return; - - /* Initialise internal variables to invalid state */ - memset(rt_svc_descs_indices, -1, sizeof(rt_svc_descs_indices)); - - rt_svc_descs = (rt_svc_desc_t *) RT_SVC_DESCS_START; - for (index = 0; index < rt_svc_descs_num; index++) { - - /* - * An invalid descriptor is an error condition since it is - * difficult to predict the system behaviour in the absence - * of this service. - */ - rc = validate_rt_svc_desc(&rt_svc_descs[index]); - if (rc) { - ERROR("Invalid runtime service descriptor %p (%s)\n", - (void *) &rt_svc_descs[index], - rt_svc_descs[index].name); - goto error; - } - - /* - * The runtime service may have separate rt_svc_desc_t - * for its fast smc and standard smc. Since the service itself - * need to be initialized only once, only one of them will have - * an initialisation routine defined. Call the initialisation - * routine for this runtime service, if it is defined. - */ - if (rt_svc_descs[index].init) { - rc = rt_svc_descs[index].init(); - if (rc) { - ERROR("Error initializing runtime service %s\n", - rt_svc_descs[index].name); - continue; - } - } - - /* - * Fill the indices corresponding to the start and end - * owning entity numbers with the index of the - * descriptor which will handle the SMCs for this owning - * entity range. 
- */ - start_idx = get_unique_oen(rt_svc_descs[index].start_oen, - rt_svc_descs[index].call_type); - end_idx = get_unique_oen(rt_svc_descs[index].end_oen, - rt_svc_descs[index].call_type); - - for (; start_idx <= end_idx; start_idx++) - rt_svc_descs_indices[start_idx] = index; - } - - return; -error: - panic(); -} diff --git a/common/aarch64/context.S b/common/aarch64/context.S deleted file mode 100644 index d51daa7..0000000 --- a/common/aarch64/context.S +++ /dev/null @@ -1,405 +0,0 @@ -/* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include - - .global el1_sysregs_context_save - .global el1_sysregs_context_restore -#if CTX_INCLUDE_FPREGS - .global fpregs_context_save - .global fpregs_context_restore -#endif - .global save_gp_registers - .global restore_gp_registers_eret - .global restore_gp_registers_callee_eret - .global el3_exit - -/* ----------------------------------------------------- - * The following function strictly follows the AArch64 - * PCS to use x9-x17 (temporary caller-saved registers) - * to save EL1 system register context. It assumes that - * 'x0' is pointing to a 'el1_sys_regs' structure where - * the register context will be saved. 
- * ----------------------------------------------------- - */ -func el1_sysregs_context_save - - mrs x9, spsr_el1 - mrs x10, elr_el1 - stp x9, x10, [x0, #CTX_SPSR_EL1] - - mrs x15, sctlr_el1 - mrs x16, actlr_el1 - stp x15, x16, [x0, #CTX_SCTLR_EL1] - - mrs x17, cpacr_el1 - mrs x9, csselr_el1 - stp x17, x9, [x0, #CTX_CPACR_EL1] - - mrs x10, sp_el1 - mrs x11, esr_el1 - stp x10, x11, [x0, #CTX_SP_EL1] - - mrs x12, ttbr0_el1 - mrs x13, ttbr1_el1 - stp x12, x13, [x0, #CTX_TTBR0_EL1] - - mrs x14, mair_el1 - mrs x15, amair_el1 - stp x14, x15, [x0, #CTX_MAIR_EL1] - - mrs x16, tcr_el1 - mrs x17, tpidr_el1 - stp x16, x17, [x0, #CTX_TCR_EL1] - - mrs x9, tpidr_el0 - mrs x10, tpidrro_el0 - stp x9, x10, [x0, #CTX_TPIDR_EL0] - - mrs x13, par_el1 - mrs x14, far_el1 - stp x13, x14, [x0, #CTX_PAR_EL1] - - mrs x15, afsr0_el1 - mrs x16, afsr1_el1 - stp x15, x16, [x0, #CTX_AFSR0_EL1] - - mrs x17, contextidr_el1 - mrs x9, vbar_el1 - stp x17, x9, [x0, #CTX_CONTEXTIDR_EL1] - - /* Save AArch32 system registers if the build has instructed so */ -#if CTX_INCLUDE_AARCH32_REGS - mrs x11, spsr_abt - mrs x12, spsr_und - stp x11, x12, [x0, #CTX_SPSR_ABT] - - mrs x13, spsr_irq - mrs x14, spsr_fiq - stp x13, x14, [x0, #CTX_SPSR_IRQ] - - mrs x15, dacr32_el2 - mrs x16, ifsr32_el2 - stp x15, x16, [x0, #CTX_DACR32_EL2] - - mrs x17, fpexc32_el2 - str x17, [x0, #CTX_FP_FPEXC32_EL2] -#endif - - /* Save NS timer registers if the build has instructed so */ -#if NS_TIMER_SWITCH - mrs x10, cntp_ctl_el0 - mrs x11, cntp_cval_el0 - stp x10, x11, [x0, #CTX_CNTP_CTL_EL0] - - mrs x12, cntv_ctl_el0 - mrs x13, cntv_cval_el0 - stp x12, x13, [x0, #CTX_CNTV_CTL_EL0] - - mrs x14, cntkctl_el1 - str x14, [x0, #CTX_CNTKCTL_EL1] -#endif - - ret -endfunc el1_sysregs_context_save - -/* ----------------------------------------------------- - * The following function strictly follows the AArch64 - * PCS to use x9-x17 (temporary caller-saved registers) - * to restore EL1 system register context. 
It assumes - * that 'x0' is pointing to a 'el1_sys_regs' structure - * from where the register context will be restored - * ----------------------------------------------------- - */ -func el1_sysregs_context_restore - - ldp x9, x10, [x0, #CTX_SPSR_EL1] - msr spsr_el1, x9 - msr elr_el1, x10 - - ldp x15, x16, [x0, #CTX_SCTLR_EL1] - msr sctlr_el1, x15 - msr actlr_el1, x16 - - ldp x17, x9, [x0, #CTX_CPACR_EL1] - msr cpacr_el1, x17 - msr csselr_el1, x9 - - ldp x10, x11, [x0, #CTX_SP_EL1] - msr sp_el1, x10 - msr esr_el1, x11 - - ldp x12, x13, [x0, #CTX_TTBR0_EL1] - msr ttbr0_el1, x12 - msr ttbr1_el1, x13 - - ldp x14, x15, [x0, #CTX_MAIR_EL1] - msr mair_el1, x14 - msr amair_el1, x15 - - ldp x16, x17, [x0, #CTX_TCR_EL1] - msr tcr_el1, x16 - msr tpidr_el1, x17 - - ldp x9, x10, [x0, #CTX_TPIDR_EL0] - msr tpidr_el0, x9 - msr tpidrro_el0, x10 - - ldp x13, x14, [x0, #CTX_PAR_EL1] - msr par_el1, x13 - msr far_el1, x14 - - ldp x15, x16, [x0, #CTX_AFSR0_EL1] - msr afsr0_el1, x15 - msr afsr1_el1, x16 - - ldp x17, x9, [x0, #CTX_CONTEXTIDR_EL1] - msr contextidr_el1, x17 - msr vbar_el1, x9 - - /* Restore AArch32 system registers if the build has instructed so */ -#if CTX_INCLUDE_AARCH32_REGS - ldp x11, x12, [x0, #CTX_SPSR_ABT] - msr spsr_abt, x11 - msr spsr_und, x12 - - ldp x13, x14, [x0, #CTX_SPSR_IRQ] - msr spsr_irq, x13 - msr spsr_fiq, x14 - - ldp x15, x16, [x0, #CTX_DACR32_EL2] - msr dacr32_el2, x15 - msr ifsr32_el2, x16 - - ldr x17, [x0, #CTX_FP_FPEXC32_EL2] - msr fpexc32_el2, x17 -#endif - /* Restore NS timer registers if the build has instructed so */ -#if NS_TIMER_SWITCH - ldp x10, x11, [x0, #CTX_CNTP_CTL_EL0] - msr cntp_ctl_el0, x10 - msr cntp_cval_el0, x11 - - ldp x12, x13, [x0, #CTX_CNTV_CTL_EL0] - msr cntv_ctl_el0, x12 - msr cntv_cval_el0, x13 - - ldr x14, [x0, #CTX_CNTKCTL_EL1] - msr cntkctl_el1, x14 -#endif - - /* No explict ISB required here as ERET covers it */ - ret -endfunc el1_sysregs_context_restore - -/* ----------------------------------------------------- - * The following function follows the aapcs_64 strictly - * to use x9-x17 (temporary caller-saved registers - * according to AArch64 PCS) to save floating point - * register context. It assumes that 'x0' is pointing to - * a 'fp_regs' structure where the register context will - * be saved. - * - * Access to VFP registers will trap if CPTR_EL3.TFP is - * set. However currently we don't use VFP registers - * nor set traps in Trusted Firmware, and assume it's - * cleared - * - * TODO: Revisit when VFP is used in secure world - * ----------------------------------------------------- - */ -#if CTX_INCLUDE_FPREGS -func fpregs_context_save - stp q0, q1, [x0, #CTX_FP_Q0] - stp q2, q3, [x0, #CTX_FP_Q2] - stp q4, q5, [x0, #CTX_FP_Q4] - stp q6, q7, [x0, #CTX_FP_Q6] - stp q8, q9, [x0, #CTX_FP_Q8] - stp q10, q11, [x0, #CTX_FP_Q10] - stp q12, q13, [x0, #CTX_FP_Q12] - stp q14, q15, [x0, #CTX_FP_Q14] - stp q16, q17, [x0, #CTX_FP_Q16] - stp q18, q19, [x0, #CTX_FP_Q18] - stp q20, q21, [x0, #CTX_FP_Q20] - stp q22, q23, [x0, #CTX_FP_Q22] - stp q24, q25, [x0, #CTX_FP_Q24] - stp q26, q27, [x0, #CTX_FP_Q26] - stp q28, q29, [x0, #CTX_FP_Q28] - stp q30, q31, [x0, #CTX_FP_Q30] - - mrs x9, fpsr - str x9, [x0, #CTX_FP_FPSR] - - mrs x10, fpcr - str x10, [x0, #CTX_FP_FPCR] - - ret -endfunc fpregs_context_save - -/* ----------------------------------------------------- - * The following function follows the aapcs_64 strictly - * to use x9-x17 (temporary caller-saved registers - * according to AArch64 PCS) to restore floating point - * register context. 
It assumes that 'x0' is pointing to - * a 'fp_regs' structure from where the register context - * will be restored. - * - * Access to VFP registers will trap if CPTR_EL3.TFP is - * set. However currently we don't use VFP registers - * nor set traps in Trusted Firmware, and assume it's - * cleared - * - * TODO: Revisit when VFP is used in secure world - * ----------------------------------------------------- - */ -func fpregs_context_restore - ldp q0, q1, [x0, #CTX_FP_Q0] - ldp q2, q3, [x0, #CTX_FP_Q2] - ldp q4, q5, [x0, #CTX_FP_Q4] - ldp q6, q7, [x0, #CTX_FP_Q6] - ldp q8, q9, [x0, #CTX_FP_Q8] - ldp q10, q11, [x0, #CTX_FP_Q10] - ldp q12, q13, [x0, #CTX_FP_Q12] - ldp q14, q15, [x0, #CTX_FP_Q14] - ldp q16, q17, [x0, #CTX_FP_Q16] - ldp q18, q19, [x0, #CTX_FP_Q18] - ldp q20, q21, [x0, #CTX_FP_Q20] - ldp q22, q23, [x0, #CTX_FP_Q22] - ldp q24, q25, [x0, #CTX_FP_Q24] - ldp q26, q27, [x0, #CTX_FP_Q26] - ldp q28, q29, [x0, #CTX_FP_Q28] - ldp q30, q31, [x0, #CTX_FP_Q30] - - ldr x9, [x0, #CTX_FP_FPSR] - msr fpsr, x9 - - ldr x10, [x0, #CTX_FP_FPCR] - msr fpcr, x10 - - /* - * No explict ISB required here as ERET to - * switch to secure EL1 or non-secure world - * covers it - */ - - ret -endfunc fpregs_context_restore -#endif /* CTX_INCLUDE_FPREGS */ - -/* ----------------------------------------------------- - * The following functions are used to save and restore - * all the general purpose registers. Ideally we would - * only save and restore the callee saved registers when - * a world switch occurs but that type of implementation - * is more complex. So currently we will always save and - * restore these registers on entry and exit of EL3. - * These are not macros to ensure their invocation fits - * within the 32 instructions per exception vector. - * clobbers: x18 - * ----------------------------------------------------- - */ -func save_gp_registers - stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0] - stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2] - stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4] - stp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6] - stp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8] - stp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10] - stp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12] - stp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14] - stp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16] - stp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18] - stp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20] - stp x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22] - stp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24] - stp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26] - stp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28] - mrs x18, sp_el0 - str x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0] - ret -endfunc save_gp_registers - -func restore_gp_registers_eret - ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0] - ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2] - b restore_gp_registers_callee_eret -endfunc restore_gp_registers_eret - -func restore_gp_registers_callee_eret - ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4] - ldp x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6] - ldp x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8] - ldp x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10] - ldp x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12] - ldp x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14] - ldp x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18] - ldp x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20] - ldp x22, x23, [sp, 
#CTX_GPREGS_OFFSET + CTX_GPREG_X22] - ldp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24] - ldp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26] - ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28] - ldp x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR] - msr sp_el0, x17 - ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16] - eret -endfunc restore_gp_registers_callee_eret - - /* ----------------------------------------------------- - * This routine assumes that the SP_EL3 is pointing to - * a valid context structure from where the gp regs and - * other special registers can be retrieved. - * ----------------------------------------------------- - */ -func el3_exit - /* ----------------------------------------------------- - * Save the current SP_EL0 i.e. the EL3 runtime stack - * which will be used for handling the next SMC. Then - * switch to SP_EL3 - * ----------------------------------------------------- - */ - mov x17, sp - msr spsel, #1 - str x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP] - - /* ----------------------------------------------------- - * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET - * ----------------------------------------------------- - */ - ldr x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3] - ldp x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3] - msr scr_el3, x18 - msr spsr_el3, x16 - msr elr_el3, x17 - - /* Restore saved general purpose registers and return */ - b restore_gp_registers_eret -endfunc el3_exit diff --git a/common/bl_common.c b/common/bl_common.c index 7cafe63..acb2ec6 100644 --- a/common/bl_common.c +++ b/common/bl_common.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -40,24 +40,20 @@ #include #include -unsigned long page_align(unsigned long value, unsigned dir) +uintptr_t page_align(uintptr_t value, unsigned dir) { - unsigned long page_size = 1 << FOUR_KB_SHIFT; - /* Round up the limit to the next page boundary */ - if (value & (page_size - 1)) { - value &= ~(page_size - 1); + if (value & (PAGE_SIZE - 1)) { + value &= ~(PAGE_SIZE - 1); if (dir == UP) - value += page_size; + value += PAGE_SIZE; } return value; } -static inline unsigned int is_page_aligned (unsigned long addr) { - const unsigned long page_size = 1 << FOUR_KB_SHIFT; - - return (addr & (page_size - 1)) == 0; +static inline unsigned int is_page_aligned (uintptr_t addr) { + return (addr & (PAGE_SIZE - 1)) == 0; } /****************************************************************************** @@ -65,8 +61,8 @@ * given the extents of free memory. * Return 1 if it is free, 0 otherwise. *****************************************************************************/ -static int is_mem_free(uint64_t free_base, size_t free_size, - uint64_t addr, size_t size) +static int is_mem_free(uintptr_t free_base, size_t free_size, + uintptr_t addr, size_t size) { return (addr >= free_base) && (addr + size <= free_base + free_size); } @@ -77,9 +73,9 @@ * size of the smallest chunk of free memory surrounding the sub-region in * 'small_chunk_size'. 
*****************************************************************************/ -static unsigned int choose_mem_pos(uint64_t mem_start, uint64_t mem_end, - uint64_t submem_start, uint64_t submem_end, - size_t *small_chunk_size) +static unsigned int choose_mem_pos(uintptr_t mem_start, uintptr_t mem_end, + uintptr_t submem_start, uintptr_t submem_end, + size_t *small_chunk_size) { size_t top_chunk_size, bottom_chunk_size; @@ -106,8 +102,8 @@ * reflect the memory usage. * The caller must ensure the memory to reserve is free. *****************************************************************************/ -void reserve_mem(uint64_t *free_base, size_t *free_size, - uint64_t addr, size_t size) +void reserve_mem(uintptr_t *free_base, size_t *free_size, + uintptr_t addr, size_t size) { size_t discard_size; size_t reserved_size; @@ -127,26 +123,26 @@ if (pos == BOTTOM) *free_base = addr + size; - VERBOSE("Reserved 0x%lx bytes (discarded 0x%lx bytes %s)\n", + VERBOSE("Reserved 0x%zx bytes (discarded 0x%zx bytes %s)\n", reserved_size, discard_size, pos == TOP ? "above" : "below"); } -static void dump_load_info(unsigned long image_load_addr, - unsigned long image_size, +static void dump_load_info(uintptr_t image_load_addr, + size_t image_size, const meminfo_t *mem_layout) { - INFO("Trying to load image at address 0x%lx, size = 0x%lx\n", - image_load_addr, image_size); + INFO("Trying to load image at address %p, size = 0x%zx\n", + (void *)image_load_addr, image_size); INFO("Current memory layout:\n"); - INFO(" total region = [0x%lx, 0x%lx]\n", mem_layout->total_base, - mem_layout->total_base + mem_layout->total_size); - INFO(" free region = [0x%lx, 0x%lx]\n", mem_layout->free_base, - mem_layout->free_base + mem_layout->free_size); + INFO(" total region = [%p, %p]\n", (void *)mem_layout->total_base, + (void *)(mem_layout->total_base + mem_layout->total_size)); + INFO(" free region = [%p, %p]\n", (void *)mem_layout->free_base, + (void *)(mem_layout->free_base + mem_layout->free_size)); } /* Generic function to return the size of an image */ -unsigned long image_size(unsigned int image_id) +size_t image_size(unsigned int image_id) { uintptr_t dev_handle; uintptr_t image_handle; @@ -367,9 +363,8 @@ ******************************************************************************/ void print_entry_point_info(const entry_point_info_t *ep_info) { - INFO("Entry point address = 0x%llx\n", - (unsigned long long) ep_info->pc); - INFO("SPSR = 0x%lx\n", (unsigned long) ep_info->spsr); + INFO("Entry point address = %p\n", (void *)ep_info->pc); + INFO("SPSR = 0x%x\n", ep_info->spsr); #define PRINT_IMAGE_ARG(n) \ VERBOSE("Argument #" #n " = 0x%llx\n", \ diff --git a/common/context_mgmt.c b/common/context_mgmt.c deleted file mode 100644 index 3ccbd03..0000000 --- a/common/context_mgmt.c +++ /dev/null @@ -1,383 +0,0 @@ -/* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. 
- * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -/******************************************************************************* - * Context management library initialisation routine. This library is used by - * runtime services to share pointers to 'cpu_context' structures for the secure - * and non-secure states. Management of the structures and their associated - * memory is not done by the context management library e.g. the PSCI service - * manages the cpu context used for entry from and exit to the non-secure state. - * The Secure payload dispatcher service manages the context(s) corresponding to - * the secure state. It also uses this library to get access to the non-secure - * state cpu context pointers. - * Lastly, this library provides the api to make SP_EL3 point to the cpu context - * which will used for programming an entry into a lower EL. The same context - * will used to save state upon exception entry from that EL. - ******************************************************************************/ -void cm_init(void) -{ - /* - * The context management library has only global data to intialize, but - * that will be done when the BSS is zeroed out - */ -} - -/******************************************************************************* - * The following function initializes the cpu_context 'ctx' for - * first use, and sets the initial entrypoint state as specified by the - * entry_point_info structure. - * - * The security state to initialize is determined by the SECURE attribute - * of the entry_point_info. The function returns a pointer to the initialized - * context and sets this as the next context to return to. - * - * The EE and ST attributes are used to configure the endianess and secure - * timer availability for the new execution context. - * - * To prepare the register state for entry call cm_prepare_el3_exit() and - * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to - * cm_e1_sysreg_context_restore(). 
- ******************************************************************************/ -static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep) -{ - unsigned int security_state; - uint32_t scr_el3; - el3_state_t *state; - gp_regs_t *gp_regs; - unsigned long sctlr_elx; - - assert(ctx); - - security_state = GET_SECURITY_STATE(ep->h.attr); - - /* Clear any residual register values from the context */ - memset(ctx, 0, sizeof(*ctx)); - - /* - * Base the context SCR on the current value, adjust for entry point - * specific requirements and set trap bits from the IMF - * TODO: provide the base/global SCR bits using another mechanism? - */ - scr_el3 = read_scr(); - scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT | - SCR_ST_BIT | SCR_HCE_BIT); - - if (security_state != SECURE) - scr_el3 |= SCR_NS_BIT; - - if (GET_RW(ep->spsr) == MODE_RW_64) - scr_el3 |= SCR_RW_BIT; - - if (EP_GET_ST(ep->h.attr)) - scr_el3 |= SCR_ST_BIT; - -#ifndef HANDLE_EA_EL3_FIRST - /* Explicitly stop to trap aborts from lower exception levels. */ - scr_el3 &= ~SCR_EA_BIT; -#endif - -#if IMAGE_BL31 - /* - * IRQ/FIQ bits only need setting if interrupt routing - * model has been set up for BL31. - */ - scr_el3 |= get_scr_el3_from_routing_model(security_state); -#endif - - /* - * Set up SCTLR_ELx for the target exception level: - * EE bit is taken from the entrypoint attributes - * M, C and I bits must be zero (as required by PSCI specification) - * - * The target exception level is based on the spsr mode requested. - * If execution is requested to EL2 or hyp mode, HVC is enabled - * via SCR_EL3.HCE. - * - * Always compute the SCTLR_EL1 value and save in the cpu_context - * - the EL2 registers are set up by cm_preapre_ns_entry() as they - * are not part of the stored cpu_context - * - * TODO: In debug builds the spsr should be validated and checked - * against the CPU support, security state, endianess and pc - */ - sctlr_elx = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0; - if (GET_RW(ep->spsr) == MODE_RW_64) - sctlr_elx |= SCTLR_EL1_RES1; - else - sctlr_elx |= SCTLR_AARCH32_EL1_RES1; - write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx); - - if ((GET_RW(ep->spsr) == MODE_RW_64 - && GET_EL(ep->spsr) == MODE_EL2) - || (GET_RW(ep->spsr) != MODE_RW_64 - && GET_M32(ep->spsr) == MODE32_hyp)) { - scr_el3 |= SCR_HCE_BIT; - } - - /* Populate EL3 state so that we've the right context before doing ERET */ - state = get_el3state_ctx(ctx); - write_ctx_reg(state, CTX_SCR_EL3, scr_el3); - write_ctx_reg(state, CTX_ELR_EL3, ep->pc); - write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr); - - /* - * Store the X0-X7 value from the entrypoint into the context - * Use memcpy as we are in control of the layout of the structures - */ - gp_regs = get_gpregs_ctx(ctx); - memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t)); -} - -/******************************************************************************* - * The following function initializes the cpu_context for a CPU specified by - * its `cpu_idx` for first use, and sets the initial entrypoint state as - * specified by the entry_point_info structure. 
- ******************************************************************************/ -void cm_init_context_by_index(unsigned int cpu_idx, - const entry_point_info_t *ep) -{ - cpu_context_t *ctx; - ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr)); - cm_init_context_common(ctx, ep); -} - -/******************************************************************************* - * The following function initializes the cpu_context for the current CPU - * for first use, and sets the initial entrypoint state as specified by the - * entry_point_info structure. - ******************************************************************************/ -void cm_init_my_context(const entry_point_info_t *ep) -{ - cpu_context_t *ctx; - ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr)); - cm_init_context_common(ctx, ep); -} - -/******************************************************************************* - * Prepare the CPU system registers for first entry into secure or normal world - * - * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized - * If execution is requested to non-secure EL1 or svc mode, and the CPU supports - * EL2 then EL2 is disabled by configuring all necessary EL2 registers. - * For all entries, the EL1 registers are initialized from the cpu_context - ******************************************************************************/ -void cm_prepare_el3_exit(uint32_t security_state) -{ - uint32_t sctlr_elx, scr_el3, cptr_el2; - cpu_context_t *ctx = cm_get_context(security_state); - - assert(ctx); - - if (security_state == NON_SECURE) { - scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3); - if (scr_el3 & SCR_HCE_BIT) { - /* Use SCTLR_EL1.EE value to initialise sctlr_el2 */ - sctlr_elx = read_ctx_reg(get_sysregs_ctx(ctx), - CTX_SCTLR_EL1); - sctlr_elx &= ~SCTLR_EE_BIT; - sctlr_elx |= SCTLR_EL2_RES1; - write_sctlr_el2(sctlr_elx); - } else if (read_id_aa64pfr0_el1() & - (ID_AA64PFR0_ELX_MASK << ID_AA64PFR0_EL2_SHIFT)) { - /* EL2 present but unused, need to disable safely */ - - /* HCR_EL2 = 0, except RW bit set to match SCR_EL3 */ - write_hcr_el2((scr_el3 & SCR_RW_BIT) ? HCR_RW_BIT : 0); - - /* SCTLR_EL2 : can be ignored when bypassing */ - - /* CPTR_EL2 : disable all traps TCPAC, TTA, TFP */ - cptr_el2 = read_cptr_el2(); - cptr_el2 &= ~(TCPAC_BIT | TTA_BIT | TFP_BIT); - write_cptr_el2(cptr_el2); - - /* Enable EL1 access to timer */ - write_cnthctl_el2(EL1PCEN_BIT | EL1PCTEN_BIT); - - /* Reset CNTVOFF_EL2 */ - write_cntvoff_el2(0); - - /* Set VPIDR, VMPIDR to match MIDR, MPIDR */ - write_vpidr_el2(read_midr_el1()); - write_vmpidr_el2(read_mpidr_el1()); - - /* - * Reset VTTBR_EL2. - * Needed because cache maintenance operations depend on - * the VMID even when non-secure EL1&0 stage 2 address - * translation are disabled. - */ - write_vttbr_el2(0); - } - } - - el1_sysregs_context_restore(get_sysregs_ctx(ctx)); - - cm_set_next_context(ctx); -} - -/******************************************************************************* - * The next four functions are used by runtime services to save and restore - * EL1 context on the 'cpu_context' structure for the specified security - * state. 
- ******************************************************************************/ -void cm_el1_sysregs_context_save(uint32_t security_state) -{ - cpu_context_t *ctx; - - ctx = cm_get_context(security_state); - assert(ctx); - - el1_sysregs_context_save(get_sysregs_ctx(ctx)); -} - -void cm_el1_sysregs_context_restore(uint32_t security_state) -{ - cpu_context_t *ctx; - - ctx = cm_get_context(security_state); - assert(ctx); - - el1_sysregs_context_restore(get_sysregs_ctx(ctx)); -} - -/******************************************************************************* - * This function populates ELR_EL3 member of 'cpu_context' pertaining to the - * given security state with the given entrypoint - ******************************************************************************/ -void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint) -{ - cpu_context_t *ctx; - el3_state_t *state; - - ctx = cm_get_context(security_state); - assert(ctx); - - /* Populate EL3 state so that ERET jumps to the correct entry */ - state = get_el3state_ctx(ctx); - write_ctx_reg(state, CTX_ELR_EL3, entrypoint); -} - -/******************************************************************************* - * This function populates ELR_EL3 and SPSR_EL3 members of 'cpu_context' - * pertaining to the given security state - ******************************************************************************/ -void cm_set_elr_spsr_el3(uint32_t security_state, - uint64_t entrypoint, uint32_t spsr) -{ - cpu_context_t *ctx; - el3_state_t *state; - - ctx = cm_get_context(security_state); - assert(ctx); - - /* Populate EL3 state so that ERET jumps to the correct entry */ - state = get_el3state_ctx(ctx); - write_ctx_reg(state, CTX_ELR_EL3, entrypoint); - write_ctx_reg(state, CTX_SPSR_EL3, spsr); -} - -/******************************************************************************* - * This function updates a single bit in the SCR_EL3 member of the 'cpu_context' - * pertaining to the given security state using the value and bit position - * specified in the parameters. It preserves all other bits. - ******************************************************************************/ -void cm_write_scr_el3_bit(uint32_t security_state, - uint32_t bit_pos, - uint32_t value) -{ - cpu_context_t *ctx; - el3_state_t *state; - uint32_t scr_el3; - - ctx = cm_get_context(security_state); - assert(ctx); - - /* Ensure that the bit position is a valid one */ - assert((1 << bit_pos) & SCR_VALID_BIT_MASK); - - /* Ensure that the 'value' is only a bit wide */ - assert(value <= 1); - - /* - * Get the SCR_EL3 value from the cpu context, clear the desired bit - * and set it to its new value. - */ - state = get_el3state_ctx(ctx); - scr_el3 = read_ctx_reg(state, CTX_SCR_EL3); - scr_el3 &= ~(1 << bit_pos); - scr_el3 |= value << bit_pos; - write_ctx_reg(state, CTX_SCR_EL3, scr_el3); -} - -/******************************************************************************* - * This function retrieves SCR_EL3 member of 'cpu_context' pertaining to the - * given security state. 
- ******************************************************************************/ -uint32_t cm_get_scr_el3(uint32_t security_state) -{ - cpu_context_t *ctx; - el3_state_t *state; - - ctx = cm_get_context(security_state); - assert(ctx); - - /* Populate EL3 state so that ERET jumps to the correct entry */ - state = get_el3state_ctx(ctx); - return read_ctx_reg(state, CTX_SCR_EL3); -} - -/******************************************************************************* - * This function is used to program the context that's used for exception - * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for - * the required security state - ******************************************************************************/ -void cm_set_next_eret_context(uint32_t security_state) -{ - cpu_context_t *ctx; - - ctx = cm_get_context(security_state); - assert(ctx); - - cm_set_next_context(ctx); -} diff --git a/common/runtime_svc.c b/common/runtime_svc.c new file mode 100644 index 0000000..8729e29 --- /dev/null +++ b/common/runtime_svc.c @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include + +/******************************************************************************* + * The 'rt_svc_descs' array holds the runtime service descriptors exported by + * services by placing them in the 'rt_svc_descs' linker section. + * The 'rt_svc_descs_indices' array holds the index of a descriptor in the + * 'rt_svc_descs' array. When an SMC arrives, the OEN[29:24] bits and the call + * type[31] bit in the function id are combined to get an index into the + * 'rt_svc_descs_indices' array. This gives the index of the descriptor in the + * 'rt_svc_descs' array which contains the SMC handler. 
+ ******************************************************************************/
+#define RT_SVC_DESCS_START	((uintptr_t) (&__RT_SVC_DESCS_START__))
+#define RT_SVC_DESCS_END	((uintptr_t) (&__RT_SVC_DESCS_END__))
+uint8_t rt_svc_descs_indices[MAX_RT_SVCS];
+static rt_svc_desc_t *rt_svc_descs;
+
+#define RT_SVC_DECS_NUM		((RT_SVC_DESCS_END - RT_SVC_DESCS_START)\
+					/ sizeof(rt_svc_desc_t))
+
+/*******************************************************************************
+ * Simple routine to sanity check a runtime service descriptor before using it
+ ******************************************************************************/
+static int32_t validate_rt_svc_desc(rt_svc_desc_t *desc)
+{
+	if (desc == NULL)
+		return -EINVAL;
+
+	if (desc->start_oen > desc->end_oen)
+		return -EINVAL;
+
+	if (desc->end_oen >= OEN_LIMIT)
+		return -EINVAL;
+
+	if (desc->call_type != SMC_TYPE_FAST && desc->call_type != SMC_TYPE_STD)
+		return -EINVAL;
+
+	/* A runtime service having no init or handle function doesn't make sense */
+	if (desc->init == NULL && desc->handle == NULL)
+		return -EINVAL;
+
+	return 0;
+}
+
+/*******************************************************************************
+ * This function calls the initialisation routine in the descriptor exported by
+ * a runtime service. Once a descriptor has been validated, its start & end
+ * owning entity numbers and the call type are combined to form a unique oen.
+ * The unique oen is used as an index into the 'rt_svc_descs_indices' array.
+ * The index of the runtime service descriptor is stored at this index.
+ ******************************************************************************/
+void runtime_svc_init(void)
+{
+	int rc = 0, index, start_idx, end_idx;
+
+	/* Assert that the number of descriptors detected is below the maximum number of indices */
+	assert((RT_SVC_DECS_NUM >= 0) && (RT_SVC_DECS_NUM < MAX_RT_SVCS));
+
+	/* If no runtime services are implemented then simply bail out */
+	if (RT_SVC_DECS_NUM == 0)
+		return;
+
+	/* Initialise internal variables to invalid state */
+	memset(rt_svc_descs_indices, -1, sizeof(rt_svc_descs_indices));
+
+	rt_svc_descs = (rt_svc_desc_t *) RT_SVC_DESCS_START;
+	for (index = 0; index < RT_SVC_DECS_NUM; index++) {
+
+		/*
+		 * An invalid descriptor is an error condition since it is
+		 * difficult to predict the system behaviour in the absence
+		 * of this service.
+		 */
+		rc = validate_rt_svc_desc(&rt_svc_descs[index]);
+		if (rc) {
+			ERROR("Invalid runtime service descriptor %p (%s)\n",
+				(void *) &rt_svc_descs[index],
+				rt_svc_descs[index].name);
+			goto error;
+		}
+
+		/*
+		 * The runtime service may have separate rt_svc_desc_t
+		 * for its fast smc and standard smc. Since the service itself
+		 * needs to be initialized only once, only one of them will have
+		 * an initialisation routine defined. Call the initialisation
+		 * routine for this runtime service, if it is defined.
+		 */
+		if (rt_svc_descs[index].init) {
+			rc = rt_svc_descs[index].init();
+			if (rc) {
+				ERROR("Error initializing runtime service %s\n",
+						rt_svc_descs[index].name);
+				continue;
+			}
+		}
+
+		/*
+		 * Fill the indices corresponding to the start and end
+		 * owning entity numbers with the index of the
+		 * descriptor which will handle the SMCs for this owning
+		 * entity range.
+		 */
+		start_idx = get_unique_oen(rt_svc_descs[index].start_oen,
+				rt_svc_descs[index].call_type);
+		end_idx = get_unique_oen(rt_svc_descs[index].end_oen,
+				rt_svc_descs[index].call_type);
+
+		for (; start_idx <= end_idx; start_idx++)
+			rt_svc_descs_indices[start_idx] = index;
+	}
+
+	return;
+error:
+	panic();
+}
diff --git a/docs/firmware-design.md b/docs/firmware-design.md
index b99a283..d9f9ff0 100644
--- a/docs/firmware-design.md
+++ b/docs/firmware-design.md
@@ -1779,10 +1779,11 @@
 the platform.
 
 *   **Common code.** This is platform and architecture agnostic code.
 *   **Library code.** This code comprises functionality commonly used by all
-    other code.
+    other code. The PSCI implementation and other EL3 runtime frameworks
+    reside as library components.
 *   **Stage specific.** Code specific to a boot stage.
 *   **Drivers.**
-*   **Services.** EL3 runtime services, e.g. PSCI or SPD. Specific SPD services
+*   **Services.** EL3 runtime services (e.g. SPD). Specific SPD services
     reside in the `services/spd` directory (e.g. `services/spd/tspd`).
 
 Each boot loader stage uses code from one or more of the above mentioned
diff --git a/docs/porting-guide.md b/docs/porting-guide.md
index 23033d5..8dad4a0 100644
--- a/docs/porting-guide.md
+++ b/docs/porting-guide.md
@@ -545,7 +545,7 @@
 ### Function : plat_get_my_entrypoint() [mandatory when PROGRAMMABLE_RESET_ADDRESS == 0]
 
     Argument : void
-    Return   : unsigned long
+    Return   : uintptr_t
 
 This function is called with the MMU and caches disabled
 (`SCTLR_EL3.M` = 0 and `SCTLR_EL3.C` = 0). The function is responsible for
@@ -748,7 +748,7 @@
 ### Function : plat_get_my_stack()
 
     Argument : void
-    Return   : unsigned long
+    Return   : uintptr_t
 
 This function returns the base address of the normal memory stack that
 has been allocated for the current CPU. For BL images that only require a
@@ -966,7 +966,7 @@
 ### Function : bl1_init_bl2_mem_layout() [optional]
 
-    Argument : meminfo *, meminfo *, unsigned int, unsigned long
+    Argument : meminfo *, meminfo *
     Return   : void
 
 BL1 needs to tell the next stage the amount of secure RAM available
diff --git a/docs/psci-pd-tree.md b/docs/psci-pd-tree.md
index 6ae686d..c253905 100644
--- a/docs/psci-pd-tree.md
+++ b/docs/psci-pd-tree.md
@@ -203,7 +203,7 @@
 } non_cpu_pd_node_t;
 
 typedef struct cpu_pwr_domain_node {
-	unsigned long mpidr;
+	u_register_t mpidr;
 
 	/* Index of the parent power domain node */
 	unsigned int parent_node;
diff --git a/docs/rt-svc-writers-guide.md b/docs/rt-svc-writers-guide.md
index 40cee14..4b811fe 100644
--- a/docs/rt-svc-writers-guide.md
+++ b/docs/rt-svc-writers-guide.md
@@ -95,8 +95,7 @@
 ARM Trusted Firmware has a [`services`] directory in the source tree under
 which each owning entity can place the implementation of its runtime
 service. The
-[PSCI] implementation is located here in the [`services/std_svc/psci`]
-directory.
+[PSCI] implementation is located here in the [`lib/psci`] directory.
 
 Runtime service sources will need to include the [`runtime_svc.h`] header
 file.
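To make the descriptor indexing set up by `runtime_svc_init()` concrete, here is
a minimal standalone sketch of the `get_unique_oen()` computation. The
`FUNCID_*` values restate the SMC Calling Convention field layout quoted in the
new `common/runtime_svc.c` comment (call type in bit [31], OEN in bits
[29:24]); they are reproduced here for illustration only and are not a
substitute for the definitions in `smcc.h`:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed SMCCC function ID layout: call type in bit [31], OEN in
     * bits [29:24]. These mirror the FUNCID_* constants in smcc.h. */
    #define FUNCID_TYPE_SHIFT   31
    #define FUNCID_TYPE_MASK    0x1
    #define FUNCID_OEN_SHIFT    24
    #define FUNCID_OEN_MASK     0x3f
    #define FUNCID_OEN_WIDTH    6

    /* Same combination performed by get_unique_oen() */
    static unsigned int unique_oen(uint32_t smc_fid)
    {
        unsigned int oen = (smc_fid >> FUNCID_OEN_SHIFT) & FUNCID_OEN_MASK;
        unsigned int type = (smc_fid >> FUNCID_TYPE_SHIFT) & FUNCID_TYPE_MASK;

        return (oen & FUNCID_OEN_MASK) |
               ((type & FUNCID_TYPE_MASK) << FUNCID_OEN_WIDTH);
    }

    int main(void)
    {
        /* PSCI_CPU_ON_AARCH64 (0xc4000003): fast call, OEN 4 -> 0x44 */
        printf("index = 0x%x\n", unique_oen(0xc4000003u));
        return 0;
    }

Every OEN between a descriptor's `start_oen` and `end_oen` maps to the same
slot in `rt_svc_descs_indices`, which is why the initialisation loop above
fills the whole range with one descriptor index.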
@@ -114,7 +113,7 @@ is also used for diagnostic purposes * `_start` and `_end` values must be based on the `OEN_*` values defined in - [`smcc_helpers.h`] + [`smcc.h`] * `_type` must be one of `SMC_TYPE_FAST` or `SMC_TYPE_STD` @@ -124,12 +123,12 @@ * `_smch` is the SMC handler function with the `rt_svc_handle` signature: - typedef uint64_t (*rt_svc_handle)(uint32_t smc_fid, - uint64_t x1, uint64_t x2, - uint64_t x3, uint64_t x4, + typedef uintptr_t (*rt_svc_handle_t)(uint32_t smc_fid, + u_register_t x1, u_register_t x2, + u_register_t x3, u_register_t x4, void *cookie, void *handle, - uint64_t flags); + u_register_t flags); Details of the requirements and behavior of the two callbacks is provided in the following sections. @@ -189,12 +188,12 @@ handler function (`_smch` in the service declaration). This function must have the following signature: - typedef uint64_t (*rt_svc_handle)(uint32_t smc_fid, - uint64_t x1, uint64_t x2, - uint64_t x3, uint64_t x4, - void *reserved, - void *handle, - uint64_t flags); + typedef uintptr_t (*rt_svc_handle_t)(uint32_t smc_fid, + u_register_t x1, u_register_t x2, + u_register_t x3, u_register_t x4, + void *cookie, + void *handle, + u_register_t flags); The handler is responsible for: @@ -253,10 +252,9 @@ SMC_RET3(handle, x0, x1, x2); SMC_RET4(handle, x0, x1, x2, x3); -The `reserved` parameter to the handler is reserved for future use and can be -ignored. The value returned by a SMC handler is also reserved for future use - -completion of the handler function must always be via one of the `SMC_RETn()` -macros. +The `cookie` parameter to the handler is reserved for future use and can be +ignored. The `handle` is returned by the SMC handler - completion of the +handler function must always be via one of the `SMC_RETn()` macros. NOTE: The PSCI and Test Secure-EL1 Payload Dispatcher services do not follow all of the above requirements yet. @@ -299,12 +297,11 @@ _Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved._ -[Firmware Design]: ./firmware-design.md - +[Firmware Design]: ./firmware-design.md [`services`]: ../services -[`services/std_svc/psci`]: ../services/std_svc/psci +[`lib/psci`]: ../lib/psci [`std_svc_setup.c`]: ../services/std_svc/std_svc_setup.c -[`runtime_svc.h`]: ../include/bl31/runtime_svc.h -[`smcc_helpers.h`]: ../include/common/smcc_helpers.h +[`runtime_svc.h`]: ../include/common/runtime_svc.h +[`smcc.h`]: ../include/lib/smcc.h [PSCI]: http://infocenter.arm.com/help/topic/com.arm.doc.den0022c/DEN0022C_Power_State_Coordination_Interface.pdf "Power State Coordination Interface PDD (ARM DEN 0022C)" [SMCCC]: http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html "SMC Calling Convention PDD (ARM DEN 0028A)" diff --git a/drivers/arm/ccn/ccn_private.h b/drivers/arm/ccn/ccn_private.h index fffa2ca..a5a6146 100644 --- a/drivers/arm/ccn/ccn_private.h +++ b/drivers/arm/ccn/ccn_private.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -104,7 +104,7 @@ #define WAIT_FOR_DOMAIN_CTRL_OP_COMPLETION(region_id, stat_reg_offset, \ op_reg_offset, rn_id_map) \ { \ - uint64_t status_reg; \ + unsigned long long status_reg; \ do { \ status_reg = ccn_reg_read((ccn_plat_desc->periphbase), \ (region_id), \ @@ -208,7 +208,7 @@ /* * Helper function to return number of set bits in bitmap */ -static inline unsigned int count_set_bits(uint64_t bitmap) +static inline unsigned int count_set_bits(unsigned long long bitmap) { unsigned int count = 0; diff --git a/drivers/arm/gic/v3/gicv3_helpers.c b/drivers/arm/gic/v3/gicv3_helpers.c index 07ae54c..0a81c86 100644 --- a/drivers/arm/gic/v3/gicv3_helpers.c +++ b/drivers/arm/gic/v3/gicv3_helpers.c @@ -250,7 +250,7 @@ uintptr_t gicr_base, mpidr_hash_fn mpidr_to_core_pos) { - unsigned long mpidr; + u_register_t mpidr; unsigned int proc_num; unsigned long long typer_val; uintptr_t rdistif_base = gicr_base; @@ -320,7 +320,7 @@ unsigned int int_grp) { unsigned int index, irq_num; - uint64_t gic_affinity_val; + unsigned long long gic_affinity_val; assert((int_grp == INTR_GROUP1S) || (int_grp == INTR_GROUP0)); /* If `num_ints` is not 0, ensure that `sec_intr_list` is not NULL */ diff --git a/include/bl31/bl31.h b/include/bl31/bl31.h index 96867b0..8352c49 100644 --- a/include/bl31/bl31.h +++ b/include/bl31/bl31.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -36,11 +36,11 @@ /******************************************************************************* * Function prototypes ******************************************************************************/ -void bl31_arch_setup(void); void bl31_next_el_arch_setup(uint32_t security_state); void bl31_set_next_image_type(uint32_t type); uint32_t bl31_get_next_image_type(void); void bl31_prepare_next_image_entry(void); void bl31_register_bl32_init(int32_t (*)(void)); +void bl31_warm_entrypoint(void); #endif /* __BL31_H__ */ diff --git a/include/bl31/cpu_data.h b/include/bl31/cpu_data.h deleted file mode 100644 index 2b506c7..0000000 --- a/include/bl31/cpu_data.h +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __CPU_DATA_H__ -#define __CPU_DATA_H__ - -/* Offsets for the cpu_data structure */ -#define CPU_DATA_CRASH_BUF_OFFSET 0x18 -#if CRASH_REPORTING -#define CPU_DATA_LOG2SIZE 7 -#else -#define CPU_DATA_LOG2SIZE 6 -#endif -/* need enough space in crash buffer to save 8 registers */ -#define CPU_DATA_CRASH_BUF_SIZE 64 -#define CPU_DATA_CPU_OPS_PTR 0x10 - -#ifndef __ASSEMBLY__ - -#include -#include -#include -#include -#include - -/* Offsets for the cpu_data structure */ -#define CPU_DATA_PSCI_LOCK_OFFSET __builtin_offsetof\ - (cpu_data_t, psci_svc_cpu_data.pcpu_bakery_info) - -#if PLAT_PCPU_DATA_SIZE -#define CPU_DATA_PLAT_PCPU_OFFSET __builtin_offsetof\ - (cpu_data_t, platform_cpu_data) -#endif - -/******************************************************************************* - * Function & variable prototypes - ******************************************************************************/ - -/******************************************************************************* - * Cache of frequently used per-cpu data: - * Pointers to non-secure and secure security state contexts - * Address of the crash stack - * It is aligned to the cache line boundary to allow efficient concurrent - * manipulation of these pointers on different cpus - * - * TODO: Add other commonly used variables to this (tf_issues#90) - * - * The data structure and the _cpu_data accessors should not be used directly - * by components that have per-cpu members. The member access macros should be - * used for this. - ******************************************************************************/ -typedef struct cpu_data { - void *cpu_context[2]; - uint64_t cpu_ops_ptr; -#if CRASH_REPORTING - uint64_t crash_buf[CPU_DATA_CRASH_BUF_SIZE >> 3]; -#endif - struct psci_cpu_data psci_svc_cpu_data; -#if PLAT_PCPU_DATA_SIZE - uint8_t platform_cpu_data[PLAT_PCPU_DATA_SIZE]; -#endif -} __aligned(CACHE_WRITEBACK_GRANULE) cpu_data_t; - -#if CRASH_REPORTING -/* verify assembler offsets match data structures */ -CASSERT(CPU_DATA_CRASH_BUF_OFFSET == __builtin_offsetof - (cpu_data_t, crash_buf), - assert_cpu_data_crash_stack_offset_mismatch); -#endif - -CASSERT((1 << CPU_DATA_LOG2SIZE) == sizeof(cpu_data_t), - assert_cpu_data_log2size_mismatch); - -CASSERT(CPU_DATA_CPU_OPS_PTR == __builtin_offsetof - (cpu_data_t, cpu_ops_ptr), - assert_cpu_data_cpu_ops_ptr_offset_mismatch); - -struct cpu_data *_cpu_data_by_index(uint32_t cpu_index); - -/* Return the cpu_data structure for the current CPU. 
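An aside on the `CASSERT()` lines in the header being moved: they pin the
layout of `cpu_data_t` to the offsets hard-coded for the assembler. A
standalone sketch of the same technique, using C11 `_Static_assert` in place of
the firmware's `CASSERT()` macro and a cut-down stand-in structure (the stand-in
assumes 64-bit pointers and is illustrative only):

    #include <stddef.h>
    #include <stdint.h>

    #define CPU_DATA_CPU_OPS_PTR 0x10 /* offset the assembler relies on */

    /* Cut-down stand-in for cpu_data_t: two 8-byte context pointers put
     * cpu_ops_ptr at offset 0x10, matching the constant above. */
    typedef struct cpu_data {
        void *cpu_context[2];
        uint64_t cpu_ops_ptr;
    } cpu_data_t;

    /* Compile-time check: the build breaks if the structure layout and the
     * assembler-visible constant ever drift apart, which is exactly what
     * CASSERT() guarantees in the firmware. */
    _Static_assert(offsetof(cpu_data_t, cpu_ops_ptr) == CPU_DATA_CPU_OPS_PTR,
                   "cpu_ops_ptr offset mismatch");

    int main(void) { return 0; }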
*/ -static inline struct cpu_data *_cpu_data(void) -{ - return (cpu_data_t *)read_tpidr_el3(); -} - - -/************************************************************************** - * APIs for initialising and accessing per-cpu data - *************************************************************************/ - -void init_cpu_data_ptr(void); -void init_cpu_ops(void); - -#define get_cpu_data(_m) _cpu_data()->_m -#define set_cpu_data(_m, _v) _cpu_data()->_m = _v -#define get_cpu_data_by_index(_ix, _m) _cpu_data_by_index(_ix)->_m -#define set_cpu_data_by_index(_ix, _m, _v) _cpu_data_by_index(_ix)->_m = _v - -#define flush_cpu_data(_m) flush_dcache_range((uint64_t) \ - &(_cpu_data()->_m), \ - sizeof(_cpu_data()->_m)) -#define inv_cpu_data(_m) inv_dcache_range((uint64_t) \ - &(_cpu_data()->_m), \ - sizeof(_cpu_data()->_m)) -#define flush_cpu_data_by_index(_ix, _m) \ - flush_dcache_range((uint64_t) \ - &(_cpu_data_by_index(_ix)->_m), \ - sizeof(_cpu_data_by_index(_ix)->_m)) - - -#endif /* __ASSEMBLY__ */ -#endif /* __CPU_DATA_H__ */ diff --git a/include/bl31/runtime_svc.h b/include/bl31/runtime_svc.h deleted file mode 100644 index 03f906e..0000000 --- a/include/bl31/runtime_svc.h +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __RUNTIME_SVC_H__ -#define __RUNTIME_SVC_H__ - -#include /* to include exception types */ -#include /* to include SMCC definitions */ - - -/******************************************************************************* - * Structure definition, typedefs & constants for the runtime service framework - ******************************************************************************/ - -/* - * Constants to allow the assembler access a runtime service - * descriptor - */ -#define RT_SVC_SIZE_LOG2 5 -#define SIZEOF_RT_SVC_DESC (1 << RT_SVC_SIZE_LOG2) -#define RT_SVC_DESC_INIT 16 -#define RT_SVC_DESC_HANDLE 24 - -/* - * The function identifier has 6 bits for the owning entity number and - * single bit for the type of smc call. 
When taken together these - * values limit the maximum number of runtime services to 128. - */ -#define MAX_RT_SVCS 128 - -#ifndef __ASSEMBLY__ - -/* Prototype for runtime service initializing function */ -typedef int32_t (*rt_svc_init_t)(void); - -/* - * Prototype for runtime service SMC handler function. x0 (SMC Function ID) to - * x4 are as passed by the caller. Rest of the arguments to SMC and the context - * can be accessed using the handle pointer. The cookie parameter is reserved - * for future use - */ -typedef uint64_t (*rt_svc_handle_t)(uint32_t smc_fid, - uint64_t x1, - uint64_t x2, - uint64_t x3, - uint64_t x4, - void *cookie, - void *handle, - uint64_t flags); -typedef struct rt_svc_desc { - uint8_t start_oen; - uint8_t end_oen; - uint8_t call_type; - const char *name; - rt_svc_init_t init; - rt_svc_handle_t handle; -} rt_svc_desc_t; - -/* - * Convenience macro to declare a service descriptor - */ -#define DECLARE_RT_SVC(_name, _start, _end, _type, _setup, _smch) \ - static const rt_svc_desc_t __svc_desc_ ## _name \ - __section("rt_svc_descs") __used = { \ - .start_oen = _start, \ - .end_oen = _end, \ - .call_type = _type, \ - .name = #_name, \ - .init = _setup, \ - .handle = _smch } - -/* - * Compile time assertions related to the 'rt_svc_desc' structure to: - * 1. ensure that the assembler and the compiler view of the size - * of the structure are the same. - * 2. ensure that the assembler and the compiler see the initialisation - * routine at the same offset. - * 3. ensure that the assembler and the compiler see the handler - * routine at the same offset. - */ -CASSERT((sizeof(rt_svc_desc_t) == SIZEOF_RT_SVC_DESC), \ - assert_sizeof_rt_svc_desc_mismatch); -CASSERT(RT_SVC_DESC_INIT == __builtin_offsetof(rt_svc_desc_t, init), \ - assert_rt_svc_desc_init_offset_mismatch); -CASSERT(RT_SVC_DESC_HANDLE == __builtin_offsetof(rt_svc_desc_t, handle), \ - assert_rt_svc_desc_handle_offset_mismatch); - - -/* - * This macro combines the call type and the owning entity number corresponding - * to a runtime service to generate a unique owning entity number. This unique - * oen is used to access an entry in the 'rt_svc_descs_indices' array. The entry - * contains the index of the service descriptor in the 'rt_svc_descs' array. - */ -#define get_unique_oen(oen, call_type) ((oen & FUNCID_OEN_MASK) | \ - ((call_type & FUNCID_TYPE_MASK) \ - << FUNCID_OEN_WIDTH)) - -/******************************************************************************* - * Function & variable prototypes - ******************************************************************************/ -void runtime_svc_init(void); -extern uint64_t __RT_SVC_DESCS_START__; -extern uint64_t __RT_SVC_DESCS_END__; -void init_crash_reporting(void); - -#endif /*__ASSEMBLY__*/ -#endif /* __RUNTIME_SVC_H__ */ diff --git a/include/bl31/services/psci.h b/include/bl31/services/psci.h deleted file mode 100644 index 20aa52e..0000000 --- a/include/bl31/services/psci.h +++ /dev/null @@ -1,345 +0,0 @@ -/* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. 
- * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __PSCI_H__ -#define __PSCI_H__ - -#include -#include /* for PLAT_NUM_PWR_DOMAINS */ -#if ENABLE_PLAT_COMPAT -#include -#endif - -/******************************************************************************* - * Number of power domains whose state this PSCI implementation can track - ******************************************************************************/ -#ifdef PLAT_NUM_PWR_DOMAINS -#define PSCI_NUM_PWR_DOMAINS PLAT_NUM_PWR_DOMAINS -#else -#define PSCI_NUM_PWR_DOMAINS (2 * PLATFORM_CORE_COUNT) -#endif - -#define PSCI_NUM_NON_CPU_PWR_DOMAINS (PSCI_NUM_PWR_DOMAINS - \ - PLATFORM_CORE_COUNT) - -/* This is the power level corresponding to a CPU */ -#define PSCI_CPU_PWR_LVL 0 - -/* - * The maximum power level supported by PSCI. Since PSCI CPU_SUSPEND - * uses the old power_state parameter format which has 2 bits to specify the - * power level, this constant is defined to be 3. 
- */ -#define PSCI_MAX_PWR_LVL 3 - -/******************************************************************************* - * Defines for runtime services function ids - ******************************************************************************/ -#define PSCI_VERSION 0x84000000 -#define PSCI_CPU_SUSPEND_AARCH32 0x84000001 -#define PSCI_CPU_SUSPEND_AARCH64 0xc4000001 -#define PSCI_CPU_OFF 0x84000002 -#define PSCI_CPU_ON_AARCH32 0x84000003 -#define PSCI_CPU_ON_AARCH64 0xc4000003 -#define PSCI_AFFINITY_INFO_AARCH32 0x84000004 -#define PSCI_AFFINITY_INFO_AARCH64 0xc4000004 -#define PSCI_MIG_AARCH32 0x84000005 -#define PSCI_MIG_AARCH64 0xc4000005 -#define PSCI_MIG_INFO_TYPE 0x84000006 -#define PSCI_MIG_INFO_UP_CPU_AARCH32 0x84000007 -#define PSCI_MIG_INFO_UP_CPU_AARCH64 0xc4000007 -#define PSCI_SYSTEM_OFF 0x84000008 -#define PSCI_SYSTEM_RESET 0x84000009 -#define PSCI_FEATURES 0x8400000A -#define PSCI_SYSTEM_SUSPEND_AARCH32 0x8400000E -#define PSCI_SYSTEM_SUSPEND_AARCH64 0xc400000E -#define PSCI_STAT_RESIDENCY_AARCH32 0x84000010 -#define PSCI_STAT_RESIDENCY_AARCH64 0xc4000010 -#define PSCI_STAT_COUNT_AARCH32 0x84000011 -#define PSCI_STAT_COUNT_AARCH64 0xc4000011 - -/* Macro to help build the psci capabilities bitfield */ -#define define_psci_cap(x) (1 << (x & 0x1f)) - -/* - * Number of PSCI calls (above) implemented - */ -#if ENABLE_PSCI_STAT -#define PSCI_NUM_CALLS 22 -#else -#define PSCI_NUM_CALLS 18 -#endif - -/******************************************************************************* - * PSCI Migrate and friends - ******************************************************************************/ -#define PSCI_TOS_UP_MIG_CAP 0 -#define PSCI_TOS_NOT_UP_MIG_CAP 1 -#define PSCI_TOS_NOT_PRESENT_MP 2 - -/******************************************************************************* - * PSCI CPU_SUSPEND 'power_state' parameter specific defines - ******************************************************************************/ -#define PSTATE_ID_SHIFT 0 - -#if PSCI_EXTENDED_STATE_ID -#define PSTATE_VALID_MASK 0xB0000000 -#define PSTATE_TYPE_SHIFT 30 -#define PSTATE_ID_MASK 0xfffffff -#else -#define PSTATE_VALID_MASK 0xFCFE0000 -#define PSTATE_TYPE_SHIFT 16 -#define PSTATE_PWR_LVL_SHIFT 24 -#define PSTATE_ID_MASK 0xffff -#define PSTATE_PWR_LVL_MASK 0x3 - -#define psci_get_pstate_pwrlvl(pstate) (((pstate) >> PSTATE_PWR_LVL_SHIFT) & \ - PSTATE_PWR_LVL_MASK) -#define psci_make_powerstate(state_id, type, pwrlvl) \ - (((state_id) & PSTATE_ID_MASK) << PSTATE_ID_SHIFT) |\ - (((type) & PSTATE_TYPE_MASK) << PSTATE_TYPE_SHIFT) |\ - (((pwrlvl) & PSTATE_PWR_LVL_MASK) << PSTATE_PWR_LVL_SHIFT) -#endif /* __PSCI_EXTENDED_STATE_ID__ */ - -#define PSTATE_TYPE_STANDBY 0x0 -#define PSTATE_TYPE_POWERDOWN 0x1 -#define PSTATE_TYPE_MASK 0x1 - -#define psci_get_pstate_id(pstate) (((pstate) >> PSTATE_ID_SHIFT) & \ - PSTATE_ID_MASK) -#define psci_get_pstate_type(pstate) (((pstate) >> PSTATE_TYPE_SHIFT) & \ - PSTATE_TYPE_MASK) -#define psci_check_power_state(pstate) ((pstate) & PSTATE_VALID_MASK) - -/******************************************************************************* - * PSCI CPU_FEATURES feature flag specific defines - ******************************************************************************/ -/* Features flags for CPU SUSPEND power state parameter format. 
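The non-extended `power_state` encoding dismantled by the `psci_get_pstate_*`
macros above is easy to exercise in isolation. A standalone sketch, restating
the `PSTATE_*` constants from the deleted header purely for illustration:

    #include <assert.h>
    #include <stdio.h>

    /* Original (non-extended) power_state layout: state id in [15:0],
     * type in bit 16, power level in [25:24]. */
    #define PSTATE_ID_SHIFT        0
    #define PSTATE_TYPE_SHIFT      16
    #define PSTATE_PWR_LVL_SHIFT   24
    #define PSTATE_ID_MASK         0xffff
    #define PSTATE_TYPE_MASK       0x1
    #define PSTATE_PWR_LVL_MASK    0x3
    #define PSTATE_TYPE_POWERDOWN  0x1

    int main(void)
    {
        /* Request a powerdown state, state-id 0x22, at power level 2,
         * mirroring what psci_make_powerstate() assembles. */
        unsigned int pstate =
            ((0x22u & PSTATE_ID_MASK) << PSTATE_ID_SHIFT) |
            ((PSTATE_TYPE_POWERDOWN & PSTATE_TYPE_MASK) << PSTATE_TYPE_SHIFT) |
            ((2u & PSTATE_PWR_LVL_MASK) << PSTATE_PWR_LVL_SHIFT);

        /* Decode it again, as psci_get_pstate_type() does */
        assert(((pstate >> PSTATE_TYPE_SHIFT) & PSTATE_TYPE_MASK) ==
               PSTATE_TYPE_POWERDOWN);
        printf("power_state = 0x%x\n", pstate); /* 0x2010022 */
        return 0;
    }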
Bits [1:1] */ -#define FF_PSTATE_SHIFT 1 -#define FF_PSTATE_ORIG 0 -#define FF_PSTATE_EXTENDED 1 -#if PSCI_EXTENDED_STATE_ID -#define FF_PSTATE FF_PSTATE_EXTENDED -#else -#define FF_PSTATE FF_PSTATE_ORIG -#endif - -/* Features flags for CPU SUSPEND OS Initiated mode support. Bits [0:0] */ -#define FF_MODE_SUPPORT_SHIFT 0 -#define FF_SUPPORTS_OS_INIT_MODE 1 - -/******************************************************************************* - * PSCI version - ******************************************************************************/ -#define PSCI_MAJOR_VER (1 << 16) -#define PSCI_MINOR_VER 0x0 - -/******************************************************************************* - * PSCI error codes - ******************************************************************************/ -#define PSCI_E_SUCCESS 0 -#define PSCI_E_NOT_SUPPORTED -1 -#define PSCI_E_INVALID_PARAMS -2 -#define PSCI_E_DENIED -3 -#define PSCI_E_ALREADY_ON -4 -#define PSCI_E_ON_PENDING -5 -#define PSCI_E_INTERN_FAIL -6 -#define PSCI_E_NOT_PRESENT -7 -#define PSCI_E_DISABLED -8 -#define PSCI_E_INVALID_ADDRESS -9 - -#define PSCI_INVALID_MPIDR ~((u_register_t)0) - -#ifndef __ASSEMBLY__ - -#include -#include - -/* - * These are the states reported by the PSCI_AFFINITY_INFO API for the specified - * CPU. The definitions of these states can be found in Section 5.7.1 in the - * PSCI specification (ARM DEN 0022C). - */ -typedef enum { - AFF_STATE_ON = 0, - AFF_STATE_OFF = 1, - AFF_STATE_ON_PENDING = 2 -} aff_info_state_t; - -/* - * Macro to represent invalid affinity level within PSCI. - */ -#define PSCI_INVALID_PWR_LVL (PLAT_MAX_PWR_LVL + 1) - -/* - * Type for representing the local power state at a particular level. - */ -typedef uint8_t plat_local_state_t; - -/* The local state macro used to represent RUN state. */ -#define PSCI_LOCAL_STATE_RUN 0 - -/* - * Macro to test whether the plat_local_state is RUN state - */ -#define is_local_state_run(plat_local_state) \ - ((plat_local_state) == PSCI_LOCAL_STATE_RUN) - -/* - * Macro to test whether the plat_local_state is RETENTION state - */ -#define is_local_state_retn(plat_local_state) \ - (((plat_local_state) > PSCI_LOCAL_STATE_RUN) && \ - ((plat_local_state) <= PLAT_MAX_RET_STATE)) - -/* - * Macro to test whether the plat_local_state is OFF state - */ -#define is_local_state_off(plat_local_state) \ - (((plat_local_state) > PLAT_MAX_RET_STATE) && \ - ((plat_local_state) <= PLAT_MAX_OFF_STATE)) - -/***************************************************************************** - * This data structure defines the representation of the power state parameter - * for its exchange between the generic PSCI code and the platform port. For - * example, it is used by the platform port to specify the requested power - * states during a power management operation. It is used by the generic code to - * inform the platform about the target power states that each level should - * enter. - ****************************************************************************/ -typedef struct psci_power_state { - /* - * The pwr_domain_state[] stores the local power state at each level - * for the CPU. - */ - plat_local_state_t pwr_domain_state[PLAT_MAX_PWR_LVL + 1]; -} psci_power_state_t; - -/******************************************************************************* - * Structure used to store per-cpu information relevant to the PSCI service. - * It is populated in the per-cpu data array. In return we get a guarantee that - * this information will not reside on a cache line shared with another cpu. 
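The `is_local_state_*` macros above partition the `plat_local_state_t` range
into run, retention and off bands. A standalone sketch of the same
classification, assuming the compatibility defaults `PLAT_MAX_RET_STATE = 1`
and `PLAT_MAX_OFF_STATE = 2` that appear later in `psci_compat.h`:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint8_t plat_local_state_t;

    /* Platform-specific bounds; 1/2 match the compatibility defaults */
    #define PSCI_LOCAL_STATE_RUN 0
    #define PLAT_MAX_RET_STATE   1
    #define PLAT_MAX_OFF_STATE   2

    /* Mirrors is_local_state_run(), is_local_state_retn() and
     * is_local_state_off() in turn. */
    static const char *classify(plat_local_state_t s)
    {
        if (s == PSCI_LOCAL_STATE_RUN)
            return "RUN";
        if (s > PSCI_LOCAL_STATE_RUN && s <= PLAT_MAX_RET_STATE)
            return "RETENTION";
        if (s > PLAT_MAX_RET_STATE && s <= PLAT_MAX_OFF_STATE)
            return "OFF";
        return "invalid";
    }

    int main(void)
    {
        for (plat_local_state_t s = 0; s <= 3; s++)
            printf("%u -> %s\n", s, classify(s));
        return 0;
    }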
- ******************************************************************************/ -typedef struct psci_cpu_data { - /* State as seen by PSCI Affinity Info API */ - aff_info_state_t aff_info_state; - - /* - * Highest power level which takes part in a power management - * operation. - */ - unsigned char target_pwrlvl; - - /* The local power state of this CPU */ - plat_local_state_t local_state; -} psci_cpu_data_t; - -/******************************************************************************* - * Structure populated by platform specific code to export routines which - * perform common low level power management functions - ******************************************************************************/ -typedef struct plat_psci_ops { - void (*cpu_standby)(plat_local_state_t cpu_state); - int (*pwr_domain_on)(u_register_t mpidr); - void (*pwr_domain_off)(const psci_power_state_t *target_state); - void (*pwr_domain_suspend)(const psci_power_state_t *target_state); - void (*pwr_domain_on_finish)(const psci_power_state_t *target_state); - void (*pwr_domain_suspend_finish)( - const psci_power_state_t *target_state); - void (*pwr_domain_pwr_down_wfi)( - const psci_power_state_t *target_state) __dead2; - void (*system_off)(void) __dead2; - void (*system_reset)(void) __dead2; - int (*validate_power_state)(unsigned int power_state, - psci_power_state_t *req_state); - int (*validate_ns_entrypoint)(uintptr_t ns_entrypoint); - void (*get_sys_suspend_power_state)( - psci_power_state_t *req_state); - int (*get_pwr_lvl_state_idx)(plat_local_state_t pwr_domain_state, - int pwrlvl); - int (*translate_power_state_by_mpidr)(u_register_t mpidr, - unsigned int power_state, - psci_power_state_t *output_state); -} plat_psci_ops_t; - -/******************************************************************************* - * Optional structure populated by the Secure Payload Dispatcher to be given a - * chance to perform any bookkeeping before PSCI executes a power management - * operation. It also allows PSCI to determine certain properties of the SP e.g. - * migrate capability etc. 
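A dispatcher opts into these notifications by handing PSCI a populated
`spd_pm_ops_t` via `psci_register_spd_pm_hook()`, declared further down in this
header. The following standalone sketch models that flow with a cut-down copy
of the structure; the `tspd_*` names and the local `register_spd_pm_hook()`
stand-in are illustrative, not the firmware's code:

    #include <stdint.h>
    #include <stdio.h>

    /* Local mirror of a subset of the spd_pm_ops_t hooks shown above */
    typedef struct spd_pm_ops {
        void (*svc_on)(uint64_t target_cpu);
        int32_t (*svc_off)(uint64_t unused);
        void (*svc_suspend)(uint64_t max_off_pwrlvl);
    } spd_pm_ops_t;

    static void tspd_cpu_on_handler(uint64_t target_cpu)
    {
        printf("SPD notified: CPU 0x%llx powering on\n",
               (unsigned long long)target_cpu);
    }

    /* Hooks the dispatcher does not care about may be left NULL */
    static const spd_pm_ops_t tspd_pm = {
        .svc_on = tspd_cpu_on_handler,
    };

    /* Stand-in for psci_register_spd_pm_hook(): PSCI stores the pointer
     * and invokes the hooks around power management operations. */
    static const spd_pm_ops_t *psci_spd_pm;
    static void register_spd_pm_hook(const spd_pm_ops_t *pm)
    {
        psci_spd_pm = pm;
    }

    int main(void)
    {
        register_spd_pm_hook(&tspd_pm);
        if (psci_spd_pm && psci_spd_pm->svc_on)
            psci_spd_pm->svc_on(0x80000001ULL);
        return 0;
    }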
- ******************************************************************************/ -typedef struct spd_pm_ops { - void (*svc_on)(uint64_t target_cpu); - int32_t (*svc_off)(uint64_t __unused); - void (*svc_suspend)(uint64_t max_off_pwrlvl); - void (*svc_on_finish)(uint64_t __unused); - void (*svc_suspend_finish)(uint64_t max_off_pwrlvl); - int32_t (*svc_migrate)(uint64_t from_cpu, uint64_t to_cpu); - int32_t (*svc_migrate_info)(uint64_t *resident_cpu); - void (*svc_system_off)(void); - void (*svc_system_reset)(void); -} spd_pm_ops_t; - -/******************************************************************************* - * Function & Data prototypes - ******************************************************************************/ -unsigned int psci_version(void); -int psci_cpu_on(u_register_t target_cpu, - uintptr_t entrypoint, - u_register_t context_id); -int psci_cpu_suspend(unsigned int power_state, - uintptr_t entrypoint, - u_register_t context_id); -int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id); -int psci_cpu_off(void); -int psci_affinity_info(u_register_t target_affinity, - unsigned int lowest_affinity_level); -int psci_migrate(u_register_t target_cpu); -int psci_migrate_info_type(void); -long psci_migrate_info_up_cpu(void); -int psci_features(unsigned int psci_fid); -void __dead2 psci_power_down_wfi(void); -void psci_entrypoint(void); -void psci_register_spd_pm_hook(const spd_pm_ops_t *); -uint64_t psci_smc_handler(uint32_t smc_fid, - uint64_t x1, - uint64_t x2, - uint64_t x3, - uint64_t x4, - void *cookie, - void *handle, - uint64_t flags); - -/* PSCI setup function */ -int psci_setup(void); - -#endif /*__ASSEMBLY__*/ - -#endif /* __PSCI_H__ */ diff --git a/include/bl31/services/psci_compat.h b/include/bl31/services/psci_compat.h deleted file mode 100644 index 24bd8dc..0000000 --- a/include/bl31/services/psci_compat.h +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef __PSCI_COMPAT_H__ -#define __PSCI_COMPAT_H__ - -#include -#include - -#ifndef __ASSEMBLY__ -/* - * The below declarations are to enable compatibility for the platform ports - * using the old platform interface and psci helpers. - */ -#define PLAT_MAX_PWR_LVL PLATFORM_MAX_AFFLVL -#define PLAT_NUM_PWR_DOMAINS PLATFORM_NUM_AFFS - -/******************************************************************************* - * PSCI affinity related constants. An affinity instance could - * be present or absent physically to cater for asymmetric topologies. - ******************************************************************************/ -#define PSCI_AFF_ABSENT 0x0 -#define PSCI_AFF_PRESENT 0x1 - -#define PSCI_STATE_ON 0x0 -#define PSCI_STATE_OFF 0x1 -#define PSCI_STATE_ON_PENDING 0x2 -#define PSCI_STATE_SUSPEND 0x3 - -/* - * Using the compatibility platform interfaces means that the local states - * used in psci_power_state_t need to only convey whether its power down - * or standby state. The onus is on the platform port to do the right thing - * including the state coordination in case multiple power down states are - * involved. Hence if we assume 3 generic states viz, run, standby and - * power down, we can assign 1 and 2 to standby and power down respectively. - */ -#define PLAT_MAX_RET_STATE 1 -#define PLAT_MAX_OFF_STATE 2 - -/* - * Macro to represent invalid affinity level within PSCI. - */ -#define PSCI_INVALID_DATA -1 - -#define psci_get_pstate_afflvl(pstate) psci_get_pstate_pwrlvl(pstate) - -/* - * This array stores the 'power_state' requests of each CPU during - * CPU_SUSPEND and SYSTEM_SUSPEND which will be populated by the - * compatibility layer when appropriate platform hooks are invoked. - */ -extern unsigned int psci_power_state_compat[PLATFORM_CORE_COUNT]; - -/******************************************************************************* - * Structure populated by platform specific code to export routines which - * perform common low level pm functions - ******************************************************************************/ -typedef struct plat_pm_ops { - void (*affinst_standby)(unsigned int power_state); - int (*affinst_on)(unsigned long mpidr, - unsigned long sec_entrypoint, - unsigned int afflvl, - unsigned int state); - void (*affinst_off)(unsigned int afflvl, unsigned int state); - void (*affinst_suspend)(unsigned long sec_entrypoint, - unsigned int afflvl, - unsigned int state); - void (*affinst_on_finish)(unsigned int afflvl, unsigned int state); - void (*affinst_suspend_finish)(unsigned int afflvl, - unsigned int state); - void (*system_off)(void) __dead2; - void (*system_reset)(void) __dead2; - int (*validate_power_state)(unsigned int power_state); - int (*validate_ns_entrypoint)(unsigned long ns_entrypoint); - unsigned int (*get_sys_suspend_power_state)(void); -} plat_pm_ops_t; - -/******************************************************************************* - * Function & Data prototypes to enable compatibility for older platform ports - ******************************************************************************/ -int psci_get_suspend_stateid_by_mpidr(unsigned long); -int psci_get_suspend_stateid(void); -int psci_get_suspend_powerstate(void); -unsigned int psci_get_max_phys_off_afflvl(void); -int psci_get_suspend_afflvl(void); - -#endif /* ____ASSEMBLY__ */ -#endif /* __PSCI_COMPAT_H__ */ diff --git a/include/bl31/services/std_svc.h b/include/bl31/services/std_svc.h deleted file mode 100644 index cbd5b62..0000000 --- 
a/include/bl31/services/std_svc.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __STD_SVC_H__ -#define __STD_SVC_H__ - -/* SMC function IDs for Standard Service queries */ - -#define ARM_STD_SVC_CALL_COUNT 0x8400ff00 -#define ARM_STD_SVC_UID 0x8400ff01 -/* 0x8400ff02 is reserved */ -#define ARM_STD_SVC_VERSION 0x8400ff03 - -/* ARM Standard Service Calls version numbers */ -#define STD_SVC_VERSION_MAJOR 0x0 -#define STD_SVC_VERSION_MINOR 0x1 - -/* The macros below are used to identify PSCI calls from the SMC function ID */ -#define PSCI_FID_MASK 0xffe0u -#define PSCI_FID_VALUE 0u -#define is_psci_fid(_fid) \ - (((_fid) & PSCI_FID_MASK) == PSCI_FID_VALUE) - -#endif /* __STD_SVC_H__ */ diff --git a/include/common/aarch64/asm_macros.S b/include/common/aarch64/asm_macros.S new file mode 100644 index 0000000..cc8f424 --- /dev/null +++ b/include/common/aarch64/asm_macros.S @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef __ASM_MACROS_S__ +#define __ASM_MACROS_S__ + +#include +#include + + + .macro func_prologue + stp x29, x30, [sp, #-0x10]! + mov x29,sp + .endm + + .macro func_epilogue + ldp x29, x30, [sp], #0x10 + .endm + + + .macro dcache_line_size reg, tmp + mrs \tmp, ctr_el0 + ubfx \tmp, \tmp, #16, #4 + mov \reg, #4 + lsl \reg, \reg, \tmp + .endm + + + .macro icache_line_size reg, tmp + mrs \tmp, ctr_el0 + and \tmp, \tmp, #0xf + mov \reg, #4 + lsl \reg, \reg, \tmp + .endm + + + .macro smc_check label + mrs x0, esr_el3 + ubfx x0, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH + cmp x0, #EC_AARCH64_SMC + b.ne $label + .endm + + /* + * Declare the exception vector table, enforcing it is aligned on a + * 2KB boundary, as required by the ARMv8 architecture. + * Use zero bytes as the fill value to be stored in the padding bytes + * so that it inserts illegal AArch64 instructions. This increases + * security, robustness and potentially facilitates debugging. + */ + .macro vector_base label + .section .vectors, "ax" + .align 11, 0 + \label: + .endm + + /* + * Create an entry in the exception vector table, enforcing it is + * aligned on a 128-byte boundary, as required by the ARMv8 architecture. + * Use zero bytes as the fill value to be stored in the padding bytes + * so that it inserts illegal AArch64 instructions. This increases + * security, robustness and potentially facilitates debugging. + */ + .macro vector_entry label + .section .vectors, "ax" + .align 7, 0 + \label: + .endm + + /* + * This macro verifies that the given vector doesn't exceed the + * architectural limit of 32 instructions. This is meant to be placed + * immediately after the last instruction in the vector. It takes the + * vector entry as the parameter + */ + .macro check_vector_size since + .if (. 
- \since) > (32 * 4)
+	.error "Vector exceeds 32 instructions"
+	.endif
+	.endm
+
+#if ENABLE_PLAT_COMPAT
+	/*
+	 * This macro calculates the base address of an MP stack using the
+	 * platform_get_core_pos() index, the name of the stack storage and
+	 * the size of each stack
+	 * In: X0 = MPIDR of CPU whose stack is wanted
+	 * Out: X0 = physical address of stack base
+	 * Clobber: X30, X1, X2
+	 */
+	.macro get_mp_stack _name, _size
+	bl  platform_get_core_pos
+	ldr x2, =(\_name + \_size)
+	mov x1, #\_size
+	madd x0, x0, x1, x2
+	.endm
+#endif
+
+	/*
+	 * This macro calculates the base address of the current CPU's MP stack
+	 * using the plat_my_core_pos() index, the name of the stack storage
+	 * and the size of each stack
+	 * Out: X0 = physical address of stack base
+	 * Clobber: X30, X1, X2
+	 */
+	.macro get_my_mp_stack _name, _size
+	bl  plat_my_core_pos
+	ldr x2, =(\_name + \_size)
+	mov x1, #\_size
+	madd x0, x0, x1, x2
+	.endm
+
+	/*
+	 * This macro calculates the base address of a UP stack using the
+	 * name of the stack storage and the size of the stack
+	 * Out: X0 = physical address of stack base
+	 */
+	.macro get_up_stack _name, _size
+	ldr x0, =(\_name + \_size)
+	.endm
+
+	/*
+	 * Helper macro to generate the best mov/movk combinations according
+	 * to the value to be moved. The 16 bits from '_shift' are tested and,
+	 * if not zero, they are moved into '_reg' without affecting
+	 * other bits.
+	 */
+	.macro _mov_imm16 _reg, _val, _shift
+	.if (\_val >> \_shift) & 0xffff
+	.if (\_val & (1 << \_shift - 1))
+	movk \_reg, (\_val >> \_shift) & 0xffff, LSL \_shift
+	.else
+	mov \_reg, \_val & (0xffff << \_shift)
+	.endif
+	.endif
+	.endm
+
+	/*
+	 * Helper macro to load arbitrary values into 32 or 64-bit registers,
+	 * generating the best mov/movk combinations. Many base addresses are
+	 * 64KB aligned, in which case the macro will skip updating bits 15:0.
+	 */
+	.macro mov_imm _reg, _val
+	.if (\_val) == 0
+	mov \_reg, #0
+	.else
+	_mov_imm16 \_reg, (\_val), 0
+	_mov_imm16 \_reg, (\_val), 16
+	_mov_imm16 \_reg, (\_val), 32
+	_mov_imm16 \_reg, (\_val), 48
+	.endif
+	.endm
+
+#endif /* __ASM_MACROS_S__ */
diff --git a/include/common/aarch64/assert_macros.S b/include/common/aarch64/assert_macros.S
new file mode 100644
index 0000000..b7e536c
--- /dev/null
+++ b/include/common/aarch64/assert_macros.S
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef __ASSERT_MACROS_S__ +#define __ASSERT_MACROS_S__ + + /* + * Assembler macro to enable asm_assert. Use this macro wherever + * assert is required in assembly. Please note that the macro makes + * use of label '300' to provide the logic and the caller + * should make sure that this label is not used to branch prior + * to calling this macro. + */ +#define ASM_ASSERT(_cc) \ +.ifndef .L_assert_filename ;\ + .pushsection .rodata.str1.1, "aS" ;\ + .L_assert_filename: ;\ + .string __FILE__ ;\ + .popsection ;\ +.endif ;\ + b._cc 300f ;\ + adr x0, .L_assert_filename ;\ + mov x1, __LINE__ ;\ + b asm_assert ;\ +300: + +#endif /* __ASSERT_MACROS_S__ */ diff --git a/include/common/aarch64/el3_common_macros.S b/include/common/aarch64/el3_common_macros.S new file mode 100644 index 0000000..9b22a73 --- /dev/null +++ b/include/common/aarch64/el3_common_macros.S @@ -0,0 +1,280 @@ +/* + * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __EL3_COMMON_MACROS_S__ +#define __EL3_COMMON_MACROS_S__ + +#include +#include + + /* + * Helper macro to initialise EL3 registers we care about. 
+ */
+	.macro el3_arch_init_common _exception_vectors
+	/* ---------------------------------------------------------------------
+	 * Enable the instruction cache, stack pointer and data access alignment
+	 * checks
+	 * ---------------------------------------------------------------------
+	 */
+	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
+	mrs	x0, sctlr_el3
+	orr	x0, x0, x1
+	msr	sctlr_el3, x0
+	isb
+
+#if IMAGE_BL31
+	/* ---------------------------------------------------------------------
+	 * Initialise the per-cpu cache pointer to the CPU.
+	 * This is done early to enable crash reporting to have access to crash
+	 * stack. Since crash reporting depends on cpu_data to report the
+	 * unhandled exception, not doing so can lead to recursive exceptions
+	 * due to a NULL TPIDR_EL3.
+	 * ---------------------------------------------------------------------
+	 */
+	bl	init_cpu_data_ptr
+#endif /* IMAGE_BL31 */
+
+	/* ---------------------------------------------------------------------
+	 * Set the exception vectors.
+	 * ---------------------------------------------------------------------
+	 */
+	adr	x0, \_exception_vectors
+	msr	vbar_el3, x0
+	isb
+
+	/* ---------------------------------------------------------------------
+	 * Set the RES1 bits in SCR_EL3 early. Set the EA bit to catch both
+	 * External Aborts and SError Interrupts in EL3 and also the SIF bit
+	 * to disable instruction fetches from Non-secure memory.
+	 * ---------------------------------------------------------------------
+	 */
+	mov	x0, #(SCR_RES1_BITS | SCR_EA_BIT | SCR_SIF_BIT)
+	msr	scr_el3, x0
+
+	/* ---------------------------------------------------------------------
+	 * Enable External Aborts and SError Interrupts now that the exception
+	 * vectors have been set up.
+	 * ---------------------------------------------------------------------
+	 */
+	msr	daifclr, #DAIF_ABT_BIT
+
+	/* ---------------------------------------------------------------------
+	 * The initial state of the Architectural feature trap register
+	 * (CPTR_EL3) is unknown and it must be set to a known state. All
+	 * feature traps are disabled. Some bits in this register are marked as
+	 * reserved and should not be modified.
+	 *
+	 * CPTR_EL3.TCPAC: This causes a direct access to the CPACR_EL1 from EL1
+	 * or the CPTR_EL2 from EL2 to trap to EL3 unless it is trapped at EL2.
+	 *
+	 * CPTR_EL3.TTA: This causes access to the Trace functionality to trap
+	 * to EL3 when executed from EL0, EL1, EL2, or EL3. If system register
+	 * access to trace functionality is not supported, this bit is RES0.
+	 *
+	 * CPTR_EL3.TFP: This causes instructions that access the registers
+	 * associated with Floating Point and Advanced SIMD execution to trap
+	 * to EL3 when executed from any exception level, unless trapped to EL1
+	 * or EL2.
+	 * ---------------------------------------------------------------------
+	 */
+	mrs	x0, cptr_el3
+	bic	w0, w0, #TCPAC_BIT
+	bic	w0, w0, #TTA_BIT
+	bic	w0, w0, #TFP_BIT
+	msr	cptr_el3, x0
+	.endm
+
+/* -----------------------------------------------------------------------------
+ * This is the superset of actions that need to be performed during a cold boot
+ * or a warm boot in EL3. This code is shared by BL1 and BL31.
+ *
+ * This macro will always perform reset handling, architectural initialisations
+ * and stack setup. The rest of the actions are optional because they might not
+ * be needed, depending on the context in which this macro is called. This is
+ * why this macro is parameterised; each parameter allows some of these actions
+ * to be enabled or disabled.
+ *
+ * _set_endian:
+ *	Whether the macro needs to configure the endianness of data accesses.
+ *
+ * _warm_boot_mailbox:
+ *	Whether the macro needs to detect the type of boot (cold/warm). The
+ *	detection is based on the platform entrypoint address: if it is zero
+ *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
+ *	this macro jumps to the platform entrypoint address.
+ *
+ * _secondary_cold_boot:
+ *	Whether the macro needs to identify the CPU that is calling it: primary
+ *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
+ *	the platform initialisations, while the secondaries will be put in a
+ *	platform-specific state in the meantime.
+ *
+ *	If the caller knows this macro will only be called by the primary CPU
+ *	then this parameter can be defined to 0 to skip this step.
+ *
+ * _init_memory:
+ *	Whether the macro needs to initialise the memory.
+ *
+ * _init_c_runtime:
+ *	Whether the macro needs to initialise the C runtime environment.
+ *
+ * _exception_vectors:
+ *	Address of the exception vectors to program in the VBAR_EL3 register.
+ * -----------------------------------------------------------------------------
+ */
+	.macro el3_entrypoint_common					\
+		_set_endian, _warm_boot_mailbox, _secondary_cold_boot,	\
+		_init_memory, _init_c_runtime, _exception_vectors
+
+	.if \_set_endian
+		/* -------------------------------------------------------------
+		 * Set the CPU endianness before doing anything that might
+		 * involve memory reads or writes.
+		 * -------------------------------------------------------------
+		 */
+		mrs	x0, sctlr_el3
+		bic	x0, x0, #SCTLR_EE_BIT
+		msr	sctlr_el3, x0
+		isb
+	.endif /* _set_endian */
+
+	.if \_warm_boot_mailbox
+		/* -------------------------------------------------------------
+		 * This code will be executed for both warm and cold resets.
+		 * Now is the time to distinguish between the two.
+		 * Query the platform entrypoint address and if it is not zero
+		 * then it means it is a warm boot so jump to this address.
+		 * -------------------------------------------------------------
+		 */
+		bl	plat_get_my_entrypoint
+		cbz	x0, do_cold_boot
+		br	x0
+
+	do_cold_boot:
+	.endif /* _warm_boot_mailbox */
+
+	/* ---------------------------------------------------------------------
+	 * It is a cold boot.
+	 * Perform any processor specific actions upon reset e.g. cache, TLB
+	 * invalidations etc.
+	 * ---------------------------------------------------------------------
+	 */
+	bl	reset_handler
+
+	el3_arch_init_common \_exception_vectors
+
+	.if \_secondary_cold_boot
+		/* -------------------------------------------------------------
+		 * Check if this is a primary or secondary CPU cold boot.
+		 * The primary CPU will set up the platform while the
+		 * secondaries are placed in a platform-specific state until the
+		 * primary CPU performs the necessary actions to bring them out
+		 * of that state and allows entry into the OS.
+		 * -------------------------------------------------------------
+		 */
+		bl	plat_is_my_cpu_primary
+		cbnz	w0, do_primary_cold_boot
+
+		/* This is a cold boot on a secondary CPU */
+		bl	plat_secondary_cold_boot_setup
+		/* plat_secondary_cold_boot_setup() is not supposed to return */
+		bl	el3_panic
+
+	do_primary_cold_boot:
+	.endif /* _secondary_cold_boot */
+
+	/* ---------------------------------------------------------------------
+	 * Initialize memory now. Secondary CPU initialization won't get to this
+	 * point.
+	 * ---------------------------------------------------------------------
+	 */
+
+	.if \_init_memory
+		bl	platform_mem_init
+	.endif /* _init_memory */
+
+	/* ---------------------------------------------------------------------
+	 * Init C runtime environment:
+	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
+	 *       - the .bss section;
+	 *       - the coherent memory section (if any).
+	 *   - Relocate the data section from ROM to RAM, if required.
+	 * ---------------------------------------------------------------------
+	 */
+	.if \_init_c_runtime
+#if IMAGE_BL31
+		/* -------------------------------------------------------------
+		 * Invalidate the RW memory used by the BL31 image. This
+		 * includes the data and NOBITS sections. This is done to
+		 * safeguard against possible corruption of this memory by
+		 * dirty cache lines in a system cache as a result of use by
+		 * an earlier boot loader stage.
+		 * -------------------------------------------------------------
+		 */
+		adr	x0, __RW_START__
+		adr	x1, __RW_END__
+		sub	x1, x1, x0
+		bl	inv_dcache_range
+#endif /* IMAGE_BL31 */
+
+		ldr	x0, =__BSS_START__
+		ldr	x1, =__BSS_SIZE__
+		bl	zeromem16
+
+#if USE_COHERENT_MEM
+		ldr	x0, =__COHERENT_RAM_START__
+		ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
+		bl	zeromem16
+#endif
+
+#if IMAGE_BL1
+		ldr	x0, =__DATA_RAM_START__
+		ldr	x1, =__DATA_ROM_START__
+		ldr	x2, =__DATA_SIZE__
+		bl	memcpy16
+#endif
+	.endif /* _init_c_runtime */
+
+	/* ---------------------------------------------------------------------
+	 * Use SP_EL0 for the C runtime stack.
+	 * ---------------------------------------------------------------------
+	 */
+	msr	spsel, #0
+
+	/* ---------------------------------------------------------------------
+	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
+	 * the MMU is enabled. There is no risk of reading stale stack memory
+	 * after enabling the MMU as only the primary CPU is running at the
+	 * moment.
+	 * ---------------------------------------------------------------------
+	 */
+	bl	plat_set_my_stack
+	.endm
+
+#endif /* __EL3_COMMON_MACROS_S__ */
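The `_warm_boot_mailbox` logic above relies on plat_get_my_entrypoint(), which every platform port must provide. As a rough illustration only, not actual platform code, the following C sketch shows the convention the macro assumes; MY_PLAT_MAILBOX_BASE is an invented name for a platform-defined normal-memory word that the power-management code programs with the warm-boot entrypoint before powering a core down:

    #include <stdint.h>

    /* Invented for this sketch; a real port defines its own mailbox location. */
    #define MY_PLAT_MAILBOX_BASE    0x04000000UL

    uintptr_t plat_get_my_entrypoint(void)
    {
        /*
         * Zero means no warm-boot entrypoint has been programmed, so this is
         * a cold boot; any other value is the address that
         * el3_entrypoint_common branches to via 'br x0'.
         */
        return *(volatile uintptr_t *)MY_PLAT_MAILBOX_BASE;
    }

diff --git a/include/common/asm_macros.S b/include/common/asm_macros.S
deleted file mode 100644
index bd8bb70..0000000
--- a/include/common/asm_macros.S
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.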
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef __ASM_MACROS_S__ -#define __ASM_MACROS_S__ - -#include - - - .macro func_prologue - stp x29, x30, [sp, #-0x10]! - mov x29,sp - .endm - - .macro func_epilogue - ldp x29, x30, [sp], #0x10 - .endm - - - .macro dcache_line_size reg, tmp - mrs \tmp, ctr_el0 - ubfx \tmp, \tmp, #16, #4 - mov \reg, #4 - lsl \reg, \reg, \tmp - .endm - - - .macro icache_line_size reg, tmp - mrs \tmp, ctr_el0 - and \tmp, \tmp, #0xf - mov \reg, #4 - lsl \reg, \reg, \tmp - .endm - - - .macro smc_check label - mrs x0, esr_el3 - ubfx x0, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH - cmp x0, #EC_AARCH64_SMC - b.ne $label - .endm - - /* - * Declare the exception vector table, enforcing it is aligned on a - * 2KB boundary, as required by the ARMv8 architecture. - * Use zero bytes as the fill value to be stored in the padding bytes - * so that it inserts illegal AArch64 instructions. This increases - * security, robustness and potentially facilitates debugging. - */ - .macro vector_base label - .section .vectors, "ax" - .align 11, 0 - \label: - .endm - - /* - * Create an entry in the exception vector table, enforcing it is - * aligned on a 128-byte boundary, as required by the ARMv8 architecture. - * Use zero bytes as the fill value to be stored in the padding bytes - * so that it inserts illegal AArch64 instructions. This increases - * security, robustness and potentially facilitates debugging. - */ - .macro vector_entry label - .section .vectors, "ax" - .align 7, 0 - \label: - .endm - - /* - * This macro verifies that the given vector doesn't exceed the - * architectural limit of 32 instructions. This is meant to be placed - * immediately after the last instruction in the vector. It takes the - * vector entry as the parameter - */ - .macro check_vector_size since - .if (. - \since) > (32 * 4) - .error "Vector exceeds 32 instructions" - .endif - .endm - - /* - * This macro is used to create a function label and place the - * code into a separate text section based on the function name - * to enable elimination of unused code during linking - */ - .macro func _name - .section .text.\_name, "ax" - .type \_name, %function - .func \_name - \_name: - .endm - - /* - * This macro is used to mark the end of a function. - */ - .macro endfunc _name - .endfunc - .size \_name, . - \_name - .endm - - /* - * Theses macros are used to create function labels for deprecated - * APIs. If ERROR_DEPRECATED is non zero, the callers of these APIs - * will fail to link and cause build failure. - */ -#if ERROR_DEPRECATED - .macro func_deprecated _name - func deprecated\_name - .endm - - .macro endfunc_deprecated _name - endfunc deprecated\_name - .endm -#else - .macro func_deprecated _name - func \_name - .endm - - .macro endfunc_deprecated _name - endfunc \_name - .endm -#endif - - /* - * Helper assembler macro to count trailing zeros. The output is - * populated in the `TZ_COUNT` symbol. 
- */ - .macro count_tz _value, _tz_count - .if \_value - count_tz "(\_value >> 1)", "(\_tz_count + 1)" - .else - .equ TZ_COUNT, (\_tz_count - 1) - .endif - .endm - - /* - * This macro declares an array of 1 or more stacks, properly - * aligned and in the requested section - */ -#define DEFAULT_STACK_ALIGN (1 << 6) /* In case the caller doesnt provide alignment */ - - .macro declare_stack _name, _section, _size, _count, _align=DEFAULT_STACK_ALIGN - count_tz \_align, 0 - .if (\_align - (1 << TZ_COUNT)) - .error "Incorrect stack alignment specified (Must be a power of 2)." - .endif - .if ((\_size & ((1 << TZ_COUNT) - 1)) <> 0) - .error "Stack size not correctly aligned" - .endif - .section \_section, "aw", %nobits - .align TZ_COUNT - \_name: - .space ((\_count) * (\_size)), 0 - .endm - -#if ENABLE_PLAT_COMPAT - /* - * This macro calculates the base address of an MP stack using the - * platform_get_core_pos() index, the name of the stack storage and - * the size of each stack - * In: X0 = MPIDR of CPU whose stack is wanted - * Out: X0 = physical address of stack base - * Clobber: X30, X1, X2 - */ - .macro get_mp_stack _name, _size - bl platform_get_core_pos - ldr x2, =(\_name + \_size) - mov x1, #\_size - madd x0, x0, x1, x2 - .endm -#endif - - /* - * This macro calculates the base address of the current CPU's MP stack - * using the plat_my_core_pos() index, the name of the stack storage - * and the size of each stack - * Out: X0 = physical address of stack base - * Clobber: X30, X1, X2 - */ - .macro get_my_mp_stack _name, _size - bl plat_my_core_pos - ldr x2, =(\_name + \_size) - mov x1, #\_size - madd x0, x0, x1, x2 - .endm - - /* - * This macro calculates the base address of a UP stack using the - * name of the stack storage and the size of the stack - * Out: X0 = physical address of stack base - */ - .macro get_up_stack _name, _size - ldr x0, =(\_name + \_size) - .endm - - /* - * Helper macro to generate the best mov/movk combinations according - * the value to be moved. The 16 bits from '_shift' are tested and - * if not zero, they are moved into '_reg' without affecting - * other bits. - */ - .macro _mov_imm16 _reg, _val, _shift - .if (\_val >> \_shift) & 0xffff - .if (\_val & (1 << \_shift - 1)) - movk \_reg, (\_val >> \_shift) & 0xffff, LSL \_shift - .else - mov \_reg, \_val & (0xffff << \_shift) - .endif - .endif - .endm - - /* - * Helper macro to load arbitrary values into 32 or 64-bit registers - * which generates the best mov/movk combinations. Many base addresses - * are 64KB aligned the macro will eliminate updating bits 15:0 in - * that case - */ - .macro mov_imm _reg, _val - .if (\_val) == 0 - mov \_reg, #0 - .else - _mov_imm16 \_reg, (\_val), 0 - _mov_imm16 \_reg, (\_val), 16 - _mov_imm16 \_reg, (\_val), 32 - _mov_imm16 \_reg, (\_val), 48 - .endif - .endm - -#endif /* __ASM_MACROS_S__ */ diff --git a/include/common/asm_macros_common.S b/include/common/asm_macros_common.S new file mode 100644 index 0000000..ee59a93 --- /dev/null +++ b/include/common/asm_macros_common.S @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. 
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __ASM_MACROS_COMMON_S__
+#define __ASM_MACROS_COMMON_S__
+
+#include
+
+	/*
+	 * This macro is used to create a function label and place the
+	 * code into a separate text section based on the function name
+	 * to enable elimination of unused code during linking
+	 */
+	.macro func _name
+	.section .text.\_name, "ax"
+	.type \_name, %function
+	.func \_name
+	\_name:
+	.endm
+
+	/*
+	 * This macro is used to mark the end of a function.
+	 */
+	.macro endfunc _name
+	.endfunc
+	.size \_name, . - \_name
+	.endm
+
+	/*
+	 * These macros are used to create function labels for deprecated
+	 * APIs. If ERROR_DEPRECATED is non-zero, the callers of these APIs
+	 * will fail to link and cause build failure.
+	 */
+#if ERROR_DEPRECATED
+	.macro func_deprecated _name
+	func deprecated\_name
+	.endm
+
+	.macro endfunc_deprecated _name
+	endfunc deprecated\_name
+	.endm
+#else
+	.macro func_deprecated _name
+	func \_name
+	.endm
+
+	.macro endfunc_deprecated _name
+	endfunc \_name
+	.endm
+#endif
+
+	/*
+	 * Helper assembler macro to count trailing zeros. The output is
+	 * populated in the `TZ_COUNT` symbol.
+	 */
+	.macro count_tz _value, _tz_count
+	.if \_value
+	  count_tz "(\_value >> 1)", "(\_tz_count + 1)"
+	.else
+	  .equ TZ_COUNT, (\_tz_count - 1)
+	.endif
+	.endm
+
+	/*
+	 * This macro declares an array of 1 or more stacks, properly
+	 * aligned and in the requested section
+	 */
+#define DEFAULT_STACK_ALIGN	(1 << 6)   /* In case the caller doesn't provide alignment */
+
+	.macro declare_stack _name, _section, _size, _count, _align=DEFAULT_STACK_ALIGN
+	count_tz \_align, 0
+	.if (\_align - (1 << TZ_COUNT))
+	  .error "Incorrect stack alignment specified (Must be a power of 2)."
+	.endif
+	.if ((\_size & ((1 << TZ_COUNT) - 1)) <> 0)
+	  .error "Stack size not correctly aligned"
+	.endif
+	.section \_section, "aw", %nobits
+	.align TZ_COUNT
+	\_name:
+	.space ((\_count) * (\_size)), 0
+	.endm
+
+
+#endif /* __ASM_MACROS_COMMON_S__ */
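The count_tz recursion above is easy to mis-read, so here is a C sketch of the same logic (illustrative only; the names mirror the macros). For a non-zero power-of-two alignment it yields log2 of the value, and check_stack_params applies the same two checks that declare_stack performs with '.error' at assembly time:

    #include <assert.h>

    /*
     * Same recursion as the count_tz macro: shift right until the value
     * reaches zero, then subtract one from the shift count. For example,
     * count_tz(64, 0) == 6, i.e. log2(64). Callers must pass a non-zero
     * value, matching the macro's assumption.
     */
    static unsigned int count_tz(unsigned long long value, unsigned int tz_count)
    {
        if (value != 0)
            return count_tz(value >> 1, tz_count + 1);
        return tz_count - 1;
    }

    /* The checks declare_stack performs at assembly time. */
    static void check_stack_params(unsigned long long size, unsigned long long align)
    {
        unsigned int tz = count_tz(align, 0);

        assert(align == (1ULL << tz));      /* alignment is a power of 2 */
        assert((size & (align - 1)) == 0);  /* size is a multiple of it  */
    }

diff --git a/include/common/assert_macros.S b/include/common/assert_macros.S
deleted file mode 100644
index cb6c78b..0000000
--- a/include/common/assert_macros.S
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.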
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef __ASSERT_MACROS_S__ -#define __ASSERT_MACROS_S__ - - /* - * Assembler macro to enable asm_assert. Use this macro wherever - * assert is required in assembly. Please note that the macro makes - * use of label '300' to provide the logic and the caller - * should make sure that this label is not used to branch prior - * to calling this macro. 
- */ -#define ASM_ASSERT(_cc) \ -.ifndef .L_assert_filename ;\ - .pushsection .rodata.str1.1, "aS" ;\ - .L_assert_filename: ;\ - .string __FILE__ ;\ - .popsection ;\ -.endif ;\ - b._cc 300f ;\ - adr x0, .L_assert_filename ;\ - mov x1, __LINE__ ;\ - b asm_assert ;\ -300: - -#endif /* __ASSERT_MACROS_S__ */ diff --git a/include/common/bl_common.h b/include/common/bl_common.h index 646a817..3aa0836 100644 --- a/include/common/bl_common.h +++ b/include/common/bl_common.h @@ -137,6 +137,7 @@ #include #include #include +#include #include /* To retain compatibility */ /* @@ -144,28 +145,28 @@ * BL images */ #if SEPARATE_CODE_AND_RODATA -extern unsigned long __TEXT_START__; -extern unsigned long __TEXT_END__; -extern unsigned long __RODATA_START__; -extern unsigned long __RODATA_END__; +extern uintptr_t __TEXT_START__; +extern uintptr_t __TEXT_END__; +extern uintptr_t __RODATA_START__; +extern uintptr_t __RODATA_END__; #else -extern unsigned long __RO_START__; -extern unsigned long __RO_END__; +extern uintptr_t __RO_START__; +extern uintptr_t __RO_END__; #endif #if IMAGE_BL2 -extern unsigned long __BL2_END__; +extern uintptr_t __BL2_END__; #elif IMAGE_BL2U -extern unsigned long __BL2U_END__; +extern uintptr_t __BL2U_END__; #elif IMAGE_BL31 -extern unsigned long __BL31_END__; +extern uintptr_t __BL31_END__; #elif IMAGE_BL32 -extern unsigned long __BL32_END__; +extern uintptr_t __BL32_END__; #endif /* IMAGE_BLX */ #if USE_COHERENT_MEM -extern unsigned long __COHERENT_RAM_START__; -extern unsigned long __COHERENT_RAM_END__; +extern uintptr_t __COHERENT_RAM_START__; +extern uintptr_t __COHERENT_RAM_END__; #endif @@ -174,21 +175,21 @@ * memory is available for its use and how much is already used. ******************************************************************************/ typedef struct meminfo { - uint64_t total_base; + uintptr_t total_base; size_t total_size; - uint64_t free_base; + uintptr_t free_base; size_t free_size; } meminfo_t; typedef struct aapcs64_params { - unsigned long arg0; - unsigned long arg1; - unsigned long arg2; - unsigned long arg3; - unsigned long arg4; - unsigned long arg5; - unsigned long arg6; - unsigned long arg7; + u_register_t arg0; + u_register_t arg1; + u_register_t arg2; + u_register_t arg3; + u_register_t arg4; + u_register_t arg5; + u_register_t arg6; + u_register_t arg7; } aapcs64_params_t; /*************************************************************************** @@ -284,7 +285,7 @@ __builtin_offsetof(entry_point_info_t, args), \ assert_BL31_args_offset_mismatch); -CASSERT(sizeof(unsigned long) == +CASSERT(sizeof(uintptr_t) == __builtin_offsetof(entry_point_info_t, spsr) - \ __builtin_offsetof(entry_point_info_t, pc), \ assert_entrypoint_and_spsr_should_be_adjacent); @@ -292,8 +293,8 @@ /******************************************************************************* * Function & variable prototypes ******************************************************************************/ -unsigned long page_align(unsigned long, unsigned); -unsigned long image_size(unsigned int image_id); +uintptr_t page_align(uintptr_t, unsigned); +size_t image_size(unsigned int image_id); int load_image(meminfo_t *mem_layout, unsigned int image_id, uintptr_t image_base, @@ -307,8 +308,8 @@ extern const char build_message[]; extern const char version_string[]; -void reserve_mem(uint64_t *free_base, size_t *free_size, - uint64_t addr, size_t size); +void reserve_mem(uintptr_t *free_base, size_t *free_size, + uintptr_t addr, size_t size); void print_entry_point_info(const entry_point_info_t 
*ep_info); diff --git a/include/common/context.h b/include/common/context.h deleted file mode 100644 index ec47f2a..0000000 --- a/include/common/context.h +++ /dev/null @@ -1,351 +0,0 @@ -/* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __CONTEXT_H__ -#define __CONTEXT_H__ - -/******************************************************************************* - * Constants that allow assembler code to access members of and the 'gp_regs' - * structure at their correct offsets. - ******************************************************************************/ -#define CTX_GPREGS_OFFSET 0x0 -#define CTX_GPREG_X0 0x0 -#define CTX_GPREG_X1 0x8 -#define CTX_GPREG_X2 0x10 -#define CTX_GPREG_X3 0x18 -#define CTX_GPREG_X4 0x20 -#define CTX_GPREG_X5 0x28 -#define CTX_GPREG_X6 0x30 -#define CTX_GPREG_X7 0x38 -#define CTX_GPREG_X8 0x40 -#define CTX_GPREG_X9 0x48 -#define CTX_GPREG_X10 0x50 -#define CTX_GPREG_X11 0x58 -#define CTX_GPREG_X12 0x60 -#define CTX_GPREG_X13 0x68 -#define CTX_GPREG_X14 0x70 -#define CTX_GPREG_X15 0x78 -#define CTX_GPREG_X16 0x80 -#define CTX_GPREG_X17 0x88 -#define CTX_GPREG_X18 0x90 -#define CTX_GPREG_X19 0x98 -#define CTX_GPREG_X20 0xa0 -#define CTX_GPREG_X21 0xa8 -#define CTX_GPREG_X22 0xb0 -#define CTX_GPREG_X23 0xb8 -#define CTX_GPREG_X24 0xc0 -#define CTX_GPREG_X25 0xc8 -#define CTX_GPREG_X26 0xd0 -#define CTX_GPREG_X27 0xd8 -#define CTX_GPREG_X28 0xe0 -#define CTX_GPREG_X29 0xe8 -#define CTX_GPREG_LR 0xf0 -#define CTX_GPREG_SP_EL0 0xf8 -#define CTX_GPREGS_END 0x100 - -/******************************************************************************* - * Constants that allow assembler code to access members of and the 'el3_state' - * structure at their correct offsets. 
Note that some of the registers are only - * 32-bits wide but are stored as 64-bit values for convenience - ******************************************************************************/ -#define CTX_EL3STATE_OFFSET (CTX_GPREGS_OFFSET + CTX_GPREGS_END) -#define CTX_SCR_EL3 0x0 -#define CTX_RUNTIME_SP 0x8 -#define CTX_SPSR_EL3 0x10 -#define CTX_ELR_EL3 0x18 -#define CTX_EL3STATE_END 0x20 - -/******************************************************************************* - * Constants that allow assembler code to access members of and the - * 'el1_sys_regs' structure at their correct offsets. Note that some of the - * registers are only 32-bits wide but are stored as 64-bit values for - * convenience - ******************************************************************************/ -#define CTX_SYSREGS_OFFSET (CTX_EL3STATE_OFFSET + CTX_EL3STATE_END) -#define CTX_SPSR_EL1 0x0 -#define CTX_ELR_EL1 0x8 -#define CTX_SCTLR_EL1 0x10 -#define CTX_ACTLR_EL1 0x18 -#define CTX_CPACR_EL1 0x20 -#define CTX_CSSELR_EL1 0x28 -#define CTX_SP_EL1 0x30 -#define CTX_ESR_EL1 0x38 -#define CTX_TTBR0_EL1 0x40 -#define CTX_TTBR1_EL1 0x48 -#define CTX_MAIR_EL1 0x50 -#define CTX_AMAIR_EL1 0x58 -#define CTX_TCR_EL1 0x60 -#define CTX_TPIDR_EL1 0x68 -#define CTX_TPIDR_EL0 0x70 -#define CTX_TPIDRRO_EL0 0x78 -#define CTX_PAR_EL1 0x80 -#define CTX_FAR_EL1 0x88 -#define CTX_AFSR0_EL1 0x90 -#define CTX_AFSR1_EL1 0x98 -#define CTX_CONTEXTIDR_EL1 0xa0 -#define CTX_VBAR_EL1 0xa8 - -/* - * If the platform is AArch64-only, there is no need to save and restore these - * AArch32 registers. - */ -#if CTX_INCLUDE_AARCH32_REGS -#define CTX_SPSR_ABT 0xb0 -#define CTX_SPSR_UND 0xb8 -#define CTX_SPSR_IRQ 0xc0 -#define CTX_SPSR_FIQ 0xc8 -#define CTX_DACR32_EL2 0xd0 -#define CTX_IFSR32_EL2 0xd8 -#define CTX_FP_FPEXC32_EL2 0xe0 -#define CTX_TIMER_SYSREGS_OFF 0xf0 /* Align to the next 16 byte boundary */ -#else -#define CTX_TIMER_SYSREGS_OFF 0xb0 -#endif /* __CTX_INCLUDE_AARCH32_REGS__ */ - -/* - * If the timer registers aren't saved and restored, we don't have to reserve - * space for them in the context - */ -#if NS_TIMER_SWITCH -#define CTX_CNTP_CTL_EL0 (CTX_TIMER_SYSREGS_OFF + 0x0) -#define CTX_CNTP_CVAL_EL0 (CTX_TIMER_SYSREGS_OFF + 0x8) -#define CTX_CNTV_CTL_EL0 (CTX_TIMER_SYSREGS_OFF + 0x10) -#define CTX_CNTV_CVAL_EL0 (CTX_TIMER_SYSREGS_OFF + 0x18) -#define CTX_CNTKCTL_EL1 (CTX_TIMER_SYSREGS_OFF + 0x20) -#define CTX_SYSREGS_END (CTX_TIMER_SYSREGS_OFF + 0x30) /* Align to the next 16 byte boundary */ -#else -#define CTX_SYSREGS_END CTX_TIMER_SYSREGS_OFF -#endif /* __NS_TIMER_SWITCH__ */ - -/******************************************************************************* - * Constants that allow assembler code to access members of and the 'fp_regs' - * structure at their correct offsets. 
- ******************************************************************************/ -#if CTX_INCLUDE_FPREGS -#define CTX_FPREGS_OFFSET (CTX_SYSREGS_OFFSET + CTX_SYSREGS_END) -#define CTX_FP_Q0 0x0 -#define CTX_FP_Q1 0x10 -#define CTX_FP_Q2 0x20 -#define CTX_FP_Q3 0x30 -#define CTX_FP_Q4 0x40 -#define CTX_FP_Q5 0x50 -#define CTX_FP_Q6 0x60 -#define CTX_FP_Q7 0x70 -#define CTX_FP_Q8 0x80 -#define CTX_FP_Q9 0x90 -#define CTX_FP_Q10 0xa0 -#define CTX_FP_Q11 0xb0 -#define CTX_FP_Q12 0xc0 -#define CTX_FP_Q13 0xd0 -#define CTX_FP_Q14 0xe0 -#define CTX_FP_Q15 0xf0 -#define CTX_FP_Q16 0x100 -#define CTX_FP_Q17 0x110 -#define CTX_FP_Q18 0x120 -#define CTX_FP_Q19 0x130 -#define CTX_FP_Q20 0x140 -#define CTX_FP_Q21 0x150 -#define CTX_FP_Q22 0x160 -#define CTX_FP_Q23 0x170 -#define CTX_FP_Q24 0x180 -#define CTX_FP_Q25 0x190 -#define CTX_FP_Q26 0x1a0 -#define CTX_FP_Q27 0x1b0 -#define CTX_FP_Q28 0x1c0 -#define CTX_FP_Q29 0x1d0 -#define CTX_FP_Q30 0x1e0 -#define CTX_FP_Q31 0x1f0 -#define CTX_FP_FPSR 0x200 -#define CTX_FP_FPCR 0x208 -#define CTX_FPREGS_END 0x210 -#endif - -#ifndef __ASSEMBLY__ - -#include -#include /* for CACHE_WRITEBACK_GRANULE */ -#include - -/* - * Common constants to help define the 'cpu_context' structure and its - * members below. - */ -#define DWORD_SHIFT 3 -#define DEFINE_REG_STRUCT(name, num_regs) \ - typedef struct name { \ - uint64_t _regs[num_regs]; \ - } __aligned(16) name##_t - -/* Constants to determine the size of individual context structures */ -#define CTX_GPREG_ALL (CTX_GPREGS_END >> DWORD_SHIFT) -#define CTX_SYSREG_ALL (CTX_SYSREGS_END >> DWORD_SHIFT) -#if CTX_INCLUDE_FPREGS -#define CTX_FPREG_ALL (CTX_FPREGS_END >> DWORD_SHIFT) -#endif -#define CTX_EL3STATE_ALL (CTX_EL3STATE_END >> DWORD_SHIFT) - -/* - * AArch64 general purpose register context structure. Usually x0-x18, - * lr are saved as the compiler is expected to preserve the remaining - * callee saved registers if used by the C runtime and the assembler - * does not touch the remaining. But in case of world switch during - * exception handling, we need to save the callee registers too. - */ -DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL); - -/* - * AArch64 EL1 system register context structure for preserving the - * architectural state during switches from one security state to - * another in EL1. - */ -DEFINE_REG_STRUCT(el1_sys_regs, CTX_SYSREG_ALL); - -/* - * AArch64 floating point register context structure for preserving - * the floating point state during switches from one security state to - * another. - */ -#if CTX_INCLUDE_FPREGS -DEFINE_REG_STRUCT(fp_regs, CTX_FPREG_ALL); -#endif - -/* - * Miscellaneous registers used by EL3 firmware to maintain its state - * across exception entries and exits - */ -DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL); - -/* - * Macros to access members of any of the above structures using their - * offsets - */ -#define read_ctx_reg(ctx, offset) ((ctx)->_regs[offset >> DWORD_SHIFT]) -#define write_ctx_reg(ctx, offset, val) (((ctx)->_regs[offset >> DWORD_SHIFT]) \ - = val) - -/* - * Top-level context structure which is used by EL3 firmware to - * preserve the state of a core at EL1 in one of the two security - * states and save enough EL3 meta data to be able to return to that - * EL and security state. The context management library will be used - * to ensure that SP_EL3 always points to an instance of this - * structure at exception entry and exit. Each instance will - * correspond to either the secure or the non-secure state. 
- */ -typedef struct cpu_context { - gp_regs_t gpregs_ctx; - el3_state_t el3state_ctx; - el1_sys_regs_t sysregs_ctx; -#if CTX_INCLUDE_FPREGS - fp_regs_t fpregs_ctx; -#endif -} cpu_context_t; - -/* Macros to access members of the 'cpu_context_t' structure */ -#define get_el3state_ctx(h) (&((cpu_context_t *) h)->el3state_ctx) -#if CTX_INCLUDE_FPREGS -#define get_fpregs_ctx(h) (&((cpu_context_t *) h)->fpregs_ctx) -#endif -#define get_sysregs_ctx(h) (&((cpu_context_t *) h)->sysregs_ctx) -#define get_gpregs_ctx(h) (&((cpu_context_t *) h)->gpregs_ctx) - -/* - * Compile time assertions related to the 'cpu_context' structure to - * ensure that the assembler and the compiler view of the offsets of - * the structure members is the same. - */ -CASSERT(CTX_GPREGS_OFFSET == __builtin_offsetof(cpu_context_t, gpregs_ctx), \ - assert_core_context_gp_offset_mismatch); -CASSERT(CTX_SYSREGS_OFFSET == __builtin_offsetof(cpu_context_t, sysregs_ctx), \ - assert_core_context_sys_offset_mismatch); -#if CTX_INCLUDE_FPREGS -CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context_t, fpregs_ctx), \ - assert_core_context_fp_offset_mismatch); -#endif -CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context_t, el3state_ctx), \ - assert_core_context_el3state_offset_mismatch); - -/* - * Helper macro to set the general purpose registers that correspond to - * parameters in an aapcs_64 call i.e. x0-x7 - */ -#define set_aapcs_args0(ctx, x0) do { \ - write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0, x0); \ - } while (0); -#define set_aapcs_args1(ctx, x0, x1) do { \ - write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X1, x1); \ - set_aapcs_args0(ctx, x0); \ - } while (0); -#define set_aapcs_args2(ctx, x0, x1, x2) do { \ - write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X2, x2); \ - set_aapcs_args1(ctx, x0, x1); \ - } while (0); -#define set_aapcs_args3(ctx, x0, x1, x2, x3) do { \ - write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X3, x3); \ - set_aapcs_args2(ctx, x0, x1, x2); \ - } while (0); -#define set_aapcs_args4(ctx, x0, x1, x2, x3, x4) do { \ - write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X4, x4); \ - set_aapcs_args3(ctx, x0, x1, x2, x3); \ - } while (0); -#define set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5) do { \ - write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X5, x5); \ - set_aapcs_args4(ctx, x0, x1, x2, x3, x4); \ - } while (0); -#define set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6) do { \ - write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X6, x6); \ - set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5); \ - } while (0); -#define set_aapcs_args7(ctx, x0, x1, x2, x3, x4, x5, x6, x7) do { \ - write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X7, x7); \ - set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6); \ - } while (0); - -/******************************************************************************* - * Function prototypes - ******************************************************************************/ -void el1_sysregs_context_save(el1_sys_regs_t *regs); -void el1_sysregs_context_restore(el1_sys_regs_t *regs); -#if CTX_INCLUDE_FPREGS -void fpregs_context_save(fp_regs_t *regs); -void fpregs_context_restore(fp_regs_t *regs); -#endif - - -#undef CTX_SYSREG_ALL -#if CTX_INCLUDE_FPREGS -#undef CTX_FPREG_ALL -#endif -#undef CTX_GPREG_ALL -#undef CTX_EL3STATE_ALL - -#endif /* __ASSEMBLY__ */ - -#endif /* __CONTEXT_H__ */ diff --git a/include/common/context_mgmt.h b/include/common/context_mgmt.h deleted file mode 100644 index a76ecbe..0000000 --- a/include/common/context_mgmt.h +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright (c) 
2013-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __CM_H__ -#define __CM_H__ - -#include -#include - -/******************************************************************************* - * Forward declarations - ******************************************************************************/ -struct entry_point_info; - -/******************************************************************************* - * Function & variable prototypes - ******************************************************************************/ -void cm_init(void); -void *cm_get_context_by_mpidr(uint64_t mpidr, - uint32_t security_state) __deprecated; -void cm_set_context_by_mpidr(uint64_t mpidr, - void *context, - uint32_t security_state) __deprecated; -void *cm_get_context_by_index(unsigned int cpu_idx, - unsigned int security_state); -void cm_set_context_by_index(unsigned int cpu_idx, - void *context, - unsigned int security_state); -void *cm_get_context(uint32_t security_state); -void cm_set_context(void *context, uint32_t security_state); -void cm_init_context(uint64_t mpidr, - const struct entry_point_info *ep) __deprecated; -void cm_init_my_context(const struct entry_point_info *ep); -void cm_init_context_by_index(unsigned int cpu_idx, - const struct entry_point_info *ep); -void cm_prepare_el3_exit(uint32_t security_state); -void cm_el1_sysregs_context_save(uint32_t security_state); -void cm_el1_sysregs_context_restore(uint32_t security_state); -void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint); -void cm_set_elr_spsr_el3(uint32_t security_state, - uint64_t entrypoint, uint32_t spsr); -void cm_write_scr_el3_bit(uint32_t security_state, - uint32_t bit_pos, - uint32_t value); -void cm_set_next_eret_context(uint32_t security_state); -uint32_t cm_get_scr_el3(uint32_t security_state); - -/* Inline definitions */ - -/******************************************************************************* - * This function is used to program the context that's used for exception - * return. 
This initializes the SP_EL3 to a pointer to a 'cpu_context' set for - * the required security state - ******************************************************************************/ -static inline void cm_set_next_context(void *context) -{ -#if DEBUG - uint64_t sp_mode; - - /* - * Check that this function is called with SP_EL0 as the stack - * pointer - */ - __asm__ volatile("mrs %0, SPSel\n" - : "=r" (sp_mode)); - - assert(sp_mode == MODE_SP_EL0); -#endif - - __asm__ volatile("msr spsel, #1\n" - "mov sp, %0\n" - "msr spsel, #0\n" - : : "r" (context)); -} -#endif /* __CM_H__ */ diff --git a/include/common/el3_common_macros.S b/include/common/el3_common_macros.S deleted file mode 100644 index 9b22a73..0000000 --- a/include/common/el3_common_macros.S +++ /dev/null @@ -1,280 +0,0 @@ -/* - * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __EL3_COMMON_MACROS_S__ -#define __EL3_COMMON_MACROS_S__ - -#include -#include - - /* - * Helper macro to initialise EL3 registers we care about. - */ - .macro el3_arch_init_common _exception_vectors - /* --------------------------------------------------------------------- - * Enable the instruction cache, stack pointer and data access alignment - * checks - * --------------------------------------------------------------------- - */ - mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT) - mrs x0, sctlr_el3 - orr x0, x0, x1 - msr sctlr_el3, x0 - isb - -#if IMAGE_BL31 - /* --------------------------------------------------------------------- - * Initialise the per-cpu cache pointer to the CPU. - * This is done early to enable crash reporting to have access to crash - * stack. Since crash reporting depends on cpu_data to report the - * unhandled exception, not doing so can lead to recursive exceptions - * due to a NULL TPIDR_EL3. 
- * --------------------------------------------------------------------- - */ - bl init_cpu_data_ptr -#endif /* IMAGE_BL31 */ - - /* --------------------------------------------------------------------- - * Set the exception vectors. - * --------------------------------------------------------------------- - */ - adr x0, \_exception_vectors - msr vbar_el3, x0 - isb - - /* --------------------------------------------------------------------- - * Early set RES1 bits in SCR_EL3. Set EA bit to catch both - * External Aborts and SError Interrupts in EL3 and also the SIF bit - * to disable instruction fetches from Non-secure memory. - * --------------------------------------------------------------------- - */ - mov x0, #(SCR_RES1_BITS | SCR_EA_BIT | SCR_SIF_BIT) - msr scr_el3, x0 - /* --------------------------------------------------------------------- - * Enable External Aborts and SError Interrupts now that the exception - * vectors have been setup. - * --------------------------------------------------------------------- - */ - msr daifclr, #DAIF_ABT_BIT - - /* --------------------------------------------------------------------- - * The initial state of the Architectural feature trap register - * (CPTR_EL3) is unknown and it must be set to a known state. All - * feature traps are disabled. Some bits in this register are marked as - * reserved and should not be modified. - * - * CPTR_EL3.TCPAC: This causes a direct access to the CPACR_EL1 from EL1 - * or the CPTR_EL2 from EL2 to trap to EL3 unless it is trapped at EL2. - * - * CPTR_EL3.TTA: This causes access to the Trace functionality to trap - * to EL3 when executed from EL0, EL1, EL2, or EL3. If system register - * access to trace functionality is not supported, this bit is RES0. - * - * CPTR_EL3.TFP: This causes instructions that access the registers - * associated with Floating Point and Advanced SIMD execution to trap - * to EL3 when executed from any exception level, unless trapped to EL1 - * or EL2. - * --------------------------------------------------------------------- - */ - mrs x0, cptr_el3 - bic w0, w0, #TCPAC_BIT - bic w0, w0, #TTA_BIT - bic w0, w0, #TFP_BIT - msr cptr_el3, x0 - .endm - -/* ----------------------------------------------------------------------------- - * This is the super set of actions that need to be performed during a cold boot - * or a warm boot in EL3. This code is shared by BL1 and BL31. - * - * This macro will always perform reset handling, architectural initialisations - * and stack setup. The rest of the actions are optional because they might not - * be needed, depending on the context in which this macro is called. This is - * why this macro is parameterised ; each parameter allows to enable/disable - * some actions. - * - * _set_endian: - * Whether the macro needs to configure the endianness of data accesses. - * - * _warm_boot_mailbox: - * Whether the macro needs to detect the type of boot (cold/warm). The - * detection is based on the platform entrypoint address : if it is zero - * then it is a cold boot, otherwise it is a warm boot. In the latter case, - * this macro jumps on the platform entrypoint address. - * - * _secondary_cold_boot: - * Whether the macro needs to identify the CPU that is calling it: primary - * CPU or secondary CPU. The primary CPU will be allowed to carry on with - * the platform initialisations, while the secondaries will be put in a - * platform-specific state in the meantime. 
- * - * If the caller knows this macro will only be called by the primary CPU - * then this parameter can be defined to 0 to skip this step. - * - * _init_memory: - * Whether the macro needs to initialise the memory. - * - * _init_c_runtime: - * Whether the macro needs to initialise the C runtime environment. - * - * _exception_vectors: - * Address of the exception vectors to program in the VBAR_EL3 register. - * ----------------------------------------------------------------------------- - */ - .macro el3_entrypoint_common \ - _set_endian, _warm_boot_mailbox, _secondary_cold_boot, \ - _init_memory, _init_c_runtime, _exception_vectors - - .if \_set_endian - /* ------------------------------------------------------------- - * Set the CPU endianness before doing anything that might - * involve memory reads or writes. - * ------------------------------------------------------------- - */ - mrs x0, sctlr_el3 - bic x0, x0, #SCTLR_EE_BIT - msr sctlr_el3, x0 - isb - .endif /* _set_endian */ - - .if \_warm_boot_mailbox - /* ------------------------------------------------------------- - * This code will be executed for both warm and cold resets. - * Now is the time to distinguish between the two. - * Query the platform entrypoint address and if it is not zero - * then it means it is a warm boot so jump to this address. - * ------------------------------------------------------------- - */ - bl plat_get_my_entrypoint - cbz x0, do_cold_boot - br x0 - - do_cold_boot: - .endif /* _warm_boot_mailbox */ - - /* --------------------------------------------------------------------- - * It is a cold boot. - * Perform any processor specific actions upon reset e.g. cache, TLB - * invalidations etc. - * --------------------------------------------------------------------- - */ - bl reset_handler - - el3_arch_init_common \_exception_vectors - - .if \_secondary_cold_boot - /* ------------------------------------------------------------- - * Check if this is a primary or secondary CPU cold boot. - * The primary CPU will set up the platform while the - * secondaries are placed in a platform-specific state until the - * primary CPU performs the necessary actions to bring them out - * of that state and allows entry into the OS. - * ------------------------------------------------------------- - */ - bl plat_is_my_cpu_primary - cbnz w0, do_primary_cold_boot - - /* This is a cold boot on a secondary CPU */ - bl plat_secondary_cold_boot_setup - /* plat_secondary_cold_boot_setup() is not supposed to return */ - bl el3_panic - - do_primary_cold_boot: - .endif /* _secondary_cold_boot */ - - /* --------------------------------------------------------------------- - * Initialize memory now. Secondary CPU initialization won't get to this - * point. - * --------------------------------------------------------------------- - */ - - .if \_init_memory - bl platform_mem_init - .endif /* _init_memory */ - - /* --------------------------------------------------------------------- - * Init C runtime environment: - * - Zero-initialise the NOBITS sections. There are 2 of them: - * - the .bss section; - * - the coherent memory section (if any). - * - Relocate the data section from ROM to RAM, if required. - * --------------------------------------------------------------------- - */ - .if \_init_c_runtime -#if IMAGE_BL31 - /* ------------------------------------------------------------- - * Invalidate the RW memory used by the BL31 image. This - * includes the data and NOBITS sections. 
This is done to - * safeguard against possible corruption of this memory by - * dirty cache lines in a system cache as a result of use by - * an earlier boot loader stage. - * ------------------------------------------------------------- - */ - adr x0, __RW_START__ - adr x1, __RW_END__ - sub x1, x1, x0 - bl inv_dcache_range -#endif /* IMAGE_BL31 */ - - ldr x0, =__BSS_START__ - ldr x1, =__BSS_SIZE__ - bl zeromem16 - -#if USE_COHERENT_MEM - ldr x0, =__COHERENT_RAM_START__ - ldr x1, =__COHERENT_RAM_UNALIGNED_SIZE__ - bl zeromem16 -#endif - -#if IMAGE_BL1 - ldr x0, =__DATA_RAM_START__ - ldr x1, =__DATA_ROM_START__ - ldr x2, =__DATA_SIZE__ - bl memcpy16 -#endif - .endif /* _init_c_runtime */ - - /* --------------------------------------------------------------------- - * Use SP_EL0 for the C runtime stack. - * --------------------------------------------------------------------- - */ - msr spsel, #0 - - /* --------------------------------------------------------------------- - * Allocate a stack whose memory will be marked as Normal-IS-WBWA when - * the MMU is enabled. There is no risk of reading stale stack memory - * after enabling the MMU as only the primary CPU is running at the - * moment. - * --------------------------------------------------------------------- - */ - bl plat_set_my_stack - .endm - -#endif /* __EL3_COMMON_MACROS_S__ */ diff --git a/include/common/runtime_svc.h b/include/common/runtime_svc.h new file mode 100644 index 0000000..adafcee --- /dev/null +++ b/include/common/runtime_svc.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef __RUNTIME_SVC_H__
+#define __RUNTIME_SVC_H__
+
+#include <bl_common.h>		/* to include exception types */
+#include <smcc_helpers.h>	/* to include SMCC definitions */
+
+
+/*******************************************************************************
+ * Structure definition, typedefs & constants for the runtime service framework
+ ******************************************************************************/
+
+/*
+ * Constants to allow the assembler to access a runtime service
+ * descriptor
+ */
+#define RT_SVC_SIZE_LOG2	5
+#define SIZEOF_RT_SVC_DESC	(1 << RT_SVC_SIZE_LOG2)
+#define RT_SVC_DESC_INIT	16
+#define RT_SVC_DESC_HANDLE	24
+
+/*
+ * The function identifier has 6 bits for the owning entity number and
+ * a single bit for the type of SMC call. Taken together, these values
+ * limit the maximum number of runtime services to 128.
+ */
+#define MAX_RT_SVCS		128
+
+#ifndef __ASSEMBLY__
+
+/* Prototype for runtime service initializing function */
+typedef int32_t (*rt_svc_init_t)(void);
+
+/*
+ * Prototype for runtime service SMC handler function. x0 (SMC Function ID) to
+ * x4 are as passed by the caller. The rest of the arguments to the SMC and the
+ * context can be accessed using the handle pointer. The cookie parameter is
+ * reserved for future use
+ */
+typedef uintptr_t (*rt_svc_handle_t)(uint32_t smc_fid,
+				  u_register_t x1,
+				  u_register_t x2,
+				  u_register_t x3,
+				  u_register_t x4,
+				  void *cookie,
+				  void *handle,
+				  u_register_t flags);
+typedef struct rt_svc_desc {
+	uint8_t start_oen;
+	uint8_t end_oen;
+	uint8_t call_type;
+	const char *name;
+	rt_svc_init_t init;
+	rt_svc_handle_t handle;
+} rt_svc_desc_t;
+
+/*
+ * Convenience macro to declare a service descriptor
+ */
+#define DECLARE_RT_SVC(_name, _start, _end, _type, _setup, _smch)	\
+	static const rt_svc_desc_t __svc_desc_ ## _name			\
+		__section("rt_svc_descs") __used = {			\
+			.start_oen = _start,				\
+			.end_oen = _end,				\
+			.call_type = _type,				\
+			.name = #_name,					\
+			.init = _setup,					\
+			.handle = _smch }
+
+/*
+ * Compile time assertions related to the 'rt_svc_desc' structure to:
+ * 1. ensure that the assembler and the compiler view of the size
+ *    of the structure are the same.
+ * 2. ensure that the assembler and the compiler see the initialisation
+ *    routine at the same offset.
+ * 3. ensure that the assembler and the compiler see the handler
+ *    routine at the same offset.
+ */
+CASSERT((sizeof(rt_svc_desc_t) == SIZEOF_RT_SVC_DESC),	\
+	assert_sizeof_rt_svc_desc_mismatch);
+CASSERT(RT_SVC_DESC_INIT == __builtin_offsetof(rt_svc_desc_t, init),	\
+	assert_rt_svc_desc_init_offset_mismatch);
+CASSERT(RT_SVC_DESC_HANDLE == __builtin_offsetof(rt_svc_desc_t, handle),	\
+	assert_rt_svc_desc_handle_offset_mismatch);
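The descriptor machinery above is easiest to see with a concrete service. The sketch below is illustrative only: the service name and the function ID are invented, but DECLARE_RT_SVC, the handler prototype, OEN_SIP_START/OEN_SIP_END, SMC_TYPE_FAST, SMC_UNK and SMC_RET1 are the definitions carried by this patch:

    /* Illustrative SiP-range service; 0x82000001 is an invented fast SMC FID. */
    static int32_t demo_svc_setup(void)
    {
        return 0;   /* 0 tells the framework that initialisation succeeded */
    }

    static uintptr_t demo_svc_smc_handler(uint32_t smc_fid,
                                          u_register_t x1, u_register_t x2,
                                          u_register_t x3, u_register_t x4,
                                          void *cookie, void *handle,
                                          u_register_t flags)
    {
        if (smc_fid == 0x82000001)
            SMC_RET1(handle, x1 + x2);  /* writes x0 into the saved context */

        SMC_RET1(handle, SMC_UNK);      /* unrecognised function ID */
    }

    /* Routes all fast SMCs in the SiP OEN range to the handler above. */
    DECLARE_RT_SVC(demo_svc,
        OEN_SIP_START,
        OEN_SIP_END,
        SMC_TYPE_FAST,
        demo_svc_setup,
        demo_svc_smc_handler);

+
+/*
+ * This macro combines the call type and the owning entity number corresponding
+ * to a runtime service to generate a unique owning entity number. This unique
+ * oen is used to access an entry in the 'rt_svc_descs_indices' array. The entry
+ * contains the index of the service descriptor in the 'rt_svc_descs' array.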
+ */ +#define get_unique_oen(oen, call_type) ((oen & FUNCID_OEN_MASK) | \ + ((call_type & FUNCID_TYPE_MASK) \ + << FUNCID_OEN_WIDTH)) + +/******************************************************************************* + * Function & variable prototypes + ******************************************************************************/ +void runtime_svc_init(void); +extern uintptr_t __RT_SVC_DESCS_START__; +extern uintptr_t __RT_SVC_DESCS_END__; +void init_crash_reporting(void); + +#endif /*__ASSEMBLY__*/ +#endif /* __RUNTIME_SVC_H__ */ diff --git a/include/common/smcc_helpers.h b/include/common/smcc_helpers.h deleted file mode 100644 index 6a07b01..0000000 --- a/include/common/smcc_helpers.h +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef __SMCC_HELPERS_H__ -#define __SMCC_HELPERS_H__ - -/******************************************************************************* - * Bit definitions inside the function id as per the SMC calling convention - ******************************************************************************/ -#define FUNCID_TYPE_SHIFT 31 -#define FUNCID_CC_SHIFT 30 -#define FUNCID_OEN_SHIFT 24 -#define FUNCID_NUM_SHIFT 0 - -#define FUNCID_TYPE_MASK 0x1 -#define FUNCID_CC_MASK 0x1 -#define FUNCID_OEN_MASK 0x3f -#define FUNCID_NUM_MASK 0xffff - -#define FUNCID_TYPE_WIDTH 1 -#define FUNCID_CC_WIDTH 1 -#define FUNCID_OEN_WIDTH 6 -#define FUNCID_NUM_WIDTH 16 - -#define GET_SMC_CC(id) ((id >> FUNCID_CC_SHIFT) & \ - FUNCID_CC_MASK) -#define GET_SMC_TYPE(id) ((id >> FUNCID_TYPE_SHIFT) & \ - FUNCID_TYPE_MASK) - -#define SMC_64 1 -#define SMC_32 0 -#define SMC_UNK 0xffffffff -#define SMC_TYPE_FAST 1 -#define SMC_TYPE_STD 0 -#define SMC_PREEMPTED 0xfffffffe -/******************************************************************************* - * Owning entity number definitions inside the function id as per the SMC - * calling convention - ******************************************************************************/ -#define OEN_ARM_START 0 -#define OEN_ARM_END 0 -#define OEN_CPU_START 1 -#define OEN_CPU_END 1 -#define OEN_SIP_START 2 -#define OEN_SIP_END 2 -#define OEN_OEM_START 3 -#define OEN_OEM_END 3 -#define OEN_STD_START 4 /* Standard Calls */ -#define OEN_STD_END 4 -#define OEN_TAP_START 48 /* Trusted Applications */ -#define OEN_TAP_END 49 -#define OEN_TOS_START 50 /* Trusted OS */ -#define OEN_TOS_END 63 -#define OEN_LIMIT 64 - -#ifndef __ASSEMBLY__ - -#include -#include -#include - -/* Various flags passed to SMC handlers */ -#define SMC_FROM_SECURE (0 << 0) -#define SMC_FROM_NON_SECURE (1 << 0) - -#define is_caller_non_secure(_f) (!!(_f & SMC_FROM_NON_SECURE)) -#define is_caller_secure(_f) (!(is_caller_non_secure(_f))) - -/* Convenience macros to return from SMC handler */ -#define SMC_RET0(_h) { \ - return (uint64_t) (_h); \ -} -#define SMC_RET1(_h, _x0) { \ - write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X0, (_x0)); \ - SMC_RET0(_h); \ -} -#define SMC_RET2(_h, _x0, _x1) { \ - write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X1, (_x1)); \ - SMC_RET1(_h, (_x0)); \ -} -#define SMC_RET3(_h, _x0, _x1, _x2) { \ - write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X2, (_x2)); \ - SMC_RET2(_h, (_x0), (_x1)); \ -} -#define SMC_RET4(_h, _x0, _x1, _x2, _x3) { \ - write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X3, (_x3)); \ - SMC_RET3(_h, (_x0), (_x1), (_x2)); \ -} - -/* - * Convenience macros to access general purpose registers using handle provided - * to SMC handler. These takes the offset values defined in context.h - */ -#define SMC_GET_GP(_h, _g) \ - read_ctx_reg(get_gpregs_ctx(_h), (_g)); -#define SMC_SET_GP(_h, _g, _v) \ - write_ctx_reg(get_gpregs_ctx(_h), (_g), (_v)); - -/* - * Convenience macros to access EL3 context registers using handle provided to - * SMC handler. 
- * SMC handler. These takes the offset values defined in context.h
- */
-#define SMC_GET_EL3(_h, _e)						\
-	read_ctx_reg(get_el3state_ctx(_h), (_e));
-#define SMC_SET_EL3(_h, _e, _v)						\
-	write_ctx_reg(get_el3state_ctx(_h), (_e), (_v));
-
-/* The macro below is used to identify a Standard Service SMC call */
-#define is_std_svc_call(_fid)		((((_fid) >> FUNCID_OEN_SHIFT) & \
-					 FUNCID_OEN_MASK) == OEN_STD_START)
-
-/* The macro below is used to identify a valid Fast SMC call */
-#define is_valid_fast_smc(_fid)		((!(((_fid) >> 16) & 0xff)) && \
-					 (GET_SMC_TYPE(_fid) == SMC_TYPE_FAST))
-
-/*
- * Macro to define UUID for services. Apart from defining and initializing a
- * uuid_t structure, this macro verifies that the first word of the defined UUID
- * does not equal SMC_UNK. This is to ensure that the caller won't mistake the
- * returned UUID in x0 for an invalid SMC error return
- */
-#define DEFINE_SVC_UUID(_name, _tl, _tm, _th, _cl, _ch, \
-		_n0, _n1, _n2, _n3, _n4, _n5) \
-	CASSERT(_tl != SMC_UNK, invalid_svc_uuid);\
-	static const uuid_t _name = { \
-		_tl, _tm, _th, _cl, _ch, \
-		{ _n0, _n1, _n2, _n3, _n4, _n5 } \
-	}
-
-/* Return a UUID in the SMC return registers */
-#define SMC_UUID_RET(_h, _uuid)						\
-	SMC_RET4(handle, ((const uint32_t *) &(_uuid))[0],	\
-			 ((const uint32_t *) &(_uuid))[1],	\
-			 ((const uint32_t *) &(_uuid))[2],	\
-			 ((const uint32_t *) &(_uuid))[3])
-
-#endif /*__ASSEMBLY__*/
-#endif /* __SMCC_HELPERS_H__ */
diff --git a/include/drivers/arm/gic_v3.h b/include/drivers/arm/gic_v3.h
index a1b6f1b..c5360ff 100644
--- a/include/drivers/arm/gic_v3.h
+++ b/include/drivers/arm/gic_v3.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -41,6 +41,7 @@
 
 #include
 #include
+#include
 
 /* GICv3 Re-distributor interface registers & shifts */
 
@@ -74,7 +75,7 @@
 /*******************************************************************************
  * Function prototypes
  ******************************************************************************/
-uintptr_t gicv3_get_rdist(uintptr_t gicr_base, uint64_t mpidr);
+uintptr_t gicv3_get_rdist(uintptr_t gicr_base, u_register_t mpidr);
 
 /*******************************************************************************
  * GIC Redistributor interface accessors
diff --git a/include/drivers/arm/gicv3.h b/include/drivers/arm/gicv3.h
index ae6fd91..e915c07 100644
--- a/include/drivers/arm/gicv3.h
+++ b/include/drivers/arm/gicv3.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -170,6 +170,7 @@
 #ifndef __ASSEMBLY__
 
 #include
+#include
 
 #define gicv3_is_intr_id_special_identifier(id) \
 	(((id) >= PENDING_G1S_INTID) && ((id) <= GIC_SPURIOUS_INTERRUPT))
@@ -234,7 +235,7 @@
  * a hash function. Otherwise, the "Processor Number" field will be used to
  * access the array elements.
  ******************************************************************************/
-typedef unsigned int (*mpidr_hash_fn)(unsigned long mpidr);
+typedef unsigned int (*mpidr_hash_fn)(u_register_t mpidr);
 
 typedef struct gicv3_driver_data {
 	uintptr_t gicd_base;
diff --git a/include/lib/aarch64/smcc_helpers.h b/include/lib/aarch64/smcc_helpers.h
new file mode 100644
index 0000000..617a5bc
--- /dev/null
+++ b/include/lib/aarch64/smcc_helpers.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SMCC_HELPERS_H__
+#define __SMCC_HELPERS_H__
+
+#include <smcc.h>
+
+#ifndef __ASSEMBLY__
+#include <context.h>
+
+/* Convenience macros to return from SMC handler */
+#define SMC_RET0(_h)	{						\
+	return (uint64_t) (_h);						\
+}
+#define SMC_RET1(_h, _x0)	{					\
+	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X0, (_x0));		\
+	SMC_RET0(_h);							\
+}
+#define SMC_RET2(_h, _x0, _x1)	{					\
+	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X1, (_x1));		\
+	SMC_RET1(_h, (_x0));						\
+}
+#define SMC_RET3(_h, _x0, _x1, _x2)	{				\
+	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X2, (_x2));		\
+	SMC_RET2(_h, (_x0), (_x1));					\
+}
+#define SMC_RET4(_h, _x0, _x1, _x2, _x3)	{			\
+	write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X3, (_x3));		\
+	SMC_RET3(_h, (_x0), (_x1), (_x2));				\
+}
+
+/*
+ * Convenience macros to access general purpose registers using handle provided
+ * to SMC handler. These take the offset values defined in context.h
+ */
+#define SMC_GET_GP(_h, _g)						\
+	read_ctx_reg(get_gpregs_ctx(_h), (_g))
+#define SMC_SET_GP(_h, _g, _v)						\
+	write_ctx_reg(get_gpregs_ctx(_h), (_g), (_v))
+
+/*
+ * Convenience macros to access EL3 context registers using handle provided to
+ * SMC handler. These take the offset values defined in context.h
+ */
+#define SMC_GET_EL3(_h, _e)						\
+	read_ctx_reg(get_el3state_ctx(_h), (_e))
+#define SMC_SET_EL3(_h, _e, _v)						\
+	write_ctx_reg(get_el3state_ctx(_h), (_e), (_v))
+
+/* Return a UUID in the SMC return registers */
+#define SMC_UUID_RET(_h, _uuid)						\
+	SMC_RET4(_h, ((const uint32_t *) &(_uuid))[0],		\
+		     ((const uint32_t *) &(_uuid))[1],		\
+		     ((const uint32_t *) &(_uuid))[2],		\
+		     ((const uint32_t *) &(_uuid))[3])
+
+#endif /*__ASSEMBLY__*/
+#endif /* __SMCC_HELPERS_H__ */
diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h
new file mode 100644
index 0000000..b528c03
--- /dev/null
+++ b/include/lib/el3_runtime/aarch64/context.h
@@ -0,0 +1,351 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CONTEXT_H__
+#define __CONTEXT_H__
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'gp_regs'
+ * structure at their correct offsets.
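+ *
+ * For example (a minimal sketch, assuming SP_EL3 points to a 'cpu_context'
+ * instance, as the context management library guarantees at exception entry):
+ *	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]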
+ ******************************************************************************/
+#define CTX_GPREGS_OFFSET	0x0
+#define CTX_GPREG_X0		0x0
+#define CTX_GPREG_X1		0x8
+#define CTX_GPREG_X2		0x10
+#define CTX_GPREG_X3		0x18
+#define CTX_GPREG_X4		0x20
+#define CTX_GPREG_X5		0x28
+#define CTX_GPREG_X6		0x30
+#define CTX_GPREG_X7		0x38
+#define CTX_GPREG_X8		0x40
+#define CTX_GPREG_X9		0x48
+#define CTX_GPREG_X10		0x50
+#define CTX_GPREG_X11		0x58
+#define CTX_GPREG_X12		0x60
+#define CTX_GPREG_X13		0x68
+#define CTX_GPREG_X14		0x70
+#define CTX_GPREG_X15		0x78
+#define CTX_GPREG_X16		0x80
+#define CTX_GPREG_X17		0x88
+#define CTX_GPREG_X18		0x90
+#define CTX_GPREG_X19		0x98
+#define CTX_GPREG_X20		0xa0
+#define CTX_GPREG_X21		0xa8
+#define CTX_GPREG_X22		0xb0
+#define CTX_GPREG_X23		0xb8
+#define CTX_GPREG_X24		0xc0
+#define CTX_GPREG_X25		0xc8
+#define CTX_GPREG_X26		0xd0
+#define CTX_GPREG_X27		0xd8
+#define CTX_GPREG_X28		0xe0
+#define CTX_GPREG_X29		0xe8
+#define CTX_GPREG_LR		0xf0
+#define CTX_GPREG_SP_EL0	0xf8
+#define CTX_GPREGS_END		0x100
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'el3_state'
+ * structure at their correct offsets. Note that some of the registers are only
+ * 32-bits wide but are stored as 64-bit values for convenience
+ ******************************************************************************/
+#define CTX_EL3STATE_OFFSET	(CTX_GPREGS_OFFSET + CTX_GPREGS_END)
+#define CTX_SCR_EL3		0x0
+#define CTX_RUNTIME_SP		0x8
+#define CTX_SPSR_EL3		0x10
+#define CTX_ELR_EL3		0x18
+#define CTX_EL3STATE_END	0x20
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the
+ * 'el1_sys_regs' structure at their correct offsets. Note that some of the
+ * registers are only 32-bits wide but are stored as 64-bit values for
+ * convenience
+ ******************************************************************************/
+#define CTX_SYSREGS_OFFSET	(CTX_EL3STATE_OFFSET + CTX_EL3STATE_END)
+#define CTX_SPSR_EL1		0x0
+#define CTX_ELR_EL1		0x8
+#define CTX_SCTLR_EL1		0x10
+#define CTX_ACTLR_EL1		0x18
+#define CTX_CPACR_EL1		0x20
+#define CTX_CSSELR_EL1		0x28
+#define CTX_SP_EL1		0x30
+#define CTX_ESR_EL1		0x38
+#define CTX_TTBR0_EL1		0x40
+#define CTX_TTBR1_EL1		0x48
+#define CTX_MAIR_EL1		0x50
+#define CTX_AMAIR_EL1		0x58
+#define CTX_TCR_EL1		0x60
+#define CTX_TPIDR_EL1		0x68
+#define CTX_TPIDR_EL0		0x70
+#define CTX_TPIDRRO_EL0		0x78
+#define CTX_PAR_EL1		0x80
+#define CTX_FAR_EL1		0x88
+#define CTX_AFSR0_EL1		0x90
+#define CTX_AFSR1_EL1		0x98
+#define CTX_CONTEXTIDR_EL1	0xa0
+#define CTX_VBAR_EL1		0xa8
+
+/*
+ * If the platform is AArch64-only, there is no need to save and restore these
+ * AArch32 registers.
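+ * Leaving them out shrinks each context by 0x40 bytes, as
+ * CTX_TIMER_SYSREGS_OFF below drops from 0xf0 to 0xb0.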
+ */
+#if CTX_INCLUDE_AARCH32_REGS
+#define CTX_SPSR_ABT		0xb0
+#define CTX_SPSR_UND		0xb8
+#define CTX_SPSR_IRQ		0xc0
+#define CTX_SPSR_FIQ		0xc8
+#define CTX_DACR32_EL2		0xd0
+#define CTX_IFSR32_EL2		0xd8
+#define CTX_FP_FPEXC32_EL2	0xe0
+#define CTX_TIMER_SYSREGS_OFF	0xf0 /* Align to the next 16 byte boundary */
+#else
+#define CTX_TIMER_SYSREGS_OFF	0xb0
+#endif /* CTX_INCLUDE_AARCH32_REGS */
+
+/*
+ * If the timer registers aren't saved and restored, we don't have to reserve
+ * space for them in the context
+ */
+#if NS_TIMER_SWITCH
+#define CTX_CNTP_CTL_EL0	(CTX_TIMER_SYSREGS_OFF + 0x0)
+#define CTX_CNTP_CVAL_EL0	(CTX_TIMER_SYSREGS_OFF + 0x8)
+#define CTX_CNTV_CTL_EL0	(CTX_TIMER_SYSREGS_OFF + 0x10)
+#define CTX_CNTV_CVAL_EL0	(CTX_TIMER_SYSREGS_OFF + 0x18)
+#define CTX_CNTKCTL_EL1		(CTX_TIMER_SYSREGS_OFF + 0x20)
+#define CTX_SYSREGS_END		(CTX_TIMER_SYSREGS_OFF + 0x30) /* Align to the next 16 byte boundary */
+#else
+#define CTX_SYSREGS_END		CTX_TIMER_SYSREGS_OFF
+#endif /* NS_TIMER_SWITCH */
+
+/*******************************************************************************
+ * Constants that allow assembler code to access members of the 'fp_regs'
+ * structure at their correct offsets.
+ ******************************************************************************/
+#if CTX_INCLUDE_FPREGS
+#define CTX_FPREGS_OFFSET	(CTX_SYSREGS_OFFSET + CTX_SYSREGS_END)
+#define CTX_FP_Q0		0x0
+#define CTX_FP_Q1		0x10
+#define CTX_FP_Q2		0x20
+#define CTX_FP_Q3		0x30
+#define CTX_FP_Q4		0x40
+#define CTX_FP_Q5		0x50
+#define CTX_FP_Q6		0x60
+#define CTX_FP_Q7		0x70
+#define CTX_FP_Q8		0x80
+#define CTX_FP_Q9		0x90
+#define CTX_FP_Q10		0xa0
+#define CTX_FP_Q11		0xb0
+#define CTX_FP_Q12		0xc0
+#define CTX_FP_Q13		0xd0
+#define CTX_FP_Q14		0xe0
+#define CTX_FP_Q15		0xf0
+#define CTX_FP_Q16		0x100
+#define CTX_FP_Q17		0x110
+#define CTX_FP_Q18		0x120
+#define CTX_FP_Q19		0x130
+#define CTX_FP_Q20		0x140
+#define CTX_FP_Q21		0x150
+#define CTX_FP_Q22		0x160
+#define CTX_FP_Q23		0x170
+#define CTX_FP_Q24		0x180
+#define CTX_FP_Q25		0x190
+#define CTX_FP_Q26		0x1a0
+#define CTX_FP_Q27		0x1b0
+#define CTX_FP_Q28		0x1c0
+#define CTX_FP_Q29		0x1d0
+#define CTX_FP_Q30		0x1e0
+#define CTX_FP_Q31		0x1f0
+#define CTX_FP_FPSR		0x200
+#define CTX_FP_FPCR		0x208
+#define CTX_FPREGS_END		0x210
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <cassert.h>
+#include <platform_def.h>	/* for CACHE_WRITEBACK_GRANULE */
+#include <stdint.h>
+
+/*
+ * Common constants to help define the 'cpu_context' structure and its
+ * members below.
+ */
+#define DWORD_SHIFT		3
+#define DEFINE_REG_STRUCT(name, num_regs)	\
+	typedef struct name {			\
+		uint64_t _regs[num_regs];	\
+	}  __aligned(16) name##_t
+
+/* Constants to determine the size of individual context structures */
+#define CTX_GPREG_ALL		(CTX_GPREGS_END >> DWORD_SHIFT)
+#define CTX_SYSREG_ALL		(CTX_SYSREGS_END >> DWORD_SHIFT)
+#if CTX_INCLUDE_FPREGS
+#define CTX_FPREG_ALL		(CTX_FPREGS_END >> DWORD_SHIFT)
+#endif
+#define CTX_EL3STATE_ALL	(CTX_EL3STATE_END >> DWORD_SHIFT)
+
+/*
+ * AArch64 general purpose register context structure. Usually only x0-x18
+ * and lr are saved, as the compiler is expected to preserve the callee-saved
+ * registers if the C runtime uses them, and the assembler does not touch the
+ * rest. However, in case of a world switch during exception handling, the
+ * callee-saved registers need to be saved as well.
+ */
+DEFINE_REG_STRUCT(gp_regs, CTX_GPREG_ALL);
+
+/*
+ * AArch64 EL1 system register context structure for preserving the
+ * architectural state during switches from one security state to
+ * another in EL1.
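+ *
+ * A minimal usage sketch, assuming 'ctx' points to a valid 'cpu_context_t'
+ * (both helpers are prototyped at the end of this header):
+ *	el1_sysregs_context_save(get_sysregs_ctx(ctx));
+ *	...
+ *	el1_sysregs_context_restore(get_sysregs_ctx(ctx));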
+ */
+DEFINE_REG_STRUCT(el1_sys_regs, CTX_SYSREG_ALL);
+
+/*
+ * AArch64 floating point register context structure for preserving
+ * the floating point state during switches from one security state to
+ * another.
+ */
+#if CTX_INCLUDE_FPREGS
+DEFINE_REG_STRUCT(fp_regs, CTX_FPREG_ALL);
+#endif
+
+/*
+ * Miscellaneous registers used by EL3 firmware to maintain its state
+ * across exception entries and exits
+ */
+DEFINE_REG_STRUCT(el3_state, CTX_EL3STATE_ALL);
+
+/*
+ * Macros to access members of any of the above structures using their
+ * offsets
+ */
+#define read_ctx_reg(ctx, offset)	((ctx)->_regs[offset >> DWORD_SHIFT])
+#define write_ctx_reg(ctx, offset, val)	(((ctx)->_regs[offset >> DWORD_SHIFT]) \
+					 = val)
+
+/*
+ * Top-level context structure which is used by EL3 firmware to
+ * preserve the state of a core at EL1 in one of the two security
+ * states and save enough EL3 meta data to be able to return to that
+ * EL and security state. The context management library will be used
+ * to ensure that SP_EL3 always points to an instance of this
+ * structure at exception entry and exit. Each instance will
+ * correspond to either the secure or the non-secure state.
+ */
+typedef struct cpu_context {
+	gp_regs_t gpregs_ctx;
+	el3_state_t el3state_ctx;
+	el1_sys_regs_t sysregs_ctx;
+#if CTX_INCLUDE_FPREGS
+	fp_regs_t fpregs_ctx;
+#endif
+} cpu_context_t;
+
+/* Macros to access members of the 'cpu_context_t' structure */
+#define get_el3state_ctx(h)	(&((cpu_context_t *) h)->el3state_ctx)
+#if CTX_INCLUDE_FPREGS
+#define get_fpregs_ctx(h)	(&((cpu_context_t *) h)->fpregs_ctx)
+#endif
+#define get_sysregs_ctx(h)	(&((cpu_context_t *) h)->sysregs_ctx)
+#define get_gpregs_ctx(h)	(&((cpu_context_t *) h)->gpregs_ctx)
+
+/*
+ * Compile time assertions related to the 'cpu_context' structure to
+ * ensure that the assembler and the compiler view of the offsets of
+ * the structure members is the same.
+ */
+CASSERT(CTX_GPREGS_OFFSET == __builtin_offsetof(cpu_context_t, gpregs_ctx), \
+	assert_core_context_gp_offset_mismatch);
+CASSERT(CTX_SYSREGS_OFFSET == __builtin_offsetof(cpu_context_t, sysregs_ctx), \
+	assert_core_context_sys_offset_mismatch);
+#if CTX_INCLUDE_FPREGS
+CASSERT(CTX_FPREGS_OFFSET == __builtin_offsetof(cpu_context_t, fpregs_ctx), \
+	assert_core_context_fp_offset_mismatch);
+#endif
+CASSERT(CTX_EL3STATE_OFFSET == __builtin_offsetof(cpu_context_t, el3state_ctx), \
+	assert_core_context_el3state_offset_mismatch);
+
+/*
+ * Helper macro to set the general purpose registers that correspond to
+ * parameters in an aapcs_64 call i.e. x0-x7
+ */
+#define set_aapcs_args0(ctx, x0)	do {				\
+		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0, x0);	\
+	} while (0)
+#define set_aapcs_args1(ctx, x0, x1)	do {				\
+		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X1, x1);	\
+		set_aapcs_args0(ctx, x0);				\
+	} while (0)
+#define set_aapcs_args2(ctx, x0, x1, x2)	do {			\
+		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X2, x2);	\
+		set_aapcs_args1(ctx, x0, x1);				\
+	} while (0)
+#define set_aapcs_args3(ctx, x0, x1, x2, x3)	do {			\
+		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X3, x3);	\
+		set_aapcs_args2(ctx, x0, x1, x2);			\
+	} while (0)
+#define set_aapcs_args4(ctx, x0, x1, x2, x3, x4)	do {		\
+		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X4, x4);	\
+		set_aapcs_args3(ctx, x0, x1, x2, x3);			\
+	} while (0)
+#define set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5)	do {		\
+		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X5, x5);	\
+		set_aapcs_args4(ctx, x0, x1, x2, x3, x4);		\
+	} while (0)
+#define set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6)	do {	\
+		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X6, x6);	\
+		set_aapcs_args5(ctx, x0, x1, x2, x3, x4, x5);		\
+	} while (0)
+#define set_aapcs_args7(ctx, x0, x1, x2, x3, x4, x5, x6, x7)	do {	\
+		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X7, x7);	\
+		set_aapcs_args6(ctx, x0, x1, x2, x3, x4, x5, x6);	\
+	} while (0)
+
+/*******************************************************************************
+ * Function prototypes
+ ******************************************************************************/
+void el1_sysregs_context_save(el1_sys_regs_t *regs);
+void el1_sysregs_context_restore(el1_sys_regs_t *regs);
+#if CTX_INCLUDE_FPREGS
+void fpregs_context_save(fp_regs_t *regs);
+void fpregs_context_restore(fp_regs_t *regs);
+#endif
+
+
+#undef CTX_SYSREG_ALL
+#if CTX_INCLUDE_FPREGS
+#undef CTX_FPREG_ALL
+#endif
+#undef CTX_GPREG_ALL
+#undef CTX_EL3STATE_ALL
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __CONTEXT_H__ */
diff --git a/include/lib/el3_runtime/context_mgmt.h b/include/lib/el3_runtime/context_mgmt.h
new file mode 100644
index 0000000..672ea11
--- /dev/null
+++ b/include/lib/el3_runtime/context_mgmt.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CM_H__
+#define __CM_H__
+
+#include
+
+/*******************************************************************************
+ * Forward declarations
+ ******************************************************************************/
+struct entry_point_info;
+
+/*******************************************************************************
+ * Function & variable prototypes
+ ******************************************************************************/
+void cm_init(void);
+void *cm_get_context_by_mpidr(uint64_t mpidr,
+			      uint32_t security_state) __deprecated;
+void cm_set_context_by_mpidr(uint64_t mpidr,
+			     void *context,
+			     uint32_t security_state) __deprecated;
+void *cm_get_context_by_index(unsigned int cpu_idx,
+			      unsigned int security_state);
+void cm_set_context_by_index(unsigned int cpu_idx,
+			     void *context,
+			     unsigned int security_state);
+void *cm_get_context(uint32_t security_state);
+void cm_set_context(void *context, uint32_t security_state);
+void cm_init_context(uint64_t mpidr,
+		     const struct entry_point_info *ep) __deprecated;
+void cm_init_my_context(const struct entry_point_info *ep);
+void cm_init_context_by_index(unsigned int cpu_idx,
+			      const struct entry_point_info *ep);
+void cm_prepare_el3_exit(uint32_t security_state);
+void cm_el1_sysregs_context_save(uint32_t security_state);
+void cm_el1_sysregs_context_restore(uint32_t security_state);
+void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint);
+void cm_set_elr_spsr_el3(uint32_t security_state,
+			 uintptr_t entrypoint, uint32_t spsr);
+void cm_write_scr_el3_bit(uint32_t security_state,
+			  uint32_t bit_pos,
+			  uint32_t value);
+void cm_set_next_eret_context(uint32_t security_state);
+uint32_t cm_get_scr_el3(uint32_t security_state);
+
+/* Inline definitions */
+
+/*******************************************************************************
+ * This function is used to program the context that's used for exception
+ * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
+ * the required security state
+ ******************************************************************************/
+static inline void cm_set_next_context(void *context)
+{
+#if DEBUG
+	uint64_t sp_mode;
+
+	/*
+	 * Check that this function is called with SP_EL0 as the stack
+	 * pointer
+	 */
+	__asm__ volatile("mrs	%0, SPSel\n"
+			 : "=r" (sp_mode));
+
+	assert(sp_mode == MODE_SP_EL0);
+#endif
+
+	__asm__ volatile("msr	spsel, #1\n"
+			 "mov	sp, %0\n"
+			 "msr	spsel, #0\n"
+			 : : "r" (context));
+}
+#endif /* __CM_H__ */
diff --git a/include/lib/el3_runtime/cpu_data.h b/include/lib/el3_runtime/cpu_data.h
new file mode 100644
index 0000000..4fc801b
--- /dev/null
+++ b/include/lib/el3_runtime/cpu_data.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CPU_DATA_H__
+#define __CPU_DATA_H__
+
+/* Offsets for the cpu_data structure */
+#define CPU_DATA_CRASH_BUF_OFFSET	0x18
+#if CRASH_REPORTING
+#define CPU_DATA_LOG2SIZE		7
+#else
+#define CPU_DATA_LOG2SIZE		6
+#endif
+/* need enough space in crash buffer to save 8 registers */
+#define CPU_DATA_CRASH_BUF_SIZE		64
+#define CPU_DATA_CPU_OPS_PTR		0x10
+
+#ifndef __ASSEMBLY__
+
+#include
+#include
+#include
+#include
+#include
+
+/* Offsets for the cpu_data structure */
+#define CPU_DATA_PSCI_LOCK_OFFSET	__builtin_offsetof\
+		(cpu_data_t, psci_svc_cpu_data.pcpu_bakery_info)
+
+#if PLAT_PCPU_DATA_SIZE
+#define CPU_DATA_PLAT_PCPU_OFFSET	__builtin_offsetof\
+		(cpu_data_t, platform_cpu_data)
+#endif
+
+/*******************************************************************************
+ * Function & variable prototypes
+ ******************************************************************************/
+
+/*******************************************************************************
+ * Cache of frequently used per-cpu data:
+ *	Pointers to non-secure and secure security state contexts
+ *	Address of the crash stack
+ * It is aligned to the cache line boundary to allow efficient concurrent
+ * manipulation of these pointers on different cpus
+ *
+ * TODO: Add other commonly used variables to this (tf_issues#90)
+ *
+ * The data structure and the _cpu_data accessors should not be used directly
+ * by components that have per-cpu members. The member access macros should be
+ * used for this.
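+ *
+ * For example (a minimal sketch; 'psci_svc_cpu_data' is the member defined
+ * below):
+ *	aff_info_state_t aff_state;
+ *	aff_state = get_cpu_data(psci_svc_cpu_data.aff_info_state);
+ *	flush_cpu_data(psci_svc_cpu_data);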
+ ******************************************************************************/
+typedef struct cpu_data {
+	void *cpu_context[2];
+	uintptr_t cpu_ops_ptr;
+#if CRASH_REPORTING
+	u_register_t crash_buf[CPU_DATA_CRASH_BUF_SIZE >> 3];
+#endif
+	struct psci_cpu_data psci_svc_cpu_data;
+#if PLAT_PCPU_DATA_SIZE
+	uint8_t platform_cpu_data[PLAT_PCPU_DATA_SIZE];
+#endif
+} __aligned(CACHE_WRITEBACK_GRANULE) cpu_data_t;
+
+#if CRASH_REPORTING
+/* verify assembler offsets match data structures */
+CASSERT(CPU_DATA_CRASH_BUF_OFFSET == __builtin_offsetof
+	(cpu_data_t, crash_buf),
+	assert_cpu_data_crash_stack_offset_mismatch);
+#endif
+
+CASSERT((1 << CPU_DATA_LOG2SIZE) == sizeof(cpu_data_t),
+	assert_cpu_data_log2size_mismatch);
+
+CASSERT(CPU_DATA_CPU_OPS_PTR == __builtin_offsetof
+	(cpu_data_t, cpu_ops_ptr),
+	assert_cpu_data_cpu_ops_ptr_offset_mismatch);
+
+struct cpu_data *_cpu_data_by_index(uint32_t cpu_index);
+
+/* Return the cpu_data structure for the current CPU. */
+static inline struct cpu_data *_cpu_data(void)
+{
+	return (cpu_data_t *)read_tpidr_el3();
+}
+
+
+/**************************************************************************
+ * APIs for initialising and accessing per-cpu data
+ *************************************************************************/
+
+void init_cpu_data_ptr(void);
+void init_cpu_ops(void);
+
+#define get_cpu_data(_m)		_cpu_data()->_m
+#define set_cpu_data(_m, _v)		_cpu_data()->_m = _v
+#define get_cpu_data_by_index(_ix, _m)	_cpu_data_by_index(_ix)->_m
+#define set_cpu_data_by_index(_ix, _m, _v)	_cpu_data_by_index(_ix)->_m = _v
+
+#define flush_cpu_data(_m)	flush_dcache_range((uintptr_t)		\
+						   &(_cpu_data()->_m),	\
+						   sizeof(_cpu_data()->_m))
+#define inv_cpu_data(_m)	inv_dcache_range((uintptr_t)		\
+						 &(_cpu_data()->_m),	\
+						 sizeof(_cpu_data()->_m))
+#define flush_cpu_data_by_index(_ix, _m)	\
+				flush_dcache_range((uintptr_t)		\
+					&(_cpu_data_by_index(_ix)->_m),	\
+					sizeof(_cpu_data_by_index(_ix)->_m))
+
+
+#endif /* __ASSEMBLY__ */
+#endif /* __CPU_DATA_H__ */
diff --git a/include/lib/psci/psci.h b/include/lib/psci/psci.h
new file mode 100644
index 0000000..c3e9ef7
--- /dev/null
+++ b/include/lib/psci/psci.h
@@ -0,0 +1,365 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PSCI_H__
+#define __PSCI_H__
+
+#include
+#include <platform_def.h>	/* for PLAT_NUM_PWR_DOMAINS */
+#if ENABLE_PLAT_COMPAT
+#include <psci_compat.h>
+#endif
+
+/*******************************************************************************
+ * Number of power domains whose state this PSCI implementation can track
+ ******************************************************************************/
+#ifdef PLAT_NUM_PWR_DOMAINS
+#define PSCI_NUM_PWR_DOMAINS		PLAT_NUM_PWR_DOMAINS
+#else
+#define PSCI_NUM_PWR_DOMAINS		(2 * PLATFORM_CORE_COUNT)
+#endif
+
+#define PSCI_NUM_NON_CPU_PWR_DOMAINS	(PSCI_NUM_PWR_DOMAINS - \
+					 PLATFORM_CORE_COUNT)
+
+/* This is the power level corresponding to a CPU */
+#define PSCI_CPU_PWR_LVL		0
+
+/*
+ * The maximum power level supported by PSCI. Since PSCI CPU_SUSPEND
+ * uses the old power_state parameter format which has 2 bits to specify the
+ * power level, this constant is defined to be 3.
+ */
+#define PSCI_MAX_PWR_LVL		3
+
+/*******************************************************************************
+ * Defines for runtime services function ids
+ ******************************************************************************/
+#define PSCI_VERSION			0x84000000
+#define PSCI_CPU_SUSPEND_AARCH32	0x84000001
+#define PSCI_CPU_SUSPEND_AARCH64	0xc4000001
+#define PSCI_CPU_OFF			0x84000002
+#define PSCI_CPU_ON_AARCH32		0x84000003
+#define PSCI_CPU_ON_AARCH64		0xc4000003
+#define PSCI_AFFINITY_INFO_AARCH32	0x84000004
+#define PSCI_AFFINITY_INFO_AARCH64	0xc4000004
+#define PSCI_MIG_AARCH32		0x84000005
+#define PSCI_MIG_AARCH64		0xc4000005
+#define PSCI_MIG_INFO_TYPE		0x84000006
+#define PSCI_MIG_INFO_UP_CPU_AARCH32	0x84000007
+#define PSCI_MIG_INFO_UP_CPU_AARCH64	0xc4000007
+#define PSCI_SYSTEM_OFF			0x84000008
+#define PSCI_SYSTEM_RESET		0x84000009
+#define PSCI_FEATURES			0x8400000A
+#define PSCI_SYSTEM_SUSPEND_AARCH32	0x8400000E
+#define PSCI_SYSTEM_SUSPEND_AARCH64	0xc400000E
+#define PSCI_STAT_RESIDENCY_AARCH32	0x84000010
+#define PSCI_STAT_RESIDENCY_AARCH64	0xc4000010
+#define PSCI_STAT_COUNT_AARCH32		0x84000011
+#define PSCI_STAT_COUNT_AARCH64		0xc4000011
+
+/* Macro to help build the psci capabilities bitfield */
+#define define_psci_cap(x)		(1 << (x & 0x1f))
+
+/*
+ * Number of PSCI calls (above) implemented
+ */
+#if ENABLE_PSCI_STAT
+#define PSCI_NUM_CALLS			22
+#else
+#define PSCI_NUM_CALLS			18
+#endif
+
+/* The macros below are used to identify PSCI calls from the SMC function ID */
+#define PSCI_FID_MASK			0xffe0u
+#define PSCI_FID_VALUE			0u
+#define is_psci_fid(_fid) \
+	(((_fid) & PSCI_FID_MASK) == PSCI_FID_VALUE)
+
+/*******************************************************************************
+ * PSCI Migrate and friends
+ ******************************************************************************/
+#define PSCI_TOS_UP_MIG_CAP	0
+#define PSCI_TOS_NOT_UP_MIG_CAP	1
+#define PSCI_TOS_NOT_PRESENT_MP	2
+
+/*******************************************************************************
+ * PSCI CPU_SUSPEND 'power_state' parameter specific defines
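+ *
+ * In the original format the state id occupies bits [15:0], the state type
+ * bit [16] and the power level bits [25:24]; in the extended format the
+ * state id widens to bits [27:0] and the type moves to bit [30] (derived
+ * from the shifts and masks below).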
+ ******************************************************************************/
+#define PSTATE_ID_SHIFT		0
+
+#if PSCI_EXTENDED_STATE_ID
+#define PSTATE_VALID_MASK	0xB0000000
+#define PSTATE_TYPE_SHIFT	30
+#define PSTATE_ID_MASK		0xfffffff
+#else
+#define PSTATE_VALID_MASK	0xFCFE0000
+#define PSTATE_TYPE_SHIFT	16
+#define PSTATE_PWR_LVL_SHIFT	24
+#define PSTATE_ID_MASK		0xffff
+#define PSTATE_PWR_LVL_MASK	0x3
+
+#define psci_get_pstate_pwrlvl(pstate)	(((pstate) >> PSTATE_PWR_LVL_SHIFT) & \
+					PSTATE_PWR_LVL_MASK)
+#define psci_make_powerstate(state_id, type, pwrlvl) \
+		((((state_id) & PSTATE_ID_MASK) << PSTATE_ID_SHIFT) |\
+		(((type) & PSTATE_TYPE_MASK) << PSTATE_TYPE_SHIFT) |\
+		(((pwrlvl) & PSTATE_PWR_LVL_MASK) << PSTATE_PWR_LVL_SHIFT))
+#endif /* PSCI_EXTENDED_STATE_ID */
+
+#define PSTATE_TYPE_STANDBY	0x0
+#define PSTATE_TYPE_POWERDOWN	0x1
+#define PSTATE_TYPE_MASK	0x1
+
+#define psci_get_pstate_id(pstate)	(((pstate) >> PSTATE_ID_SHIFT) & \
+					PSTATE_ID_MASK)
+#define psci_get_pstate_type(pstate)	(((pstate) >> PSTATE_TYPE_SHIFT) & \
+					PSTATE_TYPE_MASK)
+#define psci_check_power_state(pstate)	((pstate) & PSTATE_VALID_MASK)
+
+/*******************************************************************************
+ * PSCI CPU_FEATURES feature flag specific defines
+ ******************************************************************************/
+/* Features flags for CPU SUSPEND power state parameter format. Bits [1:1] */
+#define FF_PSTATE_SHIFT		1
+#define FF_PSTATE_ORIG		0
+#define FF_PSTATE_EXTENDED	1
+#if PSCI_EXTENDED_STATE_ID
+#define FF_PSTATE		FF_PSTATE_EXTENDED
+#else
+#define FF_PSTATE		FF_PSTATE_ORIG
+#endif
+
+/* Features flags for CPU SUSPEND OS Initiated mode support. Bits [0:0] */
+#define FF_MODE_SUPPORT_SHIFT		0
+#define FF_SUPPORTS_OS_INIT_MODE	1
+
+/*******************************************************************************
+ * PSCI version
+ ******************************************************************************/
+#define PSCI_MAJOR_VER		(1 << 16)
+#define PSCI_MINOR_VER		0x0
+
+/*******************************************************************************
+ * PSCI error codes
+ ******************************************************************************/
+#define PSCI_E_SUCCESS		0
+#define PSCI_E_NOT_SUPPORTED	-1
+#define PSCI_E_INVALID_PARAMS	-2
+#define PSCI_E_DENIED		-3
+#define PSCI_E_ALREADY_ON	-4
+#define PSCI_E_ON_PENDING	-5
+#define PSCI_E_INTERN_FAIL	-6
+#define PSCI_E_NOT_PRESENT	-7
+#define PSCI_E_DISABLED		-8
+#define PSCI_E_INVALID_ADDRESS	-9
+
+#define PSCI_INVALID_MPIDR	~((u_register_t)0)
+
+#ifndef __ASSEMBLY__
+
+#include
+#include
+
+/*
+ * These are the states reported by the PSCI_AFFINITY_INFO API for the specified
+ * CPU. The definitions of these states can be found in Section 5.7.1 in the
+ * PSCI specification (ARM DEN 0022C).
+ */
+typedef enum {
+	AFF_STATE_ON = 0,
+	AFF_STATE_OFF = 1,
+	AFF_STATE_ON_PENDING = 2
+} aff_info_state_t;
+
+/*
+ * Macro to represent invalid affinity level within PSCI.
+ */
+#define PSCI_INVALID_PWR_LVL	(PLAT_MAX_PWR_LVL + 1)
+
+/*
+ * Type for representing the local power state at a particular level.
+ */
+typedef uint8_t plat_local_state_t;
+
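+/*
+ * For example, with PLAT_MAX_RET_STATE = 1 and PLAT_MAX_OFF_STATE = 2 (the
+ * values used by the platform compatibility layer in psci_compat.h), a local
+ * state of 1 denotes retention and 2 denotes power-off, as per the
+ * is_local_state_* tests that follow.
+ */
+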
+/* The local state macro used to represent RUN state. */
+#define PSCI_LOCAL_STATE_RUN	0
+
+/*
+ * Macro to test whether the plat_local_state is RUN state
+ */
+#define is_local_state_run(plat_local_state) \
+			((plat_local_state) == PSCI_LOCAL_STATE_RUN)
+
+/*
+ * Macro to test whether the plat_local_state is RETENTION state
+ */
+#define is_local_state_retn(plat_local_state) \
+			(((plat_local_state) > PSCI_LOCAL_STATE_RUN) && \
+			((plat_local_state) <= PLAT_MAX_RET_STATE))
+
+/*
+ * Macro to test whether the plat_local_state is OFF state
+ */
+#define is_local_state_off(plat_local_state) \
+			(((plat_local_state) > PLAT_MAX_RET_STATE) && \
+			((plat_local_state) <= PLAT_MAX_OFF_STATE))
+
+/*****************************************************************************
+ * This data structure defines the representation of the power state parameter
+ * for its exchange between the generic PSCI code and the platform port. For
+ * example, it is used by the platform port to specify the requested power
+ * states during a power management operation. It is used by the generic code
+ * to inform the platform about the target power states that each level should
+ * enter.
+ ****************************************************************************/
+typedef struct psci_power_state {
+	/*
+	 * The pwr_domain_state[] stores the local power state at each level
+	 * for the CPU.
+	 */
+	plat_local_state_t pwr_domain_state[PLAT_MAX_PWR_LVL + 1];
+} psci_power_state_t;
+
+/*******************************************************************************
+ * Structure used to store per-cpu information relevant to the PSCI service.
+ * It is populated in the per-cpu data array. In return we get a guarantee that
+ * this information will not reside on a cache line shared with another cpu.
+ ******************************************************************************/
+typedef struct psci_cpu_data {
+	/* State as seen by PSCI Affinity Info API */
+	aff_info_state_t aff_info_state;
+
+	/*
+	 * Highest power level which takes part in a power management
+	 * operation.
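+	 * For a CPU-only operation this would be PSCI_CPU_PWR_LVL (0),
+	 * for example.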
+	 */
+	unsigned char target_pwrlvl;
+
+	/* The local power state of this CPU */
+	plat_local_state_t local_state;
+} psci_cpu_data_t;
+
+/*******************************************************************************
+ * Structure populated by platform specific code to export routines which
+ * perform common low level power management functions
+ ******************************************************************************/
+typedef struct plat_psci_ops {
+	void (*cpu_standby)(plat_local_state_t cpu_state);
+	int (*pwr_domain_on)(u_register_t mpidr);
+	void (*pwr_domain_off)(const psci_power_state_t *target_state);
+	void (*pwr_domain_suspend)(const psci_power_state_t *target_state);
+	void (*pwr_domain_on_finish)(const psci_power_state_t *target_state);
+	void (*pwr_domain_suspend_finish)(
+				const psci_power_state_t *target_state);
+	void (*pwr_domain_pwr_down_wfi)(
+				const psci_power_state_t *target_state) __dead2;
+	void (*system_off)(void) __dead2;
+	void (*system_reset)(void) __dead2;
+	int (*validate_power_state)(unsigned int power_state,
+				    psci_power_state_t *req_state);
+	int (*validate_ns_entrypoint)(uintptr_t ns_entrypoint);
+	void (*get_sys_suspend_power_state)(
+				    psci_power_state_t *req_state);
+	int (*get_pwr_lvl_state_idx)(plat_local_state_t pwr_domain_state,
+				     int pwrlvl);
+	int (*translate_power_state_by_mpidr)(u_register_t mpidr,
+					      unsigned int power_state,
+					      psci_power_state_t *output_state);
+} plat_psci_ops_t;
+
+/*******************************************************************************
+ * Optional structure populated by the Secure Payload Dispatcher to be given a
+ * chance to perform any bookkeeping before PSCI executes a power management
+ * operation. It also allows PSCI to determine certain properties of the SP e.g.
+ * migrate capability etc.
+ ******************************************************************************/
+typedef struct spd_pm_ops {
+	void (*svc_on)(u_register_t target_cpu);
+	int32_t (*svc_off)(u_register_t __unused);
+	void (*svc_suspend)(u_register_t max_off_pwrlvl);
+	void (*svc_on_finish)(u_register_t __unused);
+	void (*svc_suspend_finish)(u_register_t max_off_pwrlvl);
+	int32_t (*svc_migrate)(u_register_t from_cpu, u_register_t to_cpu);
+	int32_t (*svc_migrate_info)(u_register_t *resident_cpu);
+	void (*svc_system_off)(void);
+	void (*svc_system_reset)(void);
+} spd_pm_ops_t;
+
+/*******************************************************************************
+ * Function & Data prototypes
+ ******************************************************************************/
+unsigned int psci_version(void);
+int psci_cpu_on(u_register_t target_cpu,
+		uintptr_t entrypoint,
+		u_register_t context_id);
+int psci_cpu_suspend(unsigned int power_state,
+		     uintptr_t entrypoint,
+		     u_register_t context_id);
+int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id);
+int psci_cpu_off(void);
+int psci_affinity_info(u_register_t target_affinity,
+		       unsigned int lowest_affinity_level);
+int psci_migrate(u_register_t target_cpu);
+int psci_migrate_info_type(void);
+long psci_migrate_info_up_cpu(void);
+int psci_features(unsigned int psci_fid);
+void __dead2 psci_power_down_wfi(void);
+void psci_arch_setup(void);
+
+/*
+ * The below API is deprecated. This is now replaced by bl31_warm_entrypoint
+ * in AArch64.
+ */
+void psci_entrypoint(void) __deprecated;
+
+/*******************************************************************************
+ * Forward declarations
+ ******************************************************************************/
+struct entry_point_info;
+
+/******************************************************************************
+ * PSCI Library Interfaces
+ *****************************************************************************/
+u_register_t psci_smc_handler(uint32_t smc_fid,
+			  u_register_t x1,
+			  u_register_t x2,
+			  u_register_t x3,
+			  u_register_t x4,
+			  void *cookie,
+			  void *handle,
+			  u_register_t flags);
+int psci_setup(uintptr_t mailbox_ep);
+void psci_warmboot_entrypoint(void);
+void psci_register_spd_pm_hook(const spd_pm_ops_t *pm);
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* __PSCI_H__ */
diff --git a/include/lib/psci/psci_compat.h b/include/lib/psci/psci_compat.h
new file mode 100644
index 0000000..3554667
--- /dev/null
+++ b/include/lib/psci/psci_compat.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PSCI_COMPAT_H__
+#define __PSCI_COMPAT_H__
+
+#include
+#include
+
+#ifndef __ASSEMBLY__
+/*
+ * The below declarations are to enable compatibility for the platform ports
+ * using the old platform interface and psci helpers.
+ */
+#define PLAT_MAX_PWR_LVL	PLATFORM_MAX_AFFLVL
+#define PLAT_NUM_PWR_DOMAINS	PLATFORM_NUM_AFFS
+
+/*******************************************************************************
+ * PSCI affinity related constants. An affinity instance could
+ * be present or absent physically to cater for asymmetric topologies.
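+ * For example, on a system with two clusters of unequal core counts, the
+ * core positions that do not exist in the smaller cluster are absent.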
+ ******************************************************************************/
+#define PSCI_AFF_ABSENT		0x0
+#define PSCI_AFF_PRESENT	0x1
+
+#define PSCI_STATE_ON		0x0
+#define PSCI_STATE_OFF		0x1
+#define PSCI_STATE_ON_PENDING	0x2
+#define PSCI_STATE_SUSPEND	0x3
+
+/*
+ * Using the compatibility platform interfaces means that the local states
+ * used in psci_power_state_t need only convey whether it is a power-down
+ * or a standby state. The onus is on the platform port to do the right thing
+ * including the state coordination in case multiple power down states are
+ * involved. Hence if we assume 3 generic states viz, run, standby and
+ * power down, we can assign 1 and 2 to standby and power down respectively.
+ */
+#define PLAT_MAX_RET_STATE	1
+#define PLAT_MAX_OFF_STATE	2
+
+/*
+ * Macro to represent invalid affinity level within PSCI.
+ */
+#define PSCI_INVALID_DATA	-1
+
+#define psci_get_pstate_afflvl(pstate)	psci_get_pstate_pwrlvl(pstate)
+
+/*
+ * This array stores the 'power_state' requests of each CPU during
+ * CPU_SUSPEND and SYSTEM_SUSPEND which will be populated by the
+ * compatibility layer when appropriate platform hooks are invoked.
+ */
+extern unsigned int psci_power_state_compat[PLATFORM_CORE_COUNT];
+
+/*******************************************************************************
+ * Structure populated by platform specific code to export routines which
+ * perform common low level pm functions
+ ******************************************************************************/
+typedef struct plat_pm_ops {
+	void (*affinst_standby)(unsigned int power_state);
+	int (*affinst_on)(unsigned long mpidr,
+			  unsigned long sec_entrypoint,
+			  unsigned int afflvl,
+			  unsigned int state);
+	void (*affinst_off)(unsigned int afflvl, unsigned int state);
+	void (*affinst_suspend)(unsigned long sec_entrypoint,
+				unsigned int afflvl,
+				unsigned int state);
+	void (*affinst_on_finish)(unsigned int afflvl, unsigned int state);
+	void (*affinst_suspend_finish)(unsigned int afflvl,
+				       unsigned int state);
+	void (*system_off)(void) __dead2;
+	void (*system_reset)(void) __dead2;
+	int (*validate_power_state)(unsigned int power_state);
+	int (*validate_ns_entrypoint)(unsigned long ns_entrypoint);
+	unsigned int (*get_sys_suspend_power_state)(void);
+} plat_pm_ops_t;
+
+/*******************************************************************************
+ * Function & Data prototypes to enable compatibility for older platform ports
+ ******************************************************************************/
+int psci_get_suspend_stateid_by_mpidr(unsigned long);
+int psci_get_suspend_stateid(void);
+int psci_get_suspend_powerstate(void);
+unsigned int psci_get_max_phys_off_afflvl(void);
+int psci_get_suspend_afflvl(void);
+
+#endif /* __ASSEMBLY__ */
+#endif /* __PSCI_COMPAT_H__ */
diff --git a/include/lib/smcc.h b/include/lib/smcc.h
new file mode 100644
index 0000000..c415ba1
--- /dev/null
+++ b/include/lib/smcc.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SMCC_H__
+#define __SMCC_H__
+
+/*******************************************************************************
+ * Bit definitions inside the function id as per the SMC calling convention
+ ******************************************************************************/
+#define FUNCID_TYPE_SHIFT	31
+#define FUNCID_CC_SHIFT		30
+#define FUNCID_OEN_SHIFT	24
+#define FUNCID_NUM_SHIFT	0
+
+#define FUNCID_TYPE_MASK	0x1
+#define FUNCID_CC_MASK		0x1
+#define FUNCID_OEN_MASK		0x3f
+#define FUNCID_NUM_MASK		0xffff
+
+#define FUNCID_TYPE_WIDTH	1
+#define FUNCID_CC_WIDTH		1
+#define FUNCID_OEN_WIDTH	6
+#define FUNCID_NUM_WIDTH	16
+
+#define GET_SMC_CC(id)		((id >> FUNCID_CC_SHIFT) & \
+				 FUNCID_CC_MASK)
+#define GET_SMC_TYPE(id)	((id >> FUNCID_TYPE_SHIFT) & \
+				 FUNCID_TYPE_MASK)
+
+#define SMC_64			1
+#define SMC_32			0
+#define SMC_UNK			0xffffffff
+#define SMC_TYPE_FAST		1
+#define SMC_TYPE_STD		0
+#define SMC_PREEMPTED		0xfffffffe
+/*******************************************************************************
+ * Owning entity number definitions inside the function id as per the SMC
+ * calling convention
+ ******************************************************************************/
+#define OEN_ARM_START		0
+#define OEN_ARM_END		0
+#define OEN_CPU_START		1
+#define OEN_CPU_END		1
+#define OEN_SIP_START		2
+#define OEN_SIP_END		2
+#define OEN_OEM_START		3
+#define OEN_OEM_END		3
+#define OEN_STD_START		4	/* Standard Calls */
+#define OEN_STD_END		4
+#define OEN_TAP_START		48	/* Trusted Applications */
+#define OEN_TAP_END		49
+#define OEN_TOS_START		50	/* Trusted OS */
+#define OEN_TOS_END		63
+#define OEN_LIMIT		64
+
+#ifndef __ASSEMBLY__
+
+#include <cassert.h>
+#include <uuid.h>
+
+/* Various flags passed to SMC handlers */
+#define SMC_FROM_SECURE		(0 << 0)
+#define SMC_FROM_NON_SECURE	(1 << 0)
+
+#define is_caller_non_secure(_f)	(!!(_f & SMC_FROM_NON_SECURE))
+#define is_caller_secure(_f)		(!(is_caller_non_secure(_f)))
+
+/* The macro below is used to identify a Standard Service SMC call */
+#define is_std_svc_call(_fid)		((((_fid) >> FUNCID_OEN_SHIFT) & \
+					 FUNCID_OEN_MASK) == OEN_STD_START)
+
+/* The macro below is used to identify a valid Fast SMC call */
+#define is_valid_fast_smc(_fid)		((!(((_fid) >> 16) & 0xff)) && \
+					 (GET_SMC_TYPE(_fid) == SMC_TYPE_FAST))
+
+/*
+ * Macro to define UUID for services. Apart from defining and initializing a
+ * uuid_t structure, this macro verifies that the first word of the defined UUID
+ * does not equal SMC_UNK. This is to ensure that the caller won't mistake the
+ * returned UUID in x0 for an invalid SMC error return
+ */
+#define DEFINE_SVC_UUID(_name, _tl, _tm, _th, _cl, _ch, \
+		_n0, _n1, _n2, _n3, _n4, _n5) \
+	CASSERT(_tl != SMC_UNK, invalid_svc_uuid);\
+	static const uuid_t _name = { \
+		_tl, _tm, _th, _cl, _ch, \
+		{ _n0, _n1, _n2, _n3, _n4, _n5 } \
+	}
+
+#endif /*__ASSEMBLY__*/
+#endif /* __SMCC_H__ */
diff --git a/include/lib/stdlib/machine/_stdint.h b/include/lib/stdlib/machine/_stdint.h
index e36c659..9a4f35f 100644
--- a/include/lib/stdlib/machine/_stdint.h
+++ b/include/lib/stdlib/machine/_stdint.h
@@ -30,6 +30,11 @@
  * $FreeBSD$
  */
 
+/*
+ * Portions copyright (c) 2016, ARM Limited and Contributors.
+ * All rights reserved.
+ */
+
 #ifndef _MACHINE__STDINT_H_
 #define _MACHINE__STDINT_H_
 
@@ -38,12 +43,12 @@
 #define INT8_C(c)		(c)
 #define INT16_C(c)		(c)
 #define INT32_C(c)		(c)
-#define INT64_C(c)		(c ## L)
+#define INT64_C(c)		(c ## LL)
 
 #define UINT8_C(c)		(c)
 #define UINT16_C(c)		(c)
 #define UINT32_C(c)		(c ## U)
-#define UINT64_C(c)		(c ## UL)
+#define UINT64_C(c)		(c ## ULL)
 
 #define INTMAX_C(c)		INT64_C(c)
 #define UINTMAX_C(c)		UINT64_C(c)
@@ -60,19 +65,19 @@
 #define INT8_MIN	(-0x7f-1)
 #define INT16_MIN	(-0x7fff-1)
 #define INT32_MIN	(-0x7fffffff-1)
-#define INT64_MIN	(-0x7fffffffffffffffL-1)
+#define INT64_MIN	(-0x7fffffffffffffffLL-1)
 
 /* Maximum values of exact-width signed integer types. */
 #define INT8_MAX	0x7f
 #define INT16_MAX	0x7fff
 #define INT32_MAX	0x7fffffff
-#define INT64_MAX	0x7fffffffffffffffL
+#define INT64_MAX	0x7fffffffffffffffLL
 
 /* Maximum values of exact-width unsigned integer types. */
 #define UINT8_MAX	0xff
 #define UINT16_MAX	0xffff
 #define UINT32_MAX	0xffffffffU
-#define UINT64_MAX	0xffffffffffffffffUL
+#define UINT64_MAX	0xffffffffffffffffULL
 
 /*
  * ISO/IEC 9899:1999
diff --git a/include/plat/arm/common/plat_arm.h b/include/plat/arm/common/plat_arm.h
index 06912eb..0ffdb5c 100644
--- a/include/plat/arm/common/plat_arm.h
+++ b/include/plat/arm/common/plat_arm.h
@@ -45,15 +45,15 @@
 /*
  * Utility functions common to ARM standard platforms
  */
-void arm_setup_page_tables(unsigned long total_base,
-			unsigned long total_size,
-			unsigned long code_start,
-			unsigned long code_limit,
-			unsigned long rodata_start,
-			unsigned long rodata_limit
+void arm_setup_page_tables(uintptr_t total_base,
+			size_t total_size,
+			uintptr_t code_start,
+			uintptr_t code_limit,
+			uintptr_t rodata_start,
+			uintptr_t rodata_limit
 #if USE_COHERENT_MEM
-			, unsigned long coh_start,
-			unsigned long coh_limit
+			, uintptr_t coh_start,
+			uintptr_t coh_limit
 #endif
 			);
 
diff --git a/include/plat/common/platform.h b/include/plat/common/platform.h
index 390721f..1d2a373 100644
--- a/include/plat/common/platform.h
+++ b/include/plat/common/platform.h
@@ -83,7 +83,7 @@
 /*******************************************************************************
  * Optional common functions (may be overridden)
  ******************************************************************************/
-unsigned long plat_get_my_stack(void);
+uintptr_t plat_get_my_stack(void);
 void plat_report_exception(unsigned long);
 int plat_crash_console_init(void);
 int plat_crash_console_putc(int c);
diff --git a/include/services/std_svc.h b/include/services/std_svc.h
new file mode 100644
index 0000000..0feb2ea
--- /dev/null
+++ b/include/services/std_svc.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __STD_SVC_H__
+#define __STD_SVC_H__
+
+/* SMC function IDs for Standard Service queries */
+
+#define ARM_STD_SVC_CALL_COUNT		0x8400ff00
+#define ARM_STD_SVC_UID			0x8400ff01
+/* 0x8400ff02 is reserved */
+#define ARM_STD_SVC_VERSION		0x8400ff03
+
+/* ARM Standard Service Calls version numbers */
+#define STD_SVC_VERSION_MAJOR		0x0
+#define STD_SVC_VERSION_MINOR		0x1
+
+#endif /* __STD_SVC_H__ */
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
new file mode 100644
index 0000000..7982e50
--- /dev/null
+++ b/lib/el3_runtime/aarch64/context.S
@@ -0,0 +1,405 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include + + .global el1_sysregs_context_save + .global el1_sysregs_context_restore +#if CTX_INCLUDE_FPREGS + .global fpregs_context_save + .global fpregs_context_restore +#endif + .global save_gp_registers + .global restore_gp_registers_eret + .global restore_gp_registers_callee_eret + .global el3_exit + +/* ----------------------------------------------------- + * The following function strictly follows the AArch64 + * PCS to use x9-x17 (temporary caller-saved registers) + * to save EL1 system register context. It assumes that + * 'x0' is pointing to a 'el1_sys_regs' structure where + * the register context will be saved. + * ----------------------------------------------------- + */ +func el1_sysregs_context_save + + mrs x9, spsr_el1 + mrs x10, elr_el1 + stp x9, x10, [x0, #CTX_SPSR_EL1] + + mrs x15, sctlr_el1 + mrs x16, actlr_el1 + stp x15, x16, [x0, #CTX_SCTLR_EL1] + + mrs x17, cpacr_el1 + mrs x9, csselr_el1 + stp x17, x9, [x0, #CTX_CPACR_EL1] + + mrs x10, sp_el1 + mrs x11, esr_el1 + stp x10, x11, [x0, #CTX_SP_EL1] + + mrs x12, ttbr0_el1 + mrs x13, ttbr1_el1 + stp x12, x13, [x0, #CTX_TTBR0_EL1] + + mrs x14, mair_el1 + mrs x15, amair_el1 + stp x14, x15, [x0, #CTX_MAIR_EL1] + + mrs x16, tcr_el1 + mrs x17, tpidr_el1 + stp x16, x17, [x0, #CTX_TCR_EL1] + + mrs x9, tpidr_el0 + mrs x10, tpidrro_el0 + stp x9, x10, [x0, #CTX_TPIDR_EL0] + + mrs x13, par_el1 + mrs x14, far_el1 + stp x13, x14, [x0, #CTX_PAR_EL1] + + mrs x15, afsr0_el1 + mrs x16, afsr1_el1 + stp x15, x16, [x0, #CTX_AFSR0_EL1] + + mrs x17, contextidr_el1 + mrs x9, vbar_el1 + stp x17, x9, [x0, #CTX_CONTEXTIDR_EL1] + + /* Save AArch32 system registers if the build has instructed so */ +#if CTX_INCLUDE_AARCH32_REGS + mrs x11, spsr_abt + mrs x12, spsr_und + stp x11, x12, [x0, #CTX_SPSR_ABT] + + mrs x13, spsr_irq + mrs x14, spsr_fiq + stp x13, x14, [x0, #CTX_SPSR_IRQ] + + mrs x15, dacr32_el2 + mrs x16, ifsr32_el2 + stp x15, x16, [x0, #CTX_DACR32_EL2] + + mrs x17, fpexc32_el2 + str x17, [x0, #CTX_FP_FPEXC32_EL2] +#endif + + /* Save NS timer registers if the build has instructed so */ +#if NS_TIMER_SWITCH + mrs x10, cntp_ctl_el0 + mrs x11, cntp_cval_el0 + stp x10, x11, [x0, #CTX_CNTP_CTL_EL0] + + mrs x12, cntv_ctl_el0 + mrs x13, cntv_cval_el0 + stp x12, x13, [x0, #CTX_CNTV_CTL_EL0] + + mrs x14, cntkctl_el1 + str x14, [x0, #CTX_CNTKCTL_EL1] +#endif + + ret +endfunc el1_sysregs_context_save + +/* ----------------------------------------------------- + * The following function strictly follows the AArch64 + * PCS to use x9-x17 (temporary caller-saved registers) + * to restore EL1 system register context. 
It assumes
+ * that 'x0' is pointing to a 'el1_sys_regs' structure
+ * from where the register context will be restored
+ * -----------------------------------------------------
+ */
+func el1_sysregs_context_restore
+
+	ldp	x9, x10, [x0, #CTX_SPSR_EL1]
+	msr	spsr_el1, x9
+	msr	elr_el1, x10
+
+	ldp	x15, x16, [x0, #CTX_SCTLR_EL1]
+	msr	sctlr_el1, x15
+	msr	actlr_el1, x16
+
+	ldp	x17, x9, [x0, #CTX_CPACR_EL1]
+	msr	cpacr_el1, x17
+	msr	csselr_el1, x9
+
+	ldp	x10, x11, [x0, #CTX_SP_EL1]
+	msr	sp_el1, x10
+	msr	esr_el1, x11
+
+	ldp	x12, x13, [x0, #CTX_TTBR0_EL1]
+	msr	ttbr0_el1, x12
+	msr	ttbr1_el1, x13
+
+	ldp	x14, x15, [x0, #CTX_MAIR_EL1]
+	msr	mair_el1, x14
+	msr	amair_el1, x15
+
+	ldp	x16, x17, [x0, #CTX_TCR_EL1]
+	msr	tcr_el1, x16
+	msr	tpidr_el1, x17
+
+	ldp	x9, x10, [x0, #CTX_TPIDR_EL0]
+	msr	tpidr_el0, x9
+	msr	tpidrro_el0, x10
+
+	ldp	x13, x14, [x0, #CTX_PAR_EL1]
+	msr	par_el1, x13
+	msr	far_el1, x14
+
+	ldp	x15, x16, [x0, #CTX_AFSR0_EL1]
+	msr	afsr0_el1, x15
+	msr	afsr1_el1, x16
+
+	ldp	x17, x9, [x0, #CTX_CONTEXTIDR_EL1]
+	msr	contextidr_el1, x17
+	msr	vbar_el1, x9
+
+	/* Restore AArch32 system registers if the build has instructed so */
+#if CTX_INCLUDE_AARCH32_REGS
+	ldp	x11, x12, [x0, #CTX_SPSR_ABT]
+	msr	spsr_abt, x11
+	msr	spsr_und, x12
+
+	ldp	x13, x14, [x0, #CTX_SPSR_IRQ]
+	msr	spsr_irq, x13
+	msr	spsr_fiq, x14
+
+	ldp	x15, x16, [x0, #CTX_DACR32_EL2]
+	msr	dacr32_el2, x15
+	msr	ifsr32_el2, x16
+
+	ldr	x17, [x0, #CTX_FP_FPEXC32_EL2]
+	msr	fpexc32_el2, x17
+#endif
+	/* Restore NS timer registers if the build has instructed so */
+#if NS_TIMER_SWITCH
+	ldp	x10, x11, [x0, #CTX_CNTP_CTL_EL0]
+	msr	cntp_ctl_el0, x10
+	msr	cntp_cval_el0, x11
+
+	ldp	x12, x13, [x0, #CTX_CNTV_CTL_EL0]
+	msr	cntv_ctl_el0, x12
+	msr	cntv_cval_el0, x13
+
+	ldr	x14, [x0, #CTX_CNTKCTL_EL1]
+	msr	cntkctl_el1, x14
+#endif
+
+	/* No explicit ISB required here as ERET covers it */
+	ret
+endfunc el1_sysregs_context_restore
+
+/* -----------------------------------------------------
+ * The following function follows the aapcs_64 strictly
+ * to use x9-x17 (temporary caller-saved registers
+ * according to AArch64 PCS) to save floating point
+ * register context. It assumes that 'x0' is pointing to
+ * a 'fp_regs' structure where the register context will
+ * be saved.
+ *
+ * Access to VFP registers will trap if CPTR_EL3.TFP is
+ * set. However currently we don't use VFP registers
+ * nor set traps in Trusted Firmware, and assume it's
+ * cleared
+ *
+ * TODO: Revisit when VFP is used in secure world
+ * -----------------------------------------------------
+ */
+#if CTX_INCLUDE_FPREGS
+func fpregs_context_save
+	stp	q0, q1, [x0, #CTX_FP_Q0]
+	stp	q2, q3, [x0, #CTX_FP_Q2]
+	stp	q4, q5, [x0, #CTX_FP_Q4]
+	stp	q6, q7, [x0, #CTX_FP_Q6]
+	stp	q8, q9, [x0, #CTX_FP_Q8]
+	stp	q10, q11, [x0, #CTX_FP_Q10]
+	stp	q12, q13, [x0, #CTX_FP_Q12]
+	stp	q14, q15, [x0, #CTX_FP_Q14]
+	stp	q16, q17, [x0, #CTX_FP_Q16]
+	stp	q18, q19, [x0, #CTX_FP_Q18]
+	stp	q20, q21, [x0, #CTX_FP_Q20]
+	stp	q22, q23, [x0, #CTX_FP_Q22]
+	stp	q24, q25, [x0, #CTX_FP_Q24]
+	stp	q26, q27, [x0, #CTX_FP_Q26]
+	stp	q28, q29, [x0, #CTX_FP_Q28]
+	stp	q30, q31, [x0, #CTX_FP_Q30]
+
+	mrs	x9, fpsr
+	str	x9, [x0, #CTX_FP_FPSR]
+
+	mrs	x10, fpcr
+	str	x10, [x0, #CTX_FP_FPCR]
+
+	ret
+endfunc fpregs_context_save
+
+/* -----------------------------------------------------
+ * The following function follows the aapcs_64 strictly
+ * to use x9-x17 (temporary caller-saved registers
+ * according to AArch64 PCS) to restore floating point
+ * register context.
It assumes that 'x0' is pointing to
+ * a 'fp_regs' structure from where the register context
+ * will be restored.
+ *
+ * Access to VFP registers will trap if CPTR_EL3.TFP is
+ * set. However currently we don't use VFP registers
+ * nor set traps in Trusted Firmware, and assume it's
+ * cleared
+ *
+ * TODO: Revisit when VFP is used in secure world
+ * -----------------------------------------------------
+ */
+func fpregs_context_restore
+	ldp	q0, q1, [x0, #CTX_FP_Q0]
+	ldp	q2, q3, [x0, #CTX_FP_Q2]
+	ldp	q4, q5, [x0, #CTX_FP_Q4]
+	ldp	q6, q7, [x0, #CTX_FP_Q6]
+	ldp	q8, q9, [x0, #CTX_FP_Q8]
+	ldp	q10, q11, [x0, #CTX_FP_Q10]
+	ldp	q12, q13, [x0, #CTX_FP_Q12]
+	ldp	q14, q15, [x0, #CTX_FP_Q14]
+	ldp	q16, q17, [x0, #CTX_FP_Q16]
+	ldp	q18, q19, [x0, #CTX_FP_Q18]
+	ldp	q20, q21, [x0, #CTX_FP_Q20]
+	ldp	q22, q23, [x0, #CTX_FP_Q22]
+	ldp	q24, q25, [x0, #CTX_FP_Q24]
+	ldp	q26, q27, [x0, #CTX_FP_Q26]
+	ldp	q28, q29, [x0, #CTX_FP_Q28]
+	ldp	q30, q31, [x0, #CTX_FP_Q30]
+
+	ldr	x9, [x0, #CTX_FP_FPSR]
+	msr	fpsr, x9
+
+	ldr	x10, [x0, #CTX_FP_FPCR]
+	msr	fpcr, x10
+
+	/*
+	 * No explicit ISB required here as ERET to
+	 * switch to secure EL1 or non-secure world
+	 * covers it
+	 */
+
+	ret
+endfunc fpregs_context_restore
+#endif /* CTX_INCLUDE_FPREGS */
+
+/* -----------------------------------------------------
+ * The following functions are used to save and restore
+ * all the general purpose registers. Ideally we would
+ * only save and restore the callee saved registers when
+ * a world switch occurs but that type of implementation
+ * is more complex. So currently we will always save and
+ * restore these registers on entry and exit of EL3.
+ * These are not macros to ensure their invocation fits
+ * within the 32 instructions per exception vector.
+ * clobbers: x18
+ * -----------------------------------------------------
+ */
+func save_gp_registers
+	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+	stp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+	stp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+	stp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+	stp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+	stp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+	stp	x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16]
+	stp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+	stp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+	stp	x22, x23, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X22]
+	stp	x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24]
+	stp	x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26]
+	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
+	mrs	x18, sp_el0
+	str	x18, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_SP_EL0]
+	ret
+endfunc save_gp_registers
+
+func restore_gp_registers_eret
+	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
+	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
+	b	restore_gp_registers_callee_eret
+endfunc restore_gp_registers_eret
+
+func restore_gp_registers_callee_eret
+	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
+	ldp	x6, x7, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X6]
+	ldp	x8, x9, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X8]
+	ldp	x10, x11, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X10]
+	ldp	x12, x13, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X12]
+	ldp	x14, x15, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X14]
+	ldp	x18, x19, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X18]
+	ldp	x20, x21, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X20]
+	ldp	x22, x23, [sp,
#CTX_GPREGS_OFFSET + CTX_GPREG_X22] + ldp x24, x25, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X24] + ldp x26, x27, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X26] + ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28] + ldp x30, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR] + msr sp_el0, x17 + ldp x16, x17, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X16] + eret +endfunc restore_gp_registers_callee_eret + + /* ----------------------------------------------------- + * This routine assumes that the SP_EL3 is pointing to + * a valid context structure from where the gp regs and + * other special registers can be retrieved. + * ----------------------------------------------------- + */ +func el3_exit + /* ----------------------------------------------------- + * Save the current SP_EL0 i.e. the EL3 runtime stack + * which will be used for handling the next SMC. Then + * switch to SP_EL3 + * ----------------------------------------------------- + */ + mov x17, sp + msr spsel, #1 + str x17, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP] + + /* ----------------------------------------------------- + * Restore SPSR_EL3, ELR_EL3 and SCR_EL3 prior to ERET + * ----------------------------------------------------- + */ + ldr x18, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3] + ldp x16, x17, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3] + msr scr_el3, x18 + msr spsr_el3, x16 + msr elr_el3, x17 + + /* Restore saved general purpose registers and return */ + b restore_gp_registers_eret +endfunc el3_exit diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c new file mode 100644 index 0000000..4527aa3 --- /dev/null +++ b/lib/el3_runtime/aarch64/context_mgmt.c @@ -0,0 +1,383 @@ +/* + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/******************************************************************************* + * Context management library initialisation routine. 
This library is used by
+ * runtime services to share pointers to 'cpu_context' structures for the secure
+ * and non-secure states. Management of the structures and their associated
+ * memory is not done by the context management library e.g. the PSCI service
+ * manages the cpu context used for entry from and exit to the non-secure state.
+ * The Secure payload dispatcher service manages the context(s) corresponding to
+ * the secure state. It also uses this library to get access to the non-secure
+ * state cpu context pointers.
+ * Lastly, this library provides the API to make SP_EL3 point to the cpu context
+ * which will be used for programming an entry into a lower EL. The same context
+ * will be used to save state upon exception entry from that EL.
+ ******************************************************************************/
+void cm_init(void)
+{
+	/*
+	 * The context management library has only global data to initialize, but
+	 * that will be done when the BSS is zeroed out
+	 */
+}
+
+/*******************************************************************************
+ * The following function initializes the cpu_context 'ctx' for
+ * first use, and sets the initial entrypoint state as specified by the
+ * entry_point_info structure.
+ *
+ * The security state to initialize is determined by the SECURE attribute
+ * of the entry_point_info. The function returns a pointer to the initialized
+ * context and sets this as the next context to return to.
+ *
+ * The EE and ST attributes are used to configure the endianness and secure
+ * timer availability for the new execution context.
+ *
+ * To prepare the register state for entry call cm_prepare_el3_exit() and
+ * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
+ * cm_el1_sysregs_context_restore().
+ ******************************************************************************/
+static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
+{
+	unsigned int security_state;
+	uint32_t scr_el3;
+	el3_state_t *state;
+	gp_regs_t *gp_regs;
+	unsigned long sctlr_elx;
+
+	assert(ctx);
+
+	security_state = GET_SECURITY_STATE(ep->h.attr);
+
+	/* Clear any residual register values from the context */
+	memset(ctx, 0, sizeof(*ctx));
+
+	/*
+	 * Base the context SCR on the current value, adjust for entry point
+	 * specific requirements and set trap bits from the IMF
+	 * TODO: provide the base/global SCR bits using another mechanism?
+	 */
+	scr_el3 = read_scr();
+	scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT |
+			SCR_ST_BIT | SCR_HCE_BIT);
+
+	if (security_state != SECURE)
+		scr_el3 |= SCR_NS_BIT;
+
+	if (GET_RW(ep->spsr) == MODE_RW_64)
+		scr_el3 |= SCR_RW_BIT;
+
+	if (EP_GET_ST(ep->h.attr))
+		scr_el3 |= SCR_ST_BIT;
+
+#ifndef HANDLE_EA_EL3_FIRST
+	/* Explicitly stop trapping aborts from lower exception levels. */
+	scr_el3 &= ~SCR_EA_BIT;
+#endif
+
+#if IMAGE_BL31
+	/*
+	 * IRQ/FIQ bits only need setting if interrupt routing
+	 * model has been set up for BL31.
+	 */
+	scr_el3 |= get_scr_el3_from_routing_model(security_state);
+#endif
+
+	/*
+	 * Set up SCTLR_ELx for the target exception level:
+	 * EE bit is taken from the entrypoint attributes
+	 * M, C and I bits must be zero (as required by PSCI specification)
+	 *
+	 * The target exception level is based on the spsr mode requested.
+	 * If execution is requested to EL2 or hyp mode, HVC is enabled
+	 * via SCR_EL3.HCE.
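
For a concrete reading of the SCR_EL3 derivation above, here is a sketch of
the bits one would expect for a 64-bit non-secure entry to EL2, assuming no
IRQ/FIQ routing to EL3 has been registered (illustrative only, not part of
the patch):

    scr_el3 = (read_scr() & ~(SCR_NS_BIT | SCR_RW_BIT | SCR_FIQ_BIT |
                              SCR_IRQ_BIT | SCR_ST_BIT | SCR_HCE_BIT))
              | SCR_NS_BIT		/* security_state != SECURE */
              | SCR_RW_BIT		/* GET_RW(ep->spsr) == MODE_RW_64 */
              | SCR_HCE_BIT;	/* spsr requests EL2, so HVC is enabled */
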
+ *
+ * Always compute the SCTLR_EL1 value and save in the cpu_context
+ * - the EL2 registers are set up by cm_prepare_el3_exit() as they
+ * are not part of the stored cpu_context
+ *
+ * TODO: In debug builds the spsr should be validated and checked
+ * against the CPU support, security state, endianness and pc
+ */
+	sctlr_elx = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
+	if (GET_RW(ep->spsr) == MODE_RW_64)
+		sctlr_elx |= SCTLR_EL1_RES1;
+	else
+		sctlr_elx |= SCTLR_AARCH32_EL1_RES1;
+	write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);
+
+	if ((GET_RW(ep->spsr) == MODE_RW_64
+	     && GET_EL(ep->spsr) == MODE_EL2)
+	    || (GET_RW(ep->spsr) != MODE_RW_64
+		&& GET_M32(ep->spsr) == MODE32_hyp)) {
+		scr_el3 |= SCR_HCE_BIT;
+	}
+
+	/* Populate EL3 state so that we have the right context before doing ERET */
+	state = get_el3state_ctx(ctx);
+	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
+	write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
+	write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr);
+
+	/*
+	 * Store the X0-X7 values from the entrypoint into the context
+	 * Use memcpy as we are in control of the layout of the structures
+	 */
+	gp_regs = get_gpregs_ctx(ctx);
+	memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
+}
+
+/*******************************************************************************
+ * The following function initializes the cpu_context for a CPU specified by
+ * its `cpu_idx` for first use, and sets the initial entrypoint state as
+ * specified by the entry_point_info structure.
+ ******************************************************************************/
+void cm_init_context_by_index(unsigned int cpu_idx,
+			      const entry_point_info_t *ep)
+{
+	cpu_context_t *ctx;
+	ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
+	cm_init_context_common(ctx, ep);
+}
+
+/*******************************************************************************
+ * The following function initializes the cpu_context for the current CPU
+ * for first use, and sets the initial entrypoint state as specified by the
+ * entry_point_info structure.
+ ******************************************************************************/
+void cm_init_my_context(const entry_point_info_t *ep)
+{
+	cpu_context_t *ctx;
+	ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
+	cm_init_context_common(ctx, ep);
+}
+
+/*******************************************************************************
+ * Prepare the CPU system registers for first entry into secure or normal world
+ *
+ * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized
+ * If execution is requested to non-secure EL1 or svc mode, and the CPU supports
+ * EL2 then EL2 is disabled by configuring all necessary EL2 registers.
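
A hypothetical caller would pair these initialization functions with an
entry_point_info_t describing the image to run. For example, to program a
64-bit non-secure EL2 entry on the current CPU (NS_IMAGE_PC is a placeholder
address; sketch only, not part of the patch):

    entry_point_info_t ep;

    SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, NON_SECURE | EP_ST_DISABLE);
    ep.pc = NS_IMAGE_PC;	/* hypothetical entry address */
    ep.spsr = SPSR_64(MODE_EL2, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
    memset(&ep.args, 0, sizeof(ep.args));

    cm_init_my_context(&ep);
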
+ * For all entries, the EL1 registers are initialized from the cpu_context
+ ******************************************************************************/
+void cm_prepare_el3_exit(uint32_t security_state)
+{
+	uint32_t sctlr_elx, scr_el3, cptr_el2;
+	cpu_context_t *ctx = cm_get_context(security_state);
+
+	assert(ctx);
+
+	if (security_state == NON_SECURE) {
+		scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);
+		if (scr_el3 & SCR_HCE_BIT) {
+			/* Use SCTLR_EL1.EE value to initialise sctlr_el2 */
+			sctlr_elx = read_ctx_reg(get_sysregs_ctx(ctx),
+						 CTX_SCTLR_EL1);
+			sctlr_elx &= ~SCTLR_EE_BIT;
+			sctlr_elx |= SCTLR_EL2_RES1;
+			write_sctlr_el2(sctlr_elx);
+		} else if (read_id_aa64pfr0_el1() &
+			   (ID_AA64PFR0_ELX_MASK << ID_AA64PFR0_EL2_SHIFT)) {
+			/* EL2 present but unused, need to disable safely */
+
+			/* HCR_EL2 = 0, except RW bit set to match SCR_EL3 */
+			write_hcr_el2((scr_el3 & SCR_RW_BIT) ? HCR_RW_BIT : 0);
+
+			/* SCTLR_EL2 : can be ignored when bypassing */
+
+			/* CPTR_EL2 : disable all traps TCPAC, TTA, TFP */
+			cptr_el2 = read_cptr_el2();
+			cptr_el2 &= ~(TCPAC_BIT | TTA_BIT | TFP_BIT);
+			write_cptr_el2(cptr_el2);
+
+			/* Enable EL1 access to timer */
+			write_cnthctl_el2(EL1PCEN_BIT | EL1PCTEN_BIT);
+
+			/* Reset CNTVOFF_EL2 */
+			write_cntvoff_el2(0);
+
+			/* Set VPIDR, VMPIDR to match MIDR, MPIDR */
+			write_vpidr_el2(read_midr_el1());
+			write_vmpidr_el2(read_mpidr_el1());
+
+			/*
+			 * Reset VTTBR_EL2.
+			 * Needed because cache maintenance operations depend on
+			 * the VMID even when non-secure EL1&0 stage 2 address
+			 * translations are disabled.
+			 */
+			write_vttbr_el2(0);
+		}
+	}
+
+	el1_sysregs_context_restore(get_sysregs_ctx(ctx));
+
+	cm_set_next_context(ctx);
+}
+
+/*******************************************************************************
+ * The next two functions are used by runtime services to save and restore
+ * EL1 context on the 'cpu_context' structure for the specified security
+ * state.
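
As a usage sketch (not part of the patch), a secure payload dispatcher in the
TSPD style would combine these helpers on every world switch, saving the
outgoing world's EL1 state, restoring the incoming one, and then programming
SP_EL3 for the ERET:

    cm_el1_sysregs_context_save(SECURE);
    cm_el1_sysregs_context_restore(NON_SECURE);
    cm_set_next_eret_context(NON_SECURE);
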
+ ******************************************************************************/ +void cm_el1_sysregs_context_save(uint32_t security_state) +{ + cpu_context_t *ctx; + + ctx = cm_get_context(security_state); + assert(ctx); + + el1_sysregs_context_save(get_sysregs_ctx(ctx)); +} + +void cm_el1_sysregs_context_restore(uint32_t security_state) +{ + cpu_context_t *ctx; + + ctx = cm_get_context(security_state); + assert(ctx); + + el1_sysregs_context_restore(get_sysregs_ctx(ctx)); +} + +/******************************************************************************* + * This function populates ELR_EL3 member of 'cpu_context' pertaining to the + * given security state with the given entrypoint + ******************************************************************************/ +void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint) +{ + cpu_context_t *ctx; + el3_state_t *state; + + ctx = cm_get_context(security_state); + assert(ctx); + + /* Populate EL3 state so that ERET jumps to the correct entry */ + state = get_el3state_ctx(ctx); + write_ctx_reg(state, CTX_ELR_EL3, entrypoint); +} + +/******************************************************************************* + * This function populates ELR_EL3 and SPSR_EL3 members of 'cpu_context' + * pertaining to the given security state + ******************************************************************************/ +void cm_set_elr_spsr_el3(uint32_t security_state, + uintptr_t entrypoint, uint32_t spsr) +{ + cpu_context_t *ctx; + el3_state_t *state; + + ctx = cm_get_context(security_state); + assert(ctx); + + /* Populate EL3 state so that ERET jumps to the correct entry */ + state = get_el3state_ctx(ctx); + write_ctx_reg(state, CTX_ELR_EL3, entrypoint); + write_ctx_reg(state, CTX_SPSR_EL3, spsr); +} + +/******************************************************************************* + * This function updates a single bit in the SCR_EL3 member of the 'cpu_context' + * pertaining to the given security state using the value and bit position + * specified in the parameters. It preserves all other bits. + ******************************************************************************/ +void cm_write_scr_el3_bit(uint32_t security_state, + uint32_t bit_pos, + uint32_t value) +{ + cpu_context_t *ctx; + el3_state_t *state; + uint32_t scr_el3; + + ctx = cm_get_context(security_state); + assert(ctx); + + /* Ensure that the bit position is a valid one */ + assert((1 << bit_pos) & SCR_VALID_BIT_MASK); + + /* Ensure that the 'value' is only a bit wide */ + assert(value <= 1); + + /* + * Get the SCR_EL3 value from the cpu context, clear the desired bit + * and set it to its new value. + */ + state = get_el3state_ctx(ctx); + scr_el3 = read_ctx_reg(state, CTX_SCR_EL3); + scr_el3 &= ~(1 << bit_pos); + scr_el3 |= value << bit_pos; + write_ctx_reg(state, CTX_SCR_EL3, scr_el3); +} + +/******************************************************************************* + * This function retrieves SCR_EL3 member of 'cpu_context' pertaining to the + * given security state. 
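
For illustration of cm_write_scr_el3_bit() above (hypothetical values, not
part of the patch), an interrupt management framework could route non-secure
IRQs to EL3, assuming SCR_EL3.IRQ sits at bit position 1:

    #define SCR_IRQ_SHIFT	1	/* assumption for this sketch */

    cm_write_scr_el3_bit(NON_SECURE, SCR_IRQ_SHIFT, 1);
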
+ ******************************************************************************/
+uint32_t cm_get_scr_el3(uint32_t security_state)
+{
+	cpu_context_t *ctx;
+	el3_state_t *state;
+
+	ctx = cm_get_context(security_state);
+	assert(ctx);
+
+	/* Get the SCR_EL3 value from the cpu context */
+	state = get_el3state_ctx(ctx);
+	return read_ctx_reg(state, CTX_SCR_EL3);
+}
+
+/*******************************************************************************
+ * This function is used to program the context that's used for exception
+ * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
+ * the required security state
+ ******************************************************************************/
+void cm_set_next_eret_context(uint32_t security_state)
+{
+	cpu_context_t *ctx;
+
+	ctx = cm_get_context(security_state);
+	assert(ctx);
+
+	cm_set_next_context(ctx);
+}
diff --git a/lib/el3_runtime/aarch64/cpu_data.S b/lib/el3_runtime/aarch64/cpu_data.S
new file mode 100644
index 0000000..2cc07ba
--- /dev/null
+++ b/lib/el3_runtime/aarch64/cpu_data.S
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+
+.globl	init_cpu_data_ptr
+.globl	_cpu_data_by_index
+
+/* -----------------------------------------------------------------
+ * void init_cpu_data_ptr(void)
+ *
+ * Initialise the TPIDR_EL3 register to refer to the cpu_data_t
+ * for the calling CPU. This must be called before cm_get_cpu_data()
+ *
+ * This can be called without a valid stack. It assumes that
+ * plat_my_core_pos() does not clobber register x10.
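
Once TPIDR_EL3 is initialised here, the C accessors layered on top of it in
cpu_data.h can resolve this CPU's slot without locking. A sketch of the idea,
assuming the _cpu_data() helper reads TPIDR_EL3 back (illustrative, not part
of the patch):

    cpu_data_t *cd = _cpu_data();		/* this CPU's percpu_data[] entry */
    cd->cpu_context[NON_SECURE] = ctx;	/* e.g. publish a context pointer */
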
+ * clobbers: x0, x1, x10 + * ----------------------------------------------------------------- + */ +func init_cpu_data_ptr + mov x10, x30 + bl plat_my_core_pos + bl _cpu_data_by_index + msr tpidr_el3, x0 + ret x10 +endfunc init_cpu_data_ptr + +/* ----------------------------------------------------------------- + * cpu_data_t *_cpu_data_by_index(uint32_t cpu_index) + * + * Return the cpu_data structure for the CPU with given linear index + * + * This can be called without a valid stack. + * clobbers: x0, x1 + * ----------------------------------------------------------------- + */ +func _cpu_data_by_index + adr x1, percpu_data + add x0, x1, x0, LSL #CPU_DATA_LOG2SIZE + ret +endfunc _cpu_data_by_index diff --git a/lib/el3_runtime/cpu_data_array.c b/lib/el3_runtime/cpu_data_array.c new file mode 100644 index 0000000..eba21a5 --- /dev/null +++ b/lib/el3_runtime/cpu_data_array.c @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include + +/* The per_cpu_ptr_cache_t space allocation */ +cpu_data_t percpu_data[PLATFORM_CORE_COUNT]; diff --git a/lib/locks/bakery/bakery_lock_normal.c b/lib/locks/bakery/bakery_lock_normal.c index 45b870b..efc1b57 100644 --- a/lib/locks/bakery/bakery_lock_normal.c +++ b/lib/locks/bakery/bakery_lock_normal.c @@ -82,13 +82,13 @@ #define write_cache_op(addr, cached) \ do { \ - (cached ? dccvac((uint64_t)addr) :\ - dcivac((uint64_t)addr));\ + (cached ? dccvac((uintptr_t)addr) :\ + dcivac((uintptr_t)addr));\ dsbish();\ } while (0) #define read_cache_op(addr, cached) if (cached) \ - dccivac((uint64_t)addr) + dccivac((uintptr_t)addr) static unsigned int bakery_get_ticket(bakery_lock_t *lock, unsigned int me, int is_cached) diff --git a/lib/psci/aarch64/psci_helpers.S b/lib/psci/aarch64/psci_helpers.S new file mode 100644 index 0000000..ff250a0 --- /dev/null +++ b/lib/psci/aarch64/psci_helpers.S @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+#include
+
+	.globl	psci_do_pwrdown_cache_maintenance
+	.globl	psci_do_pwrup_cache_maintenance
+	.globl	psci_power_down_wfi
+#if !ERROR_DEPRECATED
+	.globl	psci_entrypoint
+#endif
+
+/* -----------------------------------------------------------------------
+ * void psci_do_pwrdown_cache_maintenance(unsigned int power_level);
+ *
+ * This function performs cache maintenance for the specified power
+ * level. The levels of cache affected are determined by the power
+ * level which is passed as the argument i.e. level 0 results
+ * in a flush of the L1 cache. Both the L1 and L2 caches are flushed
+ * for a higher power level.
+ *
+ * Additionally, this function also ensures that stack memory is correctly
+ * flushed out to avoid coherency issues due to a change in its memory
+ * attributes after the data cache is disabled.
+ * -----------------------------------------------------------------------
+ */
+func psci_do_pwrdown_cache_maintenance
+	stp	x29, x30, [sp,#-16]!
+	stp	x19, x20, [sp,#-16]!
+
+	/* ---------------------------------------------
+	 * Determine how many levels of cache will be
+	 * subject to cache maintenance. Power level
+	 * 0 implies that only the cpu is being powered
+	 * down. Only the L1 data cache needs to be
+	 * flushed to the PoU in this case. For a higher
+	 * power level we are assuming that a flush
+	 * of L1 data and L2 unified cache is enough.
+	 * This information should be provided by the
+	 * platform.
+	 * ---------------------------------------------
+	 */
+	cmp	w0, #PSCI_CPU_PWR_LVL
+	b.eq	do_core_pwr_dwn
+	bl	prepare_cluster_pwr_dwn
+	b	do_stack_maintenance
+
+do_core_pwr_dwn:
+	bl	prepare_core_pwr_dwn
+
+	/* ---------------------------------------------
+	 * Do stack maintenance by flushing the used
+	 * stack to the main memory and invalidating the
+	 * remainder.
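
On the C side (sketch only, not part of the patch), the suspend path would
invoke this routine with the deepest level that is actually losing power,
which can be derived from the coordinated state_info:

    unsigned int max_off_lvl = psci_find_max_off_lvl(state_info);

    if (max_off_lvl != PSCI_INVALID_PWR_LVL)
        psci_do_pwrdown_cache_maintenance(max_off_lvl);
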
+ * --------------------------------------------- + */ +do_stack_maintenance: + bl plat_get_my_stack + + /* --------------------------------------------- + * Calculate and store the size of the used + * stack memory in x1. + * --------------------------------------------- + */ + mov x19, x0 + mov x1, sp + sub x1, x0, x1 + mov x0, sp + bl flush_dcache_range + + /* --------------------------------------------- + * Calculate and store the size of the unused + * stack memory in x1. Calculate and store the + * stack base address in x0. + * --------------------------------------------- + */ + sub x0, x19, #PLATFORM_STACK_SIZE + sub x1, sp, x0 + bl inv_dcache_range + + ldp x19, x20, [sp], #16 + ldp x29, x30, [sp], #16 + ret +endfunc psci_do_pwrdown_cache_maintenance + + +/* ----------------------------------------------------------------------- + * void psci_do_pwrup_cache_maintenance(void); + * + * This function performs cache maintenance after this cpu is powered up. + * Currently, this involves managing the used stack memory before turning + * on the data cache. + * ----------------------------------------------------------------------- + */ +func psci_do_pwrup_cache_maintenance + stp x29, x30, [sp,#-16]! + + /* --------------------------------------------- + * Ensure any inflight stack writes have made it + * to main memory. + * --------------------------------------------- + */ + dmb st + + /* --------------------------------------------- + * Calculate and store the size of the used + * stack memory in x1. Calculate and store the + * stack base address in x0. + * --------------------------------------------- + */ + bl plat_get_my_stack + mov x1, sp + sub x1, x0, x1 + mov x0, sp + bl inv_dcache_range + + /* --------------------------------------------- + * Enable the data cache. + * --------------------------------------------- + */ + mrs x0, sctlr_el3 + orr x0, x0, #SCTLR_C_BIT + msr sctlr_el3, x0 + isb + + ldp x29, x30, [sp], #16 + ret +endfunc psci_do_pwrup_cache_maintenance + +/* ----------------------------------------------------------------------- + * void psci_power_down_wfi(void); + * This function is called to indicate to the power controller that it + * is safe to power down this cpu. It should not exit the wfi and will + * be released from reset upon power up. + * ----------------------------------------------------------------------- + */ +func psci_power_down_wfi + dsb sy // ensure write buffer empty + wfi + bl plat_panic_handler +endfunc psci_power_down_wfi + +/* ----------------------------------------------------------------------- + * void psci_entrypoint(void); + * The deprecated entry point for PSCI on warm boot for AArch64. + * ----------------------------------------------------------------------- + */ +func_deprecated psci_entrypoint + b bl31_warm_entrypoint +endfunc_deprecated psci_entrypoint diff --git a/lib/psci/psci_common.c b/lib/psci/psci_common.c new file mode 100644 index 0000000..c6fea5b --- /dev/null +++ b/lib/psci/psci_common.c @@ -0,0 +1,928 @@ +/* + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. 
+ * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "psci_private.h" + +/* + * SPD power management operations, expected to be supplied by the registered + * SPD on successful SP initialization + */ +const spd_pm_ops_t *psci_spd_pm; + +/* + * PSCI requested local power state map. This array is used to store the local + * power states requested by a CPU for power levels from level 1 to + * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power + * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a + * CPU are the same. + * + * During state coordination, the platform is passed an array containing the + * local states requested for a particular non cpu power domain by each cpu + * within the domain. + * + * TODO: Dense packing of the requested states will cause cache thrashing + * when multiple power domains write to it. If we allocate the requested + * states at each power level in a cache-line aligned per-domain memory, + * the cache thrashing can be avoided. + */ +static plat_local_state_t + psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT]; + + +/******************************************************************************* + * Arrays that hold the platform's power domain tree information for state + * management of power domains. + * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain + * which is an ancestor of a CPU power domain. + * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain + ******************************************************************************/ +non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS] +#if USE_COHERENT_MEM +__section("tzfw_coherent_mem") +#endif +; + +DEFINE_BAKERY_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]); + +cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT]; + +/******************************************************************************* + * Pointer to functions exported by the platform to complete power mgmt. 
ops
+ ******************************************************************************/
+const plat_psci_ops_t *psci_plat_pm_ops;
+
+/******************************************************************************
+ * Check that the maximum power level supported by the platform makes sense
+ *****************************************************************************/
+CASSERT(PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL && \
+	PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL, \
+	assert_platform_max_pwrlvl_check);
+
+/*
+ * The plat_local_state used by the platform is one of these types: RUN,
+ * RETENTION and OFF. The platform can define further sub-states for each type
+ * apart from RUN. This categorization is done to verify the sanity of the
+ * psci_power_state passed by the platform and to print debug information. The
+ * categorization is done on the basis of the following conditions:
+ *
+ * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN.
+ *
+ * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is
+ *    STATE_TYPE_RETN.
+ *
+ * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is
+ *    STATE_TYPE_OFF.
+ */
+typedef enum plat_local_state_type {
+	STATE_TYPE_RUN = 0,
+	STATE_TYPE_RETN,
+	STATE_TYPE_OFF
+} plat_local_state_type_t;
+
+/* The macro used to categorize plat_local_state. */
+#define find_local_state_type(plat_local_state)	\
+		((plat_local_state) ? ((plat_local_state > PLAT_MAX_RET_STATE) \
+		? STATE_TYPE_OFF : STATE_TYPE_RETN) \
+		: STATE_TYPE_RUN)
+
+/******************************************************************************
+ * Check that the maximum retention level supported by the platform is less
+ * than the maximum off level.
+ *****************************************************************************/
+CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE, \
+		assert_platform_max_off_and_retn_state_check);
+
+/******************************************************************************
+ * This function ensures that the power state parameter in a CPU_SUSPEND request
+ * is valid. If so, it returns the requested states for each power level.
+ *****************************************************************************/
+int psci_validate_power_state(unsigned int power_state,
+			      psci_power_state_t *state_info)
+{
+	/* Check SBZ bits in power state are zero */
+	if (psci_check_power_state(power_state))
+		return PSCI_E_INVALID_PARAMS;
+
+	assert(psci_plat_pm_ops->validate_power_state);
+
+	/* Validate the power_state using platform pm_ops */
+	return psci_plat_pm_ops->validate_power_state(power_state, state_info);
+}
+
+/******************************************************************************
+ * This function retrieves the `psci_power_state_t` for system suspend from
+ * the platform.
+ *****************************************************************************/
+void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
+{
+	/*
+	 * Assert that the required pm_ops hook is implemented to ensure that
+	 * the capability detected during psci_setup() is valid.
+	 */
+	assert(psci_plat_pm_ops->get_sys_suspend_power_state);
+
+	/*
+	 * Query the platform for the power_state required for system suspend
+	 */
+	psci_plat_pm_ops->get_sys_suspend_power_state(state_info);
+}
+
+/*******************************************************************************
+ * This function verifies that all the other cores in the system have been
+ * turned OFF and the current CPU is the last running CPU in the system.
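
A worked reading of the categorization above, assuming a platform where
PLAT_MAX_RET_STATE is 1 and PLAT_MAX_OFF_STATE is 2 (illustrative values):

    find_local_state_type(0);	/* STATE_TYPE_RUN  */
    find_local_state_type(1);	/* STATE_TYPE_RETN */
    find_local_state_type(2);	/* STATE_TYPE_OFF  */
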
+ * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false) + * otherwise. + ******************************************************************************/ +unsigned int psci_is_last_on_cpu(void) +{ + unsigned int cpu_idx, my_idx = plat_my_core_pos(); + + for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) { + if (cpu_idx == my_idx) { + assert(psci_get_aff_info_state() == AFF_STATE_ON); + continue; + } + + if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF) + return 0; + } + + return 1; +} + +/******************************************************************************* + * Routine to return the maximum power level to traverse to after a cpu has + * been physically powered up. It is expected to be called immediately after + * reset from assembler code. + ******************************************************************************/ +static unsigned int get_power_on_target_pwrlvl(void) +{ + unsigned int pwrlvl; + + /* + * Assume that this cpu was suspended and retrieve its target power + * level. If it is invalid then it could only have been turned off + * earlier. PLAT_MAX_PWR_LVL will be the highest power level a + * cpu can be turned off to. + */ + pwrlvl = psci_get_suspend_pwrlvl(); + if (pwrlvl == PSCI_INVALID_PWR_LVL) + pwrlvl = PLAT_MAX_PWR_LVL; + return pwrlvl; +} + +/****************************************************************************** + * Helper function to update the requested local power state array. This array + * does not store the requested state for the CPU power level. Hence an + * assertion is added to prevent us from accessing the wrong index. + *****************************************************************************/ +static void psci_set_req_local_pwr_state(unsigned int pwrlvl, + unsigned int cpu_idx, + plat_local_state_t req_pwr_state) +{ + assert(pwrlvl > PSCI_CPU_PWR_LVL); + psci_req_local_pwr_states[pwrlvl - 1][cpu_idx] = req_pwr_state; +} + +/****************************************************************************** + * This function initializes the psci_req_local_pwr_states. + *****************************************************************************/ +void psci_init_req_local_pwr_states(void) +{ + /* Initialize the requested state of all non CPU power domains as OFF */ + memset(&psci_req_local_pwr_states, PLAT_MAX_OFF_STATE, + sizeof(psci_req_local_pwr_states)); +} + +/****************************************************************************** + * Helper function to return a reference to an array containing the local power + * states requested by each cpu for a power domain at 'pwrlvl'. The size of the + * array will be the number of cpu power domains of which this power domain is + * an ancestor. These requested states will be used to determine a suitable + * target state for this power domain during psci state coordination. An + * assertion is added to prevent us from accessing the CPU power level. + *****************************************************************************/ +static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl, + unsigned int cpu_idx) +{ + assert(pwrlvl > PSCI_CPU_PWR_LVL); + + return &psci_req_local_pwr_states[pwrlvl - 1][cpu_idx]; +} + +/****************************************************************************** + * Helper function to return the current local power state of each power domain + * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. 
This
+ * function will be called after a cpu is powered on to find the local state
+ * each power domain has emerged from.
+ *****************************************************************************/
+static void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
+					     psci_power_state_t *target_state)
+{
+	unsigned int parent_idx, lvl;
+	plat_local_state_t *pd_state = target_state->pwr_domain_state;
+
+	pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
+	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
+
+	/* Copy the local power state from node to state_info */
+	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+#if !USE_COHERENT_MEM
+		/*
+		 * If using normal memory for psci_non_cpu_pd_nodes, we need
+		 * to flush before reading the local power state as another
+		 * cpu in the same power domain could have updated it and this
+		 * code runs before caches are enabled.
+		 */
+		flush_dcache_range(
+				(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
+				sizeof(psci_non_cpu_pd_nodes[parent_idx]));
+#endif
+		pd_state[lvl] = psci_non_cpu_pd_nodes[parent_idx].local_state;
+		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+	}
+
+	/* Set the higher levels to RUN */
+	for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
+		target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
+}
+
+/******************************************************************************
+ * Helper function to set the target local power state that each power domain
+ * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
+ * enter. This function will be called after coordination of requested power
+ * states has been done for each power level.
+ *****************************************************************************/
+static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
+					const psci_power_state_t *target_state)
+{
+	unsigned int parent_idx, lvl;
+	const plat_local_state_t *pd_state = target_state->pwr_domain_state;
+
+	psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);
+
+	/*
+	 * Need to flush as local_state will be accessed with Data Cache
+	 * disabled during power on
+	 */
+	flush_cpu_data(psci_svc_cpu_data.local_state);
+
+	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
+
+	/* Copy the local_state from state_info */
+	for (lvl = 1; lvl <= end_pwrlvl; lvl++) {
+		psci_non_cpu_pd_nodes[parent_idx].local_state = pd_state[lvl];
+#if !USE_COHERENT_MEM
+		flush_dcache_range(
+				(uintptr_t)&psci_non_cpu_pd_nodes[parent_idx],
+				sizeof(psci_non_cpu_pd_nodes[parent_idx]));
+#endif
+		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+	}
+}
+
+
+/*******************************************************************************
+ * PSCI helper function to get the parent nodes corresponding to a cpu_index.
+ ******************************************************************************/
+void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
+				      unsigned int end_lvl,
+				      unsigned int node_index[])
+{
+	unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
+	int i;
+
+	for (i = PSCI_CPU_PWR_LVL + 1; i <= end_lvl; i++) {
+		*node_index++ = parent_node;
+		parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
+	}
+}
+
+/******************************************************************************
+ * This function is invoked post CPU power up and initialization. It sets the
+ * affinity info state, target power state and requested power state for the
+ * current CPU and all its ancestor power domains to RUN.
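
As an illustrative caller of psci_get_parent_pwr_domain_nodes() (sketch, not
part of the patch), the lock release path later in this file gathers the
ancestor node indices first and then walks them top-down:

    unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};

    psci_get_parent_pwr_domain_nodes(plat_my_core_pos(), end_pwrlvl,
                                     parent_nodes);
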
+ *****************************************************************************/ +void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl) +{ + unsigned int parent_idx, cpu_idx = plat_my_core_pos(), lvl; + parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node; + + /* Reset the local_state to RUN for the non cpu power domains. */ + for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) { + psci_non_cpu_pd_nodes[parent_idx].local_state = + PSCI_LOCAL_STATE_RUN; +#if !USE_COHERENT_MEM + flush_dcache_range( + (uintptr_t) &psci_non_cpu_pd_nodes[parent_idx], + sizeof(psci_non_cpu_pd_nodes[parent_idx])); +#endif + psci_set_req_local_pwr_state(lvl, + cpu_idx, + PSCI_LOCAL_STATE_RUN); + parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node; + } + + /* Set the affinity info state to ON */ + psci_set_aff_info_state(AFF_STATE_ON); + + psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN); + flush_cpu_data(psci_svc_cpu_data); +} + +/****************************************************************************** + * This function is passed the local power states requested for each power + * domain (state_info) between the current CPU domain and its ancestors until + * the target power level (end_pwrlvl). It updates the array of requested power + * states with this information. + * + * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it + * retrieves the states requested by all the cpus of which the power domain at + * that level is an ancestor. It passes this information to the platform to + * coordinate and return the target power state. If the target state for a level + * is RUN then subsequent levels are not considered. At the CPU level, state + * coordination is not required. Hence, the requested and the target states are + * the same. + * + * The 'state_info' is updated with the target state for each level between the + * CPU and the 'end_pwrlvl' and returned to the caller. + * + * This function will only be invoked with data cache enabled and while + * powering down a core. + *****************************************************************************/ +void psci_do_state_coordination(unsigned int end_pwrlvl, + psci_power_state_t *state_info) +{ + unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos(); + unsigned int start_idx, ncpus; + plat_local_state_t target_state, *req_states; + + assert(end_pwrlvl <= PLAT_MAX_PWR_LVL); + parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node; + + /* For level 0, the requested state will be equivalent + to target state */ + for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) { + + /* First update the requested power state */ + psci_set_req_local_pwr_state(lvl, cpu_idx, + state_info->pwr_domain_state[lvl]); + + /* Get the requested power states for this power level */ + start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx; + req_states = psci_get_req_local_pwr_states(lvl, start_idx); + + /* + * Let the platform coordinate amongst the requested states at + * this power level and return the target local power state. 
+	 */
+		ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
+		target_state = plat_get_target_pwr_state(lvl,
+							 req_states,
+							 ncpus);
+
+		state_info->pwr_domain_state[lvl] = target_state;
+
+		/* Break early if the negotiated target power state is RUN */
+		if (is_local_state_run(state_info->pwr_domain_state[lvl]))
+			break;
+
+		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+	}
+
+	/*
+	 * This is for cases when we break out of the above loop early because
+	 * the target power state is RUN at a power level < end_pwrlvl.
+	 * We update the requested power state from state_info and then
+	 * set the target state as RUN.
+	 */
+	for (lvl = lvl + 1; lvl <= end_pwrlvl; lvl++) {
+		psci_set_req_local_pwr_state(lvl, cpu_idx,
+					     state_info->pwr_domain_state[lvl]);
+		state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
+
+	}
+
+	/* Update the target state in the power domain nodes */
+	psci_set_target_local_pwr_states(end_pwrlvl, state_info);
+}
+
+/******************************************************************************
+ * This function validates a suspend request by making sure that if a standby
+ * state is requested then no power level is turned off and the highest power
+ * level is placed in a standby/retention state.
+ *
+ * It also ensures that the state that level X will enter is not shallower
+ * than the state that level X + 1 will enter.
+ *
+ * This validation will be enabled only for DEBUG builds as the platform is
+ * expected to perform these validations as well.
+ *****************************************************************************/
+int psci_validate_suspend_req(const psci_power_state_t *state_info,
+			      unsigned int is_power_down_state)
+{
+	unsigned int max_off_lvl, target_lvl, max_retn_lvl;
+	plat_local_state_t state;
+	plat_local_state_type_t req_state_type, deepest_state_type;
+	int i;
+
+	/* Find the target suspend power level */
+	target_lvl = psci_find_target_suspend_lvl(state_info);
+	if (target_lvl == PSCI_INVALID_PWR_LVL)
+		return PSCI_E_INVALID_PARAMS;
+
+	/* All power domain levels are in a RUN state to begin with */
+	deepest_state_type = STATE_TYPE_RUN;
+
+	for (i = target_lvl; i >= PSCI_CPU_PWR_LVL; i--) {
+		state = state_info->pwr_domain_state[i];
+		req_state_type = find_local_state_type(state);
+
+		/*
+		 * While traversing from the highest power level to the lowest,
+		 * the state requested for lower levels has to be the same or
+		 * deeper i.e. equal to or greater than the state at the higher
+		 * levels. If this condition is true, then the requested state
+		 * becomes the deepest state encountered so far.
+		 */
+		if (req_state_type < deepest_state_type)
+			return PSCI_E_INVALID_PARAMS;
+		deepest_state_type = req_state_type;
+	}
+
+	/* Find the highest off power level */
+	max_off_lvl = psci_find_max_off_lvl(state_info);
+
+	/* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
+	max_retn_lvl = PSCI_INVALID_PWR_LVL;
+	if (target_lvl != max_off_lvl)
+		max_retn_lvl = target_lvl;
+
+	/*
+	 * If this is not a request for a power down state then max off level
+	 * has to be invalid and max retention level has to be a valid power
+	 * level.
+	 */
+	if (!is_power_down_state && (max_off_lvl != PSCI_INVALID_PWR_LVL ||
+				     max_retn_lvl == PSCI_INVALID_PWR_LVL))
+		return PSCI_E_INVALID_PARAMS;
+
+	return PSCI_E_SUCCESS;
+}
+
+/******************************************************************************
+ * This function finds the highest power level which will be powered down
+ * amongst all the power levels specified in the 'state_info' structure
+ *****************************************************************************/
+unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
+{
+	int i;
+
+	for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
+		if (is_local_state_off(state_info->pwr_domain_state[i]))
+			return i;
+	}
+
+	return PSCI_INVALID_PWR_LVL;
+}
+
+/******************************************************************************
+ * This function finds the level of the highest power domain which will be
+ * placed in a low power state during a suspend operation.
+ *****************************************************************************/
+unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
+{
+	int i;
+
+	for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
+		if (!is_local_state_run(state_info->pwr_domain_state[i]))
+			return i;
+	}
+
+	return PSCI_INVALID_PWR_LVL;
+}
+
+/*******************************************************************************
+ * This function is passed a cpu_index and the highest level in the topology
+ * tree that the operation should be applied to. It picks up locks in order of
+ * increasing power domain level in the range specified.
+ ******************************************************************************/
+void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
+				   unsigned int cpu_idx)
+{
+	unsigned int parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
+	unsigned int level;
+
+	/* No locking required for level 0. Hence start locking from level 1 */
+	for (level = PSCI_CPU_PWR_LVL + 1; level <= end_pwrlvl; level++) {
+		psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
+		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+	}
+}
+
+/*******************************************************************************
+ * This function is passed a cpu_index and the highest level in the topology
+ * tree that the operation should be applied to. It releases the locks in order
+ * of decreasing power domain level in the range specified.
+ ******************************************************************************/
+void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
+				   unsigned int cpu_idx)
+{
+	unsigned int parent_idx, parent_nodes[PLAT_MAX_PWR_LVL] = {0};
+	int level;
+
+	/* Get the parent nodes */
+	psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);
+
+	/* Unlock top down. No unlocking required for level 0. */
+	for (level = end_pwrlvl; level >= PSCI_CPU_PWR_LVL + 1; level--) {
+		parent_idx = parent_nodes[level - 1];
+		psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
+	}
+}
+
+/*******************************************************************************
+ * Simple routine to determine whether an mpidr is valid or not.
+ ******************************************************************************/ +int psci_validate_mpidr(u_register_t mpidr) +{ + if (plat_core_pos_by_mpidr(mpidr) < 0) + return PSCI_E_INVALID_PARAMS; + + return PSCI_E_SUCCESS; +} + +/******************************************************************************* + * This function determines the full entrypoint information for the requested + * PSCI entrypoint on power on/resume and returns it. + ******************************************************************************/ +static int psci_get_ns_ep_info(entry_point_info_t *ep, + uintptr_t entrypoint, + u_register_t context_id) +{ + u_register_t ep_attr, sctlr; + unsigned int daif, ee, mode; + u_register_t ns_scr_el3 = read_scr_el3(); + u_register_t ns_sctlr_el1 = read_sctlr_el1(); + + sctlr = ns_scr_el3 & SCR_HCE_BIT ? read_sctlr_el2() : ns_sctlr_el1; + ee = 0; + + ep_attr = NON_SECURE | EP_ST_DISABLE; + if (sctlr & SCTLR_EE_BIT) { + ep_attr |= EP_EE_BIG; + ee = 1; + } + SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr); + + ep->pc = entrypoint; + memset(&ep->args, 0, sizeof(ep->args)); + ep->args.arg0 = context_id; + + /* + * Figure out whether the cpu enters the non-secure address space + * in aarch32 or aarch64 + */ + if (ns_scr_el3 & SCR_RW_BIT) { + + /* + * Check whether a Thumb entry point has been provided for an + * aarch64 EL + */ + if (entrypoint & 0x1) + return PSCI_E_INVALID_ADDRESS; + + mode = ns_scr_el3 & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1; + + ep->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); + } else { + + mode = ns_scr_el3 & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc; + + /* + * TODO: Choose async. exception bits if HYP mode is not + * implemented according to the values of SCR.{AW, FW} bits + */ + daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT; + + ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif); + } + + return PSCI_E_SUCCESS; +} + +/******************************************************************************* + * This function validates the entrypoint with the platform layer if the + * appropriate pm_ops hook is exported by the platform and returns the + * 'entry_point_info'. + ******************************************************************************/ +int psci_validate_entry_point(entry_point_info_t *ep, + uintptr_t entrypoint, + u_register_t context_id) +{ + int rc; + + /* Validate the entrypoint using platform psci_ops */ + if (psci_plat_pm_ops->validate_ns_entrypoint) { + rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint); + if (rc != PSCI_E_SUCCESS) + return PSCI_E_INVALID_ADDRESS; + } + + /* + * Verify and derive the re-entry information for + * the non-secure world from the non-secure state from + * where this call originated. + */ + rc = psci_get_ns_ep_info(ep, entrypoint, context_id); + return rc; +} + +/******************************************************************************* + * Generic handler which is called when a cpu is physically powered on. It + * traverses the node information and finds the highest power level powered + * off and performs generic, architectural, platform setup and state management + * to power on that power level and power levels below it. + * e.g. For a cpu that's been powered on, it will call the platform specific + * code to enable the gic cpu interface and for a cluster it will enable + * coherency at the interconnect level in addition to gic cpu interface. 
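+ *
+ * As a hedged illustration (hypothetical platform code, not part of this
+ * patch), the pwr_domain_on_finish() hook reached from this path might do
+ * the following, where level 1 is assumed to be the cluster level and all
+ * names are illustrative:
+ *
+ *	static void plat_pwr_domain_on_finish(const psci_power_state_t *st)
+ *	{
+ *		if (is_local_state_off(st->pwr_domain_state[1]))
+ *			plat_enter_cluster_coherency();
+ *		plat_gic_pcpu_init();
+ *		plat_gic_cpuif_enable();
+ *	}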
+ ******************************************************************************/
+void psci_warmboot_entrypoint(void)
+{
+	unsigned int end_pwrlvl, cpu_idx = plat_my_core_pos();
+	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
+
+	/*
+	 * Verify that we have been explicitly turned ON or resumed from
+	 * suspend.
+	 */
+	if (psci_get_aff_info_state() == AFF_STATE_OFF) {
+		ERROR("Unexpected affinity info state");
+		panic();
+	}
+
+	/*
+	 * Get the maximum power domain level to traverse to after this cpu
+	 * has been physically powered up.
+	 */
+	end_pwrlvl = get_power_on_target_pwrlvl();
+
+	/*
+	 * This function acquires the lock corresponding to each power level so
+	 * that by the time all locks are taken, the system topology is
+	 * snapshotted and state management can be done safely.
+	 */
+	psci_acquire_pwr_domain_locks(end_pwrlvl,
+				      cpu_idx);
+
+#if ENABLE_PSCI_STAT
+	/*
+	 * Capture power up time-stamp.
+	 * No cache maintenance is required as caches are off
+	 * and writes are direct to the main memory.
+	 */
+	PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_EXIT_LOW_PWR,
+		PMF_NO_CACHE_MAINT);
+#endif
+
+	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);
+
+	/*
+	 * This CPU could be resuming from suspend or it could have just been
+	 * turned on. To distinguish between these 2 cases, we examine the
+	 * affinity state of the CPU:
+	 *  - If the affinity state is ON_PENDING then it has just been
+	 *    turned on.
+	 *  - Else it is resuming from suspend.
+	 *
+	 * Depending on the type of warm reset identified, choose the right set
+	 * of power management handlers and perform the generic, architecture
+	 * and platform specific handling.
+	 */
+	if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING)
+		psci_cpu_on_finish(cpu_idx, &state_info);
+	else
+		psci_cpu_suspend_finish(cpu_idx, &state_info);
+
+	/*
+	 * Set the requested and target state of this CPU and all the higher
+	 * power domains which are ancestors of this CPU to run.
+	 */
+	psci_set_pwr_domains_to_run(end_pwrlvl);
+
+#if ENABLE_PSCI_STAT
+	/*
+	 * Update PSCI stats.
+	 * Caches are off when writing stats data on the power down path.
+	 * Since caches are now enabled, it's necessary to do cache
+	 * maintenance before reading that same data.
+	 */
+	psci_stats_update_pwr_up(end_pwrlvl, &state_info, PMF_CACHE_MAINT);
+#endif
+
+	/*
+	 * This loop releases the lock corresponding to each power level
+	 * in the reverse order to which they were acquired.
+	 */
+	psci_release_pwr_domain_locks(end_pwrlvl,
+				      cpu_idx);
+}
+
+/*******************************************************************************
+ * This function initializes the set of hooks that PSCI invokes as part of power
+ * management operation. The power management hooks are expected to be provided
+ * by the SPD, after it finishes all its initialization.
+ ******************************************************************************/
+void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
+{
+	assert(pm);
+	psci_spd_pm = pm;
+
+	if (pm->svc_migrate)
+		psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);
+
+	if (pm->svc_migrate_info)
+		psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
+			| define_psci_cap(PSCI_MIG_INFO_TYPE);
+}
+
+/*******************************************************************************
+ * This function invokes the migrate info hook in the spd_pm_ops. It performs
+ * the necessary return value validation. 
If the Secure Payload is UP and + * migrate capable, it returns the mpidr of the CPU on which the Secure payload + * is resident through the mpidr parameter. Else the value of the parameter on + * return is undefined. + ******************************************************************************/ +int psci_spd_migrate_info(u_register_t *mpidr) +{ + int rc; + + if (!psci_spd_pm || !psci_spd_pm->svc_migrate_info) + return PSCI_E_NOT_SUPPORTED; + + rc = psci_spd_pm->svc_migrate_info(mpidr); + + assert(rc == PSCI_TOS_UP_MIG_CAP || rc == PSCI_TOS_NOT_UP_MIG_CAP \ + || rc == PSCI_TOS_NOT_PRESENT_MP || rc == PSCI_E_NOT_SUPPORTED); + + return rc; +} + + +/******************************************************************************* + * This function prints the state of all power domains present in the + * system + ******************************************************************************/ +void psci_print_power_domain_map(void) +{ +#if LOG_LEVEL >= LOG_LEVEL_INFO + unsigned int idx; + plat_local_state_t state; + plat_local_state_type_t state_type; + + /* This array maps to the PSCI_STATE_X definitions in psci.h */ + static const char * const psci_state_type_str[] = { + "ON", + "RETENTION", + "OFF", + }; + + INFO("PSCI Power Domain Map:\n"); + for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - PLATFORM_CORE_COUNT); + idx++) { + state_type = find_local_state_type( + psci_non_cpu_pd_nodes[idx].local_state); + INFO(" Domain Node : Level %u, parent_node %d," + " State %s (0x%x)\n", + psci_non_cpu_pd_nodes[idx].level, + psci_non_cpu_pd_nodes[idx].parent_node, + psci_state_type_str[state_type], + psci_non_cpu_pd_nodes[idx].local_state); + } + + for (idx = 0; idx < PLATFORM_CORE_COUNT; idx++) { + state = psci_get_cpu_local_state_by_idx(idx); + state_type = find_local_state_type(state); + INFO(" CPU Node : MPID 0x%llx, parent_node %d," + " State %s (0x%x)\n", + (unsigned long long)psci_cpu_pd_nodes[idx].mpidr, + psci_cpu_pd_nodes[idx].parent_node, + psci_state_type_str[state_type], + psci_get_cpu_local_state_by_idx(idx)); + } +#endif +} + +#if ENABLE_PLAT_COMPAT +/******************************************************************************* + * PSCI Compatibility helper function to return the 'power_state' parameter of + * the PSCI CPU SUSPEND request for the current CPU. Returns PSCI_INVALID_DATA + * if not invoked within CPU_SUSPEND for the current CPU. + ******************************************************************************/ +int psci_get_suspend_powerstate(void) +{ + /* Sanity check to verify that CPU is within CPU_SUSPEND */ + if (psci_get_aff_info_state() == AFF_STATE_ON && + !is_local_state_run(psci_get_cpu_local_state())) + return psci_power_state_compat[plat_my_core_pos()]; + + return PSCI_INVALID_DATA; +} + +/******************************************************************************* + * PSCI Compatibility helper function to return the state id of the current + * cpu encoded in the 'power_state' parameter. Returns PSCI_INVALID_DATA + * if not invoked within CPU_SUSPEND for the current CPU. 
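+ *
+ * Illustrative use only (not part of this patch): on its suspend path an
+ * SPD could map the returned state id onto its own idle states, e.g.:
+ *
+ *	int id = psci_get_suspend_stateid();
+ *	if (id != PSCI_INVALID_DATA)
+ *		spd_prepare_for_idle(id);
+ *
+ * where spd_prepare_for_idle() is a hypothetical SPD helper.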
+ ******************************************************************************/ +int psci_get_suspend_stateid(void) +{ + unsigned int power_state; + power_state = psci_get_suspend_powerstate(); + if (power_state != PSCI_INVALID_DATA) + return psci_get_pstate_id(power_state); + + return PSCI_INVALID_DATA; +} + +/******************************************************************************* + * PSCI Compatibility helper function to return the state id encoded in the + * 'power_state' parameter of the CPU specified by 'mpidr'. Returns + * PSCI_INVALID_DATA if the CPU is not in CPU_SUSPEND. + ******************************************************************************/ +int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr) +{ + int cpu_idx = plat_core_pos_by_mpidr(mpidr); + + if (cpu_idx == -1) + return PSCI_INVALID_DATA; + + /* Sanity check to verify that the CPU is in CPU_SUSPEND */ + if (psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_ON && + !is_local_state_run(psci_get_cpu_local_state_by_idx(cpu_idx))) + return psci_get_pstate_id(psci_power_state_compat[cpu_idx]); + + return PSCI_INVALID_DATA; +} + +/******************************************************************************* + * This function returns highest affinity level which is in OFF + * state. The affinity instance with which the level is associated is + * determined by the caller. + ******************************************************************************/ +unsigned int psci_get_max_phys_off_afflvl(void) +{ + psci_power_state_t state_info; + + memset(&state_info, 0, sizeof(state_info)); + psci_get_target_local_pwr_states(PLAT_MAX_PWR_LVL, &state_info); + + return psci_find_target_suspend_lvl(&state_info); +} + +/******************************************************************************* + * PSCI Compatibility helper function to return target affinity level requested + * for the CPU_SUSPEND. This function assumes affinity levels correspond to + * power domain levels on the platform. + ******************************************************************************/ +int psci_get_suspend_afflvl(void) +{ + return psci_get_suspend_pwrlvl(); +} + +#endif diff --git a/lib/psci/psci_lib.mk b/lib/psci/psci_lib.mk new file mode 100644 index 0000000..662e14a --- /dev/null +++ b/lib/psci/psci_lib.mk @@ -0,0 +1,54 @@ +# +# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# Neither the name of ARM nor the names of its contributors may be used +# to endorse or promote products derived from this software without specific +# prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# + +PSCI_LIB_SOURCES := lib/el3_runtime/cpu_data_array.c \ + lib/el3_runtime/aarch64/context.S \ + lib/el3_runtime/aarch64/cpu_data.S \ + lib/el3_runtime/aarch64/context_mgmt.c \ + lib/cpus/aarch64/cpu_helpers.S \ + lib/locks/exclusive/spinlock.S \ + lib/psci/psci_off.c \ + lib/psci/psci_on.c \ + lib/psci/psci_suspend.c \ + lib/psci/psci_common.c \ + lib/psci/psci_main.c \ + lib/psci/psci_setup.c \ + lib/psci/psci_system_off.c \ + lib/psci/aarch64/psci_helpers.S + +ifeq (${USE_COHERENT_MEM}, 1) +PSCI_LIB_SOURCES += lib/locks/bakery/bakery_lock_coherent.c +else +PSCI_LIB_SOURCES += lib/locks/bakery/bakery_lock_normal.c +endif + +ifeq (${ENABLE_PSCI_STAT}, 1) +PSCI_LIB_SOURCES += lib/psci/psci_stat.c +endif diff --git a/lib/psci/psci_main.c b/lib/psci/psci_main.c new file mode 100644 index 0000000..d412be3 --- /dev/null +++ b/lib/psci/psci_main.c @@ -0,0 +1,439 @@ +/* + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "psci_private.h" + +/******************************************************************************* + * PSCI frontend api for servicing SMCs. Described in the PSCI spec. 
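+ *
+ * For example (function id value as defined by the PSCI specification): a
+ * normal world caller turns on a CPU by issuing
+ *
+ *	SMC(0xC4000003, target_mpidr, entrypoint, context_id)
+ *
+ * i.e. CPU_ON (SMC64); psci_smc_handler() at the bottom of this file
+ * routes that call to psci_cpu_on() below.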
+ ******************************************************************************/
+int psci_cpu_on(u_register_t target_cpu,
+		uintptr_t entrypoint,
+		u_register_t context_id)
+{
+	int rc;
+	entry_point_info_t ep;
+
+	/* Determine if the cpu exists or not */
+	rc = psci_validate_mpidr(target_cpu);
+	if (rc != PSCI_E_SUCCESS)
+		return PSCI_E_INVALID_PARAMS;
+
+	/* Validate the entry point and get the entry_point_info */
+	rc = psci_validate_entry_point(&ep, entrypoint, context_id);
+	if (rc != PSCI_E_SUCCESS)
+		return rc;
+
+	/*
+	 * To turn this cpu on, specify which power
+	 * levels need to be turned on
+	 */
+	return psci_cpu_on_start(target_cpu, &ep);
+}
+
+unsigned int psci_version(void)
+{
+	return PSCI_MAJOR_VER | PSCI_MINOR_VER;
+}
+
+int psci_cpu_suspend(unsigned int power_state,
+		     uintptr_t entrypoint,
+		     u_register_t context_id)
+{
+	int rc;
+	unsigned int target_pwrlvl, is_power_down_state;
+	entry_point_info_t ep;
+	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
+	plat_local_state_t cpu_pd_state;
+
+	/* Validate the power_state parameter */
+	rc = psci_validate_power_state(power_state, &state_info);
+	if (rc != PSCI_E_SUCCESS) {
+		assert(rc == PSCI_E_INVALID_PARAMS);
+		return rc;
+	}
+
+	/*
+	 * Get the value of the state type bit from the power state parameter.
+	 */
+	is_power_down_state = psci_get_pstate_type(power_state);
+
+	/* Sanity check the requested suspend levels */
+	assert(psci_validate_suspend_req(&state_info, is_power_down_state)
+	       == PSCI_E_SUCCESS);
+
+	target_pwrlvl = psci_find_target_suspend_lvl(&state_info);
+
+	/* Fast path for CPU standby. */
+	if (is_cpu_standby_req(is_power_down_state, target_pwrlvl)) {
+		if (!psci_plat_pm_ops->cpu_standby)
+			return PSCI_E_INVALID_PARAMS;
+
+		/*
+		 * Set the state of the CPU power domain to the platform
+		 * specific retention state and enter the standby state.
+		 */
+		cpu_pd_state = state_info.pwr_domain_state[PSCI_CPU_PWR_LVL];
+		psci_set_cpu_local_state(cpu_pd_state);
+
+#if ENABLE_PSCI_STAT
+		/*
+		 * Capture time-stamp before CPU standby.
+		 * No cache maintenance is needed as caches
+		 * are ON throughout the CPU standby operation.
+		 */
+		PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
+			PMF_NO_CACHE_MAINT);
+#endif
+
+		psci_plat_pm_ops->cpu_standby(cpu_pd_state);
+
+		/* Upon exit from standby, set the state back to RUN. */
+		psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
+
+#if ENABLE_PSCI_STAT
+		/* Capture time-stamp after CPU standby */
+		PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_EXIT_LOW_PWR,
+			PMF_NO_CACHE_MAINT);
+
+		/* Update PSCI stats */
+		psci_stats_update_pwr_up(PSCI_CPU_PWR_LVL, &state_info,
+			PMF_NO_CACHE_MAINT);
+#endif
+
+		return PSCI_E_SUCCESS;
+	}
+
+	/*
+	 * If a power down state has been requested, we need to verify entry
+	 * point and program entry information.
+	 */
+	if (is_power_down_state) {
+		rc = psci_validate_entry_point(&ep, entrypoint, context_id);
+		if (rc != PSCI_E_SUCCESS)
+			return rc;
+	}
+
+	/*
+	 * Do what is needed to enter the power down state. Upon success,
+	 * enter the final wfi which will power down this CPU. This function
+	 * might return if the power down was abandoned for any reason, e.g.
+	 * arrival of an interrupt.
+	 */
+	psci_cpu_suspend_start(&ep,
+			       target_pwrlvl,
+			       &state_info,
+			       is_power_down_state);
+
+	return PSCI_E_SUCCESS;
+}
+
+
+int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id)
+{
+	int rc;
+	psci_power_state_t state_info;
+	entry_point_info_t ep;
+
+	/* Check if the current CPU is the last ON CPU in the system */
+	if (!psci_is_last_on_cpu())
+		return PSCI_E_DENIED;
+
+	/* Validate the entry point and get the entry_point_info */
+	rc = psci_validate_entry_point(&ep, entrypoint, context_id);
+	if (rc != PSCI_E_SUCCESS)
+		return rc;
+
+	/* Query the psci_power_state for system suspend */
+	psci_query_sys_suspend_pwrstate(&state_info);
+
+	/* Ensure that the psci_power_state makes sense */
+	assert(psci_find_target_suspend_lvl(&state_info) == PLAT_MAX_PWR_LVL);
+	assert(psci_validate_suspend_req(&state_info, PSTATE_TYPE_POWERDOWN)
+	       == PSCI_E_SUCCESS);
+	assert(is_local_state_off(state_info.pwr_domain_state[PLAT_MAX_PWR_LVL]));
+
+	/*
+	 * Do what is needed to enter the system suspend state. This function
+	 * might return if the power down was abandoned for any reason, e.g.
+	 * arrival of an interrupt.
+	 */
+	psci_cpu_suspend_start(&ep,
+			       PLAT_MAX_PWR_LVL,
+			       &state_info,
+			       PSTATE_TYPE_POWERDOWN);
+
+	return PSCI_E_SUCCESS;
+}
+
+int psci_cpu_off(void)
+{
+	int rc;
+	unsigned int target_pwrlvl = PLAT_MAX_PWR_LVL;
+
+	/*
+	 * Do what is needed to power off this CPU and possibly higher power
+	 * levels if it is able to do so. Upon success, enter the final wfi
+	 * which will power down this CPU.
+	 */
+	rc = psci_do_cpu_off(target_pwrlvl);
+
+	/*
+	 * The only error cpu_off can return is E_DENIED. So check if that's
+	 * indeed the case.
+	 */
+	assert(rc == PSCI_E_DENIED);
+
+	return rc;
+}
+
+int psci_affinity_info(u_register_t target_affinity,
+		       unsigned int lowest_affinity_level)
+{
+	unsigned int target_idx;
+
+	/* We don't support levels higher than PSCI_CPU_PWR_LVL */
+	if (lowest_affinity_level > PSCI_CPU_PWR_LVL)
+		return PSCI_E_INVALID_PARAMS;
+
+	/* Calculate the cpu index of the target */
+	target_idx = plat_core_pos_by_mpidr(target_affinity);
+	if (target_idx == -1)
+		return PSCI_E_INVALID_PARAMS;
+
+	return psci_get_aff_info_state_by_idx(target_idx);
+}
+
+int psci_migrate(u_register_t target_cpu)
+{
+	int rc;
+	u_register_t resident_cpu_mpidr;
+
+	rc = psci_spd_migrate_info(&resident_cpu_mpidr);
+	if (rc != PSCI_TOS_UP_MIG_CAP)
+		return (rc == PSCI_TOS_NOT_UP_MIG_CAP) ?
+			PSCI_E_DENIED : PSCI_E_NOT_SUPPORTED;
+
+	/*
+	 * Migrate should only be invoked on the CPU where
+	 * the Secure OS is resident.
+	 */
+	if (resident_cpu_mpidr != read_mpidr_el1())
+		return PSCI_E_NOT_PRESENT;
+
+	/* Check the validity of the specified target cpu */
+	rc = psci_validate_mpidr(target_cpu);
+	if (rc != PSCI_E_SUCCESS)
+		return PSCI_E_INVALID_PARAMS;
+
+	assert(psci_spd_pm && psci_spd_pm->svc_migrate);
+
+	rc = psci_spd_pm->svc_migrate(read_mpidr_el1(), target_cpu);
+	assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
+
+	return rc;
+}
+
+int psci_migrate_info_type(void)
+{
+	u_register_t resident_cpu_mpidr;
+
+	return psci_spd_migrate_info(&resident_cpu_mpidr);
+}
+
+long psci_migrate_info_up_cpu(void)
+{
+	u_register_t resident_cpu_mpidr;
+	int rc;
+
+	/*
+	 * The return value of this function depends upon what
+	 * psci_spd_migrate_info() returns.
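+	 * If the Trusted OS is reported as UP (PSCI_TOS_UP_MIG_CAP or
+	 * PSCI_TOS_NOT_UP_MIG_CAP), the MPIDR of its resident CPU is
+	 * returned; any other report is treated as an invalid request.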
+ */ + rc = psci_spd_migrate_info(&resident_cpu_mpidr); + if (rc != PSCI_TOS_NOT_UP_MIG_CAP && rc != PSCI_TOS_UP_MIG_CAP) + return PSCI_E_INVALID_PARAMS; + + return resident_cpu_mpidr; +} + +int psci_features(unsigned int psci_fid) +{ + unsigned int local_caps = psci_caps; + + /* Check if it is a 64 bit function */ + if (((psci_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64) + local_caps &= PSCI_CAP_64BIT_MASK; + + /* Check for invalid fid */ + if (!(is_std_svc_call(psci_fid) && is_valid_fast_smc(psci_fid) + && is_psci_fid(psci_fid))) + return PSCI_E_NOT_SUPPORTED; + + + /* Check if the psci fid is supported or not */ + if (!(local_caps & define_psci_cap(psci_fid))) + return PSCI_E_NOT_SUPPORTED; + + /* Format the feature flags */ + if (psci_fid == PSCI_CPU_SUSPEND_AARCH32 || + psci_fid == PSCI_CPU_SUSPEND_AARCH64) { + /* + * The trusted firmware does not support OS Initiated Mode. + */ + return (FF_PSTATE << FF_PSTATE_SHIFT) | + ((!FF_SUPPORTS_OS_INIT_MODE) << FF_MODE_SUPPORT_SHIFT); + } + + /* Return 0 for all other fid's */ + return PSCI_E_SUCCESS; +} + +/******************************************************************************* + * PSCI top level handler for servicing SMCs. + ******************************************************************************/ +u_register_t psci_smc_handler(uint32_t smc_fid, + u_register_t x1, + u_register_t x2, + u_register_t x3, + u_register_t x4, + void *cookie, + void *handle, + u_register_t flags) +{ + if (is_caller_secure(flags)) + return SMC_UNK; + + /* Check the fid against the capabilities */ + if (!(psci_caps & define_psci_cap(smc_fid))) + return SMC_UNK; + + if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) { + /* 32-bit PSCI function, clear top parameter bits */ + + x1 = (uint32_t)x1; + x2 = (uint32_t)x2; + x3 = (uint32_t)x3; + + switch (smc_fid) { + case PSCI_VERSION: + return psci_version(); + + case PSCI_CPU_OFF: + return psci_cpu_off(); + + case PSCI_CPU_SUSPEND_AARCH32: + return psci_cpu_suspend(x1, x2, x3); + + case PSCI_CPU_ON_AARCH32: + return psci_cpu_on(x1, x2, x3); + + case PSCI_AFFINITY_INFO_AARCH32: + return psci_affinity_info(x1, x2); + + case PSCI_MIG_AARCH32: + return psci_migrate(x1); + + case PSCI_MIG_INFO_TYPE: + return psci_migrate_info_type(); + + case PSCI_MIG_INFO_UP_CPU_AARCH32: + return psci_migrate_info_up_cpu(); + + case PSCI_SYSTEM_SUSPEND_AARCH32: + return psci_system_suspend(x1, x2); + + case PSCI_SYSTEM_OFF: + psci_system_off(); + /* We should never return from psci_system_off() */ + + case PSCI_SYSTEM_RESET: + psci_system_reset(); + /* We should never return from psci_system_reset() */ + + case PSCI_FEATURES: + return psci_features(x1); + +#if ENABLE_PSCI_STAT + case PSCI_STAT_RESIDENCY_AARCH32: + return psci_stat_residency(x1, x2); + + case PSCI_STAT_COUNT_AARCH32: + return psci_stat_count(x1, x2); +#endif + + default: + break; + } + } else { + /* 64-bit PSCI function */ + + switch (smc_fid) { + case PSCI_CPU_SUSPEND_AARCH64: + return psci_cpu_suspend(x1, x2, x3); + + case PSCI_CPU_ON_AARCH64: + return psci_cpu_on(x1, x2, x3); + + case PSCI_AFFINITY_INFO_AARCH64: + return psci_affinity_info(x1, x2); + + case PSCI_MIG_AARCH64: + return psci_migrate(x1); + + case PSCI_MIG_INFO_UP_CPU_AARCH64: + return psci_migrate_info_up_cpu(); + + case PSCI_SYSTEM_SUSPEND_AARCH64: + return psci_system_suspend(x1, x2); + +#if ENABLE_PSCI_STAT + case PSCI_STAT_RESIDENCY_AARCH64: + return psci_stat_residency(x1, x2); + + case PSCI_STAT_COUNT_AARCH64: + return psci_stat_count(x1, x2); +#endif + + default: + 
break;
+		}
+	}
+
+	WARN("Unimplemented PSCI Call: 0x%x\n", smc_fid);
+	return SMC_UNK;
+}
diff --git a/lib/psci/psci_off.c b/lib/psci/psci_off.c
new file mode 100644
index 0000000..471141d
--- /dev/null
+++ b/lib/psci/psci_off.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include "psci_private.h"
+
+/******************************************************************************
+ * Construct the psci_power_state to request power OFF at all power levels.
+ ******************************************************************************/
+static void psci_set_power_off_state(psci_power_state_t *state_info)
+{
+	int lvl;
+
+	for (lvl = PSCI_CPU_PWR_LVL; lvl <= PLAT_MAX_PWR_LVL; lvl++)
+		state_info->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE;
+}
+
+/******************************************************************************
+ * Top level handler which is called when a cpu wants to power itself down.
+ * It's assumed that along with turning the cpu power domain off, power
+ * domains at higher levels will be turned off as far as possible. It finds
+ * the highest level where a domain has to be powered off by traversing the
+ * node information and then performs generic, architectural, platform setup
+ * and state management required to turn OFF that power domain and domains
+ * below it. e.g. For a cpu that's to be powered OFF, it could mean programming
+ * the power controller whereas for a cluster that's to be powered off, it will
+ * call the platform specific code which will disable coherency at the
+ * interconnect level if the cpu is the last in the cluster and also
+ * program the power controller.
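+ *
+ * As a hedged sketch (hypothetical platform code, not part of this patch),
+ * the pwr_domain_off() hook invoked below might do the following, where
+ * level 1 is assumed to be the cluster level and all names are
+ * illustrative:
+ *
+ *	static void plat_pwr_domain_off(const psci_power_state_t *st)
+ *	{
+ *		plat_gic_cpuif_disable();
+ *		if (is_local_state_off(st->pwr_domain_state[1]))
+ *			plat_exit_cluster_coherency();
+ *	}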
+ ******************************************************************************/
+int psci_do_cpu_off(unsigned int end_pwrlvl)
+{
+	int rc = PSCI_E_SUCCESS, idx = plat_my_core_pos();
+	psci_power_state_t state_info;
+
+	/*
+	 * This function must only be called on platforms where the
+	 * CPU_OFF platform hooks have been implemented.
+	 */
+	assert(psci_plat_pm_ops->pwr_domain_off);
+
+	/*
+	 * This function acquires the lock corresponding to each power
+	 * level so that by the time all locks are taken, the system topology
+	 * is snapshotted and state management can be done safely.
+	 */
+	psci_acquire_pwr_domain_locks(end_pwrlvl,
+				      idx);
+
+	/*
+	 * Call the cpu off handler registered by the Secure Payload Dispatcher
+	 * to let it do any bookkeeping. Assume that the SPD always reports an
+	 * E_DENIED error if the SP refuses to power down.
+	 */
+	if (psci_spd_pm && psci_spd_pm->svc_off) {
+		rc = psci_spd_pm->svc_off(0);
+		if (rc)
+			goto exit;
+	}
+
+	/* Construct the psci_power_state for CPU_OFF */
+	psci_set_power_off_state(&state_info);
+
+	/*
+	 * This function is passed the requested state info and
+	 * it returns the negotiated state info for each power level up to
+	 * the end level specified.
+	 */
+	psci_do_state_coordination(end_pwrlvl, &state_info);
+
+#if ENABLE_PSCI_STAT
+	/* Update the last cpu for each level till end_pwrlvl */
+	psci_stats_update_pwr_down(end_pwrlvl, &state_info);
+#endif
+
+	/*
+	 * Arch. management. Perform the necessary steps to flush all
+	 * cpu caches.
+	 */
+	psci_do_pwrdown_cache_maintenance(psci_find_max_off_lvl(&state_info));
+
+	/*
+	 * Plat. management: Perform platform specific actions to turn this
+	 * cpu off e.g. exit cpu coherency, program the power controller etc.
+	 */
+	psci_plat_pm_ops->pwr_domain_off(&state_info);
+
+#if ENABLE_PSCI_STAT
+	/*
+	 * Capture time-stamp while entering low power state.
+	 * No cache maintenance needed because caches are off
+	 * and writes are direct to main memory.
+	 */
+	PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
+		PMF_NO_CACHE_MAINT);
+#endif
+
+exit:
+	/*
+	 * Release the locks corresponding to each power level in the
+	 * reverse order to which they were acquired.
+	 */
+	psci_release_pwr_domain_locks(end_pwrlvl,
+				      idx);
+
+	/*
+	 * Check if all actions needed to safely power down this cpu have
+	 * successfully completed.
+	 */
+	if (rc == PSCI_E_SUCCESS) {
+		/*
+		 * Set the affinity info state to OFF. This writes directly to
+		 * main memory as caches are disabled, so cache maintenance is
+		 * required to ensure that later cached reads of aff_info_state
+		 * return AFF_STATE_OFF. A dsbish() ensures ordering of the
+		 * update to the affinity info state prior to cache line
+		 * invalidation.
+		 */
+		flush_cpu_data(psci_svc_cpu_data.aff_info_state);
+		psci_set_aff_info_state(AFF_STATE_OFF);
+		dsbish();
+		inv_cpu_data(psci_svc_cpu_data.aff_info_state);
+
+		if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi) {
+			/* This function must not return */
+			psci_plat_pm_ops->pwr_domain_pwr_down_wfi(&state_info);
+		} else {
+			/*
+			 * Enter a wfi loop which will allow the power
+			 * controller to physically power down this cpu.
+			 */
+			psci_power_down_wfi();
+		}
+	}
+
+	return rc;
+}
diff --git a/lib/psci/psci_on.c b/lib/psci/psci_on.c
new file mode 100644
index 0000000..f4bb797
--- /dev/null
+++ b/lib/psci/psci_on.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "psci_private.h"
+
+/*******************************************************************************
+ * This function checks whether a cpu which has been requested to be turned on
+ * is OFF to begin with.
+ ******************************************************************************/
+static int cpu_on_validate_state(aff_info_state_t aff_state)
+{
+	if (aff_state == AFF_STATE_ON)
+		return PSCI_E_ALREADY_ON;
+
+	if (aff_state == AFF_STATE_ON_PENDING)
+		return PSCI_E_ON_PENDING;
+
+	assert(aff_state == AFF_STATE_OFF);
+	return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * Generic handler which is called to physically power on a cpu identified by
+ * its mpidr. It performs the generic, architectural, platform setup and state
+ * management to power on the target cpu e.g. it will ensure that
+ * enough information is stashed for it to resume execution in the non-secure
+ * security state.
+ *
+ * The state of all the relevant power domains is changed after calling the
+ * platform handler, as it can return an error.
+ ******************************************************************************/
+int psci_cpu_on_start(u_register_t target_cpu,
+		      entry_point_info_t *ep)
+{
+	int rc;
+	unsigned int target_idx = plat_core_pos_by_mpidr(target_cpu);
+	aff_info_state_t target_aff_state;
+
+	/* Calling function must supply valid input arguments */
+	assert((int) target_idx >= 0);
+	assert(ep != NULL);
+
+	/*
+	 * This function must only be called on platforms where the
+	 * CPU_ON platform hooks have been implemented.
+	 */
+	assert(psci_plat_pm_ops->pwr_domain_on &&
+	       psci_plat_pm_ops->pwr_domain_on_finish);
+
+	/* Protect against multiple CPUs trying to turn ON the same target CPU */
+	psci_spin_lock_cpu(target_idx);
+
+	/*
+	 * Generic management: Ensure that the cpu is off to be
+	 * turned on.
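+	 * cpu_on_validate_state() maps the current affinity state to a PSCI
+	 * return code: ON -> PSCI_E_ALREADY_ON, ON_PENDING ->
+	 * PSCI_E_ON_PENDING and OFF -> PSCI_E_SUCCESS.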
+	 */
+	rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx));
+	if (rc != PSCI_E_SUCCESS)
+		goto exit;
+
+	/*
+	 * Call the cpu on handler registered by the Secure Payload Dispatcher
+	 * to let it do any bookkeeping. If the handler encounters an error,
+	 * it's expected to assert within.
+	 */
+	if (psci_spd_pm && psci_spd_pm->svc_on)
+		psci_spd_pm->svc_on(target_cpu);
+
+	/*
+	 * Set the Affinity info state of the target cpu to ON_PENDING.
+	 * Flush aff_info_state as it will be accessed with caches
+	 * turned OFF.
+	 */
+	psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
+	flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
+
+	/*
+	 * The cache line invalidation by the target CPU after setting the
+	 * state to OFF (see psci_do_cpu_off()), could cause the update to
+	 * aff_info_state to be invalidated. Retry the update if the target
+	 * CPU aff_info_state is not ON_PENDING.
+	 */
+	target_aff_state = psci_get_aff_info_state_by_idx(target_idx);
+	if (target_aff_state != AFF_STATE_ON_PENDING) {
+		assert(target_aff_state == AFF_STATE_OFF);
+		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
+		flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
+
+		assert(psci_get_aff_info_state_by_idx(target_idx) == AFF_STATE_ON_PENDING);
+	}
+
+	/*
+	 * Perform generic, architecture and platform specific handling.
+	 */
+	/*
+	 * Plat. management: Give the platform the current state
+	 * of the target cpu to allow it to perform the necessary
+	 * steps to power on.
+	 */
+	rc = psci_plat_pm_ops->pwr_domain_on(target_cpu);
+	assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
+
+	if (rc == PSCI_E_SUCCESS)
+		/* Store the re-entry information for the non-secure world. */
+		cm_init_context_by_index(target_idx, ep);
+	else {
+		/* Restore the state on error. */
+		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF);
+		flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
+	}
+
+exit:
+	psci_spin_unlock_cpu(target_idx);
+	return rc;
+}
+
+/*******************************************************************************
+ * This function finishes an earlier power on request. It is called by the
+ * common finisher routine in psci_common.c. The `state_info` is the
+ * psci_power_state from which this CPU has woken up.
+ ******************************************************************************/
+void psci_cpu_on_finish(unsigned int cpu_idx,
+			psci_power_state_t *state_info)
+{
+	/*
+	 * Plat. management: Perform the platform specific actions
+	 * for this cpu e.g. enabling the gic or zeroing the mailbox
+	 * register. The actual state of this cpu has already been
+	 * changed.
+	 */
+	psci_plat_pm_ops->pwr_domain_on_finish(state_info);
+
+	/*
+	 * Arch. management: Enable data cache and manage stack memory
+	 */
+	psci_do_pwrup_cache_maintenance();
+
+	/*
+	 * All the platform specific actions for turning this cpu
+	 * on have completed. Perform enough arch. initialization
+	 * to run in the non-secure address space.
+	 */
+	psci_arch_setup();
+
+	/*
+	 * Lock the CPU spin lock to make sure that the context initialization
+	 * is done. Since the lock is only used in this function to create
+	 * a synchronization point with cpu_on_start(), it can be released
+	 * immediately.
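+	 *
+	 * Schematically (illustrative only):
+	 *
+	 *	cpu_on_start (caller)		cpu_on_finish (target)
+	 *	  spin_lock(target)
+	 *	  cm_init_context_by_index()	  spin_lock(self) <- spins
+	 *	  spin_unlock(target)		  ...resumes...
+	 *					  spin_unlock(self)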
+	 */
+	psci_spin_lock_cpu(cpu_idx);
+	psci_spin_unlock_cpu(cpu_idx);
+
+	/* Ensure we have been explicitly woken up by another cpu */
+	assert(psci_get_aff_info_state() == AFF_STATE_ON_PENDING);
+
+	/*
+	 * Call the cpu on finish handler registered by the Secure Payload
+	 * Dispatcher to let it do any bookkeeping. If the handler encounters an
+	 * error, it's expected to assert within.
+	 */
+	if (psci_spd_pm && psci_spd_pm->svc_on_finish)
+		psci_spd_pm->svc_on_finish(0);
+
+	/* Populate the mpidr field within the cpu node array */
+	/* This needs to be done only once */
+	psci_cpu_pd_nodes[cpu_idx].mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;
+
+	/*
+	 * Generic management: Now we just need to retrieve the
+	 * information that we had stashed away during the cpu_on
+	 * call to set this cpu on its way.
+	 */
+	cm_prepare_el3_exit(NON_SECURE);
+}
diff --git a/lib/psci/psci_private.h b/lib/psci/psci_private.h
new file mode 100644
index 0000000..4935214
--- /dev/null
+++ b/lib/psci/psci_private.h
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PSCI_PRIVATE_H__
+#define __PSCI_PRIVATE_H__
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/*
+ * The following helper macros abstract the interface to the Bakery
+ * Lock API.
+ */
+#define psci_lock_init(non_cpu_pd_node, idx)	\
+	((non_cpu_pd_node)[(idx)].lock_index = (idx))
+#define psci_lock_get(non_cpu_pd_node)		\
+	bakery_lock_get(&psci_locks[(non_cpu_pd_node)->lock_index])
+#define psci_lock_release(non_cpu_pd_node)	\
+	bakery_lock_release(&psci_locks[(non_cpu_pd_node)->lock_index])
+
+/*
+ * The PSCI capabilities which are provided by the generic code and do not
+ * depend on the platform or SPD capabilities.
+ */
+#define PSCI_GENERIC_CAP	\
+	(define_psci_cap(PSCI_VERSION) |		\
+	define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
+	define_psci_cap(PSCI_FEATURES))
+
+/*
+ * The PSCI capabilities mask for 64 bit functions.
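+ * For example, when psci_features() is queried with an SMC64 function id,
+ * psci_caps is first masked with PSCI_CAP_64BIT_MASK so that only 64-bit
+ * capable calls are reported as supported.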
+ */
+#define PSCI_CAP_64BIT_MASK	\
+	(define_psci_cap(PSCI_CPU_SUSPEND_AARCH64) |	\
+	define_psci_cap(PSCI_CPU_ON_AARCH64) |		\
+	define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
+	define_psci_cap(PSCI_MIG_AARCH64) |		\
+	define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) |	\
+	define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64) |	\
+	define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64) |	\
+	define_psci_cap(PSCI_STAT_COUNT_AARCH64))
+
+/*
+ * Helper macros to get/set the fields of PSCI per-cpu data.
+ */
+#define psci_set_aff_info_state(aff_state) \
+		set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state)
+#define psci_get_aff_info_state() \
+		get_cpu_data(psci_svc_cpu_data.aff_info_state)
+#define psci_get_aff_info_state_by_idx(idx) \
+		get_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state)
+#define psci_set_aff_info_state_by_idx(idx, aff_state) \
+		set_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state,\
+					aff_state)
+#define psci_get_suspend_pwrlvl() \
+		get_cpu_data(psci_svc_cpu_data.target_pwrlvl)
+#define psci_set_suspend_pwrlvl(target_lvl) \
+		set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl)
+#define psci_set_cpu_local_state(state) \
+		set_cpu_data(psci_svc_cpu_data.local_state, state)
+#define psci_get_cpu_local_state() \
+		get_cpu_data(psci_svc_cpu_data.local_state)
+#define psci_get_cpu_local_state_by_idx(idx) \
+		get_cpu_data_by_index(idx, psci_svc_cpu_data.local_state)
+
+/*
+ * Helper macros for the CPU level spinlocks
+ */
+#define psci_spin_lock_cpu(idx)	spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock)
+#define psci_spin_unlock_cpu(idx) spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock)
+
+/* Helper macro to identify a CPU standby request in PSCI Suspend call */
+#define is_cpu_standby_req(is_power_down_state, retn_lvl) \
+		(((!(is_power_down_state)) && ((retn_lvl) == 0)) ? 1 : 0)
+
+/* Following are used as ID's to capture time-stamp */
+#define PSCI_STAT_ID_ENTER_LOW_PWR	0
+#define PSCI_STAT_ID_EXIT_LOW_PWR	1
+#define PSCI_STAT_TOTAL_IDS		2
+
+/* Declare PMF service functions for PSCI */
+PMF_DECLARE_CAPTURE_TIMESTAMP(psci_svc)
+PMF_DECLARE_GET_TIMESTAMP(psci_svc)
+
+/*******************************************************************************
+ * The following two data structures implement the power domain tree. The tree
+ * is used to track the state of all the nodes i.e. power domain instances
+ * described by the platform. The tree consists of nodes that describe CPU power
+ * domains i.e. leaf nodes and all other power domains which are parents of a
+ * CPU power domain i.e. non-leaf nodes.
+ ******************************************************************************/
+typedef struct non_cpu_pwr_domain_node {
+	/*
+	 * Index of the first CPU power domain node at level 0 which has this
+	 * node as its parent.
+	 */
+	unsigned int cpu_start_idx;
+
+	/*
+	 * Number of CPU power domains which are siblings of the domain indexed
+	 * by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
+	 * -> cpu_start_idx + ncpus' have this node as their parent.
+	 */
+	unsigned int ncpus;
+
+	/*
+	 * Index of the parent power domain node.
+	 * TODO: Figure out whether using a pointer is more efficient.
+	 */
+	unsigned int parent_node;
+
+	plat_local_state_t local_state;
+
+	unsigned char level;
+
+	/* For indexing the psci_lock array */
+	unsigned char lock_index;
+} non_cpu_pd_node_t;
+
+typedef struct cpu_pwr_domain_node {
+	u_register_t mpidr;
+
+	/*
+	 * Index of the parent power domain node.
+	 * TODO: Figure out whether using a pointer is more efficient.
+ */ + unsigned int parent_node; + + /* + * A CPU power domain does not require state coordination like its + * parent power domains. Hence this node does not include a bakery + * lock. A spinlock is required by the CPU_ON handler to prevent a race + * when multiple CPUs try to turn ON the same target CPU. + */ + spinlock_t cpu_lock; +} cpu_pd_node_t; + +/******************************************************************************* + * Data prototypes + ******************************************************************************/ +extern const plat_psci_ops_t *psci_plat_pm_ops; +extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]; +extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT]; +extern unsigned int psci_caps; + +/* One bakery lock is required for each non-cpu power domain */ +DECLARE_BAKERY_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]); + +/******************************************************************************* + * SPD's power management hooks registered with PSCI + ******************************************************************************/ +extern const spd_pm_ops_t *psci_spd_pm; + +/******************************************************************************* + * Function prototypes + ******************************************************************************/ +/* Private exported functions from psci_common.c */ +int psci_validate_power_state(unsigned int power_state, + psci_power_state_t *state_info); +void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info); +int psci_validate_mpidr(u_register_t mpidr); +void psci_init_req_local_pwr_states(void); +int psci_validate_entry_point(entry_point_info_t *ep, + uintptr_t entrypoint, u_register_t context_id); +void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx, + unsigned int end_lvl, + unsigned int node_index[]); +void psci_do_state_coordination(unsigned int end_pwrlvl, + psci_power_state_t *state_info); +void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl, + unsigned int cpu_idx); +void psci_release_pwr_domain_locks(unsigned int end_pwrlvl, + unsigned int cpu_idx); +int psci_validate_suspend_req(const psci_power_state_t *state_info, + unsigned int is_power_down_state_req); +unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info); +unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info); +void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl); +void psci_print_power_domain_map(void); +unsigned int psci_is_last_on_cpu(void); +int psci_spd_migrate_info(u_register_t *mpidr); + +/* Private exported functions from psci_on.c */ +int psci_cpu_on_start(u_register_t target_cpu, + entry_point_info_t *ep); + +void psci_cpu_on_finish(unsigned int cpu_idx, + psci_power_state_t *state_info); + +/* Private exported functions from psci_off.c */ +int psci_do_cpu_off(unsigned int end_pwrlvl); + +/* Private exported functions from psci_suspend.c */ +void psci_cpu_suspend_start(entry_point_info_t *ep, + unsigned int end_pwrlvl, + psci_power_state_t *state_info, + unsigned int is_power_down_state_req); + +void psci_cpu_suspend_finish(unsigned int cpu_idx, + psci_power_state_t *state_info); + +/* Private exported functions from psci_helpers.S */ +void psci_do_pwrdown_cache_maintenance(unsigned int pwr_level); +void psci_do_pwrup_cache_maintenance(void); + +/* Private exported functions from psci_system_off.c */ +void __dead2 psci_system_off(void); +void __dead2 psci_system_reset(void); + +/* Private exported functions from psci_stat.c 
*/
+void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
+			const psci_power_state_t *state_info);
+void psci_stats_update_pwr_up(unsigned int end_pwrlvl,
+			const psci_power_state_t *state_info,
+			unsigned int flags);
+u_register_t psci_stat_residency(u_register_t target_cpu,
+			unsigned int power_state);
+u_register_t psci_stat_count(u_register_t target_cpu,
+			unsigned int power_state);
+
+#endif /* __PSCI_PRIVATE_H__ */
diff --git a/lib/psci/psci_setup.c b/lib/psci/psci_setup.c
new file mode 100644
index 0000000..d35e000
--- /dev/null
+++ b/lib/psci/psci_setup.c
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "psci_private.h"
+
+/*******************************************************************************
+ * Per cpu non-secure contexts used to program the architectural state prior
+ * to return to the normal world.
+ * TODO: Use the memory allocator to set aside memory for the contexts instead
+ * of relying on platform defined constants.
+ ******************************************************************************/
+static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT];
+
+/******************************************************************************
+ * Define the psci capability variable.
+ *****************************************************************************/
+unsigned int psci_caps;
+
+/*******************************************************************************
+ * Function which initializes the 'psci_non_cpu_pd_nodes' or the
+ * 'psci_cpu_pd_nodes' corresponding to the power level.
+ ******************************************************************************/
+static void psci_init_pwr_domain_node(unsigned int node_idx,
+					unsigned int parent_idx,
+					unsigned int level)
+{
+	if (level > PSCI_CPU_PWR_LVL) {
+		psci_non_cpu_pd_nodes[node_idx].level = level;
+		psci_lock_init(psci_non_cpu_pd_nodes, node_idx);
+		psci_non_cpu_pd_nodes[node_idx].parent_node = parent_idx;
+		psci_non_cpu_pd_nodes[node_idx].local_state =
+						PLAT_MAX_OFF_STATE;
+	} else {
+		psci_cpu_data_t *svc_cpu_data;
+
+		psci_cpu_pd_nodes[node_idx].parent_node = parent_idx;
+
+		/* Initialize with an invalid mpidr */
+		psci_cpu_pd_nodes[node_idx].mpidr = PSCI_INVALID_MPIDR;
+
+		svc_cpu_data =
+			&(_cpu_data_by_index(node_idx)->psci_svc_cpu_data);
+
+		/* Set the Affinity Info for the cores as OFF */
+		svc_cpu_data->aff_info_state = AFF_STATE_OFF;
+
+		/* Invalidate the suspend level for the cpu */
+		svc_cpu_data->target_pwrlvl = PSCI_INVALID_PWR_LVL;
+
+		/* Set the power state to OFF state */
+		svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;
+
+		flush_dcache_range((uintptr_t)svc_cpu_data,
+						sizeof(*svc_cpu_data));
+
+		cm_set_context_by_index(node_idx,
+					(void *) &psci_ns_context[node_idx],
+					NON_SECURE);
+	}
+}
+
+/*******************************************************************************
+ * This function updates the cpu_start_idx and ncpus fields for each node in
+ * psci_non_cpu_pd_nodes[]. It does so by comparing the parent nodes of each of
+ * the CPUs and checking whether they match the parent of the previous
+ * CPU. The basic assumption for this to work is that children of the same
+ * parent are allocated adjacent indices. The platform should ensure this
+ * through proper mapping of the CPUs to indices via plat_core_pos_by_mpidr()
+ * and plat_my_core_pos() APIs.
+ ******************************************************************************/
+static void psci_update_pwrlvl_limits(void)
+{
+	int j;
+	unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0};
+	unsigned int temp_index[PLAT_MAX_PWR_LVL], cpu_idx;
+
+	for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
+		psci_get_parent_pwr_domain_nodes(cpu_idx,
+						 PLAT_MAX_PWR_LVL,
+						 temp_index);
+		for (j = PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
+			if (temp_index[j] != nodes_idx[j]) {
+				nodes_idx[j] = temp_index[j];
+				psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx
+					= cpu_idx;
+			}
+			psci_non_cpu_pd_nodes[nodes_idx[j]].ncpus++;
+		}
+	}
+}
+
+/*******************************************************************************
+ * Core routine to populate the power domain tree. The tree descriptor passed
+ * by the platform is populated breadth-first and the first entry in the map
+ * gives the number of root power domains. The parent nodes of the root nodes
+ * will point to an invalid entry (-1).
+ ******************************************************************************/
+static void populate_power_domain_tree(const unsigned char *topology)
+{
+	unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl;
+	unsigned int node_index = 0, parent_node_index = 0, num_children;
+	int level = PLAT_MAX_PWR_LVL;
+
+	/*
+	 * For each level the inputs are:
+	 * - number of nodes at this level in plat_array i.e. num_nodes_at_lvl
+	 *   This is the sum of values of nodes at the parent level.
+	 * - Index of first entry at this level in the plat_array i.e.
+	 *   parent_node_index.
+	 * - Index of first free entry in psci_non_cpu_pd_nodes[] or
+	 *   psci_cpu_pd_nodes[] i.e. node_index depending upon the level.
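+	 *
+	 * As an illustration (hypothetical values, not from a real platform),
+	 * the descriptor {1, 2, 2, 2} encodes one root (system) domain with
+	 * two clusters of two CPUs each: level 2 consumes the leading 1,
+	 * level 1 consumes the first 2, and level 0 consumes the two
+	 * trailing 2s.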
+	 */
+	while (level >= PSCI_CPU_PWR_LVL) {
+		num_nodes_at_next_lvl = 0;
+		/*
+		 * For each entry (parent node) at this level in the plat_array:
+		 * - Find the number of children
+		 * - Allocate a node in a power domain array for each child
+		 * - Set the parent of the child to the parent_node_index - 1
+		 * - Increment parent_node_index to point to the next parent
+		 * - Accumulate the number of children at next level.
+		 */
+		for (i = 0; i < num_nodes_at_lvl; i++) {
+			assert(parent_node_index <=
+					PSCI_NUM_NON_CPU_PWR_DOMAINS);
+			num_children = topology[parent_node_index];
+
+			for (j = node_index;
+				j < node_index + num_children; j++)
+				psci_init_pwr_domain_node(j,
+						parent_node_index - 1,
+						level);
+
+			node_index = j;
+			num_nodes_at_next_lvl += num_children;
+			parent_node_index++;
+		}
+
+		num_nodes_at_lvl = num_nodes_at_next_lvl;
+		level--;
+
+		/* Reset the index for the cpu power domain array */
+		if (level == PSCI_CPU_PWR_LVL)
+			node_index = 0;
+	}
+
+	/* Validate the sanity of the array exported by the platform */
+	assert(j == PLATFORM_CORE_COUNT);
+}
+
+/*******************************************************************************
+ * This function does the architectural setup and takes the warm boot
+ * entry-point `mailbox_ep` as an argument. The function also initializes the
+ * power domain topology tree by querying the platform. The power domain nodes
+ * higher than the CPU are populated in the array psci_non_cpu_pd_nodes[] and
+ * the CPU power domains are populated in psci_cpu_pd_nodes[]. The platform
+ * exports its static topology map through the
+ * plat_get_power_domain_tree_desc() API. The algorithm populates the
+ * psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this
+ * topology map. On a platform that implements two clusters of 2 cpus each,
+ * and supports 3 domain levels, the populated psci_non_cpu_pd_nodes would
+ * look like this:
+ *
+ * ---------------------------------------------------
+ * | system node | cluster 0 node  | cluster 1 node  |
+ * ---------------------------------------------------
+ *
+ * And the populated psci_cpu_pd_nodes would look like this:
+ * <-  cpus cluster0   -><-   cpus cluster1   ->
+ * ------------------------------------------------
+ * |   CPU 0   |   CPU 1   |   CPU 2   |   CPU 3  |
+ * ------------------------------------------------
+ ******************************************************************************/
+int psci_setup(uintptr_t mailbox_ep)
+{
+	const unsigned char *topology_tree;
+
+	/* Do the Architectural initialization */
+	psci_arch_setup();
+
+	/* Query the topology map from the platform */
+	topology_tree = plat_get_power_domain_tree_desc();
+
+	/* Populate the power domain arrays using the platform topology map */
+	populate_power_domain_tree(topology_tree);
+
+	/* Update the CPU limits for each node in psci_non_cpu_pd_nodes */
+	psci_update_pwrlvl_limits();
+
+	/* Populate the mpidr field of cpu node for this CPU */
+	psci_cpu_pd_nodes[plat_my_core_pos()].mpidr =
+		read_mpidr() & MPIDR_AFFINITY_MASK;
+
+	psci_init_req_local_pwr_states();
+
+	/*
+	 * Set the requested and target state of this CPU and all the higher
+	 * power domain levels for this CPU to run.
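+	 * In effect, the local state of this (cold boot) CPU and of each of
+	 * its ancestor power domains up to PLAT_MAX_PWR_LVL is marked as RUN,
+	 * since this CPU is known to be online at this point.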
+	 */
+	psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);
+
+	assert(mailbox_ep);
+	plat_setup_psci_ops(mailbox_ep, &psci_plat_pm_ops);
+	assert(psci_plat_pm_ops);
+
+	/* Initialize the psci capability */
+	psci_caps = PSCI_GENERIC_CAP;
+
+	if (psci_plat_pm_ops->pwr_domain_off)
+		psci_caps |= define_psci_cap(PSCI_CPU_OFF);
+	if (psci_plat_pm_ops->pwr_domain_on &&
+			psci_plat_pm_ops->pwr_domain_on_finish)
+		psci_caps |= define_psci_cap(PSCI_CPU_ON_AARCH64);
+	if (psci_plat_pm_ops->pwr_domain_suspend &&
+			psci_plat_pm_ops->pwr_domain_suspend_finish) {
+		psci_caps |= define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
+		if (psci_plat_pm_ops->get_sys_suspend_power_state)
+			psci_caps |= define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
+	}
+	if (psci_plat_pm_ops->system_off)
+		psci_caps |= define_psci_cap(PSCI_SYSTEM_OFF);
+	if (psci_plat_pm_ops->system_reset)
+		psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET);
+
+#if ENABLE_PSCI_STAT
+	psci_caps |= define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64);
+	psci_caps |= define_psci_cap(PSCI_STAT_COUNT_AARCH64);
+#endif
+
+	return 0;
+}
+
+/*******************************************************************************
+ * This duplicates what the primary cpu did after a cold boot in BL1. The same
+ * needs to be done when a cpu is hotplugged in. This function could also
+ * override any EL3 setup done by BL1 as this code resides in rw memory.
+ ******************************************************************************/
+void psci_arch_setup(void)
+{
+	/* Program the counter frequency */
+	write_cntfrq_el0(plat_get_syscnt_freq2());
+
+	/* Initialize the cpu_ops pointer. */
+	init_cpu_ops();
+}
diff --git a/lib/psci/psci_stat.c b/lib/psci/psci_stat.c
new file mode 100644
index 0000000..155bbb0
--- /dev/null
+++ b/lib/psci/psci_stat.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <pmf.h>
+#include "psci_private.h"
+
+#ifndef PLAT_MAX_PWR_LVL_STATES
+#define PLAT_MAX_PWR_LVL_STATES	2
+#endif
+
+/* Number of ticks elapsed in one second for a signal of 1 MHz */
+#define MHZ_TICKS_PER_SEC	1000000
+
+/* The following structure is used for PSCI STAT */
+typedef struct psci_stat {
+	u_register_t residency;
+	u_register_t count;
+} psci_stat_t;
+
+/*
+ * The following is used to keep track of the last CPU
+ * that goes to power down in non cpu power domains.
+ */
+static int last_cpu_in_non_cpu_pd[PSCI_NUM_NON_CPU_PWR_DOMAINS] = {-1};
+
+/*
+ * The following are used to store PSCI STAT values for
+ * CPU and non CPU power domains.
+ */
+static psci_stat_t psci_cpu_stat[PLATFORM_CORE_COUNT]
+				[PLAT_MAX_PWR_LVL_STATES];
+static psci_stat_t psci_non_cpu_stat[PSCI_NUM_NON_CPU_PWR_DOMAINS]
+				[PLAT_MAX_PWR_LVL_STATES];
+
+/* Register PMF PSCI service */
+PMF_REGISTER_SERVICE(psci_svc, PMF_PSCI_STAT_SVC_ID,
+	PSCI_STAT_TOTAL_IDS, PMF_STORE_ENABLE)
+
+/* The divisor to use to convert a raw timestamp into microseconds */
+u_register_t residency_div;
+
+/*
+ * This macro calculates the stats residency in microseconds,
+ * taking into account the wrap-around condition.
+ */
+#define calc_stat_residency(_pwrupts, _pwrdnts, _res)		\
+	do {							\
+		if (_pwrupts < _pwrdnts)			\
+			_res = UINT64_MAX - _pwrdnts + _pwrupts;\
+		else						\
+			_res = _pwrupts - _pwrdnts;		\
+		/* Convert timestamp into microseconds */	\
+		_res = _res/residency_div;			\
+	} while (0)
+
+/*
+ * This function returns the index into the `psci_stat_t` array given the
+ * local power state and power domain level. If the platform implements the
+ * `get_pwr_lvl_state_idx` pm hook, then that will be used to return the index.
+ */
+static int get_stat_idx(plat_local_state_t local_state, int pwr_lvl)
+{
+	int idx;
+
+	if (psci_plat_pm_ops->get_pwr_lvl_state_idx == NULL) {
+		assert(PLAT_MAX_PWR_LVL_STATES == 2);
+		if (is_local_state_retn(local_state))
+			return 0;
+
+		assert(is_local_state_off(local_state));
+		return 1;
+	}
+
+	idx = psci_plat_pm_ops->get_pwr_lvl_state_idx(local_state, pwr_lvl);
+	assert((idx >= 0) && (idx < PLAT_MAX_PWR_LVL_STATES));
+	return idx;
+}
+
+/*******************************************************************************
+ * This function is passed the target local power states for each power
+ * domain (state_info) between the current CPU domain and its ancestors until
+ * the target power level (end_pwrlvl).
+ *
+ * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
+ * updates `last_cpu_in_non_cpu_pd[]` with the id of the last CPU to power
+ * down.
+ *
+ * This function will only be invoked with data cache enabled and while
+ * powering down a core.
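+ *
+ * For example (hypothetical indices): if CPU1 is the last core in cluster0
+ * to power down and the cluster is entering an OFF state, then
+ * last_cpu_in_non_cpu_pd[] for the cluster0 node records 1, so that the
+ * cluster's residency can later be measured from CPU1's power-down
+ * time-stamp.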
+ ******************************************************************************/
+void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
+			const psci_power_state_t *state_info)
+{
+	int lvl, parent_idx, cpu_idx = plat_my_core_pos();
+
+	assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
+	assert(state_info);
+
+	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
+
+	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
+
+		/* Break early if the target power state is RUN */
+		if (is_local_state_run(state_info->pwr_domain_state[lvl]))
+			break;
+
+		/*
+		 * The power domain is entering a low power state, so this is
+		 * the last CPU for this power domain
+		 */
+		last_cpu_in_non_cpu_pd[parent_idx] = cpu_idx;
+
+		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
+	}
+
+}
+
+/*******************************************************************************
+ * This function updates the PSCI STATS (residency time and count) for CPU
+ * and NON-CPU power domains.
+ * It is called with caches enabled and locks acquired (for NON-CPU domains).
+ ******************************************************************************/
+void psci_stats_update_pwr_up(unsigned int end_pwrlvl,
+			const psci_power_state_t *state_info,
+			unsigned int flags)
+{
+	int parent_idx, cpu_idx = plat_my_core_pos();
+	int lvl, stat_idx;
+	plat_local_state_t local_state;
+	unsigned long long pwrup_ts = 0, pwrdn_ts = 0;
+	u_register_t residency;
+
+	assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
+	assert(state_info);
+
+	/* Initialize the residency divisor if not already initialized */
+	if (!residency_div) {
+		/*
+		 * Pre-calculate the divisor so that it can be used directly
+		 * to convert a time-stamp into microseconds.
+		 */
+		residency_div = read_cntfrq_el0() / MHZ_TICKS_PER_SEC;
+		assert(residency_div);
+	}
+
+	/* Get power down time-stamp for current CPU */
+	PMF_GET_TIMESTAMP_BY_INDEX(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
+		cpu_idx, flags, pwrdn_ts);
+
+	/* In the case of the 1st power-on just return */
+	if (!pwrdn_ts)
+		return;
+
+	/* Get power up time-stamp for current CPU */
+	PMF_GET_TIMESTAMP_BY_INDEX(psci_svc, PSCI_STAT_ID_EXIT_LOW_PWR,
+		cpu_idx, flags, pwrup_ts);
+
+	/* Get the index into the stats array */
+	local_state = state_info->pwr_domain_state[PSCI_CPU_PWR_LVL];
+	stat_idx = get_stat_idx(local_state, PSCI_CPU_PWR_LVL);
+
+	/* Calculate stats residency */
+	calc_stat_residency(pwrup_ts, pwrdn_ts, residency);
+
+	/* Update CPU stats. */
+	psci_cpu_stat[cpu_idx][stat_idx].residency += residency;
+	psci_cpu_stat[cpu_idx][stat_idx].count++;
+
+	/*
+	 * Check what power domains above CPU were off
+	 * prior to this CPU powering on.
+ */ + parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node; + for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) { + local_state = state_info->pwr_domain_state[lvl]; + if (is_local_state_run(local_state)) { + /* Break early */ + break; + } + + assert(last_cpu_in_non_cpu_pd[parent_idx] != -1); + + /* Get power down time-stamp for last CPU */ + PMF_GET_TIMESTAMP_BY_INDEX(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR, + last_cpu_in_non_cpu_pd[parent_idx], + flags, pwrdn_ts); + + /* Initialize back to reset value */ + last_cpu_in_non_cpu_pd[parent_idx] = -1; + + /* Get the index into the stats array */ + stat_idx = get_stat_idx(local_state, lvl); + + /* Calculate stats residency */ + calc_stat_residency(pwrup_ts, pwrdn_ts, residency); + + /* Update non cpu stats */ + psci_non_cpu_stat[parent_idx][stat_idx].residency += residency; + psci_non_cpu_stat[parent_idx][stat_idx].count++; + + parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node; + } + +} + +/******************************************************************************* + * This function returns the appropriate count and residency time of the + * local state for the highest power level expressed in the `power_state` + * for the node represented by `target_cpu`. + ******************************************************************************/ +int psci_get_stat(u_register_t target_cpu, unsigned int power_state, + psci_stat_t *psci_stat) +{ + int rc, pwrlvl, lvl, parent_idx, stat_idx, target_idx; + psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} }; + plat_local_state_t local_state; + + /* Validate the target_cpu parameter and determine the cpu index */ + target_idx = plat_core_pos_by_mpidr(target_cpu); + if (target_idx == -1) + return PSCI_E_INVALID_PARAMS; + + /* Validate the power_state parameter */ + if (!psci_plat_pm_ops->translate_power_state_by_mpidr) + rc = psci_validate_power_state(power_state, &state_info); + else + rc = psci_plat_pm_ops->translate_power_state_by_mpidr( + target_cpu, power_state, &state_info); + + if (rc != PSCI_E_SUCCESS) + return PSCI_E_INVALID_PARAMS; + + /* Find the highest power level */ + pwrlvl = psci_find_target_suspend_lvl(&state_info); + if (pwrlvl == PSCI_INVALID_PWR_LVL) + return PSCI_E_INVALID_PARAMS; + + /* Get the index into the stats array */ + local_state = state_info.pwr_domain_state[pwrlvl]; + stat_idx = get_stat_idx(local_state, pwrlvl); + + if (pwrlvl > PSCI_CPU_PWR_LVL) { + /* Get the power domain index */ + parent_idx = psci_cpu_pd_nodes[target_idx].parent_node; + for (lvl = PSCI_CPU_PWR_LVL + 1; lvl < pwrlvl; lvl++) + parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node; + + /* Get the non cpu power domain stats */ + *psci_stat = psci_non_cpu_stat[parent_idx][stat_idx]; + } else { + /* Get the cpu power domain stats */ + *psci_stat = psci_cpu_stat[target_idx][stat_idx]; + } + + return PSCI_E_SUCCESS; +} + +/* This is the top level function for PSCI_STAT_RESIDENCY SMC. */ +u_register_t psci_stat_residency(u_register_t target_cpu, + unsigned int power_state) +{ + psci_stat_t psci_stat; + + int rc = psci_get_stat(target_cpu, power_state, &psci_stat); + if (rc == PSCI_E_SUCCESS) + return psci_stat.residency; + else + return 0; +} + +/* This is the top level function for PSCI_STAT_COUNT SMC. 
*/ +u_register_t psci_stat_count(u_register_t target_cpu, + unsigned int power_state) +{ + psci_stat_t psci_stat; + + int rc = psci_get_stat(target_cpu, power_state, &psci_stat); + if (rc == PSCI_E_SUCCESS) + return psci_stat.count; + else + return 0; +} diff --git a/lib/psci/psci_suspend.c b/lib/psci/psci_suspend.c new file mode 100644 index 0000000..904a4e7 --- /dev/null +++ b/lib/psci/psci_suspend.c @@ -0,0 +1,283 @@ +/* + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "psci_private.h" + +/******************************************************************************* + * This function does generic and platform specific operations after a wake-up + * from standby/retention states at multiple power levels. + ******************************************************************************/ +static void psci_suspend_to_standby_finisher(unsigned int cpu_idx, + psci_power_state_t *state_info, + unsigned int end_pwrlvl) +{ + psci_acquire_pwr_domain_locks(end_pwrlvl, + cpu_idx); + + /* + * Plat. management: Allow the platform to do operations + * on waking up from retention. + */ + psci_plat_pm_ops->pwr_domain_suspend_finish(state_info); + + /* + * Set the requested and target state of this CPU and all the higher + * power domain levels for this CPU to run. + */ + psci_set_pwr_domains_to_run(end_pwrlvl); + + psci_release_pwr_domain_locks(end_pwrlvl, + cpu_idx); +} + +/******************************************************************************* + * This function does generic and platform specific suspend to power down + * operations. 
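+ * Broadly: the target power level is saved and flushed (it is read with the
+ * data cache disabled on power up), the SPD is notified, the non-secure
+ * re-entry context is saved and the power-down cache maintenance is done.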
+ ******************************************************************************/
+static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
+					  entry_point_info_t *ep,
+					  psci_power_state_t *state_info)
+{
+	unsigned int max_off_lvl = psci_find_max_off_lvl(state_info);
+
+	/* Save PSCI target power level for the suspend finisher handler */
+	psci_set_suspend_pwrlvl(end_pwrlvl);
+
+	/*
+	 * Flush the target power level as it will be accessed on power up with
+	 * Data cache disabled.
+	 */
+	flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);
+
+	/*
+	 * Call the cpu suspend handler registered by the Secure Payload
+	 * Dispatcher to let it do any book-keeping. If the handler encounters
+	 * an error, it's expected to assert within.
+	 */
+	if (psci_spd_pm && psci_spd_pm->svc_suspend)
+		psci_spd_pm->svc_suspend(max_off_lvl);
+
+	/*
+	 * Store the re-entry information for the non-secure world.
+	 */
+	cm_init_my_context(ep);
+
+	/*
+	 * Arch. management. Perform the necessary steps to flush all
+	 * cpu caches. Currently we assume that the power level corresponds
+	 * to the cache level.
+	 * TODO : Introduce a mechanism to query the cache level to flush
+	 * and the cpu-ops power down to perform from the platform.
+	 */
+	psci_do_pwrdown_cache_maintenance(max_off_lvl);
+}
+
+/*******************************************************************************
+ * Top level handler which is called when a cpu wants to suspend its execution.
+ * It is assumed that along with suspending the cpu power domain, power domains
+ * at higher levels up to the target power level will be suspended as well. It
+ * coordinates with the platform to negotiate the target state for each of
+ * the power domain levels up to the target power domain level. It then
+ * performs the generic, architectural and platform setup and state management
+ * required to suspend that power domain level and the power domain levels
+ * below it. e.g. For a cpu that's to be suspended, it could mean programming
+ * the power controller, whereas for a cluster that's to be suspended, it will
+ * call the platform specific code which will disable coherency at the
+ * interconnect level if the cpu is the last in the cluster and also program
+ * the power controller.
+ *
+ * All the required parameter checks are performed at the beginning. After the
+ * state transition has been done, no further error is expected and it is not
+ * possible to undo any of the actions taken beyond that point.
+ ******************************************************************************/
+void psci_cpu_suspend_start(entry_point_info_t *ep,
+			unsigned int end_pwrlvl,
+			psci_power_state_t *state_info,
+			unsigned int is_power_down_state)
+{
+	int skip_wfi = 0;
+	unsigned int idx = plat_my_core_pos();
+
+	/*
+	 * This function must only be called on platforms where the
+	 * CPU_SUSPEND platform hooks have been implemented.
+	 */
+	assert(psci_plat_pm_ops->pwr_domain_suspend &&
+			psci_plat_pm_ops->pwr_domain_suspend_finish);
+
+	/*
+	 * This function acquires the lock corresponding to each power
+	 * level so that by the time all locks are taken, a consistent
+	 * snapshot of the system topology has been captured and state
+	 * management can be done safely.
+	 */
+	psci_acquire_pwr_domain_locks(end_pwrlvl,
+				      idx);
+
+	/*
+	 * We check if there are any pending interrupts after the delay
+	 * introduced by lock contention to increase the chances of early
+	 * detection that a wake-up interrupt has fired.
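+	 * If ISR_EL1 reads non-zero below, an interrupt is already pending;
+	 * the suspend request is then abandoned before any state is changed,
+	 * which is cheaper than entering and immediately exiting the low
+	 * power state.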
+	 */
+	if (read_isr_el1()) {
+		skip_wfi = 1;
+		goto exit;
+	}
+
+	/*
+	 * This function is passed the requested state info and
+	 * it returns the negotiated state info for each power level up to
+	 * the end level specified.
+	 */
+	psci_do_state_coordination(end_pwrlvl, state_info);
+
+#if ENABLE_PSCI_STAT
+	/* Update the last cpu for each level till end_pwrlvl */
+	psci_stats_update_pwr_down(end_pwrlvl, state_info);
+#endif
+
+	if (is_power_down_state)
+		psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info);
+
+	/*
+	 * Plat. management: Allow the platform to perform the
+	 * necessary actions to turn off this cpu e.g. set the
+	 * platform defined mailbox with the psci entrypoint,
+	 * program the power controller etc.
+	 */
+	psci_plat_pm_ops->pwr_domain_suspend(state_info);
+
+#if ENABLE_PSCI_STAT
+	/*
+	 * Capture time-stamp while entering low power state.
+	 * No cache maintenance needed because caches are off
+	 * and writes are direct to main memory.
+	 */
+	PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
+		PMF_NO_CACHE_MAINT);
+#endif
+
+exit:
+	/*
+	 * Release the locks corresponding to each power level in the
+	 * reverse order to which they were acquired.
+	 */
+	psci_release_pwr_domain_locks(end_pwrlvl,
+				      idx);
+	if (skip_wfi)
+		return;
+
+	if (is_power_down_state) {
+		/* The function calls below must not return */
+		if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi)
+			psci_plat_pm_ops->pwr_domain_pwr_down_wfi(state_info);
+		else
+			psci_power_down_wfi();
+	}
+
+	/*
+	 * We will reach here if only retention/standby states have been
+	 * requested at multiple power levels. This means that the cpu
+	 * context will be preserved.
+	 */
+	wfi();
+
+	/*
+	 * After we wake up from context retaining suspend, call the
+	 * context retaining suspend finisher.
+	 */
+	psci_suspend_to_standby_finisher(idx, state_info, end_pwrlvl);
+}
+
+/*******************************************************************************
+ * The following functions finish an earlier suspend request. They
+ * are called by the common finisher routine in psci_common.c. The `state_info`
+ * is the psci_power_state from which this CPU has woken up.
+ ******************************************************************************/
+void psci_cpu_suspend_finish(unsigned int cpu_idx,
+			psci_power_state_t *state_info)
+{
+	unsigned int counter_freq;
+	unsigned int max_off_lvl;
+
+	/* Ensure we have been woken up from a suspended state */
+	assert(psci_get_aff_info_state() == AFF_STATE_ON && is_local_state_off(\
+		state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]));
+
+	/*
+	 * Plat. management: Perform the platform specific actions
+	 * before we change the state of the cpu e.g. enabling the
+	 * gic or zeroing the mailbox register. If anything goes
+	 * wrong then assert as there is no way to recover from this
+	 * situation.
+	 */
+	psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);
+
+	/*
+	 * Arch. management: Enable the data cache, manage stack memory and
+	 * restore the stashed EL3 architectural context from the 'cpu_context'
+	 * structure for this cpu.
+	 */
+	psci_do_pwrup_cache_maintenance();
+
+	/* Re-init the cntfrq_el0 register */
+	counter_freq = plat_get_syscnt_freq2();
+	write_cntfrq_el0(counter_freq);
+
+	/*
+	 * Call the cpu suspend finish handler registered by the Secure Payload
+	 * Dispatcher to let it do any bookkeeping.
If the handler encounters an
+	 * error, it's expected to assert within.
+	 */
+	if (psci_spd_pm && psci_spd_pm->svc_suspend) {
+		max_off_lvl = psci_find_max_off_lvl(state_info);
+		assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
+		psci_spd_pm->svc_suspend_finish(max_off_lvl);
+	}
+
+	/* Invalidate the suspend level for the cpu */
+	psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);
+
+	/*
+	 * Generic management: Now we just need to retrieve the
+	 * information that we had stashed away during the suspend
+	 * call to set this cpu on its way.
+	 */
+	cm_prepare_el3_exit(NON_SECURE);
+}
diff --git a/lib/psci/psci_system_off.c b/lib/psci/psci_system_off.c
new file mode 100644
index 0000000..de9ec64
--- /dev/null
+++ b/lib/psci/psci_system_off.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include "psci_private.h"
+
+void psci_system_off(void)
+{
+	psci_print_power_domain_map();
+
+	assert(psci_plat_pm_ops->system_off);
+
+	/* Notify the Secure Payload Dispatcher */
+	if (psci_spd_pm && psci_spd_pm->svc_system_off) {
+		psci_spd_pm->svc_system_off();
+	}
+
+	/* Call the platform specific hook */
+	psci_plat_pm_ops->system_off();
+
+	/* This function does not return. We should never get here */
+}
+
+void psci_system_reset(void)
+{
+	psci_print_power_domain_map();
+
+	assert(psci_plat_pm_ops->system_reset);
+
+	/* Notify the Secure Payload Dispatcher */
+	if (psci_spd_pm && psci_spd_pm->svc_system_reset) {
+		psci_spd_pm->svc_system_reset();
+	}
+
+	/* Call the platform specific hook */
+	psci_plat_pm_ops->system_reset();
+
+	/* This function does not return.
We should never get here */ +} diff --git a/lib/xlat_tables/aarch64/xlat_tables.c b/lib/xlat_tables/aarch64/xlat_tables.c index 19eb7d5..f432237 100644 --- a/lib/xlat_tables/aarch64/xlat_tables.c +++ b/lib/xlat_tables/aarch64/xlat_tables.c @@ -47,7 +47,6 @@ CASSERT(ADDR_SPACE_SIZE >= (1ull << 31) && ADDR_SPACE_SIZE <= (1ull << 39) && IS_POWER_OF_TWO(ADDR_SPACE_SIZE), assert_valid_addr_space_size); -#define UNSET_DESC ~0ul #define NUM_L1_ENTRIES (ADDR_SPACE_SIZE >> L1_XLAT_ADDRESS_SHIFT) static uint64_t l1_xlation_table[NUM_L1_ENTRIES] diff --git a/lib/xlat_tables/xlat_tables_common.c b/lib/xlat_tables/xlat_tables_common.c index 71e3efc..33784c2 100644 --- a/lib/xlat_tables/xlat_tables_common.c +++ b/lib/xlat_tables/xlat_tables_common.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -52,7 +53,7 @@ #define debug_print(...) ((void)0) #endif -#define UNSET_DESC ~0ul +#define UNSET_DESC ~0ull static uint64_t xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES] __aligned(XLAT_TABLE_SIZE) __section("xlat_table"); @@ -313,9 +314,9 @@ unsigned level_size_shift = L1_XLAT_ADDRESS_SHIFT - (level - 1) * XLAT_TABLE_ENTRIES_SHIFT; unsigned level_size = 1 << level_size_shift; - unsigned long long level_index_mask = - ((unsigned long long) XLAT_TABLE_ENTRIES_MASK) - << level_size_shift; + u_register_t level_index_mask = + (u_register_t)(((u_register_t) XLAT_TABLE_ENTRIES_MASK) + << level_size_shift); assert(level > 0 && level <= 3); @@ -357,7 +358,7 @@ /* Area not covered by a region so need finer table */ uint64_t *new_table = xlat_tables[next_xlat++]; assert(next_xlat <= MAX_XLAT_TABLES); - desc = TABLE_DESC | (uint64_t)new_table; + desc = TABLE_DESC | (uintptr_t)new_table; /* Recurse to fill in new table */ mm = init_xlation_table_inner(mm, base_va, diff --git a/plat/arm/board/fvp/aarch64/fvp_helpers.S b/plat/arm/board/fvp/aarch64/fvp_helpers.S index 884fee8..6a7ad23 100644 --- a/plat/arm/board/fvp/aarch64/fvp_helpers.S +++ b/plat/arm/board/fvp/aarch64/fvp_helpers.S @@ -127,7 +127,7 @@ endfunc plat_secondary_cold_boot_setup /* --------------------------------------------------------------------- - * unsigned long plat_get_my_entrypoint (void); + * uintptr_t plat_get_my_entrypoint (void); * * Main job of this routine is to distinguish between a cold and warm * boot. On FVP, this information can be queried from the power diff --git a/plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.c b/plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.c index e004281..c8df78c 100644 --- a/plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.c +++ b/plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -41,12 +41,12 @@ */ ARM_INSTANTIATE_LOCK -unsigned int fvp_pwrc_get_cpu_wkr(unsigned long mpidr) +unsigned int fvp_pwrc_get_cpu_wkr(u_register_t mpidr) { return PSYSR_WK(fvp_pwrc_read_psysr(mpidr)); } -unsigned int fvp_pwrc_read_psysr(unsigned long mpidr) +unsigned int fvp_pwrc_read_psysr(u_register_t mpidr) { unsigned int rc; arm_lock_get(); @@ -56,21 +56,21 @@ return rc; } -void fvp_pwrc_write_pponr(unsigned long mpidr) +void fvp_pwrc_write_pponr(u_register_t mpidr) { arm_lock_get(); mmio_write_32(PWRC_BASE + PPONR_OFF, (unsigned int) mpidr); arm_lock_release(); } -void fvp_pwrc_write_ppoffr(unsigned long mpidr) +void fvp_pwrc_write_ppoffr(u_register_t mpidr) { arm_lock_get(); mmio_write_32(PWRC_BASE + PPOFFR_OFF, (unsigned int) mpidr); arm_lock_release(); } -void fvp_pwrc_set_wen(unsigned long mpidr) +void fvp_pwrc_set_wen(u_register_t mpidr) { arm_lock_get(); mmio_write_32(PWRC_BASE + PWKUPR_OFF, @@ -78,7 +78,7 @@ arm_lock_release(); } -void fvp_pwrc_clr_wen(unsigned long mpidr) +void fvp_pwrc_clr_wen(u_register_t mpidr) { arm_lock_get(); mmio_write_32(PWRC_BASE + PWKUPR_OFF, @@ -86,7 +86,7 @@ arm_lock_release(); } -void fvp_pwrc_write_pcoffr(unsigned long mpidr) +void fvp_pwrc_write_pcoffr(u_register_t mpidr) { arm_lock_get(); mmio_write_32(PWRC_BASE + PCOFFR_OFF, (unsigned int) mpidr); diff --git a/plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.h b/plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.h index 3dc9aad..1dbf128 100644 --- a/plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.h +++ b/plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -64,13 +64,13 @@ /******************************************************************************* * Function & variable prototypes ******************************************************************************/ -void fvp_pwrc_write_pcoffr(unsigned long); -void fvp_pwrc_write_ppoffr(unsigned long); -void fvp_pwrc_write_pponr(unsigned long); -void fvp_pwrc_set_wen(unsigned long); -void fvp_pwrc_clr_wen(unsigned long); -unsigned int fvp_pwrc_read_psysr(unsigned long); -unsigned int fvp_pwrc_get_cpu_wkr(unsigned long); +void fvp_pwrc_write_pcoffr(u_register_t); +void fvp_pwrc_write_ppoffr(u_register_t); +void fvp_pwrc_write_pponr(u_register_t); +void fvp_pwrc_set_wen(u_register_t); +void fvp_pwrc_clr_wen(u_register_t); +unsigned int fvp_pwrc_read_psysr(u_register_t); +unsigned int fvp_pwrc_get_cpu_wkr(u_register_t); #endif /*__ASSEMBLY__*/ diff --git a/plat/arm/board/juno/aarch64/juno_helpers.S b/plat/arm/board/juno/aarch64/juno_helpers.S index 377b0cb..9291fa4 100644 --- a/plat/arm/board/juno/aarch64/juno_helpers.S +++ b/plat/arm/board/juno/aarch64/juno_helpers.S @@ -206,7 +206,7 @@ endfunc plat_reset_handler /* ----------------------------------------------------- - * unsigned int plat_arm_calc_core_pos(uint64_t mpidr) + * unsigned int plat_arm_calc_core_pos(u_register_t mpidr) * Helper function to calculate the core position. 
* ----------------------------------------------------- */ diff --git a/plat/arm/common/aarch64/arm_common.c b/plat/arm/common/aarch64/arm_common.c deleted file mode 100644 index 7c0b93d..0000000 --- a/plat/arm/common/aarch64/arm_common.c +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ -#include -#include -#include -#include -#include -#include -#include -#include - -extern const mmap_region_t plat_arm_mmap[]; - -/* Weak definitions may be overridden in specific ARM standard platform */ -#pragma weak plat_get_ns_image_entrypoint -#pragma weak plat_arm_get_mmap - -/* Conditionally provide a weak definition of plat_get_syscnt_freq2 to avoid - * conflicts with the definition in plat/common. */ -#if ERROR_DEPRECATED -#pragma weak plat_get_syscnt_freq2 -#else -#pragma weak plat_get_syscnt_freq -#endif - -/* - * Set up the page tables for the generic and platform-specific memory regions. - * The extents of the generic memory regions are specified by the function - * arguments and consist of: - * - Trusted SRAM seen by the BL image; - * - Code section; - * - Read-only data section; - * - Coherent memory region, if applicable. - */ -void arm_setup_page_tables(unsigned long total_base, - unsigned long total_size, - unsigned long code_start, - unsigned long code_limit, - unsigned long rodata_start, - unsigned long rodata_limit -#if USE_COHERENT_MEM - , - unsigned long coh_start, - unsigned long coh_limit -#endif - ) -{ - /* - * Map the Trusted SRAM with appropriate memory attributes. - * Subsequent mappings will adjust the attributes for specific regions. 
- */ - VERBOSE("Trusted SRAM seen by this BL image: %p - %p\n", - (void *) total_base, (void *) (total_base + total_size)); - mmap_add_region(total_base, total_base, - total_size, - MT_MEMORY | MT_RW | MT_SECURE); - - /* Re-map the code section */ - VERBOSE("Code region: %p - %p\n", - (void *) code_start, (void *) code_limit); - mmap_add_region(code_start, code_start, - code_limit - code_start, - MT_CODE | MT_SECURE); - - /* Re-map the read-only data section */ - VERBOSE("Read-only data region: %p - %p\n", - (void *) rodata_start, (void *) rodata_limit); - mmap_add_region(rodata_start, rodata_start, - rodata_limit - rodata_start, - MT_RO_DATA | MT_SECURE); - -#if USE_COHERENT_MEM - /* Re-map the coherent memory region */ - VERBOSE("Coherent region: %p - %p\n", - (void *) coh_start, (void *) coh_limit); - mmap_add_region(coh_start, coh_start, - coh_limit - coh_start, - MT_DEVICE | MT_RW | MT_SECURE); -#endif - - /* Now (re-)map the platform-specific memory regions */ - mmap_add(plat_arm_get_mmap()); - - /* Create the page tables to reflect the above mappings */ - init_xlat_tables(); -} - -uintptr_t plat_get_ns_image_entrypoint(void) -{ - return PLAT_ARM_NS_IMAGE_OFFSET; -} - -/******************************************************************************* - * Gets SPSR for BL32 entry - ******************************************************************************/ -uint32_t arm_get_spsr_for_bl32_entry(void) -{ - /* - * The Secure Payload Dispatcher service is responsible for - * setting the SPSR prior to entry into the BL32 image. - */ - return 0; -} - -/******************************************************************************* - * Gets SPSR for BL33 entry - ******************************************************************************/ -uint32_t arm_get_spsr_for_bl33_entry(void) -{ - unsigned long el_status; - unsigned int mode; - uint32_t spsr; - - /* Figure out what mode we enter the non-secure world in */ - el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT; - el_status &= ID_AA64PFR0_ELX_MASK; - - mode = (el_status) ? MODE_EL2 : MODE_EL1; - - /* - * TODO: Consider the possibility of specifying the SPSR in - * the FIP ToC and allowing the platform to have a say as - * well. - */ - spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); - return spsr; -} - -/******************************************************************************* - * Configures access to the system counter timer module. - ******************************************************************************/ -#ifdef ARM_SYS_TIMCTL_BASE -void arm_configure_sys_timer(void) -{ - unsigned int reg_val; - -#if ARM_CONFIG_CNTACR - reg_val = (1 << CNTACR_RPCT_SHIFT) | (1 << CNTACR_RVCT_SHIFT); - reg_val |= (1 << CNTACR_RFRQ_SHIFT) | (1 << CNTACR_RVOFF_SHIFT); - reg_val |= (1 << CNTACR_RWVT_SHIFT) | (1 << CNTACR_RWPT_SHIFT); - mmio_write_32(ARM_SYS_TIMCTL_BASE + CNTACR_BASE(PLAT_ARM_NSTIMER_FRAME_ID), reg_val); -#endif /* ARM_CONFIG_CNTACR */ - - reg_val = (1 << CNTNSAR_NS_SHIFT(PLAT_ARM_NSTIMER_FRAME_ID)); - mmio_write_32(ARM_SYS_TIMCTL_BASE + CNTNSAR, reg_val); -} -#endif /* ARM_SYS_TIMCTL_BASE */ - -/******************************************************************************* - * Returns ARM platform specific memory map regions. 
- ******************************************************************************/ -const mmap_region_t *plat_arm_get_mmap(void) -{ - return plat_arm_mmap; -} - -#ifdef ARM_SYS_CNTCTL_BASE - -#if ERROR_DEPRECATED -unsigned int plat_get_syscnt_freq2(void) -{ - unsigned int counter_base_frequency; -#else -unsigned long long plat_get_syscnt_freq(void) -{ - unsigned long long counter_base_frequency; -#endif /* ERROR_DEPRECATED */ - - /* Read the frequency from Frequency modes table */ - counter_base_frequency = mmio_read_32(ARM_SYS_CNTCTL_BASE + CNTFID_OFF); - - /* The first entry of the frequency modes table must not be 0 */ - if (counter_base_frequency == 0) - panic(); - - return counter_base_frequency; -} - -#endif /* ARM_SYS_CNTCTL_BASE */ diff --git a/plat/arm/common/aarch64/arm_helpers.S b/plat/arm/common/aarch64/arm_helpers.S index a0338f1..d782020 100644 --- a/plat/arm/common/aarch64/arm_helpers.S +++ b/plat/arm/common/aarch64/arm_helpers.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -49,7 +49,7 @@ endfunc plat_my_core_pos /* ----------------------------------------------------- - * unsigned int plat_arm_calc_core_pos(uint64_t mpidr) + * unsigned int plat_arm_calc_core_pos(u_register_t mpidr) * Helper function to calculate the core position. * With this function: CorePos = (ClusterId * 4) + * CoreId diff --git a/plat/arm/common/arm_bl31_setup.c b/plat/arm/common/arm_bl31_setup.c index 87cafce..4ed2477 100644 --- a/plat/arm/common/arm_bl31_setup.c +++ b/plat/arm/common/arm_bl31_setup.c @@ -38,7 +38,7 @@ #include #include -#define BL31_END (unsigned long)(&__BL31_END__) +#define BL31_END (uintptr_t)(&__BL31_END__) #if USE_COHERENT_MEM /* @@ -48,8 +48,8 @@ * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols * refer to page-aligned addresses. */ -#define BL31_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__) -#define BL31_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__) +#define BL31_COHERENT_RAM_BASE (uintptr_t)(&__COHERENT_RAM_START__) +#define BL31_COHERENT_RAM_LIMIT (uintptr_t)(&__COHERENT_RAM_END__) #endif /* @@ -130,11 +130,8 @@ * Tell BL31 where the non-trusted software image * is located and the entry state information */ -#ifdef PRELOADED_BL33_BASE - bl33_image_ep_info.pc = PRELOADED_BL33_BASE; -#else bl33_image_ep_info.pc = plat_get_ns_image_entrypoint(); -#endif /* PRELOADED_BL33_BASE */ + bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry(); SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE); diff --git a/plat/arm/common/arm_common.c b/plat/arm/common/arm_common.c new file mode 100644 index 0000000..93355fe --- /dev/null +++ b/plat/arm/common/arm_common.c @@ -0,0 +1,202 @@ +/* + * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +extern const mmap_region_t plat_arm_mmap[]; + +/* Weak definitions may be overridden in specific ARM standard platform */ +#pragma weak plat_get_ns_image_entrypoint +#pragma weak plat_arm_get_mmap + +/* Conditionally provide a weak definition of plat_get_syscnt_freq2 to avoid + * conflicts with the definition in plat/common. */ +#if ERROR_DEPRECATED +#pragma weak plat_get_syscnt_freq2 +#endif + +/* + * Set up the page tables for the generic and platform-specific memory regions. + * The extents of the generic memory regions are specified by the function + * arguments and consist of: + * - Trusted SRAM seen by the BL image; + * - Code section; + * - Read-only data section; + * - Coherent memory region, if applicable. + */ +void arm_setup_page_tables(uintptr_t total_base, + size_t total_size, + uintptr_t code_start, + uintptr_t code_limit, + uintptr_t rodata_start, + uintptr_t rodata_limit +#if USE_COHERENT_MEM + , + uintptr_t coh_start, + uintptr_t coh_limit +#endif + ) +{ + /* + * Map the Trusted SRAM with appropriate memory attributes. + * Subsequent mappings will adjust the attributes for specific regions. 
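+	 * (The whole of Trusted SRAM is first mapped MT_MEMORY | MT_RW |
+	 * MT_SECURE; the code, read-only data and, optionally, coherent
+	 * memory regions re-mapped below are finer-grained mappings that
+	 * override the blanket attributes for those address ranges.)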
+ */ + VERBOSE("Trusted SRAM seen by this BL image: %p - %p\n", + (void *) total_base, (void *) (total_base + total_size)); + mmap_add_region(total_base, total_base, + total_size, + MT_MEMORY | MT_RW | MT_SECURE); + + /* Re-map the code section */ + VERBOSE("Code region: %p - %p\n", + (void *) code_start, (void *) code_limit); + mmap_add_region(code_start, code_start, + code_limit - code_start, + MT_CODE | MT_SECURE); + + /* Re-map the read-only data section */ + VERBOSE("Read-only data region: %p - %p\n", + (void *) rodata_start, (void *) rodata_limit); + mmap_add_region(rodata_start, rodata_start, + rodata_limit - rodata_start, + MT_RO_DATA | MT_SECURE); + +#if USE_COHERENT_MEM + /* Re-map the coherent memory region */ + VERBOSE("Coherent region: %p - %p\n", + (void *) coh_start, (void *) coh_limit); + mmap_add_region(coh_start, coh_start, + coh_limit - coh_start, + MT_DEVICE | MT_RW | MT_SECURE); +#endif + + /* Now (re-)map the platform-specific memory regions */ + mmap_add(plat_arm_get_mmap()); + + /* Create the page tables to reflect the above mappings */ + init_xlat_tables(); +} + +uintptr_t plat_get_ns_image_entrypoint(void) +{ +#ifdef PRELOADED_BL33_BASE + return PRELOADED_BL33_BASE; +#else + return PLAT_ARM_NS_IMAGE_OFFSET; +#endif +} + +/******************************************************************************* + * Gets SPSR for BL32 entry + ******************************************************************************/ +uint32_t arm_get_spsr_for_bl32_entry(void) +{ + /* + * The Secure Payload Dispatcher service is responsible for + * setting the SPSR prior to entry into the BL32 image. + */ + return 0; +} + +/******************************************************************************* + * Gets SPSR for BL33 entry + ******************************************************************************/ +uint32_t arm_get_spsr_for_bl33_entry(void) +{ + unsigned long el_status; + unsigned int mode; + uint32_t spsr; + + /* Figure out what mode we enter the non-secure world in */ + el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT; + el_status &= ID_AA64PFR0_ELX_MASK; + + mode = (el_status) ? MODE_EL2 : MODE_EL1; + + /* + * TODO: Consider the possibility of specifying the SPSR in + * the FIP ToC and allowing the platform to have a say as + * well. + */ + spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); + return spsr; +} + +/******************************************************************************* + * Configures access to the system counter timer module. + ******************************************************************************/ +#ifdef ARM_SYS_TIMCTL_BASE +void arm_configure_sys_timer(void) +{ + unsigned int reg_val; + +#if ARM_CONFIG_CNTACR + reg_val = (1 << CNTACR_RPCT_SHIFT) | (1 << CNTACR_RVCT_SHIFT); + reg_val |= (1 << CNTACR_RFRQ_SHIFT) | (1 << CNTACR_RVOFF_SHIFT); + reg_val |= (1 << CNTACR_RWVT_SHIFT) | (1 << CNTACR_RWPT_SHIFT); + mmio_write_32(ARM_SYS_TIMCTL_BASE + CNTACR_BASE(PLAT_ARM_NSTIMER_FRAME_ID), reg_val); +#endif /* ARM_CONFIG_CNTACR */ + + reg_val = (1 << CNTNSAR_NS_SHIFT(PLAT_ARM_NSTIMER_FRAME_ID)); + mmio_write_32(ARM_SYS_TIMCTL_BASE + CNTNSAR, reg_val); +} +#endif /* ARM_SYS_TIMCTL_BASE */ + +/******************************************************************************* + * Returns ARM platform specific memory map regions. 
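+ * A platform port would typically define plat_arm_mmap[] along these lines
+ * (illustrative only; the names and region are not taken from a real port):
+ *
+ *   const mmap_region_t plat_arm_mmap[] = {
+ *           MAP_REGION_FLAT(DEVICE0_BASE, DEVICE0_SIZE,
+ *                           MT_DEVICE | MT_RW | MT_SECURE),
+ *           {0}
+ *   };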
+ ******************************************************************************/ +const mmap_region_t *plat_arm_get_mmap(void) +{ + return plat_arm_mmap; +} + +#ifdef ARM_SYS_CNTCTL_BASE + +unsigned int plat_get_syscnt_freq2(void) +{ + unsigned int counter_base_frequency; + + /* Read the frequency from Frequency modes table */ + counter_base_frequency = mmio_read_32(ARM_SYS_CNTCTL_BASE + CNTFID_OFF); + + /* The first entry of the frequency modes table must not be 0 */ + if (counter_base_frequency == 0) + panic(); + + return counter_base_frequency; +} + +#endif /* ARM_SYS_CNTCTL_BASE */ diff --git a/plat/arm/common/arm_common.mk b/plat/arm/common/arm_common.mk index 9e5ddea..03b9fe4 100644 --- a/plat/arm/common/arm_common.mk +++ b/plat/arm/common/arm_common.mk @@ -97,8 +97,8 @@ PLAT_BL_COMMON_SOURCES += lib/xlat_tables/xlat_tables_common.c \ lib/xlat_tables/aarch64/xlat_tables.c \ - plat/arm/common/aarch64/arm_common.c \ plat/arm/common/aarch64/arm_helpers.S \ + plat/arm/common/arm_common.c \ plat/common/aarch64/plat_common.c BL1_SOURCES += drivers/arm/sp805/sp805.c \ @@ -128,7 +128,7 @@ plat/arm/common/arm_pm.c \ plat/arm/common/arm_topology.c \ plat/common/aarch64/platform_mp_stack.S \ - plat/common/aarch64/plat_psci_common.c + plat/common/plat_psci_common.c ifneq (${TRUSTED_BOARD_BOOT},0) diff --git a/plat/arm/css/common/aarch64/css_helpers.S b/plat/arm/css/common/aarch64/css_helpers.S index 0763a3e..92b0e81 100644 --- a/plat/arm/css/common/aarch64/css_helpers.S +++ b/plat/arm/css/common/aarch64/css_helpers.S @@ -70,7 +70,7 @@ endfunc plat_secondary_cold_boot_setup /* --------------------------------------------------------------------- - * unsigned long plat_get_my_entrypoint (void); + * uintptr_t plat_get_my_entrypoint (void); * * Main job of this routine is to distinguish between a cold and a warm * boot. On CSS platforms, this distinction is based on the contents of @@ -90,7 +90,7 @@ endfunc plat_get_my_entrypoint /* ----------------------------------------------------------- - * unsigned int css_calc_core_pos_swap_cluster(uint64_t mpidr) + * unsigned int css_calc_core_pos_swap_cluster(u_register_t mpidr) * Utility function to calculate the core position by * swapping the cluster order. This is necessary in order to * match the format of the boot information passed by the SCP diff --git a/plat/common/aarch64/plat_psci_common.c b/plat/common/aarch64/plat_psci_common.c index 0748ef4..804da93 100644 --- a/plat/common/aarch64/plat_psci_common.c +++ b/plat/common/aarch64/plat_psci_common.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -28,36 +28,6 @@ * POSSIBILITY OF SUCH DAMAGE. */ -#include -#include -#include -#include - -/* - * The PSCI generic code uses this API to let the platform participate in state - * coordination during a power management operation. It compares the platform - * specific local power states requested by each cpu for a given power domain - * and returns the coordinated target power state that the domain should - * enter. A platform assigns a number to a local power state. This default - * implementation assumes that the platform assigns these numbers in order of - * increasing depth of the power state i.e. 
for two power states X & Y, if X < Y - * then X represents a shallower power state than Y. As a result, the - * coordinated target local power state for a power domain will be the minimum - * of the requested local power states. - */ -plat_local_state_t plat_get_target_pwr_state(unsigned int lvl, - const plat_local_state_t *states, - unsigned int ncpu) -{ - plat_local_state_t target = PLAT_MAX_OFF_STATE, temp; - - assert(ncpu); - - do { - temp = *states++; - if (temp < target) - target = temp; - } while (--ncpu); - - return target; -} +#if !ERROR_DEPRECATED +#include "../plat_psci_common.c" +#endif diff --git a/plat/common/aarch64/platform_mp_stack.S b/plat/common/aarch64/platform_mp_stack.S index a077f65..e3063d1 100644 --- a/plat/common/aarch64/platform_mp_stack.S +++ b/plat/common/aarch64/platform_mp_stack.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -159,7 +159,7 @@ endfunc_deprecated platform_set_stack /* ----------------------------------------------------- - * unsigned long plat_get_my_stack () + * uintptr_t plat_get_my_stack () * * For the current CPU, this function returns the stack * pointer for a stack allocated in device memory. diff --git a/plat/common/aarch64/platform_up_stack.S b/plat/common/aarch64/platform_up_stack.S index 24b3a71..5b82630 100644 --- a/plat/common/aarch64/platform_up_stack.S +++ b/plat/common/aarch64/platform_up_stack.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -40,7 +40,7 @@ .globl platform_get_stack /* ----------------------------------------------------- - * unsigned long plat_get_my_stack () + * uintptr_t plat_get_my_stack () * * For cold-boot BL images, only the primary CPU needs a * stack. This function returns the stack pointer for a diff --git a/plat/common/plat_psci_common.c b/plat/common/plat_psci_common.c new file mode 100644 index 0000000..3eb6886 --- /dev/null +++ b/plat/common/plat_psci_common.c @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include + +/* + * The PSCI generic code uses this API to let the platform participate in state + * coordination during a power management operation. It compares the platform + * specific local power states requested by each cpu for a given power domain + * and returns the coordinated target power state that the domain should + * enter. A platform assigns a number to a local power state. This default + * implementation assumes that the platform assigns these numbers in order of + * increasing depth of the power state i.e. for two power states X & Y, if X < Y + * then X represents a shallower power state than Y. As a result, the + * coordinated target local power state for a power domain will be the minimum + * of the requested local power states. + */ +plat_local_state_t plat_get_target_pwr_state(unsigned int lvl, + const plat_local_state_t *states, + unsigned int ncpu) +{ + plat_local_state_t target = PLAT_MAX_OFF_STATE, temp; + + assert(ncpu); + + do { + temp = *states++; + if (temp < target) + target = temp; + } while (--ncpu); + + return target; +} diff --git a/plat/compat/plat_compat.mk b/plat/compat/plat_compat.mk index c0c8ece..d9d50f6 100644 --- a/plat/compat/plat_compat.mk +++ b/plat/compat/plat_compat.mk @@ -1,5 +1,5 @@ # -# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved. +# Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: @@ -36,6 +36,6 @@ PLAT_BL_COMMON_SOURCES += plat/compat/aarch64/plat_helpers_compat.S -BL31_SOURCES += plat/common/aarch64/plat_psci_common.c \ +BL31_SOURCES += plat/common/plat_psci_common.c \ plat/compat/plat_pm_compat.c \ plat/compat/plat_topology_compat.c diff --git a/plat/nvidia/tegra/common/tegra_common.mk b/plat/nvidia/tegra/common/tegra_common.mk index 2ecf5f5..03ca773 100644 --- a/plat/nvidia/tegra/common/tegra_common.mk +++ b/plat/nvidia/tegra/common/tegra_common.mk @@ -1,5 +1,5 @@ # -# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved. +# Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved. 
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: @@ -52,7 +52,7 @@ drivers/delay_timer/delay_timer.c \ drivers/ti/uart/16550_console.S \ plat/common/aarch64/platform_mp_stack.S \ - plat/common/aarch64/plat_psci_common.c \ + plat/common/plat_psci_common.c \ ${COMMON_DIR}/aarch64/tegra_helpers.S \ ${COMMON_DIR}/drivers/memctrl/memctrl.c \ ${COMMON_DIR}/drivers/pmc/pmc.c \ diff --git a/plat/rockchip/rk3368/platform.mk b/plat/rockchip/rk3368/platform.mk index b90b792..1dca4c5 100644 --- a/plat/rockchip/rk3368/platform.mk +++ b/plat/rockchip/rk3368/platform.mk @@ -51,7 +51,7 @@ PLAT_BL_COMMON_SOURCES := lib/xlat_tables/xlat_tables_common.c \ lib/xlat_tables/aarch64/xlat_tables.c \ plat/common/aarch64/plat_common.c \ - plat/common/aarch64/plat_psci_common.c + plat/common/plat_psci_common.c BL31_SOURCES += ${RK_GIC_SOURCES} \ drivers/arm/cci/cci.c \ diff --git a/plat/rockchip/rk3399/platform.mk b/plat/rockchip/rk3399/platform.mk index 9b5848e..142fe9e 100644 --- a/plat/rockchip/rk3399/platform.mk +++ b/plat/rockchip/rk3399/platform.mk @@ -50,7 +50,7 @@ PLAT_BL_COMMON_SOURCES := lib/xlat_tables/xlat_tables_common.c \ lib/xlat_tables/aarch64/xlat_tables.c \ plat/common/aarch64/plat_common.c \ - plat/common/aarch64/plat_psci_common.c + plat/common/plat_psci_common.c BL31_SOURCES += ${RK_GIC_SOURCES} \ drivers/arm/cci/cci.c \ diff --git a/plat/xilinx/zynqmp/platform.mk b/plat/xilinx/zynqmp/platform.mk index a93ce3c..fe939c7 100644 --- a/plat/xilinx/zynqmp/platform.mk +++ b/plat/xilinx/zynqmp/platform.mk @@ -69,9 +69,9 @@ drivers/arm/gic/v2/gicv2_helpers.c \ drivers/cadence/uart/cdns_console.S \ drivers/console/console.S \ - plat/arm/common/aarch64/arm_common.c \ plat/arm/common/aarch64/arm_helpers.S \ plat/arm/common/arm_cci.c \ + plat/arm/common/arm_common.c \ plat/arm/common/arm_gicv2.c \ plat/common/plat_gicv2.c \ plat/common/aarch64/plat_common.c \ @@ -81,7 +81,7 @@ BL31_SOURCES += drivers/arm/cci/cci.c \ lib/cpus/aarch64/aem_generic.S \ lib/cpus/aarch64/cortex_a53.S \ - plat/common/aarch64/plat_psci_common.c \ + plat/common/plat_psci_common.c \ plat/common/aarch64/platform_mp_stack.S \ plat/xilinx/zynqmp/bl31_zynqmp_setup.c \ plat/xilinx/zynqmp/plat_psci.c \ diff --git a/services/std_svc/psci/psci_common.c b/services/std_svc/psci/psci_common.c deleted file mode 100644 index 5090037..0000000 --- a/services/std_svc/psci/psci_common.c +++ /dev/null @@ -1,928 +0,0 @@ -/* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "psci_private.h" - -/* - * SPD power management operations, expected to be supplied by the registered - * SPD on successful SP initialization - */ -const spd_pm_ops_t *psci_spd_pm; - -/* - * PSCI requested local power state map. This array is used to store the local - * power states requested by a CPU for power levels from level 1 to - * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power - * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a - * CPU are the same. - * - * During state coordination, the platform is passed an array containing the - * local states requested for a particular non cpu power domain by each cpu - * within the domain. - * - * TODO: Dense packing of the requested states will cause cache thrashing - * when multiple power domains write to it. If we allocate the requested - * states at each power level in a cache-line aligned per-domain memory, - * the cache thrashing can be avoided. - */ -static plat_local_state_t - psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT]; - - -/******************************************************************************* - * Arrays that hold the platform's power domain tree information for state - * management of power domains. - * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain - * which is an ancestor of a CPU power domain. - * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain - ******************************************************************************/ -non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS] -#if USE_COHERENT_MEM -__section("tzfw_coherent_mem") -#endif -; - -DEFINE_BAKERY_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]); - -cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT]; - -/******************************************************************************* - * Pointer to functions exported by the platform to complete power mgmt. ops - ******************************************************************************/ -const plat_psci_ops_t *psci_plat_pm_ops; - -/****************************************************************************** - * Check that the maximum power level supported by the platform makes sense - *****************************************************************************/ -CASSERT(PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL && \ - PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL, \ - assert_platform_max_pwrlvl_check); - -/* - * The plat_local_state used by the platform is one of these types: RUN, - * RETENTION and OFF. The platform can define further sub-states for each type - * apart from RUN. This categorization is done to verify the sanity of the - * psci_power_state passed by the platform and to print debug information. The - * categorization is done on the basis of the following conditions: - * - * 1. 
If (plat_local_state == 0) then the category is STATE_TYPE_RUN.
- *
- * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is
- *    STATE_TYPE_RETN.
- *
- * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is
- *    STATE_TYPE_OFF.
- */
-typedef enum plat_local_state_type {
-	STATE_TYPE_RUN = 0,
-	STATE_TYPE_RETN,
-	STATE_TYPE_OFF
-} plat_local_state_type_t;
-
-/* The macro used to categorize plat_local_state. */
-#define find_local_state_type(plat_local_state)	\
-		((plat_local_state) ? ((plat_local_state > PLAT_MAX_RET_STATE) \
-		? STATE_TYPE_OFF : STATE_TYPE_RETN) \
-		: STATE_TYPE_RUN)
-
-/******************************************************************************
- * Check that the maximum retention level supported by the platform is less
- * than the maximum off level.
- *****************************************************************************/
-CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE, \
-		assert_platform_max_off_and_retn_state_check);
-
-/******************************************************************************
- * This function ensures that the power state parameter in a CPU_SUSPEND request
- * is valid. If so, it returns the requested states for each power level.
- *****************************************************************************/
-int psci_validate_power_state(unsigned int power_state,
-			      psci_power_state_t *state_info)
-{
-	/* Check SBZ bits in power state are zero */
-	if (psci_check_power_state(power_state))
-		return PSCI_E_INVALID_PARAMS;
-
-	assert(psci_plat_pm_ops->validate_power_state);
-
-	/* Validate the power_state using platform pm_ops */
-	return psci_plat_pm_ops->validate_power_state(power_state, state_info);
-}
-
-/******************************************************************************
- * This function retrieves the `psci_power_state_t` for system suspend from
- * the platform.
- *****************************************************************************/
-void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
-{
-	/*
-	 * Assert that the required pm_ops hook is implemented to ensure that
-	 * the capability detected during psci_setup() is valid.
-	 */
-	assert(psci_plat_pm_ops->get_sys_suspend_power_state);
-
-	/*
-	 * Query the platform for the power_state required for system suspend
-	 */
-	psci_plat_pm_ops->get_sys_suspend_power_state(state_info);
-}
-
-/*******************************************************************************
- * This function verifies that all the other cores in the system have been
- * turned OFF and the current CPU is the last running CPU in the system.
- * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false)
- * otherwise.
- ******************************************************************************/
-unsigned int psci_is_last_on_cpu(void)
-{
-	unsigned int cpu_idx, my_idx = plat_my_core_pos();
-
-	for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
-		if (cpu_idx == my_idx) {
-			assert(psci_get_aff_info_state() == AFF_STATE_ON);
-			continue;
-		}
-
-		if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF)
-			return 0;
-	}
-
-	return 1;
-}
-
-/*******************************************************************************
- * Routine to return the maximum power level to traverse to after a cpu has
- * been physically powered up. It is expected to be called immediately after
- * reset from assembler code.
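
As a quick sanity check, the categorization rule above can be exercised on its own. A minimal standalone sketch, assuming the illustrative numbering PLAT_MAX_RET_STATE = 1 and PLAT_MAX_OFF_STATE = 2 (the values used by the ARM standard platforms; other platforms may assign different depths), with the macro logic mirrored from the file deleted here:

#include <stdio.h>

/* Illustrative platform values; real platforms define these in platform_def.h */
#define PLAT_MAX_RET_STATE	1
#define PLAT_MAX_OFF_STATE	2

typedef enum { STATE_TYPE_RUN = 0, STATE_TYPE_RETN, STATE_TYPE_OFF } plat_local_state_type_t;

/* Same categorization rule as the macro above */
#define find_local_state_type(s) \
	((s) ? (((s) > PLAT_MAX_RET_STATE) ? STATE_TYPE_OFF : STATE_TYPE_RETN) \
	     : STATE_TYPE_RUN)

int main(void)
{
	static const char *names[] = { "RUN", "RETENTION", "OFF" };
	unsigned int s;

	for (s = 0; s <= PLAT_MAX_OFF_STATE; s++)
		printf("local state %u -> %s\n", s, names[find_local_state_type(s)]);
	return 0;	/* prints RUN, RETENTION, OFF */
}
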
- ******************************************************************************/ -static unsigned int get_power_on_target_pwrlvl(void) -{ - unsigned int pwrlvl; - - /* - * Assume that this cpu was suspended and retrieve its target power - * level. If it is invalid then it could only have been turned off - * earlier. PLAT_MAX_PWR_LVL will be the highest power level a - * cpu can be turned off to. - */ - pwrlvl = psci_get_suspend_pwrlvl(); - if (pwrlvl == PSCI_INVALID_PWR_LVL) - pwrlvl = PLAT_MAX_PWR_LVL; - return pwrlvl; -} - -/****************************************************************************** - * Helper function to update the requested local power state array. This array - * does not store the requested state for the CPU power level. Hence an - * assertion is added to prevent us from accessing the wrong index. - *****************************************************************************/ -static void psci_set_req_local_pwr_state(unsigned int pwrlvl, - unsigned int cpu_idx, - plat_local_state_t req_pwr_state) -{ - assert(pwrlvl > PSCI_CPU_PWR_LVL); - psci_req_local_pwr_states[pwrlvl - 1][cpu_idx] = req_pwr_state; -} - -/****************************************************************************** - * This function initializes the psci_req_local_pwr_states. - *****************************************************************************/ -void psci_init_req_local_pwr_states(void) -{ - /* Initialize the requested state of all non CPU power domains as OFF */ - memset(&psci_req_local_pwr_states, PLAT_MAX_OFF_STATE, - sizeof(psci_req_local_pwr_states)); -} - -/****************************************************************************** - * Helper function to return a reference to an array containing the local power - * states requested by each cpu for a power domain at 'pwrlvl'. The size of the - * array will be the number of cpu power domains of which this power domain is - * an ancestor. These requested states will be used to determine a suitable - * target state for this power domain during psci state coordination. An - * assertion is added to prevent us from accessing the CPU power level. - *****************************************************************************/ -static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl, - unsigned int cpu_idx) -{ - assert(pwrlvl > PSCI_CPU_PWR_LVL); - - return &psci_req_local_pwr_states[pwrlvl - 1][cpu_idx]; -} - -/****************************************************************************** - * Helper function to return the current local power state of each power domain - * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This - * function will be called after a cpu is powered on to find the local state - * each power domain has emerged from. 
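
Note the off-by-one convention in the requested-states array above: row 0 holds the requests for power level 1, because level 0 (the CPU itself) never needs coordination. A standalone sketch of that convention, with illustrative array sizes; the memset trick works because plat_local_state_t is a single byte:

#include <assert.h>
#include <string.h>

#define PLAT_MAX_PWR_LVL	2	/* illustrative: cpu, cluster, system */
#define PLATFORM_CORE_COUNT	4	/* illustrative */
#define PLAT_MAX_OFF_STATE	2	/* illustrative deepest state */

typedef unsigned char plat_local_state_t;

static plat_local_state_t req_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT];

/* Mirrors psci_set_req_local_pwr_state(): level 1 maps to row 0, etc. */
static void set_req(unsigned int pwrlvl, unsigned int cpu, plat_local_state_t s)
{
	assert(pwrlvl > 0 && pwrlvl <= PLAT_MAX_PWR_LVL);
	req_states[pwrlvl - 1][cpu] = s;
}

int main(void)
{
	/* Start every request at the deepest state, as the init routine does;
	 * valid only because each element is one byte wide. */
	memset(req_states, PLAT_MAX_OFF_STATE, sizeof(req_states));

	set_req(1, 0, 0);	/* cpu0 requests RUN at the cluster level */
	assert(req_states[0][0] == 0);
	return 0;
}
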
- *****************************************************************************/
-static void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
-					psci_power_state_t *target_state)
-{
-	unsigned int parent_idx, lvl;
-	plat_local_state_t *pd_state = target_state->pwr_domain_state;
-
-	pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
-	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
-
-	/* Copy the local power state from node to state_info */
-	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
-#if !USE_COHERENT_MEM
-		/*
-		 * If using normal memory for psci_non_cpu_pd_nodes, we need
-		 * to flush before reading the local power state as another
-		 * cpu in the same power domain could have updated it and this
-		 * code runs before caches are enabled.
-		 */
-		flush_dcache_range(
-				(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
-				sizeof(psci_non_cpu_pd_nodes[parent_idx]));
-#endif
-		pd_state[lvl] = psci_non_cpu_pd_nodes[parent_idx].local_state;
-		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
-	}
-
-	/* Set the higher levels to RUN */
-	for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
-		target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
-}
-
-/******************************************************************************
- * Helper function to set the target local power state that each power domain
- * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
- * enter. This function will be called after coordination of requested power
- * states has been done for each power level.
- *****************************************************************************/
-static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
-					const psci_power_state_t *target_state)
-{
-	unsigned int parent_idx, lvl;
-	const plat_local_state_t *pd_state = target_state->pwr_domain_state;
-
-	psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);
-
-	/*
-	 * Need to flush as local_state will be accessed with Data Cache
-	 * disabled during power on
-	 */
-	flush_cpu_data(psci_svc_cpu_data.local_state);
-
-	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
-
-	/* Copy the local_state from state_info */
-	for (lvl = 1; lvl <= end_pwrlvl; lvl++) {
-		psci_non_cpu_pd_nodes[parent_idx].local_state = pd_state[lvl];
-#if !USE_COHERENT_MEM
-		flush_dcache_range(
-				(uintptr_t)&psci_non_cpu_pd_nodes[parent_idx],
-				sizeof(psci_non_cpu_pd_nodes[parent_idx]));
-#endif
-		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
-	}
-}
-
-
-/*******************************************************************************
- * PSCI helper function to get the parent nodes corresponding to a cpu_index.
- ******************************************************************************/
-void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
-				      unsigned int end_lvl,
-				      unsigned int node_index[])
-{
-	unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
-	int i;
-
-	for (i = PSCI_CPU_PWR_LVL + 1; i <= end_lvl; i++) {
-		*node_index++ = parent_node;
-		parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
-	}
-}
-
-/******************************************************************************
- * This function is invoked post CPU power up and initialization. It sets the
- * affinity info state, target power state and requested power state for the
- * current CPU and all its ancestor power domains to RUN.
- *****************************************************************************/ -void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl) -{ - unsigned int parent_idx, cpu_idx = plat_my_core_pos(), lvl; - parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node; - - /* Reset the local_state to RUN for the non cpu power domains. */ - for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) { - psci_non_cpu_pd_nodes[parent_idx].local_state = - PSCI_LOCAL_STATE_RUN; -#if !USE_COHERENT_MEM - flush_dcache_range( - (uintptr_t) &psci_non_cpu_pd_nodes[parent_idx], - sizeof(psci_non_cpu_pd_nodes[parent_idx])); -#endif - psci_set_req_local_pwr_state(lvl, - cpu_idx, - PSCI_LOCAL_STATE_RUN); - parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node; - } - - /* Set the affinity info state to ON */ - psci_set_aff_info_state(AFF_STATE_ON); - - psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN); - flush_cpu_data(psci_svc_cpu_data); -} - -/****************************************************************************** - * This function is passed the local power states requested for each power - * domain (state_info) between the current CPU domain and its ancestors until - * the target power level (end_pwrlvl). It updates the array of requested power - * states with this information. - * - * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it - * retrieves the states requested by all the cpus of which the power domain at - * that level is an ancestor. It passes this information to the platform to - * coordinate and return the target power state. If the target state for a level - * is RUN then subsequent levels are not considered. At the CPU level, state - * coordination is not required. Hence, the requested and the target states are - * the same. - * - * The 'state_info' is updated with the target state for each level between the - * CPU and the 'end_pwrlvl' and returned to the caller. - * - * This function will only be invoked with data cache enabled and while - * powering down a core. - *****************************************************************************/ -void psci_do_state_coordination(unsigned int end_pwrlvl, - psci_power_state_t *state_info) -{ - unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos(); - unsigned int start_idx, ncpus; - plat_local_state_t target_state, *req_states; - - assert(end_pwrlvl <= PLAT_MAX_PWR_LVL); - parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node; - - /* For level 0, the requested state will be equivalent - to target state */ - for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) { - - /* First update the requested power state */ - psci_set_req_local_pwr_state(lvl, cpu_idx, - state_info->pwr_domain_state[lvl]); - - /* Get the requested power states for this power level */ - start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx; - req_states = psci_get_req_local_pwr_states(lvl, start_idx); - - /* - * Let the platform coordinate amongst the requested states at - * this power level and return the target local power state. 
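
Combined with the default plat_get_target_pwr_state() introduced earlier in this patch, the coordination step above reduces to a minimum over the per-cpu requests: the shallowest requested state wins. A standalone sketch of that negotiation, reusing the illustrative 0 = RUN, 1 = RETENTION, 2 = OFF numbering:

#include <assert.h>

typedef unsigned char plat_local_state_t;

/* Same min-reduction as the default plat_get_target_pwr_state() */
static plat_local_state_t coordinate(const plat_local_state_t *states,
				     unsigned int ncpu)
{
	plat_local_state_t target = 2;	/* PLAT_MAX_OFF_STATE, illustrative */

	assert(ncpu);
	while (ncpu--) {
		if (*states < target)
			target = *states;
		states++;
	}
	return target;
}

int main(void)
{
	/* cpu0 wants OFF(2), cpu1 wants RETENTION(1): cluster can only retain */
	plat_local_state_t req[] = { 2, 1 };
	assert(coordinate(req, 2) == 1);

	/* one RUN(0) request keeps the whole domain running */
	plat_local_state_t req2[] = { 2, 0 };
	assert(coordinate(req2, 2) == 0);
	return 0;
}
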
- */
-		ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
-		target_state = plat_get_target_pwr_state(lvl,
-							 req_states,
-							 ncpus);
-
-		state_info->pwr_domain_state[lvl] = target_state;
-
-		/* Break early if the negotiated target power state is RUN */
-		if (is_local_state_run(state_info->pwr_domain_state[lvl]))
-			break;
-
-		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
-	}
-
-	/*
-	 * This is for cases when we break out of the above loop early because
-	 * the target power state is RUN at a power level < end_pwrlvl.
-	 * We update the requested power state from state_info and then
-	 * set the target state as RUN.
-	 */
-	for (lvl = lvl + 1; lvl <= end_pwrlvl; lvl++) {
-		psci_set_req_local_pwr_state(lvl, cpu_idx,
-					     state_info->pwr_domain_state[lvl]);
-		state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
-
-	}
-
-	/* Update the target state in the power domain nodes */
-	psci_set_target_local_pwr_states(end_pwrlvl, state_info);
-}
-
-/******************************************************************************
- * This function validates a suspend request by making sure that if a standby
- * state is requested then no power level is turned off and the highest power
- * level is placed in a standby/retention state.
- *
- * It also ensures that the state level X will enter is not shallower than the
- * state level X + 1 will enter.
- *
- * This validation will be enabled only for DEBUG builds as the platform is
- * expected to perform these validations as well.
- *****************************************************************************/
-int psci_validate_suspend_req(const psci_power_state_t *state_info,
-			      unsigned int is_power_down_state)
-{
-	unsigned int max_off_lvl, target_lvl, max_retn_lvl;
-	plat_local_state_t state;
-	plat_local_state_type_t req_state_type, deepest_state_type;
-	int i;
-
-	/* Find the target suspend power level */
-	target_lvl = psci_find_target_suspend_lvl(state_info);
-	if (target_lvl == PSCI_INVALID_PWR_LVL)
-		return PSCI_E_INVALID_PARAMS;
-
-	/* All power domain levels are in a RUN state to begin with */
-	deepest_state_type = STATE_TYPE_RUN;
-
-	for (i = target_lvl; i >= PSCI_CPU_PWR_LVL; i--) {
-		state = state_info->pwr_domain_state[i];
-		req_state_type = find_local_state_type(state);
-
-		/*
-		 * While traversing from the highest power level to the lowest,
-		 * the state requested for lower levels has to be the same or
-		 * deeper i.e. equal to or greater than the state at the higher
-		 * levels. If this condition is true, then the requested state
-		 * becomes the deepest state encountered so far.
-		 */
-		if (req_state_type < deepest_state_type)
-			return PSCI_E_INVALID_PARAMS;
-		deepest_state_type = req_state_type;
-	}
-
-	/* Find the highest off power level */
-	max_off_lvl = psci_find_max_off_lvl(state_info);
-
-	/* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
-	max_retn_lvl = PSCI_INVALID_PWR_LVL;
-	if (target_lvl != max_off_lvl)
-		max_retn_lvl = target_lvl;
-
-	/*
-	 * If this is not a request for a power down state then max off level
-	 * has to be invalid and max retention level has to be a valid power
-	 * level.
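
The rule described above is a monotonicity check: walking down from the target level towards the CPU, each requested state must be at least as deep as the one at the level above. A minimal sketch of the same check (the real code compares state types via find_local_state_type(); with the illustrative 0/1/2 numbering the raw values coincide with the types):

#include <assert.h>

typedef unsigned char plat_local_state_t;

/* 1 if the states get no shallower as the level decreases, else 0 */
static int states_are_monotonic(const plat_local_state_t *states, int target_lvl)
{
	plat_local_state_t deepest = 0;	/* RUN to begin with */
	int i;

	for (i = target_lvl; i >= 0; i--) {
		if (states[i] < deepest)
			return 0;
		deepest = states[i];
	}
	return 1;
}

int main(void)
{
	plat_local_state_t ok[]  = { 2, 2, 1 };	/* cpu OFF, cluster OFF, system RETN */
	plat_local_state_t bad[] = { 1, 2, 2 };	/* cpu shallower than cluster: invalid */

	assert(states_are_monotonic(ok, 2));
	assert(!states_are_monotonic(bad, 2));
	return 0;
}
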
- */
-	if (!is_power_down_state && (max_off_lvl != PSCI_INVALID_PWR_LVL ||
-				     max_retn_lvl == PSCI_INVALID_PWR_LVL))
-		return PSCI_E_INVALID_PARAMS;
-
-	return PSCI_E_SUCCESS;
-}
-
-/******************************************************************************
- * This function finds the highest power level which will be powered down
- * amongst all the power levels specified in the 'state_info' structure
- *****************************************************************************/
-unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
-{
-	int i;
-
-	for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
-		if (is_local_state_off(state_info->pwr_domain_state[i]))
-			return i;
-	}
-
-	return PSCI_INVALID_PWR_LVL;
-}
-
-/******************************************************************************
- * This function finds the level of the highest power domain which will be
- * placed in a low power state during a suspend operation.
- *****************************************************************************/
-unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
-{
-	int i;
-
-	for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
-		if (!is_local_state_run(state_info->pwr_domain_state[i]))
-			return i;
-	}
-
-	return PSCI_INVALID_PWR_LVL;
-}
-
-/*******************************************************************************
- * This function is passed a cpu_index and the highest level in the topology
- * tree that the operation should be applied to. It picks up locks in order of
- * increasing power domain level in the range specified.
- ******************************************************************************/
-void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
-				   unsigned int cpu_idx)
-{
-	unsigned int parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
-	unsigned int level;
-
-	/* No locking required for level 0. Hence start locking from level 1 */
-	for (level = PSCI_CPU_PWR_LVL + 1; level <= end_pwrlvl; level++) {
-		psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
-		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
-	}
-}
-
-/*******************************************************************************
- * This function is passed a cpu_index and the highest level in the topology
- * tree that the operation should be applied to. It releases the locks in order
- * of decreasing power domain level in the range specified.
- ******************************************************************************/
-void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
-				   unsigned int cpu_idx)
-{
-	unsigned int parent_idx, parent_nodes[PLAT_MAX_PWR_LVL] = {0};
-	int level;
-
-	/* Get the parent nodes */
-	psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);
-
-	/* Unlock top down. No unlocking required for level 0. */
-	for (level = end_pwrlvl; level >= PSCI_CPU_PWR_LVL + 1; level--) {
-		parent_idx = parent_nodes[level - 1];
-		psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
-	}
-}
-
-/*******************************************************************************
- * Simple routine to determine whether an mpidr is valid or not.
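
The pairing of the two locking routines above is what keeps concurrent PSCI calls deadlock-free: every CPU acquires the per-domain locks bottom-up and releases them top-down, so CPUs contending for overlapping ancestor domains always take them in the same order. A hedged pthread analogy of that discipline (the firmware uses bakery locks, not pthreads, and locks one node per level rather than one lock per level; this is an illustrative simplification):

#include <pthread.h>

#define NUM_LEVELS 2	/* illustrative: cluster and system */

static pthread_mutex_t level_lock[NUM_LEVELS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};

/* Acquire in order of increasing level, like psci_acquire_pwr_domain_locks() */
static void acquire_up_to(int end_lvl)
{
	for (int lvl = 0; lvl < end_lvl; lvl++)
		pthread_mutex_lock(&level_lock[lvl]);
}

/* Release in order of decreasing level, like psci_release_pwr_domain_locks() */
static void release_down_from(int end_lvl)
{
	for (int lvl = end_lvl - 1; lvl >= 0; lvl--)
		pthread_mutex_unlock(&level_lock[lvl]);
}

int main(void)
{
	acquire_up_to(NUM_LEVELS);
	/* ... snapshot the topology, run state coordination ... */
	release_down_from(NUM_LEVELS);
	return 0;
}
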
- ******************************************************************************/ -int psci_validate_mpidr(u_register_t mpidr) -{ - if (plat_core_pos_by_mpidr(mpidr) < 0) - return PSCI_E_INVALID_PARAMS; - - return PSCI_E_SUCCESS; -} - -/******************************************************************************* - * This function determines the full entrypoint information for the requested - * PSCI entrypoint on power on/resume and returns it. - ******************************************************************************/ -static int psci_get_ns_ep_info(entry_point_info_t *ep, - uintptr_t entrypoint, - u_register_t context_id) -{ - unsigned long ep_attr, sctlr; - unsigned int daif, ee, mode; - unsigned long ns_scr_el3 = read_scr_el3(); - unsigned long ns_sctlr_el1 = read_sctlr_el1(); - - sctlr = ns_scr_el3 & SCR_HCE_BIT ? read_sctlr_el2() : ns_sctlr_el1; - ee = 0; - - ep_attr = NON_SECURE | EP_ST_DISABLE; - if (sctlr & SCTLR_EE_BIT) { - ep_attr |= EP_EE_BIG; - ee = 1; - } - SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr); - - ep->pc = entrypoint; - memset(&ep->args, 0, sizeof(ep->args)); - ep->args.arg0 = context_id; - - /* - * Figure out whether the cpu enters the non-secure address space - * in aarch32 or aarch64 - */ - if (ns_scr_el3 & SCR_RW_BIT) { - - /* - * Check whether a Thumb entry point has been provided for an - * aarch64 EL - */ - if (entrypoint & 0x1) - return PSCI_E_INVALID_ADDRESS; - - mode = ns_scr_el3 & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1; - - ep->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS); - } else { - - mode = ns_scr_el3 & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc; - - /* - * TODO: Choose async. exception bits if HYP mode is not - * implemented according to the values of SCR.{AW, FW} bits - */ - daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT; - - ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif); - } - - return PSCI_E_SUCCESS; -} - -/******************************************************************************* - * This function validates the entrypoint with the platform layer if the - * appropriate pm_ops hook is exported by the platform and returns the - * 'entry_point_info'. - ******************************************************************************/ -int psci_validate_entry_point(entry_point_info_t *ep, - uintptr_t entrypoint, - u_register_t context_id) -{ - int rc; - - /* Validate the entrypoint using platform psci_ops */ - if (psci_plat_pm_ops->validate_ns_entrypoint) { - rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint); - if (rc != PSCI_E_SUCCESS) - return PSCI_E_INVALID_ADDRESS; - } - - /* - * Verify and derive the re-entry information for - * the non-secure world from the non-secure state from - * where this call originated. - */ - rc = psci_get_ns_ep_info(ep, entrypoint, context_id); - return rc; -} - -/******************************************************************************* - * Generic handler which is called when a cpu is physically powered on. It - * traverses the node information and finds the highest power level powered - * off and performs generic, architectural, platform setup and state management - * to power on that power level and power levels below it. - * e.g. For a cpu that's been powered on, it will call the platform specific - * code to enable the gic cpu interface and for a cluster it will enable - * coherency at the interconnect level in addition to gic cpu interface. 
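
Returning to psci_get_ns_ep_info() above: the mode selection is a two-bit decision table. SCR_EL3.RW picks AArch64 over AArch32, and SCR_EL3.HCE picks EL2/hyp over EL1/svc. A standalone sketch of that table (the bit positions are the architectural SCR_EL3 ones, reproduced here so the example is self-contained):

#include <stdio.h>

#define SCR_HCE_BIT	(1u << 8)	/* Hypervisor Call Enable */
#define SCR_RW_BIT	(1u << 10)	/* lower ELs are AArch64 when set */

/* Printable name for the non-secure entry mode chosen from SCR_EL3 */
static const char *ns_entry_mode(unsigned int scr_el3)
{
	if (scr_el3 & SCR_RW_BIT)
		return (scr_el3 & SCR_HCE_BIT) ? "EL2h (AArch64)" : "EL1h (AArch64)";
	return (scr_el3 & SCR_HCE_BIT) ? "hyp (AArch32)" : "svc (AArch32)";
}

int main(void)
{
	printf("%s\n", ns_entry_mode(SCR_RW_BIT | SCR_HCE_BIT));	/* EL2h */
	printf("%s\n", ns_entry_mode(SCR_RW_BIT));			/* EL1h */
	printf("%s\n", ns_entry_mode(0));				/* svc  */
	return 0;
}
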
- ******************************************************************************/ -void psci_power_up_finish(void) -{ - unsigned int end_pwrlvl, cpu_idx = plat_my_core_pos(); - psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} }; - - /* - * Verify that we have been explicitly turned ON or resumed from - * suspend. - */ - if (psci_get_aff_info_state() == AFF_STATE_OFF) { - ERROR("Unexpected affinity info state"); - panic(); - } - - /* - * Get the maximum power domain level to traverse to after this cpu - * has been physically powered up. - */ - end_pwrlvl = get_power_on_target_pwrlvl(); - - /* - * This function acquires the lock corresponding to each power level so - * that by the time all locks are taken, the system topology is snapshot - * and state management can be done safely. - */ - psci_acquire_pwr_domain_locks(end_pwrlvl, - cpu_idx); - -#if ENABLE_PSCI_STAT - /* - * Capture power up time-stamp. - * No cache maintenance is required as caches are off - * and writes are direct to the main memory. - */ - PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_EXIT_LOW_PWR, - PMF_NO_CACHE_MAINT); -#endif - - psci_get_target_local_pwr_states(end_pwrlvl, &state_info); - - /* - * This CPU could be resuming from suspend or it could have just been - * turned on. To distinguish between these 2 cases, we examine the - * affinity state of the CPU: - * - If the affinity state is ON_PENDING then it has just been - * turned on. - * - Else it is resuming from suspend. - * - * Depending on the type of warm reset identified, choose the right set - * of power management handler and perform the generic, architecture - * and platform specific handling. - */ - if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING) - psci_cpu_on_finish(cpu_idx, &state_info); - else - psci_cpu_suspend_finish(cpu_idx, &state_info); - - /* - * Set the requested and target state of this CPU and all the higher - * power domains which are ancestors of this CPU to run. - */ - psci_set_pwr_domains_to_run(end_pwrlvl); - -#if ENABLE_PSCI_STAT - /* - * Update PSCI stats. - * Caches are off when writing stats data on the power down path. - * Since caches are now enabled, it's necessary to do cache - * maintenance before reading that same data. - */ - psci_stats_update_pwr_up(end_pwrlvl, &state_info, PMF_CACHE_MAINT); -#endif - - /* - * This loop releases the lock corresponding to each power level - * in the reverse order to which they were acquired. - */ - psci_release_pwr_domain_locks(end_pwrlvl, - cpu_idx); -} - -/******************************************************************************* - * This function initializes the set of hooks that PSCI invokes as part of power - * management operation. The power management hooks are expected to be provided - * by the SPD, after it finishes all its initialization - ******************************************************************************/ -void psci_register_spd_pm_hook(const spd_pm_ops_t *pm) -{ - assert(pm); - psci_spd_pm = pm; - - if (pm->svc_migrate) - psci_caps |= define_psci_cap(PSCI_MIG_AARCH64); - - if (pm->svc_migrate_info) - psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) - | define_psci_cap(PSCI_MIG_INFO_TYPE); -} - -/******************************************************************************* - * This function invokes the migrate info hook in the spd_pm_ops. It performs - * the necessary return value validation. 
If the Secure Payload is UP and - * migrate capable, it returns the mpidr of the CPU on which the Secure payload - * is resident through the mpidr parameter. Else the value of the parameter on - * return is undefined. - ******************************************************************************/ -int psci_spd_migrate_info(u_register_t *mpidr) -{ - int rc; - - if (!psci_spd_pm || !psci_spd_pm->svc_migrate_info) - return PSCI_E_NOT_SUPPORTED; - - rc = psci_spd_pm->svc_migrate_info(mpidr); - - assert(rc == PSCI_TOS_UP_MIG_CAP || rc == PSCI_TOS_NOT_UP_MIG_CAP \ - || rc == PSCI_TOS_NOT_PRESENT_MP || rc == PSCI_E_NOT_SUPPORTED); - - return rc; -} - - -/******************************************************************************* - * This function prints the state of all power domains present in the - * system - ******************************************************************************/ -void psci_print_power_domain_map(void) -{ -#if LOG_LEVEL >= LOG_LEVEL_INFO - unsigned int idx; - plat_local_state_t state; - plat_local_state_type_t state_type; - - /* This array maps to the PSCI_STATE_X definitions in psci.h */ - static const char *psci_state_type_str[] = { - "ON", - "RETENTION", - "OFF", - }; - - INFO("PSCI Power Domain Map:\n"); - for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - PLATFORM_CORE_COUNT); - idx++) { - state_type = find_local_state_type( - psci_non_cpu_pd_nodes[idx].local_state); - INFO(" Domain Node : Level %u, parent_node %d," - " State %s (0x%x)\n", - psci_non_cpu_pd_nodes[idx].level, - psci_non_cpu_pd_nodes[idx].parent_node, - psci_state_type_str[state_type], - psci_non_cpu_pd_nodes[idx].local_state); - } - - for (idx = 0; idx < PLATFORM_CORE_COUNT; idx++) { - state = psci_get_cpu_local_state_by_idx(idx); - state_type = find_local_state_type(state); - INFO(" CPU Node : MPID 0x%lx, parent_node %d," - " State %s (0x%x)\n", - psci_cpu_pd_nodes[idx].mpidr, - psci_cpu_pd_nodes[idx].parent_node, - psci_state_type_str[state_type], - psci_get_cpu_local_state_by_idx(idx)); - } -#endif -} - -#if ENABLE_PLAT_COMPAT -/******************************************************************************* - * PSCI Compatibility helper function to return the 'power_state' parameter of - * the PSCI CPU SUSPEND request for the current CPU. Returns PSCI_INVALID_DATA - * if not invoked within CPU_SUSPEND for the current CPU. - ******************************************************************************/ -int psci_get_suspend_powerstate(void) -{ - /* Sanity check to verify that CPU is within CPU_SUSPEND */ - if (psci_get_aff_info_state() == AFF_STATE_ON && - !is_local_state_run(psci_get_cpu_local_state())) - return psci_power_state_compat[plat_my_core_pos()]; - - return PSCI_INVALID_DATA; -} - -/******************************************************************************* - * PSCI Compatibility helper function to return the state id of the current - * cpu encoded in the 'power_state' parameter. Returns PSCI_INVALID_DATA - * if not invoked within CPU_SUSPEND for the current CPU. 
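
The 'power_state' parameter that these compatibility helpers decode follows the original PSCI 0.2 layout: state id in bits [15:0], state type (standby vs. powerdown) in bit 16, affinity level in bits [25:24]. A small encoder sketch of that layout (the macro names here are local to the example, not the ones from psci.h):

#include <assert.h>
#include <stdint.h>

#define PSTATE_ID_MASK		0xffffu
#define PSTATE_TYPE_SHIFT	16
#define PSTATE_AFFLVL_SHIFT	24
#define PSTATE_AFFLVL_MASK	0x3u

/* Build a PSCI 0.2 format power_state value */
static uint32_t make_power_state(uint32_t id, uint32_t is_powerdown,
				 uint32_t afflvl)
{
	return (id & PSTATE_ID_MASK) |
	       (is_powerdown << PSTATE_TYPE_SHIFT) |
	       ((afflvl & PSTATE_AFFLVL_MASK) << PSTATE_AFFLVL_SHIFT);
}

int main(void)
{
	/* powerdown request at affinity level 1 with platform state id 0x22 */
	uint32_t ps = make_power_state(0x22, 1, 1);

	assert((ps & PSTATE_ID_MASK) == 0x22);		/* cf. psci_get_pstate_id()   */
	assert((ps >> PSTATE_TYPE_SHIFT) & 1);		/* cf. psci_get_pstate_type() */
	assert(((ps >> PSTATE_AFFLVL_SHIFT) & PSTATE_AFFLVL_MASK) == 1);
	return 0;
}
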
- ******************************************************************************/ -int psci_get_suspend_stateid(void) -{ - unsigned int power_state; - power_state = psci_get_suspend_powerstate(); - if (power_state != PSCI_INVALID_DATA) - return psci_get_pstate_id(power_state); - - return PSCI_INVALID_DATA; -} - -/******************************************************************************* - * PSCI Compatibility helper function to return the state id encoded in the - * 'power_state' parameter of the CPU specified by 'mpidr'. Returns - * PSCI_INVALID_DATA if the CPU is not in CPU_SUSPEND. - ******************************************************************************/ -int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr) -{ - int cpu_idx = plat_core_pos_by_mpidr(mpidr); - - if (cpu_idx == -1) - return PSCI_INVALID_DATA; - - /* Sanity check to verify that the CPU is in CPU_SUSPEND */ - if (psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_ON && - !is_local_state_run(psci_get_cpu_local_state_by_idx(cpu_idx))) - return psci_get_pstate_id(psci_power_state_compat[cpu_idx]); - - return PSCI_INVALID_DATA; -} - -/******************************************************************************* - * This function returns highest affinity level which is in OFF - * state. The affinity instance with which the level is associated is - * determined by the caller. - ******************************************************************************/ -unsigned int psci_get_max_phys_off_afflvl(void) -{ - psci_power_state_t state_info; - - memset(&state_info, 0, sizeof(state_info)); - psci_get_target_local_pwr_states(PLAT_MAX_PWR_LVL, &state_info); - - return psci_find_target_suspend_lvl(&state_info); -} - -/******************************************************************************* - * PSCI Compatibility helper function to return target affinity level requested - * for the CPU_SUSPEND. This function assumes affinity levels correspond to - * power domain levels on the platform. - ******************************************************************************/ -int psci_get_suspend_afflvl(void) -{ - return psci_get_suspend_pwrlvl(); -} - -#endif diff --git a/services/std_svc/psci/psci_entry.S b/services/std_svc/psci/psci_entry.S deleted file mode 100644 index f8c0afa..0000000 --- a/services/std_svc/psci/psci_entry.S +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include - - .globl psci_entrypoint - .globl psci_power_down_wfi - - /* -------------------------------------------------------------------- - * This CPU has been physically powered up. It is either resuming from - * suspend or has simply been turned on. In both cases, call the power - * on finisher. - * -------------------------------------------------------------------- - */ -func psci_entrypoint - /* - * On the warm boot path, most of the EL3 initialisations performed by - * 'el3_entrypoint_common' must be skipped: - * - * - Only when the platform bypasses the BL1/BL31 entrypoint by - * programming the reset address do we need to set the CPU endianness. - * In other cases, we assume this has been taken care by the - * entrypoint code. - * - * - No need to determine the type of boot, we know it is a warm boot. - * - * - Do not try to distinguish between primary and secondary CPUs, this - * notion only exists for a cold boot. - * - * - No need to initialise the memory or the C runtime environment, - * it has been done once and for all on the cold boot path. - */ - el3_entrypoint_common \ - _set_endian=PROGRAMMABLE_RESET_ADDRESS \ - _warm_boot_mailbox=0 \ - _secondary_cold_boot=0 \ - _init_memory=0 \ - _init_c_runtime=0 \ - _exception_vectors=runtime_exceptions - - /* -------------------------------------------- - * Enable the MMU with the DCache disabled. It - * is safe to use stacks allocated in normal - * memory as a result. All memory accesses are - * marked nGnRnE when the MMU is disabled. So - * all the stack writes will make it to memory. - * All memory accesses are marked Non-cacheable - * when the MMU is enabled but D$ is disabled. - * So used stack memory is guaranteed to be - * visible immediately after the MMU is enabled - * Enabling the DCache at the same time as the - * MMU can lead to speculatively fetched and - * possibly stale stack memory being read from - * other caches. This can lead to coherency - * issues. - * -------------------------------------------- - */ - mov x0, #DISABLE_DCACHE - bl bl31_plat_enable_mmu - - bl psci_power_up_finish - - b el3_exit -endfunc psci_entrypoint - - /* -------------------------------------------- - * This function is called to indicate to the - * power controller that it is safe to power - * down this cpu. It should not exit the wfi - * and will be released from reset upon power - * up. 'wfi_spill' is used to catch erroneous - * exits from wfi. - * -------------------------------------------- - */ -func psci_power_down_wfi - dsb sy // ensure write buffer empty - wfi - bl plat_panic_handler -endfunc psci_power_down_wfi - diff --git a/services/std_svc/psci/psci_helpers.S b/services/std_svc/psci/psci_helpers.S deleted file mode 100644 index 6ccf943..0000000 --- a/services/std_svc/psci/psci_helpers.S +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved. 
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include
-#include
-#include
-#include
-
-	.globl	psci_do_pwrdown_cache_maintenance
-	.globl	psci_do_pwrup_cache_maintenance
-
-/* -----------------------------------------------------------------------
- * void psci_do_pwrdown_cache_maintenance(unsigned int power_level);
- *
- * This function performs cache maintenance for the specified power
- * level. The levels of cache affected are determined by the power
- * level which is passed as the argument i.e. level 0 results
- * in a flush of the L1 cache. Both the L1 and L2 caches are flushed
- * for a higher power level.
- *
- * Additionally, this function also ensures that stack memory is correctly
- * flushed out to avoid coherency issues due to a change in its memory
- * attributes after the data cache is disabled.
- * -----------------------------------------------------------------------
- */
-func psci_do_pwrdown_cache_maintenance
-	stp	x29, x30, [sp,#-16]!
-	stp	x19, x20, [sp,#-16]!
-
-	/* ---------------------------------------------
-	 * Determine how many levels of cache will be
-	 * subject to cache maintenance. Power level
-	 * 0 implies that only the cpu is being powered
-	 * down. Only the L1 data cache needs to be
-	 * flushed to the PoU in this case. For a higher
-	 * power level we are assuming that a flush
-	 * of L1 data and L2 unified cache is enough.
-	 * This information should be provided by the
-	 * platform.
-	 * ---------------------------------------------
-	 */
-	cmp	w0, #PSCI_CPU_PWR_LVL
-	b.eq	do_core_pwr_dwn
-	bl	prepare_cluster_pwr_dwn
-	b	do_stack_maintenance
-
-do_core_pwr_dwn:
-	bl	prepare_core_pwr_dwn
-
-	/* ---------------------------------------------
-	 * Do stack maintenance by flushing the used
-	 * stack to the main memory and invalidating the
-	 * remainder.
-	 * ---------------------------------------------
-	 */
-do_stack_maintenance:
-	bl	plat_get_my_stack
-
-	/* ---------------------------------------------
-	 * Calculate and store the size of the used
-	 * stack memory in x1.
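
In C terms, the stack maintenance that follows performs this address arithmetic: plat_get_my_stack() returns the high end of the stack, so [sp, stack_top) is live and must be flushed, while the untouched [stack_top - PLATFORM_STACK_SIZE, sp) only needs invalidating. A sketch of the math, with the privileged cache helpers stubbed out since they cannot run outside EL3:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define PLATFORM_STACK_SIZE 0x1000	/* illustrative */

/* Stubs standing in for the EL3 cache helpers used by the assembly */
static void flush_dcache_range(uintptr_t base, size_t size) { (void)base; (void)size; }
static void inv_dcache_range(uintptr_t base, size_t size) { (void)base; (void)size; }

static void stack_maintenance(uintptr_t stack_top, uintptr_t sp)
{
	uintptr_t stack_base = stack_top - PLATFORM_STACK_SIZE;

	assert(sp <= stack_top && sp >= stack_base);

	/* Used portion: flush so the data survives the D$ being turned off */
	flush_dcache_range(sp, stack_top - sp);

	/* Unused portion: invalidate so no stale lines are written back later */
	inv_dcache_range(stack_base, sp - stack_base);
}

int main(void)
{
	stack_maintenance(0x82001000u, 0x82000f00u);	/* flush 0x100, invalidate 0xf00 */
	return 0;
}
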
- * --------------------------------------------- - */ - mov x19, x0 - mov x1, sp - sub x1, x0, x1 - mov x0, sp - bl flush_dcache_range - - /* --------------------------------------------- - * Calculate and store the size of the unused - * stack memory in x1. Calculate and store the - * stack base address in x0. - * --------------------------------------------- - */ - sub x0, x19, #PLATFORM_STACK_SIZE - sub x1, sp, x0 - bl inv_dcache_range - - ldp x19, x20, [sp], #16 - ldp x29, x30, [sp], #16 - ret -endfunc psci_do_pwrdown_cache_maintenance - - -/* ----------------------------------------------------------------------- - * void psci_do_pwrup_cache_maintenance(void); - * - * This function performs cache maintenance after this cpu is powered up. - * Currently, this involves managing the used stack memory before turning - * on the data cache. - * ----------------------------------------------------------------------- - */ -func psci_do_pwrup_cache_maintenance - stp x29, x30, [sp,#-16]! - - /* --------------------------------------------- - * Ensure any inflight stack writes have made it - * to main memory. - * --------------------------------------------- - */ - dmb st - - /* --------------------------------------------- - * Calculate and store the size of the used - * stack memory in x1. Calculate and store the - * stack base address in x0. - * --------------------------------------------- - */ - bl plat_get_my_stack - mov x1, sp - sub x1, x0, x1 - mov x0, sp - bl inv_dcache_range - - /* --------------------------------------------- - * Enable the data cache. - * --------------------------------------------- - */ - mrs x0, sctlr_el3 - orr x0, x0, #SCTLR_C_BIT - msr sctlr_el3, x0 - isb - - ldp x29, x30, [sp], #16 - ret -endfunc psci_do_pwrup_cache_maintenance diff --git a/services/std_svc/psci/psci_main.c b/services/std_svc/psci/psci_main.c deleted file mode 100644 index 86f45ca..0000000 --- a/services/std_svc/psci/psci_main.c +++ /dev/null @@ -1,440 +0,0 @@ -/* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include "psci_private.h"
-
-/*******************************************************************************
- * PSCI frontend api for servicing SMCs. Described in the PSCI spec.
- ******************************************************************************/
-int psci_cpu_on(u_register_t target_cpu,
-		uintptr_t entrypoint,
-		u_register_t context_id)
-
-{
-	int rc;
-	entry_point_info_t ep;
-
-	/* Determine if the cpu exists or not */
-	rc = psci_validate_mpidr(target_cpu);
-	if (rc != PSCI_E_SUCCESS)
-		return PSCI_E_INVALID_PARAMS;
-
-	/* Validate the entry point and get the entry_point_info */
-	rc = psci_validate_entry_point(&ep, entrypoint, context_id);
-	if (rc != PSCI_E_SUCCESS)
-		return rc;
-
-	/*
-	 * To turn this cpu on, specify which power
-	 * levels need to be turned on
-	 */
-	return psci_cpu_on_start(target_cpu, &ep);
-}
-
-unsigned int psci_version(void)
-{
-	return PSCI_MAJOR_VER | PSCI_MINOR_VER;
-}
-
-int psci_cpu_suspend(unsigned int power_state,
-		     uintptr_t entrypoint,
-		     u_register_t context_id)
-{
-	int rc;
-	unsigned int target_pwrlvl, is_power_down_state;
-	entry_point_info_t ep;
-	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
-	plat_local_state_t cpu_pd_state;
-
-	/* Validate the power_state parameter */
-	rc = psci_validate_power_state(power_state, &state_info);
-	if (rc != PSCI_E_SUCCESS) {
-		assert(rc == PSCI_E_INVALID_PARAMS);
-		return rc;
-	}
-
-	/*
-	 * Get the value of the state type bit from the power state parameter.
-	 */
-	is_power_down_state = psci_get_pstate_type(power_state);
-
-	/* Sanity check the requested suspend levels */
-	assert (psci_validate_suspend_req(&state_info, is_power_down_state)
-			== PSCI_E_SUCCESS);
-
-	target_pwrlvl = psci_find_target_suspend_lvl(&state_info);
-
-	/* Fast path for CPU standby. */
-	if (is_cpu_standby_req(is_power_down_state, target_pwrlvl)) {
-		if (!psci_plat_pm_ops->cpu_standby)
-			return PSCI_E_INVALID_PARAMS;
-
-		/*
-		 * Set the state of the CPU power domain to the platform
-		 * specific retention state and enter the standby state.
-		 */
-		cpu_pd_state = state_info.pwr_domain_state[PSCI_CPU_PWR_LVL];
-		psci_set_cpu_local_state(cpu_pd_state);
-
-#if ENABLE_PSCI_STAT
-		/*
-		 * Capture time-stamp before CPU standby
-		 * No cache maintenance is needed as caches
-		 * are ON throughout the CPU standby operation.
-		 */
-		PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
-			PMF_NO_CACHE_MAINT);
-#endif
-
-		psci_plat_pm_ops->cpu_standby(cpu_pd_state);
-
-		/* Upon exit from standby, set the state back to RUN.
*/ - psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN); - -#if ENABLE_PSCI_STAT - /* Capture time-stamp after CPU standby */ - PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_EXIT_LOW_PWR, - PMF_NO_CACHE_MAINT); - - /* Update PSCI stats */ - psci_stats_update_pwr_up(PSCI_CPU_PWR_LVL, &state_info, - PMF_NO_CACHE_MAINT); -#endif - - return PSCI_E_SUCCESS; - } - - /* - * If a power down state has been requested, we need to verify entry - * point and program entry information. - */ - if (is_power_down_state) { - rc = psci_validate_entry_point(&ep, entrypoint, context_id); - if (rc != PSCI_E_SUCCESS) - return rc; - } - - /* - * Do what is needed to enter the power down state. Upon success, - * enter the final wfi which will power down this CPU. This function - * might return if the power down was abandoned for any reason, e.g. - * arrival of an interrupt - */ - psci_cpu_suspend_start(&ep, - target_pwrlvl, - &state_info, - is_power_down_state); - - return PSCI_E_SUCCESS; -} - - -int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id) -{ - int rc; - psci_power_state_t state_info; - entry_point_info_t ep; - - /* Check if the current CPU is the last ON CPU in the system */ - if (!psci_is_last_on_cpu()) - return PSCI_E_DENIED; - - /* Validate the entry point and get the entry_point_info */ - rc = psci_validate_entry_point(&ep, entrypoint, context_id); - if (rc != PSCI_E_SUCCESS) - return rc; - - /* Query the psci_power_state for system suspend */ - psci_query_sys_suspend_pwrstate(&state_info); - - /* Ensure that the psci_power_state makes sense */ - assert(psci_find_target_suspend_lvl(&state_info) == PLAT_MAX_PWR_LVL); - assert(psci_validate_suspend_req(&state_info, PSTATE_TYPE_POWERDOWN) - == PSCI_E_SUCCESS); - assert(is_local_state_off(state_info.pwr_domain_state[PLAT_MAX_PWR_LVL])); - - /* - * Do what is needed to enter the system suspend state. This function - * might return if the power down was abandoned for any reason, e.g. - * arrival of an interrupt - */ - psci_cpu_suspend_start(&ep, - PLAT_MAX_PWR_LVL, - &state_info, - PSTATE_TYPE_POWERDOWN); - - return PSCI_E_SUCCESS; -} - -int psci_cpu_off(void) -{ - int rc; - unsigned int target_pwrlvl = PLAT_MAX_PWR_LVL; - - /* - * Do what is needed to power off this CPU and possible higher power - * levels if it able to do so. Upon success, enter the final wfi - * which will power down this CPU. - */ - rc = psci_do_cpu_off(target_pwrlvl); - - /* - * The only error cpu_off can return is E_DENIED. So check if that's - * indeed the case. - */ - assert (rc == PSCI_E_DENIED); - - return rc; -} - -int psci_affinity_info(u_register_t target_affinity, - unsigned int lowest_affinity_level) -{ - unsigned int target_idx; - - /* We dont support level higher than PSCI_CPU_PWR_LVL */ - if (lowest_affinity_level > PSCI_CPU_PWR_LVL) - return PSCI_E_INVALID_PARAMS; - - /* Calculate the cpu index of the target */ - target_idx = plat_core_pos_by_mpidr(target_affinity); - if (target_idx == -1) - return PSCI_E_INVALID_PARAMS; - - return psci_get_aff_info_state_by_idx(target_idx); -} - -int psci_migrate(u_register_t target_cpu) -{ - int rc; - u_register_t resident_cpu_mpidr; - - rc = psci_spd_migrate_info(&resident_cpu_mpidr); - if (rc != PSCI_TOS_UP_MIG_CAP) - return (rc == PSCI_TOS_NOT_UP_MIG_CAP) ? - PSCI_E_DENIED : PSCI_E_NOT_SUPPORTED; - - /* - * Migrate should only be invoked on the CPU where - * the Secure OS is resident. 
-	 */
-	if (resident_cpu_mpidr != read_mpidr_el1())
-		return PSCI_E_NOT_PRESENT;
-
-	/* Check the validity of the specified target cpu */
-	rc = psci_validate_mpidr(target_cpu);
-	if (rc != PSCI_E_SUCCESS)
-		return PSCI_E_INVALID_PARAMS;
-
-	assert(psci_spd_pm && psci_spd_pm->svc_migrate);
-
-	rc = psci_spd_pm->svc_migrate(read_mpidr_el1(), target_cpu);
-	assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
-
-	return rc;
-}
-
-int psci_migrate_info_type(void)
-{
-	u_register_t resident_cpu_mpidr;
-
-	return psci_spd_migrate_info(&resident_cpu_mpidr);
-}
-
-long psci_migrate_info_up_cpu(void)
-{
-	u_register_t resident_cpu_mpidr;
-	int rc;
-
-	/*
-	 * The return value of this function depends on what
-	 * psci_spd_migrate_info() returns.
-	 */
-	rc = psci_spd_migrate_info(&resident_cpu_mpidr);
-	if (rc != PSCI_TOS_NOT_UP_MIG_CAP && rc != PSCI_TOS_UP_MIG_CAP)
-		return PSCI_E_INVALID_PARAMS;
-
-	return resident_cpu_mpidr;
-}
-
-int psci_features(unsigned int psci_fid)
-{
-	unsigned int local_caps = psci_caps;
-
-	/* Check if it is a 64 bit function */
-	if (((psci_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64)
-		local_caps &= PSCI_CAP_64BIT_MASK;
-
-	/* Check for an invalid fid */
-	if (!(is_std_svc_call(psci_fid) && is_valid_fast_smc(psci_fid)
-			&& is_psci_fid(psci_fid)))
-		return PSCI_E_NOT_SUPPORTED;
-
-	/* Check whether the psci fid is supported or not */
-	if (!(local_caps & define_psci_cap(psci_fid)))
-		return PSCI_E_NOT_SUPPORTED;
-
-	/* Format the feature flags */
-	if (psci_fid == PSCI_CPU_SUSPEND_AARCH32 ||
-			psci_fid == PSCI_CPU_SUSPEND_AARCH64) {
-		/*
-		 * The trusted firmware does not support OS Initiated Mode.
-		 */
-		return (FF_PSTATE << FF_PSTATE_SHIFT) |
-			((!FF_SUPPORTS_OS_INIT_MODE) << FF_MODE_SUPPORT_SHIFT);
-	}
-
-	/* Return 0 for all other fids */
-	return PSCI_E_SUCCESS;
-}
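Note: the feature flags formatted above follow the PSCI v1.0 FEATURES layout for CPU_SUSPEND, where bit[0] reports OS-initiated mode support and bit[1] the power_state parameter format. A minimal, standalone sketch of how a normal-world caller might decode the return value; the bit positions are quoted from the spec, not from this patch:

	#include <stdint.h>

	/* Assumed PSCI v1.0 FEATURES flag layout for CPU_SUSPEND. */
	#define FF_MODE_SUPPORT_FLAG	(1u << 0)	/* OS-initiated mode supported */
	#define FF_PSTATE_FLAG		(1u << 1)	/* set: extended StateID format */

	static void decode_cpu_suspend_features(int32_t ret)
	{
		if (ret < 0)
			return;		/* PSCI_E_NOT_SUPPORTED */

		int os_init_mode = !!((uint32_t)ret & FF_MODE_SUPPORT_FLAG);
		int extended_stateid = !!((uint32_t)ret & FF_PSTATE_FLAG);

		/* With psci_features() above, both flags are always 0. */
		(void)os_init_mode;
		(void)extended_stateid;
	}

-
-/*******************************************************************************
- * PSCI top level handler for servicing SMCs.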
- ******************************************************************************/ -uint64_t psci_smc_handler(uint32_t smc_fid, - uint64_t x1, - uint64_t x2, - uint64_t x3, - uint64_t x4, - void *cookie, - void *handle, - uint64_t flags) -{ - if (is_caller_secure(flags)) - SMC_RET1(handle, SMC_UNK); - - /* Check the fid against the capabilities */ - if (!(psci_caps & define_psci_cap(smc_fid))) - SMC_RET1(handle, SMC_UNK); - - if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) { - /* 32-bit PSCI function, clear top parameter bits */ - - x1 = (uint32_t)x1; - x2 = (uint32_t)x2; - x3 = (uint32_t)x3; - - switch (smc_fid) { - case PSCI_VERSION: - SMC_RET1(handle, psci_version()); - - case PSCI_CPU_OFF: - SMC_RET1(handle, psci_cpu_off()); - - case PSCI_CPU_SUSPEND_AARCH32: - SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3)); - - case PSCI_CPU_ON_AARCH32: - SMC_RET1(handle, psci_cpu_on(x1, x2, x3)); - - case PSCI_AFFINITY_INFO_AARCH32: - SMC_RET1(handle, psci_affinity_info(x1, x2)); - - case PSCI_MIG_AARCH32: - SMC_RET1(handle, psci_migrate(x1)); - - case PSCI_MIG_INFO_TYPE: - SMC_RET1(handle, psci_migrate_info_type()); - - case PSCI_MIG_INFO_UP_CPU_AARCH32: - SMC_RET1(handle, psci_migrate_info_up_cpu()); - - case PSCI_SYSTEM_SUSPEND_AARCH32: - SMC_RET1(handle, psci_system_suspend(x1, x2)); - - case PSCI_SYSTEM_OFF: - psci_system_off(); - /* We should never return from psci_system_off() */ - - case PSCI_SYSTEM_RESET: - psci_system_reset(); - /* We should never return from psci_system_reset() */ - - case PSCI_FEATURES: - SMC_RET1(handle, psci_features(x1)); - -#if ENABLE_PSCI_STAT - case PSCI_STAT_RESIDENCY_AARCH32: - SMC_RET1(handle, psci_stat_residency(x1, x2)); - - case PSCI_STAT_COUNT_AARCH32: - SMC_RET1(handle, psci_stat_count(x1, x2)); -#endif - - default: - break; - } - } else { - /* 64-bit PSCI function */ - - switch (smc_fid) { - case PSCI_CPU_SUSPEND_AARCH64: - SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3)); - - case PSCI_CPU_ON_AARCH64: - SMC_RET1(handle, psci_cpu_on(x1, x2, x3)); - - case PSCI_AFFINITY_INFO_AARCH64: - SMC_RET1(handle, psci_affinity_info(x1, x2)); - - case PSCI_MIG_AARCH64: - SMC_RET1(handle, psci_migrate(x1)); - - case PSCI_MIG_INFO_UP_CPU_AARCH64: - SMC_RET1(handle, psci_migrate_info_up_cpu()); - - case PSCI_SYSTEM_SUSPEND_AARCH64: - SMC_RET1(handle, psci_system_suspend(x1, x2)); - -#if ENABLE_PSCI_STAT - case PSCI_STAT_RESIDENCY_AARCH64: - SMC_RET1(handle, psci_stat_residency(x1, x2)); - - case PSCI_STAT_COUNT_AARCH64: - SMC_RET1(handle, psci_stat_count(x1, x2)); -#endif - - default: - break; - } - } - - WARN("Unimplemented PSCI Call: 0x%x \n", smc_fid); - SMC_RET1(handle, SMC_UNK); -} diff --git a/services/std_svc/psci/psci_off.c b/services/std_svc/psci/psci_off.c deleted file mode 100644 index 36dab49..0000000 --- a/services/std_svc/psci/psci_off.c +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. 
- * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include -#include "psci_private.h" - -/****************************************************************************** - * Construct the psci_power_state to request power OFF at all power levels. - ******************************************************************************/ -static void psci_set_power_off_state(psci_power_state_t *state_info) -{ - int lvl; - - for (lvl = PSCI_CPU_PWR_LVL; lvl <= PLAT_MAX_PWR_LVL; lvl++) - state_info->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE; -} - -/****************************************************************************** - * Top level handler which is called when a cpu wants to power itself down. - * It's assumed that along with turning the cpu power domain off, power - * domains at higher levels will be turned off as far as possible. It finds - * the highest level where a domain has to be powered off by traversing the - * node information and then performs generic, architectural, platform setup - * and state management required to turn OFF that power domain and domains - * below it. e.g. For a cpu that's to be powered OFF, it could mean programming - * the power controller whereas for a cluster that's to be powered off, it will - * call the platform specific code which will disable coherency at the - * interconnect level if the cpu is the last in the cluster and also the - * program the power controller. - ******************************************************************************/ -int psci_do_cpu_off(unsigned int end_pwrlvl) -{ - int rc = PSCI_E_SUCCESS, idx = plat_my_core_pos(); - psci_power_state_t state_info; - - /* - * This function must only be called on platforms where the - * CPU_OFF platform hooks have been implemented. - */ - assert(psci_plat_pm_ops->pwr_domain_off); - - /* - * This function acquires the lock corresponding to each power - * level so that by the time all locks are taken, the system topology - * is snapshot and state management can be done safely. - */ - psci_acquire_pwr_domain_locks(end_pwrlvl, - idx); - - /* - * Call the cpu off handler registered by the Secure Payload Dispatcher - * to let it do any bookkeeping. 
Assume that the SPD always reports an - * E_DENIED error if SP refuse to power down - */ - if (psci_spd_pm && psci_spd_pm->svc_off) { - rc = psci_spd_pm->svc_off(0); - if (rc) - goto exit; - } - - /* Construct the psci_power_state for CPU_OFF */ - psci_set_power_off_state(&state_info); - - /* - * This function is passed the requested state info and - * it returns the negotiated state info for each power level upto - * the end level specified. - */ - psci_do_state_coordination(end_pwrlvl, &state_info); - -#if ENABLE_PSCI_STAT - /* Update the last cpu for each level till end_pwrlvl */ - psci_stats_update_pwr_down(end_pwrlvl, &state_info); -#endif - - /* - * Arch. management. Perform the necessary steps to flush all - * cpu caches. - */ - psci_do_pwrdown_cache_maintenance(psci_find_max_off_lvl(&state_info)); - - /* - * Plat. management: Perform platform specific actions to turn this - * cpu off e.g. exit cpu coherency, program the power controller etc. - */ - psci_plat_pm_ops->pwr_domain_off(&state_info); - -#if ENABLE_PSCI_STAT - /* - * Capture time-stamp while entering low power state. - * No cache maintenance needed because caches are off - * and writes are direct to main memory. - */ - PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR, - PMF_NO_CACHE_MAINT); -#endif - -exit: - /* - * Release the locks corresponding to each power level in the - * reverse order to which they were acquired. - */ - psci_release_pwr_domain_locks(end_pwrlvl, - idx); - - /* - * Check if all actions needed to safely power down this cpu have - * successfully completed. - */ - if (rc == PSCI_E_SUCCESS) { - /* - * Set the affinity info state to OFF. This writes directly to - * main memory as caches are disabled, so cache maintenance is - * required to ensure that later cached reads of aff_info_state - * return AFF_STATE_OFF. A dsbish() ensures ordering of the - * update to the affinity info state prior to cache line - * invalidation. - */ - flush_cpu_data(psci_svc_cpu_data.aff_info_state); - psci_set_aff_info_state(AFF_STATE_OFF); - dsbish(); - inv_cpu_data(psci_svc_cpu_data.aff_info_state); - - if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi) { - /* This function must not return */ - psci_plat_pm_ops->pwr_domain_pwr_down_wfi(&state_info); - } else { - /* - * Enter a wfi loop which will allow the power - * controller to physically power down this cpu. - */ - psci_power_down_wfi(); - } - } - - return rc; -} diff --git a/services/std_svc/psci/psci_on.c b/services/std_svc/psci/psci_on.c deleted file mode 100644 index c8c36cd..0000000 --- a/services/std_svc/psci/psci_on.c +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "psci_private.h" - -/******************************************************************************* - * This function checks whether a cpu which has been requested to be turned on - * is OFF to begin with. - ******************************************************************************/ -static int cpu_on_validate_state(aff_info_state_t aff_state) -{ - if (aff_state == AFF_STATE_ON) - return PSCI_E_ALREADY_ON; - - if (aff_state == AFF_STATE_ON_PENDING) - return PSCI_E_ON_PENDING; - - assert(aff_state == AFF_STATE_OFF); - return PSCI_E_SUCCESS; -} - -/******************************************************************************* - * Generic handler which is called to physically power on a cpu identified by - * its mpidr. It performs the generic, architectural, platform setup and state - * management to power on the target cpu e.g. it will ensure that - * enough information is stashed for it to resume execution in the non-secure - * security state. - * - * The state of all the relevant power domains are changed after calling the - * platform handler as it can return error. - ******************************************************************************/ -int psci_cpu_on_start(u_register_t target_cpu, - entry_point_info_t *ep) -{ - int rc; - unsigned int target_idx = plat_core_pos_by_mpidr(target_cpu); - aff_info_state_t target_aff_state; - - /* Calling function must supply valid input arguments */ - assert((int) target_idx >= 0); - assert(ep != NULL); - - /* - * This function must only be called on platforms where the - * CPU_ON platform hooks have been implemented. - */ - assert(psci_plat_pm_ops->pwr_domain_on && - psci_plat_pm_ops->pwr_domain_on_finish); - - /* Protect against multiple CPUs trying to turn ON the same target CPU */ - psci_spin_lock_cpu(target_idx); - - /* - * Generic management: Ensure that the cpu is off to be - * turned on. - */ - rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx)); - if (rc != PSCI_E_SUCCESS) - goto exit; - - /* - * Call the cpu on handler registered by the Secure Payload Dispatcher - * to let it do any bookeeping. If the handler encounters an error, it's - * expected to assert within - */ - if (psci_spd_pm && psci_spd_pm->svc_on) - psci_spd_pm->svc_on(target_cpu); - - /* - * Set the Affinity info state of the target cpu to ON_PENDING. - * Flush aff_info_state as it will be accessed with caches - * turned OFF. 
-	 */
-	psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
-	flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
-
-	/*
-	 * The cache line invalidation by the target CPU after setting the
-	 * state to OFF (see psci_do_cpu_off()), could cause the update to
-	 * aff_info_state to be invalidated. Retry the update if the target
-	 * CPU aff_info_state is not ON_PENDING.
-	 */
-	target_aff_state = psci_get_aff_info_state_by_idx(target_idx);
-	if (target_aff_state != AFF_STATE_ON_PENDING) {
-		assert(target_aff_state == AFF_STATE_OFF);
-		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
-		flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
-
-		assert(psci_get_aff_info_state_by_idx(target_idx) == AFF_STATE_ON_PENDING);
-	}
-
-	/*
-	 * Perform generic, architecture and platform specific handling.
-	 */
-	/*
-	 * Plat. management: Give the platform the current state
-	 * of the target cpu to allow it to perform the necessary
-	 * steps to power on.
-	 */
-	rc = psci_plat_pm_ops->pwr_domain_on(target_cpu);
-	assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
-
-	if (rc == PSCI_E_SUCCESS)
-		/* Store the re-entry information for the non-secure world. */
-		cm_init_context_by_index(target_idx, ep);
-	else {
-		/* Restore the state on error. */
-		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF);
-		flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
-	}
-
-exit:
-	psci_spin_unlock_cpu(target_idx);
-	return rc;
-}
-
-/*******************************************************************************
- * The following functions finish an earlier power on request. They
- * are called by the common finisher routine in psci_common.c. The `state_info`
- * is the psci_power_state from which this CPU has woken up.
- ******************************************************************************/
-void psci_cpu_on_finish(unsigned int cpu_idx,
-			psci_power_state_t *state_info)
-{
-	/*
-	 * Plat. management: Perform the platform specific actions
-	 * for this cpu e.g. enabling the gic or zeroing the mailbox
-	 * register. The actual state of this cpu has already been
-	 * changed.
-	 */
-	psci_plat_pm_ops->pwr_domain_on_finish(state_info);
-
-	/*
-	 * Arch. management: Enable data cache and manage stack memory
-	 */
-	psci_do_pwrup_cache_maintenance();
-
-	/*
-	 * All the platform specific actions for turning this cpu
-	 * on have completed. Perform enough arch. initialization
-	 * to run in the non-secure address space.
-	 */
-	bl31_arch_setup();
-
-	/*
-	 * Lock the CPU spin lock to make sure that the context initialization
-	 * is done. Since the lock is only used in this function to create
-	 * a synchronization point with cpu_on_start(), it can be released
-	 * immediately.
-	 */
-	psci_spin_lock_cpu(cpu_idx);
-	psci_spin_unlock_cpu(cpu_idx);
-
-	/* Ensure we have been explicitly woken up by another cpu */
-	assert(psci_get_aff_info_state() == AFF_STATE_ON_PENDING);
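The spin lock here acts purely as a barrier rather than protecting a critical section: psci_cpu_on_start() holds the target CPU's lock while it writes the re-entry context, so acquiring and immediately releasing the lock on the woken CPU guarantees the context is fully initialized. A toy, self-contained model of that handshake using C11 atomics (illustrative only, not the firmware's spinlock implementation):

	#include <stdatomic.h>

	/* The lock doubles as a "context ready" fence between the two CPUs. */
	static atomic_flag cpu_lock = ATOMIC_FLAG_INIT;

	static void powering_cpu(void)		/* psci_cpu_on_start(), simplified */
	{
		while (atomic_flag_test_and_set(&cpu_lock))
			;			/* psci_spin_lock_cpu() */
		/* ... set ON_PENDING and write the re-entry context ... */
		atomic_flag_clear(&cpu_lock);	/* psci_spin_unlock_cpu() */
	}

	static void woken_cpu(void)		/* psci_cpu_on_finish(), simplified */
	{
		while (atomic_flag_test_and_set(&cpu_lock))
			;			/* blocks until the writer is done */
		atomic_flag_clear(&cpu_lock);	/* nothing to protect; barrier only */
	}

-
-	/*
-	 * Call the cpu on finish handler registered by the Secure Payload
-	 * Dispatcher to let it do any bookkeeping. If the handler encounters an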
-	 * error, it's expected to assert within.
-	 */
-	if (psci_spd_pm && psci_spd_pm->svc_on_finish)
-		psci_spd_pm->svc_on_finish(0);
-
-	/* Populate the mpidr field within the cpu node array */
-	/* This needs to be done only once */
-	psci_cpu_pd_nodes[cpu_idx].mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;
-
-	/*
-	 * Generic management: Now we just need to retrieve the
-	 * information that we had stashed away during the cpu_on
-	 * call to set this cpu on its way.
-	 */
-	cm_prepare_el3_exit(NON_SECURE);
-}
diff --git a/services/std_svc/psci/psci_private.h b/services/std_svc/psci/psci_private.h
deleted file mode 100644
index ffb0732..0000000
--- a/services/std_svc/psci/psci_private.h
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __PSCI_PRIVATE_H__
-#define __PSCI_PRIVATE_H__
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-/*
- * The following helper macros abstract the interface to the Bakery
- * Lock API.
- */
-#define psci_lock_init(non_cpu_pd_node, idx)			\
-	((non_cpu_pd_node)[(idx)].lock_index = (idx))
-#define psci_lock_get(non_cpu_pd_node)				\
-	bakery_lock_get(&psci_locks[(non_cpu_pd_node)->lock_index])
-#define psci_lock_release(non_cpu_pd_node)			\
-	bakery_lock_release(&psci_locks[(non_cpu_pd_node)->lock_index])
-
-/*
- * The PSCI capabilities which are provided by the generic code and do not
- * depend on the platform or SPD capabilities.
- */
-#define PSCI_GENERIC_CAP	\
-	(define_psci_cap(PSCI_VERSION) |		\
-	define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
-	define_psci_cap(PSCI_FEATURES))
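The capability words built from define_psci_cap() are plain bitmasks: each function ID maps to one bit, and psci_setup() ORs in a bit per implemented platform hook. A standalone sketch of the idea; the bit mapping and helper below are illustrative, not the macro from this patch, though the FIDs are the spec-defined values:

	#include <stdint.h>

	/* Illustrative only: map a PSCI function id to a capability bit. */
	#define CAP_BIT(fid)	(1u << ((fid) & 0x1fu))

	static uint32_t build_caps(int has_cpu_off, int has_cpu_on)
	{
		uint32_t caps = 0;	/* generic caps would be pre-set here */

		if (has_cpu_off)
			caps |= CAP_BIT(0x84000002u);	/* PSCI_CPU_OFF */
		if (has_cpu_on)
			caps |= CAP_BIT(0xc4000003u);	/* PSCI_CPU_ON (SMC64) */
		return caps;
	}

-
-/*
- * The PSCI capabilities mask for 64 bit functions.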
- */
-#define PSCI_CAP_64BIT_MASK	\
-	(define_psci_cap(PSCI_CPU_SUSPEND_AARCH64) |	\
-	define_psci_cap(PSCI_CPU_ON_AARCH64) |		\
-	define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
-	define_psci_cap(PSCI_MIG_AARCH64) |		\
-	define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) |	\
-	define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64) |	\
-	define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64) |	\
-	define_psci_cap(PSCI_STAT_COUNT_AARCH64))
-
-/*
- * Helper macros to get/set the fields of PSCI per-cpu data.
- */
-#define psci_set_aff_info_state(aff_state) \
-		set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state)
-#define psci_get_aff_info_state() \
-		get_cpu_data(psci_svc_cpu_data.aff_info_state)
-#define psci_get_aff_info_state_by_idx(idx) \
-		get_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state)
-#define psci_set_aff_info_state_by_idx(idx, aff_state) \
-		set_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state,\
-					aff_state)
-#define psci_get_suspend_pwrlvl() \
-		get_cpu_data(psci_svc_cpu_data.target_pwrlvl)
-#define psci_set_suspend_pwrlvl(target_lvl) \
-		set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl)
-#define psci_set_cpu_local_state(state) \
-		set_cpu_data(psci_svc_cpu_data.local_state, state)
-#define psci_get_cpu_local_state() \
-		get_cpu_data(psci_svc_cpu_data.local_state)
-#define psci_get_cpu_local_state_by_idx(idx) \
-		get_cpu_data_by_index(idx, psci_svc_cpu_data.local_state)
-
-/*
- * Helper macros for the CPU level spinlocks
- */
-#define psci_spin_lock_cpu(idx)	spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock)
-#define psci_spin_unlock_cpu(idx)	spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock)
-
-/* Helper macro to identify a CPU standby request in PSCI Suspend call */
-#define is_cpu_standby_req(is_power_down_state, retn_lvl) \
-		(((!(is_power_down_state)) && ((retn_lvl) == 0)) ? 1 : 0)
-
-/* The following are used as IDs to capture time-stamps */
-#define PSCI_STAT_ID_ENTER_LOW_PWR		0
-#define PSCI_STAT_ID_EXIT_LOW_PWR		1
-#define PSCI_STAT_TOTAL_IDS			2
-
-/* Declare PMF service functions for PSCI */
-PMF_DECLARE_CAPTURE_TIMESTAMP(psci_svc)
-PMF_DECLARE_GET_TIMESTAMP(psci_svc)
-
-/*******************************************************************************
- * The following two data structures implement the power domain tree. The tree
- * is used to track the state of all the nodes i.e. power domain instances
- * described by the platform. The tree consists of nodes that describe CPU power
- * domains i.e. leaf nodes and all other power domains which are parents of a
- * CPU power domain i.e. non-leaf nodes.
- ******************************************************************************/
-typedef struct non_cpu_pwr_domain_node {
-	/*
-	 * Index of the first CPU power domain node level 0 which has this node
-	 * as its parent.
-	 */
-	unsigned int cpu_start_idx;
-
-	/*
-	 * Number of CPU power domains which are siblings of the domain indexed
-	 * by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
-	 * -> cpu_start_idx + ncpus' have this node as their parent.
-	 */
-	unsigned int ncpus;
-
-	/*
-	 * Index of the parent power domain node.
-	 * TODO: Figure out whether using a pointer is more efficient.
-	 */
-	unsigned int parent_node;
-
-	plat_local_state_t local_state;
-
-	unsigned char level;
-
-	/* For indexing the psci_lock array */
-	unsigned char lock_index;
-} non_cpu_pd_node_t;
-
-typedef struct cpu_pwr_domain_node {
-	u_register_t mpidr;
-
-	/*
-	 * Index of the parent power domain node.
-	 * TODO: Figure out whether using a pointer is more efficient.
- */ - unsigned int parent_node; - - /* - * A CPU power domain does not require state coordination like its - * parent power domains. Hence this node does not include a bakery - * lock. A spinlock is required by the CPU_ON handler to prevent a race - * when multiple CPUs try to turn ON the same target CPU. - */ - spinlock_t cpu_lock; -} cpu_pd_node_t; - -/******************************************************************************* - * Data prototypes - ******************************************************************************/ -extern const plat_psci_ops_t *psci_plat_pm_ops; -extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]; -extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT]; -extern unsigned int psci_caps; - -/* One bakery lock is required for each non-cpu power domain */ -DECLARE_BAKERY_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]); - -/******************************************************************************* - * SPD's power management hooks registered with PSCI - ******************************************************************************/ -extern const spd_pm_ops_t *psci_spd_pm; - -/******************************************************************************* - * Function prototypes - ******************************************************************************/ -/* Private exported functions from psci_common.c */ -int psci_validate_power_state(unsigned int power_state, - psci_power_state_t *state_info); -void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info); -int psci_validate_mpidr(u_register_t mpidr); -void psci_init_req_local_pwr_states(void); -void psci_power_up_finish(void); -int psci_validate_entry_point(entry_point_info_t *ep, - uintptr_t entrypoint, u_register_t context_id); -void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx, - unsigned int end_lvl, - unsigned int node_index[]); -void psci_do_state_coordination(unsigned int end_pwrlvl, - psci_power_state_t *state_info); -void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl, - unsigned int cpu_idx); -void psci_release_pwr_domain_locks(unsigned int end_pwrlvl, - unsigned int cpu_idx); -int psci_validate_suspend_req(const psci_power_state_t *state_info, - unsigned int is_power_down_state_req); -unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info); -unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info); -void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl); -void psci_print_power_domain_map(void); -unsigned int psci_is_last_on_cpu(void); -int psci_spd_migrate_info(u_register_t *mpidr); - -/* Private exported functions from psci_on.c */ -int psci_cpu_on_start(unsigned long target_cpu, - entry_point_info_t *ep); - -void psci_cpu_on_finish(unsigned int cpu_idx, - psci_power_state_t *state_info); - -/* Private exported functions from psci_off.c */ -int psci_do_cpu_off(unsigned int end_pwrlvl); - -/* Private exported functions from psci_suspend.c */ -void psci_cpu_suspend_start(entry_point_info_t *ep, - unsigned int end_pwrlvl, - psci_power_state_t *state_info, - unsigned int is_power_down_state_req); - -void psci_cpu_suspend_finish(unsigned int cpu_idx, - psci_power_state_t *state_info); - -/* Private exported functions from psci_helpers.S */ -void psci_do_pwrdown_cache_maintenance(unsigned int pwr_level); -void psci_do_pwrup_cache_maintenance(void); - -/* Private exported functions from psci_system_off.c */ -void __dead2 psci_system_off(void); -void __dead2 psci_system_reset(void); - -/* Private 
exported functions from psci_stat.c */ -void psci_stats_update_pwr_down(unsigned int end_pwrlvl, - const psci_power_state_t *state_info); -void psci_stats_update_pwr_up(unsigned int end_pwrlvl, - const psci_power_state_t *state_info, - unsigned int flags); -u_register_t psci_stat_residency(u_register_t target_cpu, - unsigned int power_state); -u_register_t psci_stat_count(u_register_t target_cpu, - unsigned int power_state); - -#endif /* __PSCI_PRIVATE_H__ */ diff --git a/services/std_svc/psci/psci_setup.c b/services/std_svc/psci/psci_setup.c deleted file mode 100644 index 975b257..0000000 --- a/services/std_svc/psci/psci_setup.c +++ /dev/null @@ -1,261 +0,0 @@ -/* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include "psci_private.h" - -/******************************************************************************* - * Per cpu non-secure contexts used to program the architectural state prior - * return to the normal world. - * TODO: Use the memory allocator to set aside memory for the contexts instead - * of relying on platform defined constants. - ******************************************************************************/ -static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT]; - -/****************************************************************************** - * Define the psci capability variable. - *****************************************************************************/ -unsigned int psci_caps; - -/******************************************************************************* - * Function which initializes the 'psci_non_cpu_pd_nodes' or the - * 'psci_cpu_pd_nodes' corresponding to the power level. 
- ******************************************************************************/
-static void psci_init_pwr_domain_node(unsigned int node_idx,
-					unsigned int parent_idx,
-					unsigned int level)
-{
-	if (level > PSCI_CPU_PWR_LVL) {
-		psci_non_cpu_pd_nodes[node_idx].level = level;
-		psci_lock_init(psci_non_cpu_pd_nodes, node_idx);
-		psci_non_cpu_pd_nodes[node_idx].parent_node = parent_idx;
-		psci_non_cpu_pd_nodes[node_idx].local_state =
-							PLAT_MAX_OFF_STATE;
-	} else {
-		psci_cpu_data_t *svc_cpu_data;
-
-		psci_cpu_pd_nodes[node_idx].parent_node = parent_idx;
-
-		/* Initialize with an invalid mpidr */
-		psci_cpu_pd_nodes[node_idx].mpidr = PSCI_INVALID_MPIDR;
-
-		svc_cpu_data =
-			&(_cpu_data_by_index(node_idx)->psci_svc_cpu_data);
-
-		/* Set the Affinity Info for the cores as OFF */
-		svc_cpu_data->aff_info_state = AFF_STATE_OFF;
-
-		/* Invalidate the suspend level for the cpu */
-		svc_cpu_data->target_pwrlvl = PSCI_INVALID_PWR_LVL;
-
-		/* Set the power state to OFF state */
-		svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;
-
-		flush_dcache_range((uintptr_t)svc_cpu_data,
-						 sizeof(*svc_cpu_data));
-
-		cm_set_context_by_index(node_idx,
-					(void *) &psci_ns_context[node_idx],
-					NON_SECURE);
-	}
-}
-
-/*******************************************************************************
- * This function updates the cpu_start_idx and ncpus fields for each node in
- * psci_non_cpu_pd_nodes[]. It does so by comparing the parent nodes of each of
- * the CPUs and checking whether they match the parent of the previous CPU. The
- * basic assumption for this to work is that children of the same parent are
- * allocated adjacent indices. The platform should ensure this through proper
- * mapping of the CPUs to indices via the plat_core_pos_by_mpidr() and
- * plat_my_core_pos() APIs.
- *******************************************************************************/
-static void psci_update_pwrlvl_limits(void)
-{
-	int j;
-	unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0};
-	unsigned int temp_index[PLAT_MAX_PWR_LVL], cpu_idx;
-
-	for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
-		psci_get_parent_pwr_domain_nodes(cpu_idx,
-						 PLAT_MAX_PWR_LVL,
-						 temp_index);
-		for (j = PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
-			if (temp_index[j] != nodes_idx[j]) {
-				nodes_idx[j] = temp_index[j];
-				psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx
-					= cpu_idx;
-			}
-			psci_non_cpu_pd_nodes[nodes_idx[j]].ncpus++;
-		}
-	}
-}
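The routine that follows consumes the flat, breadth-first tree descriptor exported by the platform through plat_get_power_domain_tree_desc(). As a concrete illustration, a hypothetical descriptor for the two-cluster, two-CPUs-per-cluster system described later in this file could look as follows (the array name and a platform with this exact shape are assumptions for illustration):

	/*
	 * Breadth-first: index 0 = number of root (system) power domains,
	 * index 1 = children of the system node (clusters), indices 2-3 =
	 * number of CPUs in each cluster.
	 */
	static const unsigned char example_power_domain_tree_desc[] = {
		1,	/* One system-level root power domain */
		2,	/* Two clusters under the system node */
		2,	/* CPUs in cluster 0 */
		2,	/* CPUs in cluster 1 */
	};

-
-/*******************************************************************************
- * Core routine to populate the power domain tree. The tree descriptor passed by
- * the platform is populated breadth-first and the first entry in the map
- * informs the number of root power domains. The parent nodes of the root nodes
- * will point to an invalid entry (-1).
- ******************************************************************************/
-static void populate_power_domain_tree(const unsigned char *topology)
-{
-	unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl;
-	unsigned int node_index = 0, parent_node_index = 0, num_children;
-	int level = PLAT_MAX_PWR_LVL;
-
-	/*
-	 * For each level the inputs are:
-	 * - number of nodes at this level in plat_array i.e. num_nodes_at_level
-	 *   This is the sum of values of nodes at the parent level.
-	 * - Index of first entry at this level in the plat_array i.e.
-	 *   parent_node_index.
-	 * - Index of first free entry in psci_non_cpu_pd_nodes[] or
-	 *   psci_cpu_pd_nodes[] i.e. node_index depending upon the level.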
- */ - while (level >= PSCI_CPU_PWR_LVL) { - num_nodes_at_next_lvl = 0; - /* - * For each entry (parent node) at this level in the plat_array: - * - Find the number of children - * - Allocate a node in a power domain array for each child - * - Set the parent of the child to the parent_node_index - 1 - * - Increment parent_node_index to point to the next parent - * - Accumulate the number of children at next level. - */ - for (i = 0; i < num_nodes_at_lvl; i++) { - assert(parent_node_index <= - PSCI_NUM_NON_CPU_PWR_DOMAINS); - num_children = topology[parent_node_index]; - - for (j = node_index; - j < node_index + num_children; j++) - psci_init_pwr_domain_node(j, - parent_node_index - 1, - level); - - node_index = j; - num_nodes_at_next_lvl += num_children; - parent_node_index++; - } - - num_nodes_at_lvl = num_nodes_at_next_lvl; - level--; - - /* Reset the index for the cpu power domain array */ - if (level == PSCI_CPU_PWR_LVL) - node_index = 0; - } - - /* Validate the sanity of array exported by the platform */ - assert(j == PLATFORM_CORE_COUNT); -} - -/******************************************************************************* - * This function initializes the power domain topology tree by querying the - * platform. The power domain nodes higher than the CPU are populated in the - * array psci_non_cpu_pd_nodes[] and the CPU power domains are populated in - * psci_cpu_pd_nodes[]. The platform exports its static topology map through the - * populate_power_domain_topology_tree() API. The algorithm populates the - * psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this - * topology map. On a platform that implements two clusters of 2 cpus each, and - * supporting 3 domain levels, the populated psci_non_cpu_pd_nodes would look - * like this: - * - * --------------------------------------------------- - * | system node | cluster 0 node | cluster 1 node | - * --------------------------------------------------- - * - * And populated psci_cpu_pd_nodes would look like this : - * <- cpus cluster0 -><- cpus cluster1 -> - * ------------------------------------------------ - * | CPU 0 | CPU 1 | CPU 2 | CPU 3 | - * ------------------------------------------------ - ******************************************************************************/ -int psci_setup(void) -{ - const unsigned char *topology_tree; - - /* Query the topology map from the platform */ - topology_tree = plat_get_power_domain_tree_desc(); - - /* Populate the power domain arrays using the platform topology map */ - populate_power_domain_tree(topology_tree); - - /* Update the CPU limits for each node in psci_non_cpu_pd_nodes */ - psci_update_pwrlvl_limits(); - - /* Populate the mpidr field of cpu node for this CPU */ - psci_cpu_pd_nodes[plat_my_core_pos()].mpidr = - read_mpidr() & MPIDR_AFFINITY_MASK; - - psci_init_req_local_pwr_states(); - - /* - * Set the requested and target state of this CPU and all the higher - * power domain levels for this CPU to run. 
- */ - psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL); - - plat_setup_psci_ops((uintptr_t)psci_entrypoint, - &psci_plat_pm_ops); - assert(psci_plat_pm_ops); - - /* Initialize the psci capability */ - psci_caps = PSCI_GENERIC_CAP; - - if (psci_plat_pm_ops->pwr_domain_off) - psci_caps |= define_psci_cap(PSCI_CPU_OFF); - if (psci_plat_pm_ops->pwr_domain_on && - psci_plat_pm_ops->pwr_domain_on_finish) - psci_caps |= define_psci_cap(PSCI_CPU_ON_AARCH64); - if (psci_plat_pm_ops->pwr_domain_suspend && - psci_plat_pm_ops->pwr_domain_suspend_finish) { - psci_caps |= define_psci_cap(PSCI_CPU_SUSPEND_AARCH64); - if (psci_plat_pm_ops->get_sys_suspend_power_state) - psci_caps |= define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64); - } - if (psci_plat_pm_ops->system_off) - psci_caps |= define_psci_cap(PSCI_SYSTEM_OFF); - if (psci_plat_pm_ops->system_reset) - psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET); - -#if ENABLE_PSCI_STAT - psci_caps |= define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64); - psci_caps |= define_psci_cap(PSCI_STAT_COUNT_AARCH64); -#endif - - return 0; -} diff --git a/services/std_svc/psci/psci_stat.c b/services/std_svc/psci/psci_stat.c deleted file mode 100644 index 155bbb0..0000000 --- a/services/std_svc/psci/psci_stat.c +++ /dev/null @@ -1,309 +0,0 @@ -/* - * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include "psci_private.h" - -#ifndef PLAT_MAX_PWR_LVL_STATES -#define PLAT_MAX_PWR_LVL_STATES 2 -#endif - -/* Ticks elapsed in one second by a signal of 1 MHz */ -#define MHZ_TICKS_PER_SEC 1000000 - -/* Following structure is used for PSCI STAT */ -typedef struct psci_stat { - u_register_t residency; - u_register_t count; -} psci_stat_t; - -/* - * Following is used to keep track of the last cpu - * that goes to power down in non cpu power domains. - */ -static int last_cpu_in_non_cpu_pd[PSCI_NUM_NON_CPU_PWR_DOMAINS] = {-1}; - -/* - * Following are used to store PSCI STAT values for - * CPU and non CPU power domains. 
- */
-static psci_stat_t psci_cpu_stat[PLATFORM_CORE_COUNT]
-				[PLAT_MAX_PWR_LVL_STATES];
-static psci_stat_t psci_non_cpu_stat[PSCI_NUM_NON_CPU_PWR_DOMAINS]
-				[PLAT_MAX_PWR_LVL_STATES];
-
-/* Register PMF PSCI service */
-PMF_REGISTER_SERVICE(psci_svc, PMF_PSCI_STAT_SVC_ID,
-	 PSCI_STAT_TOTAL_IDS, PMF_STORE_ENABLE)
-
-/* The divisor to use to convert raw timestamp into microseconds */
-u_register_t residency_div;
-
-/*
- * This macro calculates the stats residency in microseconds,
- * taking into account the wrap-around condition.
- */
-#define calc_stat_residency(_pwrupts, _pwrdnts, _res)		\
-	do {							\
-		if (_pwrupts < _pwrdnts)			\
-			_res = UINT64_MAX - _pwrdnts + _pwrupts;\
-		else						\
-			_res = _pwrupts - _pwrdnts;		\
-		/* Convert timestamp into microseconds */	\
-		_res = _res/residency_div;			\
-	} while (0)
-
-/*
- * This function returns the index into the `psci_stat_t` array given the
- * local power state and power domain level. If the platform implements the
- * `get_pwr_lvl_state_idx` pm hook, then that will be used to return the index.
- */
-static int get_stat_idx(plat_local_state_t local_state, int pwr_lvl)
-{
-	int idx;
-
-	if (psci_plat_pm_ops->get_pwr_lvl_state_idx == NULL) {
-		assert(PLAT_MAX_PWR_LVL_STATES == 2);
-		if (is_local_state_retn(local_state))
-			return 0;
-
-		assert(is_local_state_off(local_state));
-		return 1;
-	}
-
-	idx = psci_plat_pm_ops->get_pwr_lvl_state_idx(local_state, pwr_lvl);
-	assert((idx >= 0) && (idx < PLAT_MAX_PWR_LVL_STATES));
-	return idx;
-}
-
-/*******************************************************************************
- * This function is passed the target local power states for each power
- * domain (state_info) between the current CPU domain and its ancestors until
- * the target power level (end_pwrlvl).
- *
- * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
- * updates `last_cpu_in_non_cpu_pd[]` with the id of the last CPU to power down.
- *
- * This function will only be invoked with data cache enabled and while
- * powering down a core.
- ******************************************************************************/
-void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
-			const psci_power_state_t *state_info)
-{
-	int lvl, parent_idx, cpu_idx = plat_my_core_pos();
-
-	assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
-	assert(state_info);
-
-	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
-
-	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
-
-		/* Break early if the target power state is RUN */
-		if (is_local_state_run(state_info->pwr_domain_state[lvl]))
-			break;
-
-		/*
-		 * The power domain is entering a low power state, so this is
-		 * the last CPU for this power domain
-		 */
-		last_cpu_in_non_cpu_pd[parent_idx] = cpu_idx;
-
-		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
-	}
-
-}
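Because the PMF timestamps come from a free-running counter, the subtraction in calc_stat_residency() has to tolerate a counter wrap between power-down and power-up. A standalone sketch of the same computation, assuming residency_div has been pre-computed as cntfrq_el0 / 1 MHz (i.e. ticks per microsecond):

	#include <stdint.h>

	/* Sketch of calc_stat_residency(): microseconds spent in low power. */
	static uint64_t stat_residency_us(uint64_t pwrup_ts, uint64_t pwrdn_ts,
					  uint64_t ticks_per_us)
	{
		uint64_t ticks;

		if (pwrup_ts < pwrdn_ts)	/* counter wrapped while down */
			ticks = UINT64_MAX - pwrdn_ts + pwrup_ts;
		else
			ticks = pwrup_ts - pwrdn_ts;

		return ticks / ticks_per_us;
	}

-
-/*******************************************************************************
- * This function updates the PSCI STATS (residency time and count) for CPU
- * and NON-CPU power domains.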
- * It is called with caches enabled and locks acquired(for NON-CPU domain) - ******************************************************************************/ -void psci_stats_update_pwr_up(unsigned int end_pwrlvl, - const psci_power_state_t *state_info, - unsigned int flags) -{ - int parent_idx, cpu_idx = plat_my_core_pos(); - int lvl, stat_idx; - plat_local_state_t local_state; - unsigned long long pwrup_ts = 0, pwrdn_ts = 0; - u_register_t residency; - - assert(end_pwrlvl <= PLAT_MAX_PWR_LVL); - assert(state_info); - - /* Initialize the residency divisor if not already initialized */ - if (!residency_div) { - /* Pre-calculate divisor so that it can be directly used to - convert time-stamp into microseconds */ - residency_div = read_cntfrq_el0() / MHZ_TICKS_PER_SEC; - assert(residency_div); - } - - /* Get power down time-stamp for current CPU */ - PMF_GET_TIMESTAMP_BY_INDEX(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR, - cpu_idx, flags, pwrdn_ts); - - /* In the case of 1st power on just return */ - if (!pwrdn_ts) - return; - - /* Get power up time-stamp for current CPU */ - PMF_GET_TIMESTAMP_BY_INDEX(psci_svc, PSCI_STAT_ID_EXIT_LOW_PWR, - cpu_idx, flags, pwrup_ts); - - /* Get the index into the stats array */ - local_state = state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]; - stat_idx = get_stat_idx(local_state, PSCI_CPU_PWR_LVL); - - /* Calculate stats residency */ - calc_stat_residency(pwrup_ts, pwrdn_ts, residency); - - /* Update CPU stats. */ - psci_cpu_stat[cpu_idx][stat_idx].residency += residency; - psci_cpu_stat[cpu_idx][stat_idx].count++; - - /* - * Check what power domains above CPU were off - * prior to this CPU powering on. - */ - parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node; - for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) { - local_state = state_info->pwr_domain_state[lvl]; - if (is_local_state_run(local_state)) { - /* Break early */ - break; - } - - assert(last_cpu_in_non_cpu_pd[parent_idx] != -1); - - /* Get power down time-stamp for last CPU */ - PMF_GET_TIMESTAMP_BY_INDEX(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR, - last_cpu_in_non_cpu_pd[parent_idx], - flags, pwrdn_ts); - - /* Initialize back to reset value */ - last_cpu_in_non_cpu_pd[parent_idx] = -1; - - /* Get the index into the stats array */ - stat_idx = get_stat_idx(local_state, lvl); - - /* Calculate stats residency */ - calc_stat_residency(pwrup_ts, pwrdn_ts, residency); - - /* Update non cpu stats */ - psci_non_cpu_stat[parent_idx][stat_idx].residency += residency; - psci_non_cpu_stat[parent_idx][stat_idx].count++; - - parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node; - } - -} - -/******************************************************************************* - * This function returns the appropriate count and residency time of the - * local state for the highest power level expressed in the `power_state` - * for the node represented by `target_cpu`. 
- ******************************************************************************/ -int psci_get_stat(u_register_t target_cpu, unsigned int power_state, - psci_stat_t *psci_stat) -{ - int rc, pwrlvl, lvl, parent_idx, stat_idx, target_idx; - psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} }; - plat_local_state_t local_state; - - /* Validate the target_cpu parameter and determine the cpu index */ - target_idx = plat_core_pos_by_mpidr(target_cpu); - if (target_idx == -1) - return PSCI_E_INVALID_PARAMS; - - /* Validate the power_state parameter */ - if (!psci_plat_pm_ops->translate_power_state_by_mpidr) - rc = psci_validate_power_state(power_state, &state_info); - else - rc = psci_plat_pm_ops->translate_power_state_by_mpidr( - target_cpu, power_state, &state_info); - - if (rc != PSCI_E_SUCCESS) - return PSCI_E_INVALID_PARAMS; - - /* Find the highest power level */ - pwrlvl = psci_find_target_suspend_lvl(&state_info); - if (pwrlvl == PSCI_INVALID_PWR_LVL) - return PSCI_E_INVALID_PARAMS; - - /* Get the index into the stats array */ - local_state = state_info.pwr_domain_state[pwrlvl]; - stat_idx = get_stat_idx(local_state, pwrlvl); - - if (pwrlvl > PSCI_CPU_PWR_LVL) { - /* Get the power domain index */ - parent_idx = psci_cpu_pd_nodes[target_idx].parent_node; - for (lvl = PSCI_CPU_PWR_LVL + 1; lvl < pwrlvl; lvl++) - parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node; - - /* Get the non cpu power domain stats */ - *psci_stat = psci_non_cpu_stat[parent_idx][stat_idx]; - } else { - /* Get the cpu power domain stats */ - *psci_stat = psci_cpu_stat[target_idx][stat_idx]; - } - - return PSCI_E_SUCCESS; -} - -/* This is the top level function for PSCI_STAT_RESIDENCY SMC. */ -u_register_t psci_stat_residency(u_register_t target_cpu, - unsigned int power_state) -{ - psci_stat_t psci_stat; - - int rc = psci_get_stat(target_cpu, power_state, &psci_stat); - if (rc == PSCI_E_SUCCESS) - return psci_stat.residency; - else - return 0; -} - -/* This is the top level function for PSCI_STAT_COUNT SMC. */ -u_register_t psci_stat_count(u_register_t target_cpu, - unsigned int power_state) -{ - psci_stat_t psci_stat; - - int rc = psci_get_stat(target_cpu, power_state, &psci_stat); - if (rc == PSCI_E_SUCCESS) - return psci_stat.count; - else - return 0; -} diff --git a/services/std_svc/psci/psci_suspend.c b/services/std_svc/psci/psci_suspend.c deleted file mode 100644 index e6c8cd9..0000000 --- a/services/std_svc/psci/psci_suspend.c +++ /dev/null @@ -1,284 +0,0 @@ -/* - * Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "psci_private.h" - -/******************************************************************************* - * This function does generic and platform specific operations after a wake-up - * from standby/retention states at multiple power levels. - ******************************************************************************/ -static void psci_suspend_to_standby_finisher(unsigned int cpu_idx, - psci_power_state_t *state_info, - unsigned int end_pwrlvl) -{ - psci_acquire_pwr_domain_locks(end_pwrlvl, - cpu_idx); - - /* - * Plat. management: Allow the platform to do operations - * on waking up from retention. - */ - psci_plat_pm_ops->pwr_domain_suspend_finish(state_info); - - /* - * Set the requested and target state of this CPU and all the higher - * power domain levels for this CPU to run. - */ - psci_set_pwr_domains_to_run(end_pwrlvl); - - psci_release_pwr_domain_locks(end_pwrlvl, - cpu_idx); -} - -/******************************************************************************* - * This function does generic and platform specific suspend to power down - * operations. - ******************************************************************************/ -static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl, - entry_point_info_t *ep, - psci_power_state_t *state_info) -{ - unsigned int max_off_lvl = psci_find_max_off_lvl(state_info); - - /* Save PSCI target power level for the suspend finisher handler */ - psci_set_suspend_pwrlvl(end_pwrlvl); - - /* - * Flush the target power level as it will be accessed on power up with - * Data cache disabled. - */ - flush_cpu_data(psci_svc_cpu_data.target_pwrlvl); - - /* - * Call the cpu suspend handler registered by the Secure Payload - * Dispatcher to let it do any book-keeping. If the handler encounters an - * error, it's expected to assert within - */ - if (psci_spd_pm && psci_spd_pm->svc_suspend) - psci_spd_pm->svc_suspend(max_off_lvl); - - /* - * Store the re-entry information for the non-secure world. - */ - cm_init_my_context(ep); - - /* - * Arch. management. Perform the necessary steps to flush all - * cpu caches. Currently we assume that the power level correspond - * the cache level. - * TODO : Introduce a mechanism to query the cache level to flush - * and the cpu-ops power down to perform from the platform. - */ - psci_do_pwrdown_cache_maintenance(max_off_lvl); -} - -/******************************************************************************* - * Top level handler which is called when a cpu wants to suspend its execution. - * It is assumed that along with suspending the cpu power domain, power domains - * at higher levels until the target power level will be suspended as well. It - * coordinates with the platform to negotiate the target state for each of - * the power domain level till the target power domain level. 
-
-/*******************************************************************************
- * Top level handler which is called when a cpu wants to suspend its execution.
- * It is assumed that along with suspending the cpu power domain, power domains
- * at higher levels until the target power level will be suspended as well. It
- * coordinates with the platform to negotiate the target state for each of
- * the power domain levels up to the target power domain level. It then
- * performs the generic, architectural and platform setup and state management
- * required to suspend that power domain level and the power domain levels
- * below it. e.g. For a cpu that's to be suspended, it could mean programming
- * the power controller, whereas for a cluster that's to be suspended, it will
- * call the platform specific code which will disable coherency at the
- * interconnect level if the cpu is the last in the cluster and also program
- * the power controller.
- *
- * All the required parameter checks are performed at the beginning and after
- * the state transition has been done, no further error is expected and it is
- * not possible to undo any of the actions taken beyond that point.
- ******************************************************************************/
-void psci_cpu_suspend_start(entry_point_info_t *ep,
-                            unsigned int end_pwrlvl,
-                            psci_power_state_t *state_info,
-                            unsigned int is_power_down_state)
-{
-        int skip_wfi = 0;
-        unsigned int idx = plat_my_core_pos();
-
-        /*
-         * This function must only be called on platforms where the
-         * CPU_SUSPEND platform hooks have been implemented.
-         */
-        assert(psci_plat_pm_ops->pwr_domain_suspend &&
-               psci_plat_pm_ops->pwr_domain_suspend_finish);
-
-        /*
-         * This function acquires the lock corresponding to each power
-         * level so that by the time all locks are taken, the system topology
-         * is snapshot and state management can be done safely.
-         */
-        psci_acquire_pwr_domain_locks(end_pwrlvl,
-                                      idx);
-
-        /*
-         * We check if there are any pending interrupts after the delay
-         * introduced by lock contention to increase the chances of early
-         * detection that a wake-up interrupt has fired.
-         */
-        if (read_isr_el1()) {
-                skip_wfi = 1;
-                goto exit;
-        }
-
-        /*
-         * This function is passed the requested state info and
-         * it returns the negotiated state info for each power level up to
-         * the end level specified.
-         */
-        psci_do_state_coordination(end_pwrlvl, state_info);
-
-#if ENABLE_PSCI_STAT
-        /* Update the last cpu for each level till end_pwrlvl */
-        psci_stats_update_pwr_down(end_pwrlvl, state_info);
-#endif
-
-        if (is_power_down_state)
-                psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info);
-
-        /*
-         * Plat. management: Allow the platform to perform the
-         * necessary actions to turn off this cpu e.g. set the
-         * platform defined mailbox with the psci entrypoint,
-         * program the power controller etc.
-         */
-        psci_plat_pm_ops->pwr_domain_suspend(state_info);
-
-#if ENABLE_PSCI_STAT
-        /*
-         * Capture time-stamp while entering low power state.
-         * No cache maintenance needed because caches are off
-         * and writes are direct to main memory.
-         */
-        PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
-                PMF_NO_CACHE_MAINT);
-#endif
-
-exit:
-        /*
-         * Release the locks corresponding to each power level in the
-         * reverse order to which they were acquired.
-         */
-        psci_release_pwr_domain_locks(end_pwrlvl,
-                                      idx);
-        if (skip_wfi)
-                return;
-
-        if (is_power_down_state) {
-                /* The function calls below must not return */
-                if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi)
-                        psci_plat_pm_ops->pwr_domain_pwr_down_wfi(state_info);
-                else
-                        psci_power_down_wfi();
-        }
-
-        /*
-         * We will reach here if only retention/standby states have been
-         * requested at multiple power levels. This means that the cpu
-         * context will be preserved.
-         */
-        wfi();
-
-        /*
-         * After we wake up from context-retaining suspend, call the
-         * context-retaining suspend finisher.
-         */
-        psci_suspend_to_standby_finisher(idx, state_info, end_pwrlvl);
-}
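
The pwr_domain_suspend hook invoked above is where the platform programs its
power controller and warm-boot mailbox. A skeletal implementation, with the
signature taken from the plat_psci_ops_t porting interface and the two inner
calls as hypothetical placeholders:

    static void my_pwr_domain_suspend(const psci_power_state_t *target_state)
    {
            /* Retention states keep the core powered; only a power-down
             * state needs the mailbox and power-controller programming. */
            if (is_local_state_off(
                    target_state->pwr_domain_state[PSCI_CPU_PWR_LVL])) {
                    /* my_program_mailbox(bl31_warm_entrypoint);  placeholder */
                    /* my_power_controller_off();                 placeholder */
            }
    }
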
-
-/*******************************************************************************
- * The following functions finish an earlier suspend request. They
- * are called by the common finisher routine in psci_common.c. The `state_info`
- * is the psci_power_state from which this CPU has woken up.
- ******************************************************************************/
-void psci_cpu_suspend_finish(unsigned int cpu_idx,
-                             psci_power_state_t *state_info)
-{
-        unsigned int counter_freq;
-        unsigned int max_off_lvl;
-
-        /* Ensure we have been woken up from a suspended state */
-        assert(psci_get_aff_info_state() == AFF_STATE_ON &&
-               is_local_state_off(
-                        state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]));
-
-        /*
-         * Plat. management: Perform the platform specific actions
-         * before we change the state of the cpu e.g. enabling the
-         * gic or zeroing the mailbox register. If anything goes
-         * wrong then assert as there is no way to recover from this
-         * situation.
-         */
-        psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);
-
-        /*
-         * Arch. management: Enable the data cache, manage stack memory and
-         * restore the stashed EL3 architectural context from the 'cpu_context'
-         * structure for this cpu.
-         */
-        psci_do_pwrup_cache_maintenance();
-
-        /* Re-init the cntfrq_el0 register */
-        counter_freq = plat_get_syscnt_freq2();
-        write_cntfrq_el0(counter_freq);
-
-        /*
-         * Call the cpu suspend finish handler registered by the Secure Payload
-         * Dispatcher to let it do any book-keeping. If the handler encounters
-         * an error, it is expected to assert within.
-         */
-        if (psci_spd_pm && psci_spd_pm->svc_suspend) {
-                max_off_lvl = psci_find_max_off_lvl(state_info);
-                assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
-                psci_spd_pm->svc_suspend_finish(max_off_lvl);
-        }
-
-        /* Invalidate the suspend level for the cpu */
-        psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);
-
-        /*
-         * Generic management: Now we just need to retrieve the
-         * information that we had stashed away during the suspend
-         * call to set this cpu on its way.
-         */
-        cm_prepare_el3_exit(NON_SECURE);
-}
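
The assert in psci_cpu_suspend_finish() leans on the library's local-state
convention: zero means RUN, values up to the platform's retention bound are
retention states, and anything beyond that (up to the off bound) is a
power-down state. A reconstruction of the helper, with illustrative platform
bounds:

    #define PSCI_LOCAL_STATE_RUN    0U
    #define PLAT_MAX_RET_STATE      1U      /* illustrative platform bound */
    #define PLAT_MAX_OFF_STATE      2U      /* illustrative platform bound */

    static inline int is_local_state_off(unsigned int plat_local_state)
    {
            return (plat_local_state > PLAT_MAX_RET_STATE) &&
                   (plat_local_state <= PLAT_MAX_OFF_STATE);
    }
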
diff --git a/services/std_svc/psci/psci_system_off.c b/services/std_svc/psci/psci_system_off.c
deleted file mode 100644
index 28315d6..0000000
--- a/services/std_svc/psci/psci_system_off.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include "psci_private.h"
-
-void psci_system_off(void)
-{
-        psci_print_power_domain_map();
-
-        assert(psci_plat_pm_ops->system_off);
-
-        /* Notify the Secure Payload Dispatcher */
-        if (psci_spd_pm && psci_spd_pm->svc_system_off) {
-                psci_spd_pm->svc_system_off();
-        }
-
-        /* Call the platform specific hook */
-        psci_plat_pm_ops->system_off();
-
-        /* This function does not return. We should never get here */
-}
-
-void psci_system_reset(void)
-{
-        psci_print_power_domain_map();
-
-        assert(psci_plat_pm_ops->system_reset);
-
-        /* Notify the Secure Payload Dispatcher */
-        if (psci_spd_pm && psci_spd_pm->svc_system_reset) {
-                psci_spd_pm->svc_system_reset();
-        }
-
-        /* Call the platform specific hook */
-        psci_plat_pm_ops->system_reset();
-
-        /* This function does not return. We should never get here */
-}
diff --git a/services/std_svc/std_svc_setup.c b/services/std_svc/std_svc_setup.c
index 6cb0319..06647e0 100644
--- a/services/std_svc/std_svc_setup.c
+++ b/services/std_svc/std_svc_setup.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
@@ -31,6 +31,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -40,36 +41,27 @@
                 0x108d905b, 0xf863, 0x47e8, 0xae, 0x2d,
                 0xc0, 0xfb, 0x56, 0x41, 0xf6, 0xe2);
-/* Setup Standard Services */
-static int32_t std_svc_setup(void)
-{
-        /*
-         * PSCI is the only specification implemented as a Standard Service.
-         * Invoke PSCI setup from here.
-         */
-        return psci_setup();
-}
-
 /*
  * Top-level Standard Service SMC handler. This handler will in turn dispatch
  * calls to PSCI SMC handler
  */
-uint64_t std_svc_smc_handler(uint32_t smc_fid,
-                             uint64_t x1,
-                             uint64_t x2,
-                             uint64_t x3,
-                             uint64_t x4,
+uintptr_t std_svc_smc_handler(uint32_t smc_fid,
+                              u_register_t x1,
+                              u_register_t x2,
+                              u_register_t x3,
+                              u_register_t x4,
                              void *cookie,
                              void *handle,
-                             uint64_t flags)
+                             u_register_t flags)
 {
        /*
         * Dispatch PSCI calls to PSCI SMC handler and return its return
         * value
         */
        if (is_psci_fid(smc_fid)) {
-               return psci_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
-                                       handle, flags);
+               SMC_RET1(handle,
+                        psci_smc_handler(smc_fid, x1, x2, x3, x4,
+                                         cookie, handle, flags));
        }

        switch (smc_fid) {
@@ -101,6 +93,6 @@
        OEN_STD_START,
        OEN_STD_END,
        SMC_TYPE_FAST,
-       std_svc_setup,
+       NULL,
        std_svc_smc_handler
 );
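
The std_svc_setup.c hunk stops returning the PSCI result as a raw uint64_t
and instead routes it through SMC_RET1(), which parks the value in the saved
x0 of the caller's context and returns the context handle for the exception
return path to restore. A sketch of the macro's effect, modelled on the
context helpers in this codebase (the exact macro body is assumed):

    /* Store the result in the saved x0 of the SMC caller's context;
     * el3_exit reloads it just before the ERET back to the caller. */
    #define SMC_RET1_SKETCH(_h, _x0) {                                  \
            write_ctx_reg(get_gpregs_ctx(_h), CTX_GPREG_X0, (_x0));     \
            return (uintptr_t)(_h);                                     \
    }
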