diff --git a/docs/porting-guide.md b/docs/porting-guide.md index 7b2e7d2..b4253e6 100644 --- a/docs/porting-guide.md +++ b/docs/porting-guide.md @@ -177,7 +177,7 @@ The following mandatory modifications may be implemented in any file the implementer chooses. In the ARM FVP port, they are implemented in -[../plat/fvp/aarch64/fvp_common.c]. +[../plat/fvp/aarch64/plat_common.c]. * **Variable : unsigned char platform_normal_stacks[X][Y]** @@ -836,7 +836,7 @@ the passed pointer with a pointer to BL3-1's private `plat_pm_ops` structure. A description of each member of this structure is given below. Please refer to -the ARM FVP specific implementation of these handlers in [../plat/fvp/fvp_pm.c] +the ARM FVP specific implementation of these handlers in [../plat/fvp/plat_pm.c] as an example. A platform port may choose not implement some of the power management operations. For example, the ARM FVP port does not implement the `affinst_standby()` function. @@ -991,6 +991,6 @@ [../plat/common/aarch64/platform_helpers.S]: ../plat/common/aarch64/platform_helpers.S [../plat/fvp/platform.h]: ../plat/fvp/platform.h -[../plat/fvp/aarch64/fvp_common.c]: ../plat/fvp/aarch64/fvp_common.c -[../plat/fvp/fvp_pm.c]: ../plat/fvp/fvp_pm.c +[../plat/fvp/aarch64/plat_common.c]: ../plat/fvp/aarch64/plat_common.c +[../plat/fvp/plat_pm.c]: ../plat/fvp/plat_pm.c [../include/runtime_svc.h]: ../include/runtime_svc.h diff --git a/plat/fvp/aarch64/fvp_common.c b/plat/fvp/aarch64/fvp_common.c deleted file mode 100644 index 8568bfe..0000000 --- a/plat/fvp/aarch64/fvp_common.c +++ /dev/null @@ -1,595 +0,0 @@ -/* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. 
- * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -/* Included only for error codes */ -#include - -unsigned char platform_normal_stacks[PLATFORM_STACK_SIZE][PLATFORM_CORE_COUNT] -__attribute__ ((aligned(PLATFORM_CACHE_LINE_SIZE), - section("tzfw_normal_stacks"))); - -/******************************************************************************* - * This array holds the characteristics of the differences between the three - * FVP platforms (Base, A53_A57 & Foundation). It will be populated during cold - * boot at each boot stage by the primary before enabling the MMU (to allow cci - * configuration) & used thereafter. Each BL will have its own copy to allow - * independent operation. 
- ******************************************************************************/ -static unsigned long platform_config[CONFIG_LIMIT]; - -/******************************************************************************* - * TODO: Check page table alignment to avoid space wastage - ******************************************************************************/ - -/******************************************************************************* - * Level 1 translation tables need 4 entries for the 4GB address space accessib- - * le by the secure firmware. Input address space will be restricted using the - * T0SZ settings in the TCR. - ******************************************************************************/ -static unsigned long l1_xlation_table[ADDR_SPACE_SIZE >> 30] -__attribute__ ((aligned((ADDR_SPACE_SIZE >> 30) << 3))); - -/******************************************************************************* - * Level 2 translation tables describe the first & second gb of the address - * space needed to address secure peripherals e.g. trusted ROM and RAM. - ******************************************************************************/ -static unsigned long l2_xlation_table[NUM_L2_PAGETABLES][NUM_2MB_IN_GB] -__attribute__ ((aligned(NUM_2MB_IN_GB << 3))); - -/******************************************************************************* - * Level 3 translation tables (2 sets) describe the trusted & non-trusted RAM - * regions at a granularity of 4K. - ******************************************************************************/ -static unsigned long l3_xlation_table[NUM_L3_PAGETABLES][NUM_4K_IN_2MB] -__attribute__ ((aligned(NUM_4K_IN_2MB << 3))); - -/******************************************************************************* - * Helper to create a level 1/2 table descriptor which points to a level 2/3 - * table. 
- ******************************************************************************/ -static unsigned long create_table_desc(unsigned long *next_table_ptr) -{ - unsigned long desc = (unsigned long) next_table_ptr; - - /* Clear the last 12 bits */ - desc >>= FOUR_KB_SHIFT; - desc <<= FOUR_KB_SHIFT; - - desc |= TABLE_DESC; - - return desc; -} - -/******************************************************************************* - * Helper to create a level 1/2/3 block descriptor which maps the va to addr - ******************************************************************************/ -static unsigned long create_block_desc(unsigned long desc, - unsigned long addr, - unsigned int level) -{ - switch (level) { - case LEVEL1: - desc |= (addr << FIRST_LEVEL_DESC_N) | BLOCK_DESC; - break; - case LEVEL2: - desc |= (addr << SECOND_LEVEL_DESC_N) | BLOCK_DESC; - break; - case LEVEL3: - desc |= (addr << THIRD_LEVEL_DESC_N) | TABLE_DESC; - break; - default: - assert(0); - } - - return desc; -} - -/******************************************************************************* - * Helper to create a level 1/2/3 block descriptor which maps the va to output_ - * addr with Device nGnRE attributes. - ******************************************************************************/ -static unsigned long create_device_block(unsigned long output_addr, - unsigned int level, - unsigned int ns) -{ - unsigned long upper_attrs, lower_attrs, desc; - - lower_attrs = LOWER_ATTRS(ACCESS_FLAG | OSH | AP_RW); - lower_attrs |= LOWER_ATTRS(ns | ATTR_DEVICE_INDEX); - upper_attrs = UPPER_ATTRS(XN); - desc = upper_attrs | lower_attrs; - - return create_block_desc(desc, output_addr, level); -} - -/******************************************************************************* - * Helper to create a level 1/2/3 block descriptor which maps the va to output_ - * addr with inner-shareable normal wbwa read-only memory attributes. 
- ******************************************************************************/ -static unsigned long create_romem_block(unsigned long output_addr, - unsigned int level, - unsigned int ns) -{ - unsigned long upper_attrs, lower_attrs, desc; - - lower_attrs = LOWER_ATTRS(ACCESS_FLAG | ISH | AP_RO); - lower_attrs |= LOWER_ATTRS(ns | ATTR_IWBWA_OWBWA_NTR_INDEX); - upper_attrs = UPPER_ATTRS(0ull); - desc = upper_attrs | lower_attrs; - - return create_block_desc(desc, output_addr, level); -} - -/******************************************************************************* - * Helper to create a level 1/2/3 block descriptor which maps the va to output_ - * addr with inner-shareable normal wbwa read-write memory attributes. - ******************************************************************************/ -static unsigned long create_rwmem_block(unsigned long output_addr, - unsigned int level, - unsigned int ns) -{ - unsigned long upper_attrs, lower_attrs, desc; - - lower_attrs = LOWER_ATTRS(ACCESS_FLAG | ISH | AP_RW); - lower_attrs |= LOWER_ATTRS(ns | ATTR_IWBWA_OWBWA_NTR_INDEX); - upper_attrs = UPPER_ATTRS(XN); - desc = upper_attrs | lower_attrs; - - return create_block_desc(desc, output_addr, level); -} - -/******************************************************************************* - * Create page tables as per the platform memory map. Certain aspects of page - * talble creating have been abstracted in the above routines. This can be impr- - * oved further. 
- * TODO: Move the page table setup helpers into the arch or lib directory - *******************************************************************************/ -static unsigned long fill_xlation_tables(meminfo *tzram_layout, - unsigned long ro_start, - unsigned long ro_limit, - unsigned long coh_start, - unsigned long coh_limit) -{ - unsigned long l2_desc, l3_desc; - unsigned long *xt_addr = 0, *pt_addr, off = 0; - unsigned long trom_start_index, trom_end_index; - unsigned long tzram_start_index, tzram_end_index; - unsigned long flash0_start_index, flash0_end_index; - unsigned long flash1_start_index, flash1_end_index; - unsigned long vram_start_index, vram_end_index; - unsigned long nsram_start_index, nsram_end_index; - unsigned long tdram_start_index, tdram_end_index; - unsigned long dram_start_index, dram_end_index; - unsigned long dev0_start_index, dev0_end_index; - unsigned long dev1_start_index, dev1_end_index; - unsigned int idx; - - - /***************************************************************** - * LEVEL1 PAGETABLE SETUP - * - * Find the start and end indices of the memory peripherals in the - * first level pagetables. These are the main areas we care about. - * Also bump the end index by one if its equal to the start to - * allow for regions which lie completely in a GB. - *****************************************************************/ - trom_start_index = ONE_GB_INDEX(TZROM_BASE); - dev0_start_index = ONE_GB_INDEX(TZRNG_BASE); - dram_start_index = ONE_GB_INDEX(DRAM_BASE); - dram_end_index = ONE_GB_INDEX(DRAM_BASE + DRAM_SIZE); - - if (dram_end_index == dram_start_index) - dram_end_index++; - - /* - * Fill up the level1 translation table first - */ - for (idx = 0; idx < (ADDR_SPACE_SIZE >> 30); idx++) { - - /* - * Fill up the entry for the TZROM. This will cover - * everything in the first GB. 
- */ - if (idx == trom_start_index) { - xt_addr = &l2_xlation_table[GB1_L2_PAGETABLE][0]; - l1_xlation_table[idx] = create_table_desc(xt_addr); - continue; - } - - /* - * Mark the second gb as device - */ - if (idx == dev0_start_index) { - xt_addr = &l2_xlation_table[GB2_L2_PAGETABLE][0]; - l1_xlation_table[idx] = create_table_desc(xt_addr); - continue; - } - - /* - * Fill up the block entry for the DRAM with Normal - * inner-WBWA outer-WBWA non-transient attributes. - * This will cover 2-4GB. Note that the acesses are - * marked as non-secure. - */ - if ((idx >= dram_start_index) && (idx < dram_end_index)) { - l1_xlation_table[idx] = create_rwmem_block(idx, LEVEL1, - NS); - continue; - } - - assert(0); - } - - - /***************************************************************** - * LEVEL2 PAGETABLE SETUP - * - * Find the start and end indices of the memory & peripherals in the - * second level pagetables. - ******************************************************************/ - - /* Initializations for the 1st GB */ - trom_start_index = TWO_MB_INDEX(TZROM_BASE); - trom_end_index = TWO_MB_INDEX(TZROM_BASE + TZROM_SIZE); - if (trom_end_index == trom_start_index) - trom_end_index++; - - tdram_start_index = TWO_MB_INDEX(TZDRAM_BASE); - tdram_end_index = TWO_MB_INDEX(TZDRAM_BASE + TZDRAM_SIZE); - if (tdram_end_index == tdram_start_index) - tdram_end_index++; - - flash0_start_index = TWO_MB_INDEX(FLASH0_BASE); - flash0_end_index = TWO_MB_INDEX(FLASH0_BASE + TZROM_SIZE); - if (flash0_end_index == flash0_start_index) - flash0_end_index++; - - flash1_start_index = TWO_MB_INDEX(FLASH1_BASE); - flash1_end_index = TWO_MB_INDEX(FLASH1_BASE + FLASH1_SIZE); - if (flash1_end_index == flash1_start_index) - flash1_end_index++; - - vram_start_index = TWO_MB_INDEX(VRAM_BASE); - vram_end_index = TWO_MB_INDEX(VRAM_BASE + VRAM_SIZE); - if (vram_end_index == vram_start_index) - vram_end_index++; - - dev0_start_index = TWO_MB_INDEX(DEVICE0_BASE); - dev0_end_index = TWO_MB_INDEX(DEVICE0_BASE 
+ DEVICE0_SIZE); - if (dev0_end_index == dev0_start_index) - dev0_end_index++; - - dev1_start_index = TWO_MB_INDEX(DEVICE1_BASE); - dev1_end_index = TWO_MB_INDEX(DEVICE1_BASE + DEVICE1_SIZE); - if (dev1_end_index == dev1_start_index) - dev1_end_index++; - - /* Since the size is < 2M this is a single index */ - tzram_start_index = TWO_MB_INDEX(tzram_layout->total_base); - nsram_start_index = TWO_MB_INDEX(NSRAM_BASE); - - /* - * Fill up the level2 translation table for the first GB next - */ - for (idx = 0; idx < NUM_2MB_IN_GB; idx++) { - - l2_desc = INVALID_DESC; - xt_addr = &l2_xlation_table[GB1_L2_PAGETABLE][idx]; - - /* Block entries for 64M of trusted Boot ROM */ - if ((idx >= trom_start_index) && (idx < trom_end_index)) - l2_desc = create_romem_block(idx, LEVEL2, 0); - - /* Single L3 page table entry for 256K of TZRAM */ - if (idx == tzram_start_index) { - pt_addr = &l3_xlation_table[TZRAM_PAGETABLE][0]; - l2_desc = create_table_desc(pt_addr); - } - - /* Block entries for 32M of trusted DRAM */ - if ((idx >= tdram_start_index) && (idx <= tdram_end_index)) - l2_desc = create_rwmem_block(idx, LEVEL2, 0); - - /* Block entries for 64M of aliased trusted Boot ROM */ - if ((idx >= flash0_start_index) && (idx < flash0_end_index)) - l2_desc = create_romem_block(idx, LEVEL2, 0); - - /* Block entries for 64M of flash1 */ - if ((idx >= flash1_start_index) && (idx < flash1_end_index)) - l2_desc = create_romem_block(idx, LEVEL2, 0); - - /* Block entries for 32M of VRAM */ - if ((idx >= vram_start_index) && (idx < vram_end_index)) - l2_desc = create_rwmem_block(idx, LEVEL2, 0); - - /* Block entries for all the devices in the first gb */ - if ((idx >= dev0_start_index) && (idx < dev0_end_index)) - l2_desc = create_device_block(idx, LEVEL2, 0); - - /* Block entries for all the devices in the first gb */ - if ((idx >= dev1_start_index) && (idx < dev1_end_index)) - l2_desc = create_device_block(idx, LEVEL2, 0); - - /* Single L3 page table entry for 64K of NSRAM */ - if (idx == 
nsram_start_index) { - pt_addr = &l3_xlation_table[NSRAM_PAGETABLE][0]; - l2_desc = create_table_desc(pt_addr); - } - - *xt_addr = l2_desc; - } - - - /* - * Initializations for the 2nd GB. Mark everything as device - * for the time being as the memory map is not final. Each - * index will need to be offset'ed to allow absolute values - */ - off = NUM_2MB_IN_GB; - for (idx = off; idx < (NUM_2MB_IN_GB + off); idx++) { - l2_desc = create_device_block(idx, LEVEL2, 0); - xt_addr = &l2_xlation_table[GB2_L2_PAGETABLE][idx - off]; - *xt_addr = l2_desc; - } - - - /***************************************************************** - * LEVEL3 PAGETABLE SETUP - *****************************************************************/ - - /* Fill up the level3 pagetable for the trusted SRAM. */ - tzram_start_index = FOUR_KB_INDEX(tzram_layout->total_base); - tzram_end_index = FOUR_KB_INDEX(tzram_layout->total_base + - tzram_layout->total_size); - if (tzram_end_index == tzram_start_index) - tzram_end_index++; - - /* Reusing trom* to mark RO memory. */ - trom_start_index = FOUR_KB_INDEX(ro_start); - trom_end_index = FOUR_KB_INDEX(ro_limit); - if (trom_end_index == trom_start_index) - trom_end_index++; - - /* Reusing dev* to mark coherent device memory. 
*/ - dev0_start_index = FOUR_KB_INDEX(coh_start); - dev0_end_index = FOUR_KB_INDEX(coh_limit); - if (dev0_end_index == dev0_start_index) - dev0_end_index++; - - - /* Each index will need to be offset'ed to allow absolute values */ - off = FOUR_KB_INDEX(TZRAM_BASE); - for (idx = off; idx < (NUM_4K_IN_2MB + off); idx++) { - - l3_desc = INVALID_DESC; - xt_addr = &l3_xlation_table[TZRAM_PAGETABLE][idx - off]; - - if (idx >= tzram_start_index && idx < tzram_end_index) - l3_desc = create_rwmem_block(idx, LEVEL3, 0); - - if (idx >= trom_start_index && idx < trom_end_index) - l3_desc = create_romem_block(idx, LEVEL3, 0); - - if (idx >= dev0_start_index && idx < dev0_end_index) - l3_desc = create_device_block(idx, LEVEL3, 0); - - *xt_addr = l3_desc; - } - - /* Fill up the level3 pagetable for the non-trusted SRAM. */ - nsram_start_index = FOUR_KB_INDEX(NSRAM_BASE); - nsram_end_index = FOUR_KB_INDEX(NSRAM_BASE + NSRAM_SIZE); - if (nsram_end_index == nsram_start_index) - nsram_end_index++; - - /* Each index will need to be offset'ed to allow absolute values */ - off = FOUR_KB_INDEX(NSRAM_BASE); - for (idx = off; idx < (NUM_4K_IN_2MB + off); idx++) { - - l3_desc = INVALID_DESC; - xt_addr = &l3_xlation_table[NSRAM_PAGETABLE][idx - off]; - - if (idx >= nsram_start_index && idx < nsram_end_index) - l3_desc = create_rwmem_block(idx, LEVEL3, NS); - - *xt_addr = l3_desc; - } - - return (unsigned long) l1_xlation_table; -} - -/******************************************************************************* - * Enable the MMU assuming that the pagetables have already been created - *******************************************************************************/ -void enable_mmu() -{ - unsigned long mair, tcr, ttbr, sctlr; - unsigned long current_el = read_current_el(); - - /* Set the attributes in the right indices of the MAIR */ - mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX); - mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, - ATTR_IWBWA_OWBWA_NTR_INDEX); - write_mair(mair); - - 
/* - * Set TCR bits as well. Inner & outer WBWA & shareable + T0SZ = 32 - */ - tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA | - TCR_RGN_INNER_WBA | TCR_T0SZ_4GB; - if (GET_EL(current_el) == MODE_EL3) { - tcr |= TCR_EL3_RES1; - /* Invalidate EL3 TLBs */ - tlbialle3(); - } else { - /* Invalidate EL1 TLBs */ - tlbivmalle1(); - } - - write_tcr(tcr); - - /* Set TTBR bits as well */ - assert(((unsigned long)l1_xlation_table & (sizeof(l1_xlation_table) - 1)) == 0); - ttbr = (unsigned long) l1_xlation_table; - write_ttbr0(ttbr); - - sctlr = read_sctlr(); - sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT; - sctlr |= SCTLR_A_BIT | SCTLR_C_BIT; - write_sctlr(sctlr); - - return; -} - -void disable_mmu(void) -{ - /* Zero out the MMU related registers */ - write_mair(0); - write_tcr(0); - write_ttbr0(0); - write_sctlr(0); - - /* Flush the caches */ - dcsw_op_all(DCCISW); - - return; -} - -/******************************************************************************* - * Setup the pagetables as per the platform memory map & initialize the mmu - *******************************************************************************/ -void configure_mmu(meminfo *mem_layout, - unsigned long ro_start, - unsigned long ro_limit, - unsigned long coh_start, - unsigned long coh_limit) -{ - assert(IS_PAGE_ALIGNED(ro_start)); - assert(IS_PAGE_ALIGNED(ro_limit)); - assert(IS_PAGE_ALIGNED(coh_start)); - assert(IS_PAGE_ALIGNED(coh_limit)); - - fill_xlation_tables(mem_layout, - ro_start, - ro_limit, - coh_start, - coh_limit); - enable_mmu(); - return; -} - -/* Simple routine which returns a configuration variable value */ -unsigned long platform_get_cfgvar(unsigned int var_id) -{ - assert(var_id < CONFIG_LIMIT); - return platform_config[var_id]; -} - -/******************************************************************************* - * A single boot loader stack is expected to work on both the Foundation FVP - * models and the two flavours of the Base FVP models (AEMv8 & Cortex). 
The - * SYS_ID register provides a mechanism for detecting the differences between - * these platforms. This information is stored in a per-BL array to allow the - * code to take the correct path.Per BL platform configuration. - ******************************************************************************/ -int platform_config_setup(void) -{ - unsigned int rev, hbi, bld, arch, sys_id, midr_pn; - - sys_id = mmio_read_32(VE_SYSREGS_BASE + V2M_SYS_ID); - rev = (sys_id >> SYS_ID_REV_SHIFT) & SYS_ID_REV_MASK; - hbi = (sys_id >> SYS_ID_HBI_SHIFT) & SYS_ID_HBI_MASK; - bld = (sys_id >> SYS_ID_BLD_SHIFT) & SYS_ID_BLD_MASK; - arch = (sys_id >> SYS_ID_ARCH_SHIFT) & SYS_ID_ARCH_MASK; - - assert(rev == REV_FVP); - assert(arch == ARCH_MODEL); - - /* - * The build field in the SYS_ID tells which variant of the GIC - * memory is implemented by the model. - */ - switch (bld) { - case BLD_GIC_VE_MMAP: - platform_config[CONFIG_GICD_ADDR] = VE_GICD_BASE; - platform_config[CONFIG_GICC_ADDR] = VE_GICC_BASE; - platform_config[CONFIG_GICH_ADDR] = VE_GICH_BASE; - platform_config[CONFIG_GICV_ADDR] = VE_GICV_BASE; - break; - case BLD_GIC_A53A57_MMAP: - platform_config[CONFIG_GICD_ADDR] = BASE_GICD_BASE; - platform_config[CONFIG_GICC_ADDR] = BASE_GICC_BASE; - platform_config[CONFIG_GICH_ADDR] = BASE_GICH_BASE; - platform_config[CONFIG_GICV_ADDR] = BASE_GICV_BASE; - break; - default: - assert(0); - } - - /* - * The hbi field in the SYS_ID is 0x020 for the Base FVP & 0x010 - * for the Foundation FVP. 
- */ - switch (hbi) { - case HBI_FOUNDATION: - platform_config[CONFIG_MAX_AFF0] = 4; - platform_config[CONFIG_MAX_AFF1] = 1; - platform_config[CONFIG_CPU_SETUP] = 0; - platform_config[CONFIG_BASE_MMAP] = 0; - platform_config[CONFIG_HAS_CCI] = 0; - break; - case HBI_FVP_BASE: - midr_pn = (read_midr() >> MIDR_PN_SHIFT) & MIDR_PN_MASK; - if ((midr_pn == MIDR_PN_A57) || (midr_pn == MIDR_PN_A53)) - platform_config[CONFIG_CPU_SETUP] = 1; - else - platform_config[CONFIG_CPU_SETUP] = 0; - - platform_config[CONFIG_MAX_AFF0] = 4; - platform_config[CONFIG_MAX_AFF1] = 2; - platform_config[CONFIG_BASE_MMAP] = 1; - platform_config[CONFIG_HAS_CCI] = 1; - break; - default: - assert(0); - } - - return 0; -} - -unsigned long plat_get_ns_image_entrypoint(void) -{ - return NS_IMAGE_OFFSET; -} diff --git a/plat/fvp/aarch64/fvp_helpers.S b/plat/fvp/aarch64/fvp_helpers.S deleted file mode 100644 index 032b393..0000000 --- a/plat/fvp/aarch64/fvp_helpers.S +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include - - .globl plat_report_exception - - .section .text, "ax" - - /* --------------------------------------------- - * void plat_report_exception(unsigned int type) - * Function to report an unhandled exception - * with platform-specific means. - * On FVP platform, it updates the LEDs - * to indicate where we are - * --------------------------------------------- - */ -plat_report_exception: - mrs x1, CurrentEl - lsr x1, x1, #MODE_EL_SHIFT - lsl x1, x1, #SYS_LED_EL_SHIFT - lsl x0, x0, #SYS_LED_EC_SHIFT - mov x2, #(SECURE << SYS_LED_SS_SHIFT) - orr x0, x0, x2 - orr x0, x0, x1 - mov x1, #VE_SYSREGS_BASE - add x1, x1, #V2M_SYS_LED - str w0, [x1] - ret diff --git a/plat/fvp/aarch64/plat_common.c b/plat/fvp/aarch64/plat_common.c new file mode 100644 index 0000000..8568bfe --- /dev/null +++ b/plat/fvp/aarch64/plat_common.c @@ -0,0 +1,595 @@ +/* + * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +/* Included only for error codes */ +#include + +unsigned char platform_normal_stacks[PLATFORM_STACK_SIZE][PLATFORM_CORE_COUNT] +__attribute__ ((aligned(PLATFORM_CACHE_LINE_SIZE), + section("tzfw_normal_stacks"))); + +/******************************************************************************* + * This array holds the characteristics of the differences between the three + * FVP platforms (Base, A53_A57 & Foundation). 
It will be populated during cold + * boot at each boot stage by the primary before enabling the MMU (to allow cci + * configuration) & used thereafter. Each BL will have its own copy to allow + * independent operation. + ******************************************************************************/ +static unsigned long platform_config[CONFIG_LIMIT]; + +/******************************************************************************* + * TODO: Check page table alignment to avoid space wastage + ******************************************************************************/ + +/******************************************************************************* + * Level 1 translation tables need 4 entries for the 4GB address space accessib- + * le by the secure firmware. Input address space will be restricted using the + * T0SZ settings in the TCR. + ******************************************************************************/ +static unsigned long l1_xlation_table[ADDR_SPACE_SIZE >> 30] +__attribute__ ((aligned((ADDR_SPACE_SIZE >> 30) << 3))); + +/******************************************************************************* + * Level 2 translation tables describe the first & second gb of the address + * space needed to address secure peripherals e.g. trusted ROM and RAM. + ******************************************************************************/ +static unsigned long l2_xlation_table[NUM_L2_PAGETABLES][NUM_2MB_IN_GB] +__attribute__ ((aligned(NUM_2MB_IN_GB << 3))); + +/******************************************************************************* + * Level 3 translation tables (2 sets) describe the trusted & non-trusted RAM + * regions at a granularity of 4K. 
+ ******************************************************************************/ +static unsigned long l3_xlation_table[NUM_L3_PAGETABLES][NUM_4K_IN_2MB] +__attribute__ ((aligned(NUM_4K_IN_2MB << 3))); + +/******************************************************************************* + * Helper to create a level 1/2 table descriptor which points to a level 2/3 + * table. + ******************************************************************************/ +static unsigned long create_table_desc(unsigned long *next_table_ptr) +{ + unsigned long desc = (unsigned long) next_table_ptr; + + /* Clear the last 12 bits */ + desc >>= FOUR_KB_SHIFT; + desc <<= FOUR_KB_SHIFT; + + desc |= TABLE_DESC; + + return desc; +} + +/******************************************************************************* + * Helper to create a level 1/2/3 block descriptor which maps the va to addr + ******************************************************************************/ +static unsigned long create_block_desc(unsigned long desc, + unsigned long addr, + unsigned int level) +{ + switch (level) { + case LEVEL1: + desc |= (addr << FIRST_LEVEL_DESC_N) | BLOCK_DESC; + break; + case LEVEL2: + desc |= (addr << SECOND_LEVEL_DESC_N) | BLOCK_DESC; + break; + case LEVEL3: + desc |= (addr << THIRD_LEVEL_DESC_N) | TABLE_DESC; + break; + default: + assert(0); + } + + return desc; +} + +/******************************************************************************* + * Helper to create a level 1/2/3 block descriptor which maps the va to output_ + * addr with Device nGnRE attributes. 
+ ******************************************************************************/ +static unsigned long create_device_block(unsigned long output_addr, + unsigned int level, + unsigned int ns) +{ + unsigned long upper_attrs, lower_attrs, desc; + + lower_attrs = LOWER_ATTRS(ACCESS_FLAG | OSH | AP_RW); + lower_attrs |= LOWER_ATTRS(ns | ATTR_DEVICE_INDEX); + upper_attrs = UPPER_ATTRS(XN); + desc = upper_attrs | lower_attrs; + + return create_block_desc(desc, output_addr, level); +} + +/******************************************************************************* + * Helper to create a level 1/2/3 block descriptor which maps the va to output_ + * addr with inner-shareable normal wbwa read-only memory attributes. + ******************************************************************************/ +static unsigned long create_romem_block(unsigned long output_addr, + unsigned int level, + unsigned int ns) +{ + unsigned long upper_attrs, lower_attrs, desc; + + lower_attrs = LOWER_ATTRS(ACCESS_FLAG | ISH | AP_RO); + lower_attrs |= LOWER_ATTRS(ns | ATTR_IWBWA_OWBWA_NTR_INDEX); + upper_attrs = UPPER_ATTRS(0ull); + desc = upper_attrs | lower_attrs; + + return create_block_desc(desc, output_addr, level); +} + +/******************************************************************************* + * Helper to create a level 1/2/3 block descriptor which maps the va to output_ + * addr with inner-shareable normal wbwa read-write memory attributes. 
+ ******************************************************************************/ +static unsigned long create_rwmem_block(unsigned long output_addr, + unsigned int level, + unsigned int ns) +{ + unsigned long upper_attrs, lower_attrs, desc; + + lower_attrs = LOWER_ATTRS(ACCESS_FLAG | ISH | AP_RW); + lower_attrs |= LOWER_ATTRS(ns | ATTR_IWBWA_OWBWA_NTR_INDEX); + upper_attrs = UPPER_ATTRS(XN); + desc = upper_attrs | lower_attrs; + + return create_block_desc(desc, output_addr, level); +} + +/******************************************************************************* + * Create page tables as per the platform memory map. Certain aspects of page + * talble creating have been abstracted in the above routines. This can be impr- + * oved further. + * TODO: Move the page table setup helpers into the arch or lib directory + *******************************************************************************/ +static unsigned long fill_xlation_tables(meminfo *tzram_layout, + unsigned long ro_start, + unsigned long ro_limit, + unsigned long coh_start, + unsigned long coh_limit) +{ + unsigned long l2_desc, l3_desc; + unsigned long *xt_addr = 0, *pt_addr, off = 0; + unsigned long trom_start_index, trom_end_index; + unsigned long tzram_start_index, tzram_end_index; + unsigned long flash0_start_index, flash0_end_index; + unsigned long flash1_start_index, flash1_end_index; + unsigned long vram_start_index, vram_end_index; + unsigned long nsram_start_index, nsram_end_index; + unsigned long tdram_start_index, tdram_end_index; + unsigned long dram_start_index, dram_end_index; + unsigned long dev0_start_index, dev0_end_index; + unsigned long dev1_start_index, dev1_end_index; + unsigned int idx; + + + /***************************************************************** + * LEVEL1 PAGETABLE SETUP + * + * Find the start and end indices of the memory peripherals in the + * first level pagetables. These are the main areas we care about. 
+ * Also bump the end index by one if it's equal to the start to + * allow for regions which lie completely in a GB. + *****************************************************************/ + trom_start_index = ONE_GB_INDEX(TZROM_BASE); + dev0_start_index = ONE_GB_INDEX(TZRNG_BASE); + dram_start_index = ONE_GB_INDEX(DRAM_BASE); + dram_end_index = ONE_GB_INDEX(DRAM_BASE + DRAM_SIZE); + + if (dram_end_index == dram_start_index) + dram_end_index++; + + /* + * Fill up the level1 translation table first + */ + for (idx = 0; idx < (ADDR_SPACE_SIZE >> 30); idx++) { + + /* + * Fill up the entry for the TZROM. This will cover + * everything in the first GB. + */ + if (idx == trom_start_index) { + xt_addr = &l2_xlation_table[GB1_L2_PAGETABLE][0]; + l1_xlation_table[idx] = create_table_desc(xt_addr); + continue; + } + + /* + * Mark the second gb as device + */ + if (idx == dev0_start_index) { + xt_addr = &l2_xlation_table[GB2_L2_PAGETABLE][0]; + l1_xlation_table[idx] = create_table_desc(xt_addr); + continue; + } + + /* + * Fill up the block entry for the DRAM with Normal + * inner-WBWA outer-WBWA non-transient attributes. + * This will cover 2-4GB. Note that the accesses are + * marked as non-secure. + */ + if ((idx >= dram_start_index) && (idx < dram_end_index)) { + l1_xlation_table[idx] = create_rwmem_block(idx, LEVEL1, + NS); + continue; + } + + assert(0); + } + + + /***************************************************************** + * LEVEL2 PAGETABLE SETUP + * + * Find the start and end indices of the memory & peripherals in the + * second level pagetables. 
+ ******************************************************************/ + + /* Initializations for the 1st GB */ + trom_start_index = TWO_MB_INDEX(TZROM_BASE); + trom_end_index = TWO_MB_INDEX(TZROM_BASE + TZROM_SIZE); + if (trom_end_index == trom_start_index) + trom_end_index++; + + tdram_start_index = TWO_MB_INDEX(TZDRAM_BASE); + tdram_end_index = TWO_MB_INDEX(TZDRAM_BASE + TZDRAM_SIZE); + if (tdram_end_index == tdram_start_index) + tdram_end_index++; + + flash0_start_index = TWO_MB_INDEX(FLASH0_BASE); + flash0_end_index = TWO_MB_INDEX(FLASH0_BASE + TZROM_SIZE); + if (flash0_end_index == flash0_start_index) + flash0_end_index++; + + flash1_start_index = TWO_MB_INDEX(FLASH1_BASE); + flash1_end_index = TWO_MB_INDEX(FLASH1_BASE + FLASH1_SIZE); + if (flash1_end_index == flash1_start_index) + flash1_end_index++; + + vram_start_index = TWO_MB_INDEX(VRAM_BASE); + vram_end_index = TWO_MB_INDEX(VRAM_BASE + VRAM_SIZE); + if (vram_end_index == vram_start_index) + vram_end_index++; + + dev0_start_index = TWO_MB_INDEX(DEVICE0_BASE); + dev0_end_index = TWO_MB_INDEX(DEVICE0_BASE + DEVICE0_SIZE); + if (dev0_end_index == dev0_start_index) + dev0_end_index++; + + dev1_start_index = TWO_MB_INDEX(DEVICE1_BASE); + dev1_end_index = TWO_MB_INDEX(DEVICE1_BASE + DEVICE1_SIZE); + if (dev1_end_index == dev1_start_index) + dev1_end_index++; + + /* Since the size is < 2M this is a single index */ + tzram_start_index = TWO_MB_INDEX(tzram_layout->total_base); + nsram_start_index = TWO_MB_INDEX(NSRAM_BASE); + + /* + * Fill up the level2 translation table for the first GB next + */ + for (idx = 0; idx < NUM_2MB_IN_GB; idx++) { + + l2_desc = INVALID_DESC; + xt_addr = &l2_xlation_table[GB1_L2_PAGETABLE][idx]; + + /* Block entries for 64M of trusted Boot ROM */ + if ((idx >= trom_start_index) && (idx < trom_end_index)) + l2_desc = create_romem_block(idx, LEVEL2, 0); + + /* Single L3 page table entry for 256K of TZRAM */ + if (idx == tzram_start_index) { + pt_addr = 
&l3_xlation_table[TZRAM_PAGETABLE][0]; + l2_desc = create_table_desc(pt_addr); + } + + /* Block entries for 32M of trusted DRAM */ + if ((idx >= tdram_start_index) && (idx <= tdram_end_index)) + l2_desc = create_rwmem_block(idx, LEVEL2, 0); + + /* Block entries for 64M of aliased trusted Boot ROM */ + if ((idx >= flash0_start_index) && (idx < flash0_end_index)) + l2_desc = create_romem_block(idx, LEVEL2, 0); + + /* Block entries for 64M of flash1 */ + if ((idx >= flash1_start_index) && (idx < flash1_end_index)) + l2_desc = create_romem_block(idx, LEVEL2, 0); + + /* Block entries for 32M of VRAM */ + if ((idx >= vram_start_index) && (idx < vram_end_index)) + l2_desc = create_rwmem_block(idx, LEVEL2, 0); + + /* Block entries for all the devices in the first gb */ + if ((idx >= dev0_start_index) && (idx < dev0_end_index)) + l2_desc = create_device_block(idx, LEVEL2, 0); + + /* Block entries for all the devices in the first gb */ + if ((idx >= dev1_start_index) && (idx < dev1_end_index)) + l2_desc = create_device_block(idx, LEVEL2, 0); + + /* Single L3 page table entry for 64K of NSRAM */ + if (idx == nsram_start_index) { + pt_addr = &l3_xlation_table[NSRAM_PAGETABLE][0]; + l2_desc = create_table_desc(pt_addr); + } + + *xt_addr = l2_desc; + } + + + /* + * Initializations for the 2nd GB. Mark everything as device + * for the time being as the memory map is not final. Each + * index will need to be offset'ed to allow absolute values + */ + off = NUM_2MB_IN_GB; + for (idx = off; idx < (NUM_2MB_IN_GB + off); idx++) { + l2_desc = create_device_block(idx, LEVEL2, 0); + xt_addr = &l2_xlation_table[GB2_L2_PAGETABLE][idx - off]; + *xt_addr = l2_desc; + } + + + /***************************************************************** + * LEVEL3 PAGETABLE SETUP + *****************************************************************/ + + /* Fill up the level3 pagetable for the trusted SRAM. 
*/ + tzram_start_index = FOUR_KB_INDEX(tzram_layout->total_base); + tzram_end_index = FOUR_KB_INDEX(tzram_layout->total_base + + tzram_layout->total_size); + if (tzram_end_index == tzram_start_index) + tzram_end_index++; + + /* Reusing trom* to mark RO memory. */ + trom_start_index = FOUR_KB_INDEX(ro_start); + trom_end_index = FOUR_KB_INDEX(ro_limit); + if (trom_end_index == trom_start_index) + trom_end_index++; + + /* Reusing dev* to mark coherent device memory. */ + dev0_start_index = FOUR_KB_INDEX(coh_start); + dev0_end_index = FOUR_KB_INDEX(coh_limit); + if (dev0_end_index == dev0_start_index) + dev0_end_index++; + + + /* Each index will need to be offset'ed to allow absolute values */ + off = FOUR_KB_INDEX(TZRAM_BASE); + for (idx = off; idx < (NUM_4K_IN_2MB + off); idx++) { + + l3_desc = INVALID_DESC; + xt_addr = &l3_xlation_table[TZRAM_PAGETABLE][idx - off]; + + if (idx >= tzram_start_index && idx < tzram_end_index) + l3_desc = create_rwmem_block(idx, LEVEL3, 0); + + if (idx >= trom_start_index && idx < trom_end_index) + l3_desc = create_romem_block(idx, LEVEL3, 0); + + if (idx >= dev0_start_index && idx < dev0_end_index) + l3_desc = create_device_block(idx, LEVEL3, 0); + + *xt_addr = l3_desc; + } + + /* Fill up the level3 pagetable for the non-trusted SRAM. 
*/ + nsram_start_index = FOUR_KB_INDEX(NSRAM_BASE); + nsram_end_index = FOUR_KB_INDEX(NSRAM_BASE + NSRAM_SIZE); + if (nsram_end_index == nsram_start_index) + nsram_end_index++; + + /* Each index will need to be offset'ed to allow absolute values */ + off = FOUR_KB_INDEX(NSRAM_BASE); + for (idx = off; idx < (NUM_4K_IN_2MB + off); idx++) { + + l3_desc = INVALID_DESC; + xt_addr = &l3_xlation_table[NSRAM_PAGETABLE][idx - off]; + + if (idx >= nsram_start_index && idx < nsram_end_index) + l3_desc = create_rwmem_block(idx, LEVEL3, NS); + + *xt_addr = l3_desc; + } + + return (unsigned long) l1_xlation_table; +} + +/******************************************************************************* + * Enable the MMU assuming that the pagetables have already been created + *******************************************************************************/ +void enable_mmu() +{ + unsigned long mair, tcr, ttbr, sctlr; + unsigned long current_el = read_current_el(); + + /* Set the attributes in the right indices of the MAIR */ + mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX); + mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, + ATTR_IWBWA_OWBWA_NTR_INDEX); + write_mair(mair); + + /* + * Set TCR bits as well. 
Inner & outer WBWA & shareable + T0SZ = 32 + */ + tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA | + TCR_RGN_INNER_WBA | TCR_T0SZ_4GB; + if (GET_EL(current_el) == MODE_EL3) { + tcr |= TCR_EL3_RES1; + /* Invalidate EL3 TLBs */ + tlbialle3(); + } else { + /* Invalidate EL1 TLBs */ + tlbivmalle1(); + } + + write_tcr(tcr); + + /* Set TTBR bits as well */ + assert(((unsigned long)l1_xlation_table & (sizeof(l1_xlation_table) - 1)) == 0); + ttbr = (unsigned long) l1_xlation_table; + write_ttbr0(ttbr); + + sctlr = read_sctlr(); + sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT; + sctlr |= SCTLR_A_BIT | SCTLR_C_BIT; + write_sctlr(sctlr); + + return; +} + +void disable_mmu(void) +{ + /* Zero out the MMU related registers */ + write_mair(0); + write_tcr(0); + write_ttbr0(0); + write_sctlr(0); + + /* Flush the caches */ + dcsw_op_all(DCCISW); + + return; +} + +/******************************************************************************* + * Setup the pagetables as per the platform memory map & initialize the mmu + *******************************************************************************/ +void configure_mmu(meminfo *mem_layout, + unsigned long ro_start, + unsigned long ro_limit, + unsigned long coh_start, + unsigned long coh_limit) +{ + assert(IS_PAGE_ALIGNED(ro_start)); + assert(IS_PAGE_ALIGNED(ro_limit)); + assert(IS_PAGE_ALIGNED(coh_start)); + assert(IS_PAGE_ALIGNED(coh_limit)); + + fill_xlation_tables(mem_layout, + ro_start, + ro_limit, + coh_start, + coh_limit); + enable_mmu(); + return; +} + +/* Simple routine which returns a configuration variable value */ +unsigned long platform_get_cfgvar(unsigned int var_id) +{ + assert(var_id < CONFIG_LIMIT); + return platform_config[var_id]; +} + +/******************************************************************************* + * A single boot loader stack is expected to work on both the Foundation FVP + * models and the two flavours of the Base FVP models (AEMv8 & Cortex). 
The + * SYS_ID register provides a mechanism for detecting the differences between + * these platforms. This information is stored in a per-BL array to allow the + * code to take the correct path. Per BL platform configuration. + ******************************************************************************/ +int platform_config_setup(void) +{ + unsigned int rev, hbi, bld, arch, sys_id, midr_pn; + + sys_id = mmio_read_32(VE_SYSREGS_BASE + V2M_SYS_ID); + rev = (sys_id >> SYS_ID_REV_SHIFT) & SYS_ID_REV_MASK; + hbi = (sys_id >> SYS_ID_HBI_SHIFT) & SYS_ID_HBI_MASK; + bld = (sys_id >> SYS_ID_BLD_SHIFT) & SYS_ID_BLD_MASK; + arch = (sys_id >> SYS_ID_ARCH_SHIFT) & SYS_ID_ARCH_MASK; + + assert(rev == REV_FVP); + assert(arch == ARCH_MODEL); + + /* + * The build field in the SYS_ID tells which variant of the GIC + * memory is implemented by the model. + */ + switch (bld) { + case BLD_GIC_VE_MMAP: + platform_config[CONFIG_GICD_ADDR] = VE_GICD_BASE; + platform_config[CONFIG_GICC_ADDR] = VE_GICC_BASE; + platform_config[CONFIG_GICH_ADDR] = VE_GICH_BASE; + platform_config[CONFIG_GICV_ADDR] = VE_GICV_BASE; + break; + case BLD_GIC_A53A57_MMAP: + platform_config[CONFIG_GICD_ADDR] = BASE_GICD_BASE; + platform_config[CONFIG_GICC_ADDR] = BASE_GICC_BASE; + platform_config[CONFIG_GICH_ADDR] = BASE_GICH_BASE; + platform_config[CONFIG_GICV_ADDR] = BASE_GICV_BASE; + break; + default: + assert(0); + } + + /* + * The hbi field in the SYS_ID is 0x020 for the Base FVP & 0x010 + * for the Foundation FVP. 
+ */ + switch (hbi) { + case HBI_FOUNDATION: + platform_config[CONFIG_MAX_AFF0] = 4; + platform_config[CONFIG_MAX_AFF1] = 1; + platform_config[CONFIG_CPU_SETUP] = 0; + platform_config[CONFIG_BASE_MMAP] = 0; + platform_config[CONFIG_HAS_CCI] = 0; + break; + case HBI_FVP_BASE: + midr_pn = (read_midr() >> MIDR_PN_SHIFT) & MIDR_PN_MASK; + if ((midr_pn == MIDR_PN_A57) || (midr_pn == MIDR_PN_A53)) + platform_config[CONFIG_CPU_SETUP] = 1; + else + platform_config[CONFIG_CPU_SETUP] = 0; + + platform_config[CONFIG_MAX_AFF0] = 4; + platform_config[CONFIG_MAX_AFF1] = 2; + platform_config[CONFIG_BASE_MMAP] = 1; + platform_config[CONFIG_HAS_CCI] = 1; + break; + default: + assert(0); + } + + return 0; +} + +unsigned long plat_get_ns_image_entrypoint(void) +{ + return NS_IMAGE_OFFSET; +} diff --git a/plat/fvp/aarch64/plat_helpers.S b/plat/fvp/aarch64/plat_helpers.S new file mode 100644 index 0000000..032b393 --- /dev/null +++ b/plat/fvp/aarch64/plat_helpers.S @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include + + .globl plat_report_exception + + .section .text, "ax" + + /* --------------------------------------------- + * void plat_report_exception(unsigned int type) + * Function to report an unhandled exception + * with platform-specific means. + * On FVP platform, it updates the LEDs + * to indicate where we are + * --------------------------------------------- + */ +plat_report_exception: + mrs x1, CurrentEl + lsr x1, x1, #MODE_EL_SHIFT + lsl x1, x1, #SYS_LED_EL_SHIFT + lsl x0, x0, #SYS_LED_EC_SHIFT + mov x2, #(SECURE << SYS_LED_SS_SHIFT) + orr x0, x0, x2 + orr x0, x0, x1 + mov x1, #VE_SYSREGS_BASE + add x1, x1, #V2M_SYS_LED + str w0, [x1] + ret diff --git a/plat/fvp/fvp_gic.c b/plat/fvp/fvp_gic.c deleted file mode 100644 index 6dd13ec..0000000 --- a/plat/fvp/fvp_gic.c +++ /dev/null @@ -1,282 +0,0 @@ -/* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include -#include -#include -#include -#include - - -/******************************************************************************* - * TODO: Revisit if priorities are being set such that no non-secure interrupt - * can have a higher priority than a secure one as recommended in the GICv2 spec - ******************************************************************************/ - -/******************************************************************************* - * This function does some minimal GICv3 configuration. The Firmware itself does - * not fully support GICv3 at this time and relies on GICv2 emulation as - * provided by GICv3. This function allows software (like Linux) in later stages - * to use full GICv3 features. - ******************************************************************************/ -void gicv3_cpuif_setup(void) -{ - unsigned int scr_val, val; - uintptr_t base; - - /* - * When CPUs come out of reset they have their GICR_WAKER.ProcessorSleep - * bit set. In order to allow interrupts to get routed to the CPU we - * need to clear this bit if set and wait for GICR_WAKER.ChildrenAsleep - * to clear (GICv3 Architecture specification 5.4.23). - * GICR_WAKER is NOT banked per CPU, compute the correct base address - * per CPU. - */ - base = gicv3_get_rdist(BASE_GICR_BASE, read_mpidr()); - if (base == (uintptr_t)NULL) { - /* No re-distributor base address. This interface cannot be - * configured. - */ - panic(); - } - - val = gicr_read_waker(base); - - val &= ~WAKER_PS; - gicr_write_waker(base, val); - dsb(); - - /* We need to wait for ChildrenAsleep to clear. */ - val = gicr_read_waker(base); - while (val & WAKER_CA) { - val = gicr_read_waker(base); - } - - /* - * We need to set SCR_EL3.NS in order to see GICv3 non-secure state. - * Restore SCR_EL3.NS again before exit. 
- */ - scr_val = read_scr(); - write_scr(scr_val | SCR_NS_BIT); - - /* - * By default EL2 and NS-EL1 software should be able to enable GICv3 - * System register access without any configuration at EL3. But it turns - * out that GICC PMR as set in GICv2 mode does not affect GICv3 mode. So - * we need to set it here again. In order to do that we need to enable - * register access. We leave it enabled as it should be fine and might - * prevent problems with later software trying to access GIC System - * Registers. - */ - val = read_icc_sre_el3(); - write_icc_sre_el3(val | ICC_SRE_EN | ICC_SRE_SRE); - - val = read_icc_sre_el2(); - write_icc_sre_el2(val | ICC_SRE_EN | ICC_SRE_SRE); - - write_icc_pmr_el1(MAX_PRI_VAL); - - /* Restore SCR_EL3 */ - write_scr(scr_val); -} - -/******************************************************************************* - * This function does some minimal GICv3 configuration when cores go - * down. - ******************************************************************************/ -void gicv3_cpuif_deactivate(void) -{ - unsigned int val; - uintptr_t base; - - /* - * When taking CPUs down we need to set GICR_WAKER.ProcessorSleep and - * wait for GICR_WAKER.ChildrenAsleep to get set. - * (GICv3 Architecture specification 5.4.23). - * GICR_WAKER is NOT banked per CPU, compute the correct base address - * per CPU. - */ - base = gicv3_get_rdist(BASE_GICR_BASE, read_mpidr()); - if (base == (uintptr_t)NULL) { - /* No re-distributor base address. This interface cannot be - * configured. - */ - panic(); - } - - val = gicr_read_waker(base); - val |= WAKER_PS; - gicr_write_waker(base, val); - dsb(); - - /* We need to wait for ChildrenAsleep to set. */ - val = gicr_read_waker(base); - while ((val & WAKER_CA) == 0) { - val = gicr_read_waker(base); - } -} - - -/******************************************************************************* - * Enable secure interrupts and use FIQs to route them. 
Disable legacy bypass - * and set the priority mask register to allow all interrupts to trickle in. - ******************************************************************************/ -void gic_cpuif_setup(unsigned int gicc_base) -{ - unsigned int val; - - val = gicc_read_iidr(gicc_base); - - /* - * If GICv3 we need to do a bit of additional setup. We want to - * allow default GICv2 behaviour but allow the next stage to - * enable full gicv3 features. - */ - if (((val >> GICC_IIDR_ARCH_SHIFT) & GICC_IIDR_ARCH_MASK) >= 3) { - gicv3_cpuif_setup(); - } - - val = ENABLE_GRP0 | FIQ_EN | FIQ_BYP_DIS_GRP0; - val |= IRQ_BYP_DIS_GRP0 | FIQ_BYP_DIS_GRP1 | IRQ_BYP_DIS_GRP1; - - gicc_write_pmr(gicc_base, MAX_PRI_VAL); - gicc_write_ctlr(gicc_base, val); -} - -/******************************************************************************* - * Place the cpu interface in a state where it can never make a cpu exit wfi as - * as result of an asserted interrupt. This is critical for powering down a cpu - ******************************************************************************/ -void gic_cpuif_deactivate(unsigned int gicc_base) -{ - unsigned int val; - - /* Disable secure, non-secure interrupts and disable their bypass */ - val = gicc_read_ctlr(gicc_base); - val &= ~(ENABLE_GRP0 | ENABLE_GRP1); - val |= FIQ_BYP_DIS_GRP1 | FIQ_BYP_DIS_GRP0; - val |= IRQ_BYP_DIS_GRP0 | IRQ_BYP_DIS_GRP1; - gicc_write_ctlr(gicc_base, val); - - val = gicc_read_iidr(gicc_base); - - /* - * If GICv3 we need to do a bit of additional setup. Make sure the - * RDIST is put to sleep. - */ - if (((val >> GICC_IIDR_ARCH_SHIFT) & GICC_IIDR_ARCH_MASK) >= 3) { - gicv3_cpuif_deactivate(); - } -} - -/******************************************************************************* - * Per cpu gic distributor setup which will be done by all cpus after a cold - * boot/hotplug. This marks out the secure interrupts & enables them. 
- ******************************************************************************/ -void gic_pcpu_distif_setup(unsigned int gicd_base) -{ - gicd_write_igroupr(gicd_base, 0, ~0); - - gicd_clr_igroupr(gicd_base, IRQ_SEC_PHY_TIMER); - gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_0); - gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_1); - gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_2); - gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_3); - gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_4); - gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_5); - gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_6); - gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_7); - - gicd_set_ipriorityr(gicd_base, IRQ_SEC_PHY_TIMER, MAX_PRI_VAL); - gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_0, MAX_PRI_VAL); - gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_1, MAX_PRI_VAL); - gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_2, MAX_PRI_VAL); - gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_3, MAX_PRI_VAL); - gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_4, MAX_PRI_VAL); - gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_5, MAX_PRI_VAL); - gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_6, MAX_PRI_VAL); - gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_7, MAX_PRI_VAL); - - gicd_set_isenabler(gicd_base, IRQ_SEC_PHY_TIMER); - gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_0); - gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_1); - gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_2); - gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_3); - gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_4); - gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_5); - gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_6); - gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_7); -} - -/******************************************************************************* - * Global gic distributor setup which will be done by the primary cpu after a - * cold boot. It marks out the secure SPIs, PPIs & SGIs and enables them. It - * then enables the secure GIC distributor interface. 
- ******************************************************************************/ -void gic_distif_setup(unsigned int gicd_base) -{ - unsigned int ctr, num_ints, ctlr; - - /* Disable the distributor before going further */ - ctlr = gicd_read_ctlr(gicd_base); - ctlr &= ~(ENABLE_GRP0 | ENABLE_GRP1); - gicd_write_ctlr(gicd_base, ctlr); - - /* - * Mark out non-secure interrupts. Calculate number of - * IGROUPR registers to consider. Will be equal to the - * number of IT_LINES - */ - num_ints = gicd_read_typer(gicd_base) & IT_LINES_NO_MASK; - num_ints++; - for (ctr = 0; ctr < num_ints; ctr++) - gicd_write_igroupr(gicd_base, ctr << IGROUPR_SHIFT, ~0); - - /* Configure secure interrupts now */ - gicd_clr_igroupr(gicd_base, IRQ_TZ_WDOG); - gicd_set_ipriorityr(gicd_base, IRQ_TZ_WDOG, MAX_PRI_VAL); - gicd_set_itargetsr(gicd_base, IRQ_TZ_WDOG, - platform_get_core_pos(read_mpidr())); - gicd_set_isenabler(gicd_base, IRQ_TZ_WDOG); - gic_pcpu_distif_setup(gicd_base); - - gicd_write_ctlr(gicd_base, ctlr | ENABLE_GRP0); -} - -void gic_setup(void) -{ - unsigned int gicd_base, gicc_base; - - gicd_base = platform_get_cfgvar(CONFIG_GICD_ADDR); - gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR); - - gic_cpuif_setup(gicc_base); - gic_distif_setup(gicd_base); -} diff --git a/plat/fvp/fvp_pm.c b/plat/fvp/fvp_pm.c deleted file mode 100644 index b9948ee..0000000 --- a/plat/fvp/fvp_pm.c +++ /dev/null @@ -1,376 +0,0 @@ -/* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. 
- * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -/* Only included for error codes */ -#include - -/******************************************************************************* - * FVP handler called when an affinity instance is about to be turned on. The - * level and mpidr determine the affinity instance. - ******************************************************************************/ -int fvp_affinst_on(unsigned long mpidr, - unsigned long sec_entrypoint, - unsigned long ns_entrypoint, - unsigned int afflvl, - unsigned int state) -{ - int rc = PSCI_E_SUCCESS; - unsigned long linear_id; - mailbox *fvp_mboxes; - unsigned int psysr; - - /* - * It's possible to turn on only affinity level 0 i.e. 
a cpu - * on the FVP. Ignore any other affinity level. - */ - if (afflvl != MPIDR_AFFLVL0) - goto exit; - - /* - * Ensure that we do not cancel an inflight power off request - * for the target cpu. That would leave it in a zombie wfi. - * Wait for it to power off, program the jump address for the - * target cpu and then program the power controller to turn - * that cpu on - */ - do { - psysr = fvp_pwrc_read_psysr(mpidr); - } while (psysr & PSYSR_AFF_L0); - - linear_id = platform_get_core_pos(mpidr); - fvp_mboxes = (mailbox *) (TZDRAM_BASE + MBOX_OFF); - fvp_mboxes[linear_id].value = sec_entrypoint; - flush_dcache_range((unsigned long) &fvp_mboxes[linear_id], - sizeof(unsigned long)); - - fvp_pwrc_write_pponr(mpidr); - -exit: - return rc; -} - -/******************************************************************************* - * FVP handler called when an affinity instance is about to be turned off. The - * level and mpidr determine the affinity instance. The 'state' arg. allows the - * platform to decide whether the cluster is being turned off and take apt - * actions. - * - * CAUTION: This function is called with coherent stacks so that caches can be - * turned off, flushed and coherency disabled. There is no guarantee that caches - * will remain turned on across calls to this function as each affinity level is - * dealt with. So do not write & read global variables across calls. It will be - * wise to do flush a write to the global to prevent unpredictable results. 
- ******************************************************************************/ -int fvp_affinst_off(unsigned long mpidr, - unsigned int afflvl, - unsigned int state) -{ - int rc = PSCI_E_SUCCESS; - unsigned int gicc_base, ectlr; - unsigned long cpu_setup, cci_setup; - - switch (afflvl) { - case MPIDR_AFFLVL1: - if (state == PSCI_STATE_OFF) { - /* - * Disable coherency if this cluster is to be - * turned off - */ - cci_setup = platform_get_cfgvar(CONFIG_HAS_CCI); - if (cci_setup) { - cci_disable_coherency(mpidr); - } - - /* - * Program the power controller to turn the - * cluster off - */ - fvp_pwrc_write_pcoffr(mpidr); - - } - break; - - case MPIDR_AFFLVL0: - if (state == PSCI_STATE_OFF) { - - /* - * Take this cpu out of intra-cluster coherency if - * the FVP flavour supports the SMP bit. - */ - cpu_setup = platform_get_cfgvar(CONFIG_CPU_SETUP); - if (cpu_setup) { - ectlr = read_cpuectlr(); - ectlr &= ~CPUECTLR_SMP_BIT; - write_cpuectlr(ectlr); - } - - /* - * Prevent interrupts from spuriously waking up - * this cpu - */ - gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR); - gic_cpuif_deactivate(gicc_base); - - /* - * Program the power controller to power this - * cpu off - */ - fvp_pwrc_write_ppoffr(mpidr); - } - break; - - default: - assert(0); - } - - return rc; -} - -/******************************************************************************* - * FVP handler called when an affinity instance is about to be suspended. The - * level and mpidr determine the affinity instance. The 'state' arg. allows the - * platform to decide whether the cluster is being turned off and take apt - * actions. - * - * CAUTION: This function is called with coherent stacks so that caches can be - * turned off, flushed and coherency disabled. There is no guarantee that caches - * will remain turned on across calls to this function as each affinity level is - * dealt with. So do not write & read global variables across calls. 
It will be - * wise to do flush a write to the global to prevent unpredictable results. - ******************************************************************************/ -int fvp_affinst_suspend(unsigned long mpidr, - unsigned long sec_entrypoint, - unsigned long ns_entrypoint, - unsigned int afflvl, - unsigned int state) -{ - int rc = PSCI_E_SUCCESS; - unsigned int gicc_base, ectlr; - unsigned long cpu_setup, cci_setup, linear_id; - mailbox *fvp_mboxes; - - switch (afflvl) { - case MPIDR_AFFLVL1: - if (state == PSCI_STATE_OFF) { - /* - * Disable coherency if this cluster is to be - * turned off - */ - cci_setup = platform_get_cfgvar(CONFIG_HAS_CCI); - if (cci_setup) { - cci_disable_coherency(mpidr); - } - - /* - * Program the power controller to turn the - * cluster off - */ - fvp_pwrc_write_pcoffr(mpidr); - - } - break; - - case MPIDR_AFFLVL0: - if (state == PSCI_STATE_OFF) { - /* - * Take this cpu out of intra-cluster coherency if - * the FVP flavour supports the SMP bit. - */ - cpu_setup = platform_get_cfgvar(CONFIG_CPU_SETUP); - if (cpu_setup) { - ectlr = read_cpuectlr(); - ectlr &= ~CPUECTLR_SMP_BIT; - write_cpuectlr(ectlr); - } - - /* Program the jump address for the target cpu */ - linear_id = platform_get_core_pos(mpidr); - fvp_mboxes = (mailbox *) (TZDRAM_BASE + MBOX_OFF); - fvp_mboxes[linear_id].value = sec_entrypoint; - flush_dcache_range((unsigned long) &fvp_mboxes[linear_id], - sizeof(unsigned long)); - - /* - * Prevent interrupts from spuriously waking up - * this cpu - */ - gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR); - gic_cpuif_deactivate(gicc_base); - - /* - * Program the power controller to power this - * cpu off and enable wakeup interrupts. 
- */ - fvp_pwrc_set_wen(mpidr); - fvp_pwrc_write_ppoffr(mpidr); - } - break; - - default: - assert(0); - } - - return rc; -} - -/******************************************************************************* - * FVP handler called when an affinity instance has just been powered on after - * being turned off earlier. The level and mpidr determine the affinity - * instance. The 'state' arg. allows the platform to decide whether the cluster - * was turned off prior to wakeup and do what's necessary to setup it up - * correctly. - ******************************************************************************/ -int fvp_affinst_on_finish(unsigned long mpidr, - unsigned int afflvl, - unsigned int state) -{ - int rc = PSCI_E_SUCCESS; - unsigned long linear_id, cpu_setup, cci_setup; - mailbox *fvp_mboxes; - unsigned int gicd_base, gicc_base, reg_val, ectlr; - - switch (afflvl) { - - case MPIDR_AFFLVL1: - /* Enable coherency if this cluster was off */ - if (state == PSCI_STATE_OFF) { - cci_setup = platform_get_cfgvar(CONFIG_HAS_CCI); - if (cci_setup) { - cci_enable_coherency(mpidr); - } - } - break; - - case MPIDR_AFFLVL0: - /* - * Ignore the state passed for a cpu. It could only have - * been off if we are here. - */ - - /* - * Turn on intra-cluster coherency if the FVP flavour supports - * it. 
- */ - cpu_setup = platform_get_cfgvar(CONFIG_CPU_SETUP); - if (cpu_setup) { - ectlr = read_cpuectlr(); - ectlr |= CPUECTLR_SMP_BIT; - write_cpuectlr(ectlr); - } - - /* - * Clear PWKUPR.WEN bit to ensure interrupts do not interfere - * with a cpu power down unless the bit is set again - */ - fvp_pwrc_clr_wen(mpidr); - - /* Zero the jump address in the mailbox for this cpu */ - fvp_mboxes = (mailbox *) (TZDRAM_BASE + MBOX_OFF); - linear_id = platform_get_core_pos(mpidr); - fvp_mboxes[linear_id].value = 0; - flush_dcache_range((unsigned long) &fvp_mboxes[linear_id], - sizeof(unsigned long)); - - gicd_base = platform_get_cfgvar(CONFIG_GICD_ADDR); - gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR); - - /* Enable the gic cpu interface */ - gic_cpuif_setup(gicc_base); - - /* TODO: This setup is needed only after a cold boot */ - gic_pcpu_distif_setup(gicd_base); - - /* Allow access to the System counter timer module */ - reg_val = (1 << CNTACR_RPCT_SHIFT) | (1 << CNTACR_RVCT_SHIFT); - reg_val |= (1 << CNTACR_RFRQ_SHIFT) | (1 << CNTACR_RVOFF_SHIFT); - reg_val |= (1 << CNTACR_RWVT_SHIFT) | (1 << CNTACR_RWPT_SHIFT); - mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(0), reg_val); - mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(1), reg_val); - - reg_val = (1 << CNTNSAR_NS_SHIFT(0)) | - (1 << CNTNSAR_NS_SHIFT(1)); - mmio_write_32(SYS_TIMCTL_BASE + CNTNSAR, reg_val); - - break; - - default: - assert(0); - } - - return rc; -} - -/******************************************************************************* - * FVP handler called when an affinity instance has just been powered on after - * having been suspended earlier. The level and mpidr determine the affinity - * instance. - * TODO: At the moment we reuse the on finisher and reinitialize the secure - * context. Need to implement a separate suspend finisher. 
- ******************************************************************************/ -int fvp_affinst_suspend_finish(unsigned long mpidr, - unsigned int afflvl, - unsigned int state) -{ - return fvp_affinst_on_finish(mpidr, afflvl, state); -} - - -/******************************************************************************* - * Export the platform handlers to enable psci to invoke them - ******************************************************************************/ -static plat_pm_ops fvp_plat_pm_ops = { - 0, - fvp_affinst_on, - fvp_affinst_off, - fvp_affinst_suspend, - fvp_affinst_on_finish, - fvp_affinst_suspend_finish, -}; - -/******************************************************************************* - * Export the platform specific power ops & initialize the fvp power controller - ******************************************************************************/ -int platform_setup_pm(plat_pm_ops **plat_ops) -{ - *plat_ops = &fvp_plat_pm_ops; - return 0; -} diff --git a/plat/fvp/fvp_topology.c b/plat/fvp/fvp_topology.c deleted file mode 100644 index 0c8c525..0000000 --- a/plat/fvp/fvp_topology.c +++ /dev/null @@ -1,241 +0,0 @@ -/* - * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * - * Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * Neither the name of ARM nor the names of its contributors may be used - * to endorse or promote products derived from this software without specific - * prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -/* TODO: Reusing psci error codes & state information. Get our own! */ -#include - -/* We treat '255' as an invalid affinity instance */ -#define AFFINST_INVAL 0xff - -/******************************************************************************* - * We support 3 flavours of the FVP: Foundation, Base AEM & Base Cortex. Each - * flavour has a different topology. The common bit is that there can be a max. - * of 2 clusters (affinity 1) and 4 cpus (affinity 0) per cluster. So we define - * a tree like data structure which caters to these maximum bounds. It simply - * marks the absent affinity level instances as PSCI_AFF_ABSENT e.g. there is no - * cluster 1 on the Foundation FVP. The 'data' field is currently unused. - ******************************************************************************/ -typedef struct { - unsigned char sibling; - unsigned char child; - unsigned char state; - unsigned int data; -} affinity_info; - -/******************************************************************************* - * The following two data structures store the topology tree for the fvp. 
There - * is a separate array for each affinity level i.e. cpus and clusters. The child - * and sibling references allow traversal inside and in between the two arrays. - ******************************************************************************/ -static affinity_info fvp_aff1_topology_map[PLATFORM_CLUSTER_COUNT]; -static affinity_info fvp_aff0_topology_map[PLATFORM_CORE_COUNT]; - -/* Simple global variable to safeguard us from stupidity */ -static unsigned int topology_setup_done; - -/******************************************************************************* - * This function implements a part of the critical interface between the psci - * generic layer and the platform to allow the former to detect the platform - * topology. psci queries the platform to determine how many affinity instances - * are present at a particular level for a given mpidr e.g. consider a dual - * cluster platform where each cluster has 4 cpus. A call to this function with - * (0, 0x100) will return the number of cpus implemented under cluster 1 i.e. 4. - * Similarly a call with (1, 0x100) will return 2 i.e. the number of clusters. - * This is 'cause we are effectively asking how many affinity level 1 instances - * are implemented under affinity level 2 instance 0. - ******************************************************************************/ -unsigned int plat_get_aff_count(unsigned int aff_lvl, - unsigned long mpidr) -{ - unsigned int aff_count = 1, ctr; - unsigned char parent_aff_id; - - assert(topology_setup_done == 1); - - switch (aff_lvl) { - case 3: - case 2: - /* - * Assert if the parent affinity instance is not 0. - * This also takes care of level 3 in an obfuscated way - */ - parent_aff_id = (mpidr >> MPIDR_AFF3_SHIFT) & MPIDR_AFFLVL_MASK; - assert(parent_aff_id == 0); - - /* - * Report that we implement a single instance of - * affinity levels 2 & 3 which are AFF_ABSENT - */ - break; - case 1: - /* Assert if the parent affinity instance is not 0. 
*/ - parent_aff_id = (mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK; - assert(parent_aff_id == 0); - - /* Fetch the starting index in the aff1 array */ - for (ctr = 0; - fvp_aff1_topology_map[ctr].sibling != AFFINST_INVAL; - ctr = fvp_aff1_topology_map[ctr].sibling) { - aff_count++; - } - - break; - case 0: - /* Assert if the cluster id is anything apart from 0 or 1 */ - parent_aff_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK; - assert(parent_aff_id < PLATFORM_CLUSTER_COUNT); - - /* Fetch the starting index in the aff0 array */ - for (ctr = fvp_aff1_topology_map[parent_aff_id].child; - fvp_aff0_topology_map[ctr].sibling != AFFINST_INVAL; - ctr = fvp_aff0_topology_map[ctr].sibling) { - aff_count++; - } - - break; - default: - assert(0); - } - - return aff_count; -} - -/******************************************************************************* - * This function implements a part of the critical interface between the psci - * generic layer and the platform to allow the former to detect the state of a - * affinity instance in the platform topology. psci queries the platform to - * determine whether an affinity instance is present or absent. This caters for - * topologies where an intermediate affinity level instance is missing e.g. - * consider a platform which implements a single cluster with 4 cpus and there - * is another cpu sitting directly on the interconnect along with the cluster. - * The mpidrs of the cluster would range from 0x0-0x3. The mpidr of the single - * cpu would be 0x100 to highlight that it does not belong to cluster 0. Cluster - * 1 is however missing but needs to be accounted to reach this single cpu in - * the topology tree. Hence it will be marked as PSCI_AFF_ABSENT. This is not - * applicable to the FVP but depicted as an example. 
- ******************************************************************************/ -unsigned int plat_get_aff_state(unsigned int aff_lvl, - unsigned long mpidr) -{ - unsigned int aff_state = PSCI_AFF_ABSENT, idx; - idx = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK; - - assert(topology_setup_done == 1); - - switch (aff_lvl) { - case 3: - case 2: - /* Report affinity levels 2 & 3 as absent */ - break; - case 1: - aff_state = fvp_aff1_topology_map[idx].state; - break; - case 0: - /* - * First get start index of the aff0 in its array & then add - * to it the affinity id that we want the state of - */ - idx = fvp_aff1_topology_map[idx].child; - idx += (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK; - aff_state = fvp_aff0_topology_map[idx].state; - break; - default: - assert(0); - } - - return aff_state; -} - -/******************************************************************************* - * Handy optimization to prevent the psci implementation from traversing through - * affinity levels which are not present while detecting the platform topology. - ******************************************************************************/ -int plat_get_max_afflvl() -{ - return MPIDR_AFFLVL1; -} - -/******************************************************************************* - * This function populates the FVP specific topology information depending upon - * the FVP flavour its running on. We construct all the mpidrs we can handle - * and rely on the PWRC.PSYSR to flag absent cpus when their status is queried. 
- ******************************************************************************/ -int plat_setup_topology() -{ - unsigned char aff0, aff1, aff_state, aff0_offset = 0; - unsigned long mpidr; - - topology_setup_done = 0; - - for (aff1 = 0; aff1 < PLATFORM_CLUSTER_COUNT; aff1++) { - - fvp_aff1_topology_map[aff1].child = aff0_offset; - fvp_aff1_topology_map[aff1].sibling = aff1 + 1; - - for (aff0 = 0; aff0 < PLATFORM_MAX_CPUS_PER_CLUSTER; aff0++) { - - mpidr = aff1 << MPIDR_AFF1_SHIFT; - mpidr |= aff0 << MPIDR_AFF0_SHIFT; - - if (fvp_pwrc_read_psysr(mpidr) != PSYSR_INVALID) { - /* - * Presence of even a single aff0 indicates - * presence of parent aff1 on the FVP. - */ - aff_state = PSCI_AFF_PRESENT; - fvp_aff1_topology_map[aff1].state = - PSCI_AFF_PRESENT; - } else { - aff_state = PSCI_AFF_ABSENT; - } - - fvp_aff0_topology_map[aff0_offset].child = AFFINST_INVAL; - fvp_aff0_topology_map[aff0_offset].state = aff_state; - fvp_aff0_topology_map[aff0_offset].sibling = - aff0_offset + 1; - - /* Increment the absolute number of aff0s traversed */ - aff0_offset++; - } - - /* Tie-off the last aff0 sibling to -1 to avoid overflow */ - fvp_aff0_topology_map[aff0_offset - 1].sibling = AFFINST_INVAL; - } - - /* Tie-off the last aff1 sibling to AFFINST_INVAL to avoid overflow */ - fvp_aff1_topology_map[aff1 - 1].sibling = AFFINST_INVAL; - - topology_setup_done = 1; - return 0; -} diff --git a/plat/fvp/plat_gic.c b/plat/fvp/plat_gic.c new file mode 100644 index 0000000..6dd13ec --- /dev/null +++ b/plat/fvp/plat_gic.c @@ -0,0 +1,282 @@ +/* + * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. 
+ * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include + + +/******************************************************************************* + * TODO: Revisit if priorities are being set such that no non-secure interrupt + * can have a higher priority than a secure one as recommended in the GICv2 spec + ******************************************************************************/ + +/******************************************************************************* + * This function does some minimal GICv3 configuration. The Firmware itself does + * not fully support GICv3 at this time and relies on GICv2 emulation as + * provided by GICv3. This function allows software (like Linux) in later stages + * to use full GICv3 features. 
+ ******************************************************************************/ +void gicv3_cpuif_setup(void) +{ + unsigned int scr_val, val; + uintptr_t base; + + /* + * When CPUs come out of reset they have their GICR_WAKER.ProcessorSleep + * bit set. In order to allow interrupts to get routed to the CPU we + * need to clear this bit if set and wait for GICR_WAKER.ChildrenAsleep + * to clear (GICv3 Architecture specification 5.4.23). + * GICR_WAKER is NOT banked per CPU, compute the correct base address + * per CPU. + */ + base = gicv3_get_rdist(BASE_GICR_BASE, read_mpidr()); + if (base == (uintptr_t)NULL) { + /* No re-distributor base address. This interface cannot be + * configured. + */ + panic(); + } + + val = gicr_read_waker(base); + + val &= ~WAKER_PS; + gicr_write_waker(base, val); + dsb(); + + /* We need to wait for ChildrenAsleep to clear. */ + val = gicr_read_waker(base); + while (val & WAKER_CA) { + val = gicr_read_waker(base); + } + + /* + * We need to set SCR_EL3.NS in order to see GICv3 non-secure state. + * Restore SCR_EL3.NS again before exit. + */ + scr_val = read_scr(); + write_scr(scr_val | SCR_NS_BIT); + + /* + * By default EL2 and NS-EL1 software should be able to enable GICv3 + * System register access without any configuration at EL3. But it turns + * out that GICC PMR as set in GICv2 mode does not affect GICv3 mode. So + * we need to set it here again. In order to do that we need to enable + * register access. We leave it enabled as it should be fine and might + * prevent problems with later software trying to access GIC System + * Registers. 
+ */ + val = read_icc_sre_el3(); + write_icc_sre_el3(val | ICC_SRE_EN | ICC_SRE_SRE); + + val = read_icc_sre_el2(); + write_icc_sre_el2(val | ICC_SRE_EN | ICC_SRE_SRE); + + write_icc_pmr_el1(MAX_PRI_VAL); + + /* Restore SCR_EL3 */ + write_scr(scr_val); +} + +/******************************************************************************* + * This function does some minimal GICv3 configuration when cores go + * down. + ******************************************************************************/ +void gicv3_cpuif_deactivate(void) +{ + unsigned int val; + uintptr_t base; + + /* + * When taking CPUs down we need to set GICR_WAKER.ProcessorSleep and + * wait for GICR_WAKER.ChildrenAsleep to get set. + * (GICv3 Architecture specification 5.4.23). + * GICR_WAKER is NOT banked per CPU, compute the correct base address + * per CPU. + */ + base = gicv3_get_rdist(BASE_GICR_BASE, read_mpidr()); + if (base == (uintptr_t)NULL) { + /* No re-distributor base address. This interface cannot be + * configured. + */ + panic(); + } + + val = gicr_read_waker(base); + val |= WAKER_PS; + gicr_write_waker(base, val); + dsb(); + + /* We need to wait for ChildrenAsleep to set. */ + val = gicr_read_waker(base); + while ((val & WAKER_CA) == 0) { + val = gicr_read_waker(base); + } +} + + +/******************************************************************************* + * Enable secure interrupts and use FIQs to route them. Disable legacy bypass + * and set the priority mask register to allow all interrupts to trickle in. + ******************************************************************************/ +void gic_cpuif_setup(unsigned int gicc_base) +{ + unsigned int val; + + val = gicc_read_iidr(gicc_base); + + /* + * If GICv3 we need to do a bit of additional setup. We want to + * allow default GICv2 behaviour but allow the next stage to + * enable full gicv3 features. 
+ */ + if (((val >> GICC_IIDR_ARCH_SHIFT) & GICC_IIDR_ARCH_MASK) >= 3) { + gicv3_cpuif_setup(); + } + + val = ENABLE_GRP0 | FIQ_EN | FIQ_BYP_DIS_GRP0; + val |= IRQ_BYP_DIS_GRP0 | FIQ_BYP_DIS_GRP1 | IRQ_BYP_DIS_GRP1; + + gicc_write_pmr(gicc_base, MAX_PRI_VAL); + gicc_write_ctlr(gicc_base, val); +} + +/******************************************************************************* + * Place the cpu interface in a state where it can never make a cpu exit wfi as + * as result of an asserted interrupt. This is critical for powering down a cpu + ******************************************************************************/ +void gic_cpuif_deactivate(unsigned int gicc_base) +{ + unsigned int val; + + /* Disable secure, non-secure interrupts and disable their bypass */ + val = gicc_read_ctlr(gicc_base); + val &= ~(ENABLE_GRP0 | ENABLE_GRP1); + val |= FIQ_BYP_DIS_GRP1 | FIQ_BYP_DIS_GRP0; + val |= IRQ_BYP_DIS_GRP0 | IRQ_BYP_DIS_GRP1; + gicc_write_ctlr(gicc_base, val); + + val = gicc_read_iidr(gicc_base); + + /* + * If GICv3 we need to do a bit of additional setup. Make sure the + * RDIST is put to sleep. + */ + if (((val >> GICC_IIDR_ARCH_SHIFT) & GICC_IIDR_ARCH_MASK) >= 3) { + gicv3_cpuif_deactivate(); + } +} + +/******************************************************************************* + * Per cpu gic distributor setup which will be done by all cpus after a cold + * boot/hotplug. This marks out the secure interrupts & enables them. 
+ ******************************************************************************/ +void gic_pcpu_distif_setup(unsigned int gicd_base) +{ + gicd_write_igroupr(gicd_base, 0, ~0); + + gicd_clr_igroupr(gicd_base, IRQ_SEC_PHY_TIMER); + gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_0); + gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_1); + gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_2); + gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_3); + gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_4); + gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_5); + gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_6); + gicd_clr_igroupr(gicd_base, IRQ_SEC_SGI_7); + + gicd_set_ipriorityr(gicd_base, IRQ_SEC_PHY_TIMER, MAX_PRI_VAL); + gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_0, MAX_PRI_VAL); + gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_1, MAX_PRI_VAL); + gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_2, MAX_PRI_VAL); + gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_3, MAX_PRI_VAL); + gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_4, MAX_PRI_VAL); + gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_5, MAX_PRI_VAL); + gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_6, MAX_PRI_VAL); + gicd_set_ipriorityr(gicd_base, IRQ_SEC_SGI_7, MAX_PRI_VAL); + + gicd_set_isenabler(gicd_base, IRQ_SEC_PHY_TIMER); + gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_0); + gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_1); + gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_2); + gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_3); + gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_4); + gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_5); + gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_6); + gicd_set_isenabler(gicd_base, IRQ_SEC_SGI_7); +} + +/******************************************************************************* + * Global gic distributor setup which will be done by the primary cpu after a + * cold boot. It marks out the secure SPIs, PPIs & SGIs and enables them. It + * then enables the secure GIC distributor interface. 
+ ******************************************************************************/ +void gic_distif_setup(unsigned int gicd_base) +{ + unsigned int ctr, num_ints, ctlr; + + /* Disable the distributor before going further */ + ctlr = gicd_read_ctlr(gicd_base); + ctlr &= ~(ENABLE_GRP0 | ENABLE_GRP1); + gicd_write_ctlr(gicd_base, ctlr); + + /* + * Mark out non-secure interrupts. Calculate number of + * IGROUPR registers to consider. Will be equal to the + * number of IT_LINES + */ + num_ints = gicd_read_typer(gicd_base) & IT_LINES_NO_MASK; + num_ints++; + for (ctr = 0; ctr < num_ints; ctr++) + gicd_write_igroupr(gicd_base, ctr << IGROUPR_SHIFT, ~0); + + /* Configure secure interrupts now */ + gicd_clr_igroupr(gicd_base, IRQ_TZ_WDOG); + gicd_set_ipriorityr(gicd_base, IRQ_TZ_WDOG, MAX_PRI_VAL); + gicd_set_itargetsr(gicd_base, IRQ_TZ_WDOG, + platform_get_core_pos(read_mpidr())); + gicd_set_isenabler(gicd_base, IRQ_TZ_WDOG); + gic_pcpu_distif_setup(gicd_base); + + gicd_write_ctlr(gicd_base, ctlr | ENABLE_GRP0); +} + +void gic_setup(void) +{ + unsigned int gicd_base, gicc_base; + + gicd_base = platform_get_cfgvar(CONFIG_GICD_ADDR); + gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR); + + gic_cpuif_setup(gicc_base); + gic_distif_setup(gicd_base); +} diff --git a/plat/fvp/plat_pm.c b/plat/fvp/plat_pm.c new file mode 100644 index 0000000..b9948ee --- /dev/null +++ b/plat/fvp/plat_pm.c @@ -0,0 +1,376 @@ +/* + * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. 
+ * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +/* Only included for error codes */ +#include + +/******************************************************************************* + * FVP handler called when an affinity instance is about to be turned on. The + * level and mpidr determine the affinity instance. + ******************************************************************************/ +int fvp_affinst_on(unsigned long mpidr, + unsigned long sec_entrypoint, + unsigned long ns_entrypoint, + unsigned int afflvl, + unsigned int state) +{ + int rc = PSCI_E_SUCCESS; + unsigned long linear_id; + mailbox *fvp_mboxes; + unsigned int psysr; + + /* + * It's possible to turn on only affinity level 0 i.e. 
a cpu + * on the FVP. Ignore any other affinity level. + */ + if (afflvl != MPIDR_AFFLVL0) + goto exit; + + /* + * Ensure that we do not cancel an inflight power off request + * for the target cpu. That would leave it in a zombie wfi. + * Wait for it to power off, program the jump address for the + * target cpu and then program the power controller to turn + * that cpu on + */ + do { + psysr = fvp_pwrc_read_psysr(mpidr); + } while (psysr & PSYSR_AFF_L0); + + linear_id = platform_get_core_pos(mpidr); + fvp_mboxes = (mailbox *) (TZDRAM_BASE + MBOX_OFF); + fvp_mboxes[linear_id].value = sec_entrypoint; + flush_dcache_range((unsigned long) &fvp_mboxes[linear_id], + sizeof(unsigned long)); + + fvp_pwrc_write_pponr(mpidr); + +exit: + return rc; +} + +/******************************************************************************* + * FVP handler called when an affinity instance is about to be turned off. The + * level and mpidr determine the affinity instance. The 'state' arg. allows the + * platform to decide whether the cluster is being turned off and take apt + * actions. + * + * CAUTION: This function is called with coherent stacks so that caches can be + * turned off, flushed and coherency disabled. There is no guarantee that caches + * will remain turned on across calls to this function as each affinity level is + * dealt with. So do not write & read global variables across calls. It will be + * wise to do flush a write to the global to prevent unpredictable results. 
+ ******************************************************************************/ +int fvp_affinst_off(unsigned long mpidr, + unsigned int afflvl, + unsigned int state) +{ + int rc = PSCI_E_SUCCESS; + unsigned int gicc_base, ectlr; + unsigned long cpu_setup, cci_setup; + + switch (afflvl) { + case MPIDR_AFFLVL1: + if (state == PSCI_STATE_OFF) { + /* + * Disable coherency if this cluster is to be + * turned off + */ + cci_setup = platform_get_cfgvar(CONFIG_HAS_CCI); + if (cci_setup) { + cci_disable_coherency(mpidr); + } + + /* + * Program the power controller to turn the + * cluster off + */ + fvp_pwrc_write_pcoffr(mpidr); + + } + break; + + case MPIDR_AFFLVL0: + if (state == PSCI_STATE_OFF) { + + /* + * Take this cpu out of intra-cluster coherency if + * the FVP flavour supports the SMP bit. + */ + cpu_setup = platform_get_cfgvar(CONFIG_CPU_SETUP); + if (cpu_setup) { + ectlr = read_cpuectlr(); + ectlr &= ~CPUECTLR_SMP_BIT; + write_cpuectlr(ectlr); + } + + /* + * Prevent interrupts from spuriously waking up + * this cpu + */ + gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR); + gic_cpuif_deactivate(gicc_base); + + /* + * Program the power controller to power this + * cpu off + */ + fvp_pwrc_write_ppoffr(mpidr); + } + break; + + default: + assert(0); + } + + return rc; +} + +/******************************************************************************* + * FVP handler called when an affinity instance is about to be suspended. The + * level and mpidr determine the affinity instance. The 'state' arg. allows the + * platform to decide whether the cluster is being turned off and take apt + * actions. + * + * CAUTION: This function is called with coherent stacks so that caches can be + * turned off, flushed and coherency disabled. There is no guarantee that caches + * will remain turned on across calls to this function as each affinity level is + * dealt with. So do not write & read global variables across calls. 
It will be + * wise to do flush a write to the global to prevent unpredictable results. + ******************************************************************************/ +int fvp_affinst_suspend(unsigned long mpidr, + unsigned long sec_entrypoint, + unsigned long ns_entrypoint, + unsigned int afflvl, + unsigned int state) +{ + int rc = PSCI_E_SUCCESS; + unsigned int gicc_base, ectlr; + unsigned long cpu_setup, cci_setup, linear_id; + mailbox *fvp_mboxes; + + switch (afflvl) { + case MPIDR_AFFLVL1: + if (state == PSCI_STATE_OFF) { + /* + * Disable coherency if this cluster is to be + * turned off + */ + cci_setup = platform_get_cfgvar(CONFIG_HAS_CCI); + if (cci_setup) { + cci_disable_coherency(mpidr); + } + + /* + * Program the power controller to turn the + * cluster off + */ + fvp_pwrc_write_pcoffr(mpidr); + + } + break; + + case MPIDR_AFFLVL0: + if (state == PSCI_STATE_OFF) { + /* + * Take this cpu out of intra-cluster coherency if + * the FVP flavour supports the SMP bit. + */ + cpu_setup = platform_get_cfgvar(CONFIG_CPU_SETUP); + if (cpu_setup) { + ectlr = read_cpuectlr(); + ectlr &= ~CPUECTLR_SMP_BIT; + write_cpuectlr(ectlr); + } + + /* Program the jump address for the target cpu */ + linear_id = platform_get_core_pos(mpidr); + fvp_mboxes = (mailbox *) (TZDRAM_BASE + MBOX_OFF); + fvp_mboxes[linear_id].value = sec_entrypoint; + flush_dcache_range((unsigned long) &fvp_mboxes[linear_id], + sizeof(unsigned long)); + + /* + * Prevent interrupts from spuriously waking up + * this cpu + */ + gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR); + gic_cpuif_deactivate(gicc_base); + + /* + * Program the power controller to power this + * cpu off and enable wakeup interrupts. 
+ */ + fvp_pwrc_set_wen(mpidr); + fvp_pwrc_write_ppoffr(mpidr); + } + break; + + default: + assert(0); + } + + return rc; +} + +/******************************************************************************* + * FVP handler called when an affinity instance has just been powered on after + * being turned off earlier. The level and mpidr determine the affinity + * instance. The 'state' arg. allows the platform to decide whether the cluster + * was turned off prior to wakeup and do what's necessary to setup it up + * correctly. + ******************************************************************************/ +int fvp_affinst_on_finish(unsigned long mpidr, + unsigned int afflvl, + unsigned int state) +{ + int rc = PSCI_E_SUCCESS; + unsigned long linear_id, cpu_setup, cci_setup; + mailbox *fvp_mboxes; + unsigned int gicd_base, gicc_base, reg_val, ectlr; + + switch (afflvl) { + + case MPIDR_AFFLVL1: + /* Enable coherency if this cluster was off */ + if (state == PSCI_STATE_OFF) { + cci_setup = platform_get_cfgvar(CONFIG_HAS_CCI); + if (cci_setup) { + cci_enable_coherency(mpidr); + } + } + break; + + case MPIDR_AFFLVL0: + /* + * Ignore the state passed for a cpu. It could only have + * been off if we are here. + */ + + /* + * Turn on intra-cluster coherency if the FVP flavour supports + * it. 
+ */ + cpu_setup = platform_get_cfgvar(CONFIG_CPU_SETUP); + if (cpu_setup) { + ectlr = read_cpuectlr(); + ectlr |= CPUECTLR_SMP_BIT; + write_cpuectlr(ectlr); + } + + /* + * Clear PWKUPR.WEN bit to ensure interrupts do not interfere + * with a cpu power down unless the bit is set again + */ + fvp_pwrc_clr_wen(mpidr); + + /* Zero the jump address in the mailbox for this cpu */ + fvp_mboxes = (mailbox *) (TZDRAM_BASE + MBOX_OFF); + linear_id = platform_get_core_pos(mpidr); + fvp_mboxes[linear_id].value = 0; + flush_dcache_range((unsigned long) &fvp_mboxes[linear_id], + sizeof(unsigned long)); + + gicd_base = platform_get_cfgvar(CONFIG_GICD_ADDR); + gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR); + + /* Enable the gic cpu interface */ + gic_cpuif_setup(gicc_base); + + /* TODO: This setup is needed only after a cold boot */ + gic_pcpu_distif_setup(gicd_base); + + /* Allow access to the System counter timer module */ + reg_val = (1 << CNTACR_RPCT_SHIFT) | (1 << CNTACR_RVCT_SHIFT); + reg_val |= (1 << CNTACR_RFRQ_SHIFT) | (1 << CNTACR_RVOFF_SHIFT); + reg_val |= (1 << CNTACR_RWVT_SHIFT) | (1 << CNTACR_RWPT_SHIFT); + mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(0), reg_val); + mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(1), reg_val); + + reg_val = (1 << CNTNSAR_NS_SHIFT(0)) | + (1 << CNTNSAR_NS_SHIFT(1)); + mmio_write_32(SYS_TIMCTL_BASE + CNTNSAR, reg_val); + + break; + + default: + assert(0); + } + + return rc; +} + +/******************************************************************************* + * FVP handler called when an affinity instance has just been powered on after + * having been suspended earlier. The level and mpidr determine the affinity + * instance. + * TODO: At the moment we reuse the on finisher and reinitialize the secure + * context. Need to implement a separate suspend finisher. 
+ ******************************************************************************/ +int fvp_affinst_suspend_finish(unsigned long mpidr, + unsigned int afflvl, + unsigned int state) +{ + return fvp_affinst_on_finish(mpidr, afflvl, state); +} + + +/******************************************************************************* + * Export the platform handlers to enable psci to invoke them + ******************************************************************************/ +static plat_pm_ops fvp_plat_pm_ops = { + 0, + fvp_affinst_on, + fvp_affinst_off, + fvp_affinst_suspend, + fvp_affinst_on_finish, + fvp_affinst_suspend_finish, +}; + +/******************************************************************************* + * Export the platform specific power ops & initialize the fvp power controller + ******************************************************************************/ +int platform_setup_pm(plat_pm_ops **plat_ops) +{ + *plat_ops = &fvp_plat_pm_ops; + return 0; +} diff --git a/plat/fvp/plat_topology.c b/plat/fvp/plat_topology.c new file mode 100644 index 0000000..0c8c525 --- /dev/null +++ b/plat/fvp/plat_topology.c @@ -0,0 +1,241 @@ +/* + * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of ARM nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific + * prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +/* TODO: Reusing psci error codes & state information. Get our own! */ +#include + +/* We treat '255' as an invalid affinity instance */ +#define AFFINST_INVAL 0xff + +/******************************************************************************* + * We support 3 flavours of the FVP: Foundation, Base AEM & Base Cortex. Each + * flavour has a different topology. The common bit is that there can be a max. + * of 2 clusters (affinity 1) and 4 cpus (affinity 0) per cluster. So we define + * a tree like data structure which caters to these maximum bounds. It simply + * marks the absent affinity level instances as PSCI_AFF_ABSENT e.g. there is no + * cluster 1 on the Foundation FVP. The 'data' field is currently unused. + ******************************************************************************/ +typedef struct { + unsigned char sibling; + unsigned char child; + unsigned char state; + unsigned int data; +} affinity_info; + +/******************************************************************************* + * The following two data structures store the topology tree for the fvp. 
There + * is a separate array for each affinity level i.e. cpus and clusters. The child + * and sibling references allow traversal inside and in between the two arrays. + ******************************************************************************/ +static affinity_info fvp_aff1_topology_map[PLATFORM_CLUSTER_COUNT]; +static affinity_info fvp_aff0_topology_map[PLATFORM_CORE_COUNT]; + +/* Simple global variable to safeguard us from stupidity */ +static unsigned int topology_setup_done; + +/******************************************************************************* + * This function implements a part of the critical interface between the psci + * generic layer and the platform to allow the former to detect the platform + * topology. psci queries the platform to determine how many affinity instances + * are present at a particular level for a given mpidr e.g. consider a dual + * cluster platform where each cluster has 4 cpus. A call to this function with + * (0, 0x100) will return the number of cpus implemented under cluster 1 i.e. 4. + * Similarly a call with (1, 0x100) will return 2 i.e. the number of clusters. + * This is 'cause we are effectively asking how many affinity level 1 instances + * are implemented under affinity level 2 instance 0. + ******************************************************************************/ +unsigned int plat_get_aff_count(unsigned int aff_lvl, + unsigned long mpidr) +{ + unsigned int aff_count = 1, ctr; + unsigned char parent_aff_id; + + assert(topology_setup_done == 1); + + switch (aff_lvl) { + case 3: + case 2: + /* + * Assert if the parent affinity instance is not 0. + * This also takes care of level 3 in an obfuscated way + */ + parent_aff_id = (mpidr >> MPIDR_AFF3_SHIFT) & MPIDR_AFFLVL_MASK; + assert(parent_aff_id == 0); + + /* + * Report that we implement a single instance of + * affinity levels 2 & 3 which are AFF_ABSENT + */ + break; + case 1: + /* Assert if the parent affinity instance is not 0. 
*/
+		parent_aff_id = (mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK;
+		assert(parent_aff_id == 0);
+
+		/* Fetch the starting index in the aff1 array */
+		for (ctr = 0;
+		     fvp_aff1_topology_map[ctr].sibling != AFFINST_INVAL;
+		     ctr = fvp_aff1_topology_map[ctr].sibling) {
+			aff_count++;
+		}
+
+		break;
+	case 0:
+		/* Assert if the cluster id is anything apart from 0 or 1 */
+		parent_aff_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+		assert(parent_aff_id < PLATFORM_CLUSTER_COUNT);
+
+		/* Fetch the starting index in the aff0 array */
+		for (ctr = fvp_aff1_topology_map[parent_aff_id].child;
+		     fvp_aff0_topology_map[ctr].sibling != AFFINST_INVAL;
+		     ctr = fvp_aff0_topology_map[ctr].sibling) {
+			aff_count++;
+		}
+
+		break;
+	default:
+		assert(0);
+	}
+
+	return aff_count;
+}
+
+/*******************************************************************************
+ * This function implements a part of the critical interface between the psci
+ * generic layer and the platform to allow the former to detect the state of an
+ * affinity instance in the platform topology. psci queries the platform to
+ * determine whether an affinity instance is present or absent. This caters for
+ * topologies where an intermediate affinity level instance is missing e.g.
+ * consider a platform which implements a single cluster with 4 cpus and there
+ * is another cpu sitting directly on the interconnect along with the cluster.
+ * The mpidrs of the cluster would range from 0x0-0x3. The mpidr of the single
+ * cpu would be 0x100 to highlight that it does not belong to cluster 0. Cluster
+ * 1 is however missing but needs to be accounted to reach this single cpu in
+ * the topology tree. Hence it will be marked as PSCI_AFF_ABSENT. This is not
+ * applicable to the FVP but depicted as an example.
+ ******************************************************************************/ +unsigned int plat_get_aff_state(unsigned int aff_lvl, + unsigned long mpidr) +{ + unsigned int aff_state = PSCI_AFF_ABSENT, idx; + idx = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK; + + assert(topology_setup_done == 1); + + switch (aff_lvl) { + case 3: + case 2: + /* Report affinity levels 2 & 3 as absent */ + break; + case 1: + aff_state = fvp_aff1_topology_map[idx].state; + break; + case 0: + /* + * First get start index of the aff0 in its array & then add + * to it the affinity id that we want the state of + */ + idx = fvp_aff1_topology_map[idx].child; + idx += (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK; + aff_state = fvp_aff0_topology_map[idx].state; + break; + default: + assert(0); + } + + return aff_state; +} + +/******************************************************************************* + * Handy optimization to prevent the psci implementation from traversing through + * affinity levels which are not present while detecting the platform topology. + ******************************************************************************/ +int plat_get_max_afflvl() +{ + return MPIDR_AFFLVL1; +} + +/******************************************************************************* + * This function populates the FVP specific topology information depending upon + * the FVP flavour its running on. We construct all the mpidrs we can handle + * and rely on the PWRC.PSYSR to flag absent cpus when their status is queried. 
+ ******************************************************************************/ +int plat_setup_topology() +{ + unsigned char aff0, aff1, aff_state, aff0_offset = 0; + unsigned long mpidr; + + topology_setup_done = 0; + + for (aff1 = 0; aff1 < PLATFORM_CLUSTER_COUNT; aff1++) { + + fvp_aff1_topology_map[aff1].child = aff0_offset; + fvp_aff1_topology_map[aff1].sibling = aff1 + 1; + + for (aff0 = 0; aff0 < PLATFORM_MAX_CPUS_PER_CLUSTER; aff0++) { + + mpidr = aff1 << MPIDR_AFF1_SHIFT; + mpidr |= aff0 << MPIDR_AFF0_SHIFT; + + if (fvp_pwrc_read_psysr(mpidr) != PSYSR_INVALID) { + /* + * Presence of even a single aff0 indicates + * presence of parent aff1 on the FVP. + */ + aff_state = PSCI_AFF_PRESENT; + fvp_aff1_topology_map[aff1].state = + PSCI_AFF_PRESENT; + } else { + aff_state = PSCI_AFF_ABSENT; + } + + fvp_aff0_topology_map[aff0_offset].child = AFFINST_INVAL; + fvp_aff0_topology_map[aff0_offset].state = aff_state; + fvp_aff0_topology_map[aff0_offset].sibling = + aff0_offset + 1; + + /* Increment the absolute number of aff0s traversed */ + aff0_offset++; + } + + /* Tie-off the last aff0 sibling to -1 to avoid overflow */ + fvp_aff0_topology_map[aff0_offset - 1].sibling = AFFINST_INVAL; + } + + /* Tie-off the last aff1 sibling to AFFINST_INVAL to avoid overflow */ + fvp_aff1_topology_map[aff1 - 1].sibling = AFFINST_INVAL; + + topology_setup_done = 1; + return 0; +} diff --git a/plat/fvp/platform.mk b/plat/fvp/platform.mk index 1765801..5da2acd 100644 --- a/plat/fvp/platform.mk +++ b/plat/fvp/platform.mk @@ -62,20 +62,20 @@ BL1_OBJS += bl1_plat_setup.o \ bl1_plat_helpers.o \ - fvp_helpers.o \ - fvp_common.o \ + plat_helpers.o \ + plat_common.o \ cci400.o BL2_OBJS += bl2_plat_setup.o \ - fvp_common.o + plat_common.o BL31_OBJS += bl31_plat_setup.o \ - fvp_helpers.o \ - fvp_common.o \ - fvp_pm.o \ + plat_helpers.o \ + plat_common.o \ + plat_pm.o \ + plat_topology.o \ + plat_gic.o \ fvp_pwrc.o \ - fvp_topology.o \ - fvp_gic.o \ cci400.o \ gic_v2.o \ gic_v3.o