diff --git a/Makefile b/Makefile
index b3d67b0..aa9b980 100644
--- a/Makefile
+++ b/Makefile
@@ -163,6 +163,10 @@
 # Alternatively CROSS_COMPILE can be set in the environment.
 # Default value for CROSS_COMPILE is not to prefix executables
 
+ifeq ($(ARCH),arm64)
+ARCH = arm
+endif
+
 ARCH ?= sandbox
 CROSS_COMPILE ?=
 
diff --git a/arch/arm/cpu/exceptions.S b/arch/arm/cpu/exceptions.S
index eda0d6a..55014c8 100644
--- a/arch/arm/cpu/exceptions.S
+++ b/arch/arm/cpu/exceptions.S
@@ -55,26 +55,6 @@
 	mov	r0, sp
 	.endm
 
-	.macro	irq_save_user_regs
-	sub	sp, sp, #S_FRAME_SIZE
-	stmia	sp, {r0 - r12}		@ Calling r0-r12
-	add	r8, sp, #S_PC
-	stmdb	r8, {sp, lr}^		@ Calling SP, LR
-	str	lr, [r8, #0]		@ Save calling PC
-	mrs	r6, spsr
-	str	r6, [r8, #4]		@ Save CPSR
-	str	r0, [r8, #8]		@ Save OLD_R0
-	mov	r0, sp
-	.endm
-
-	.macro	irq_restore_user_regs
-	ldmia	sp, {r0 - lr}^		@ Calling r0 - lr
-	mov	r0, r0
-	ldr	lr, [sp, #S_PC]		@ Get PC
-	add	sp, sp, #S_FRAME_SIZE
-	subs	pc, lr, #4		@ return & move spsr_svc into cpsr
-	.endm
-
 	.macro	get_bad_stack
 	ldr	r13, =abort_stack
 	str	lr, [r13]	@ save caller lr / spsr
@@ -103,14 +83,6 @@
 do_abort_\@:
 	.endm
 
-	.macro get_irq_stack			@ setup IRQ stack
-	ldr	sp, IRQ_STACK_START
-	.endm
-
-	.macro get_fiq_stack			@ setup FIQ stack
-	ldr	sp, FIQ_STACK_START
-	.endm
-
 /*
  * exception handlers
  */
diff --git a/arch/arm/cpu/interrupts_64.c b/arch/arm/cpu/interrupts_64.c
index e8475d2..baccf47 100644
--- a/arch/arm/cpu/interrupts_64.c
+++ b/arch/arm/cpu/interrupts_64.c
@@ -194,15 +194,22 @@
 static int aarch64_init_vectors(void)
 {
-	unsigned int el;
+	unsigned int el;
 
-	el = current_el();
-	if (el == 1)
-		asm volatile("msr vbar_el1, %0" : : "r" (&vectors) : "cc");
-	else if (el == 2)
-		asm volatile("msr vbar_el2, %0" : : "r" (&vectors) : "cc");
-	else
-		asm volatile("msr vbar_el3, %0" : : "r" (&vectors) : "cc");
+	el = current_el();
+	switch (el) {
+	case 3:
+		asm volatile("msr vbar_el3, %0" : : "r" (&vectors) : "cc");
+		/* Fall through */
+	case 2:
+		asm volatile("msr vbar_el2, %0" : : "r" (&vectors) : "cc");
+		/* Fall through */
+	case 1:
+		asm volatile("msr vbar_el1, %0" : : "r" (&vectors) : "cc");
+		/* Fall through */
+	default:
+		break;
+	}
 
 	return 0;
 }
 
diff --git a/arch/arm/cpu/mmu-early_64.c b/arch/arm/cpu/mmu-early_64.c
index 94e3726..a7598f2 100644
--- a/arch/arm/cpu/mmu-early_64.c
+++ b/arch/arm/cpu/mmu-early_64.c
@@ -67,7 +67,8 @@
 	el = current_el();
 	set_ttbr_tcr_mair(el, ttb, calc_tcr(el, EARLY_BITS_PER_VA),
 			  MEMORY_ATTRIBUTES);
-	create_sections((void *)ttb, 0, 0, 1UL << (EARLY_BITS_PER_VA - 1), UNCACHED_MEM);
+	create_sections((void *)ttb, 0, 0, 1UL << (EARLY_BITS_PER_VA - 1),
+			attrs_uncached_mem());
 	create_sections((void *)ttb, membase, membase, memsize, CACHED_MEM);
 	tlb_invalidate();
 	isb();
diff --git a/arch/arm/cpu/mmu.h b/arch/arm/cpu/mmu.h
index 6e7a4c0..c85e0ea 100644
--- a/arch/arm/cpu/mmu.h
+++ b/arch/arm/cpu/mmu.h
@@ -39,6 +39,15 @@
 #define DOMAIN_CLIENT	1
 #define DOMAIN_MANAGER	3
 
+static inline unsigned long get_domain(void)
+{
+	unsigned long dacr;
+
+	asm volatile ("mrc p15, 0, %0, c3, c0, 0" : "=r"(dacr));
+
+	return dacr;
+}
+
 static inline void set_domain(unsigned val)
 {
 	/* Set the Domain Access Control Register */
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 98cd4c7..14d955c 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -158,19 +158,21 @@
 int arch_remap_range(void *_start, size_t size, unsigned flags)
 {
+	unsigned long attrs;
+
 	switch (flags) {
 	case MAP_CACHED:
-		flags = CACHED_MEM;
+		attrs = CACHED_MEM;
 		break;
 	case MAP_UNCACHED:
-		flags = UNCACHED_MEM;
+		attrs = attrs_uncached_mem();
 		break;
 	default:
 		return -EINVAL;
 	}
 
 	create_sections((uint64_t)_start, (uint64_t)_start, (uint64_t)size,
-			flags);
+			attrs);
 	return 0;
 }
 
@@ -199,7 +201,7 @@
 	pr_debug("ttb: 0x%p\n", ttb);
 
 	/* create a flat mapping */
-	create_sections(0, 0, 1UL << (BITS_PER_VA - 1), UNCACHED_MEM);
+	create_sections(0, 0, 1UL << (BITS_PER_VA - 1), attrs_uncached_mem());
 
 	/* Map sdram cached. */
 	for_each_memory_bank(bank)
diff --git a/arch/arm/cpu/mmu_64.h b/arch/arm/cpu/mmu_64.h
index a2a5477..9bbb62f 100644
--- a/arch/arm/cpu/mmu_64.h
+++ b/arch/arm/cpu/mmu_64.h
@@ -8,6 +8,23 @@
 			PTE_BLOCK_OUTER_SHARE | \
 			PTE_BLOCK_AF)
 
+static inline unsigned long attrs_uncached_mem(void)
+{
+	unsigned long attrs = UNCACHED_MEM;
+
+	switch (current_el()) {
+	case 3:
+	case 2:
+		attrs |= PTE_BLOCK_UXN;
+		break;
+	default:
+		attrs |= PTE_BLOCK_UXN | PTE_BLOCK_PXN;
+		break;
+	}
+
+	return attrs;
+}
+
 /*
  * Do it the simple way for now and invalidate the entire tlb
  */
diff --git a/arch/arm/cpu/sm.c b/arch/arm/cpu/sm.c
index 1f2c236..f5a1edb 100644
--- a/arch/arm/cpu/sm.c
+++ b/arch/arm/cpu/sm.c
@@ -26,7 +26,7 @@
 {
 	unsigned int reg;
 
-	asm("mrc p15, 0, %0, c0, c1, 1\n" : "=r"(reg));
+	asm volatile ("mrc p15, 0, %0, c0, c1, 1\n" : "=r"(reg));
 
 	return reg;
 }
@@ -34,18 +34,18 @@
 {
 	unsigned int reg;
 
-	asm("mrc p15, 0, %0, c1, c1, 2\n" : "=r"(reg));
+	asm volatile ("mrc p15, 0, %0, c1, c1, 2\n" : "=r"(reg));
 
 	return reg;
 }
 
 static void write_nsacr(u32 val)
 {
-	asm("mcr p15, 0, %0, c1, c1, 2" : : "r"(val));
+	asm volatile ("mcr p15, 0, %0, c1, c1, 2" : : "r"(val));
 }
 
 static void write_mvbar(u32 val)
 {
-	asm("mcr p15, 0, %0, c12, c0, 1" : : "r"(val));
+	asm volatile ("mcr p15, 0, %0, c12, c0, 1" : : "r"(val));
 }
 static int cpu_is_virt_capable(void)
@@ -150,7 +150,7 @@
 int armv7_secure_monitor_install(void)
 {
 	int mmuon;
-	unsigned long ttb, vbar;
+	unsigned long ttb, vbar, dacr;
 
 	if (!armv7_have_security_extensions()) {
 		pr_err("Security extensions not implemented.\n");
@@ -164,12 +164,14 @@
 
 	vbar = get_vbar();
 	ttb = get_ttbr();
+	dacr = get_domain();
 
 	armv7_init_nonsec();
 	__armv7_secure_monitor_install();
 
 	set_ttbr((void *)ttb);
 	set_vbar(vbar);
+	set_domain(dacr);
 
 	if (mmuon) {
 		/*
diff --git a/arch/arm/lib32/bootm.c b/arch/arm/lib32/bootm.c
index d64e705..971ebee 100644
--- a/arch/arm/lib32/bootm.c
+++ b/arch/arm/lib32/bootm.c
@@ -104,11 +104,24 @@
 	spacing = SZ_1M;
 
 	if (*load_address == UIMAGE_INVALID_ADDRESS) {
+		unsigned long mem_end = mem_start + mem_size - 1;
+		unsigned long kaddr;
+
 		/*
 		 * Place the kernel at an address where it does not need to
 		 * relocate itself before decompression.
 		 */
-		*load_address = mem_start + image_decomp_size;
+		kaddr = mem_start + image_decomp_size;
+
+		/*
+		 * Make sure we do not place the image past the end of the
+		 * available memory.
+		 */
+		if (kaddr + image_size + spacing >= mem_end)
+			kaddr = mem_end - image_size - spacing;
+
+		*load_address = PAGE_ALIGN_DOWN(kaddr);
+
 		if (verbose)
 			printf("no OS load address, defaulting to 0x%08lx\n",
 			       *load_address);
diff --git a/common/bootm.c b/common/bootm.c
index 366f314..2999856 100644
--- a/common/bootm.c
+++ b/common/bootm.c
@@ -119,8 +119,13 @@
 	data->os_res = request_sdram_region("kernel", load_address,
 					    kernel_size);
-	if (!data->os_res)
+	if (!data->os_res) {
+		printf("unable to request SDRAM region for kernel at "
+		       "0x%08llx-0x%08llx\n",
+		       (unsigned long long)load_address,
+		       (unsigned long long)load_address + kernel_size - 1);
 		return -ENOMEM;
+	}
 	memcpy((void *)load_address, kernel, kernel_size);
 	return 0;
 }
@@ -227,8 +232,13 @@
 		data->initrd_res = request_sdram_region("initrd",
 				load_address,
 				initrd_size);
-		if (!data->initrd_res)
+		if (!data->initrd_res) {
+			printf("unable to request SDRAM region for initrd at "
+			       "0x%08llx-0x%08llx\n",
+			       (unsigned long long)load_address,
+			       (unsigned long long)load_address + initrd_size - 1);
 			return -ENOMEM;
+		}
 		memcpy((void *)load_address, initrd, initrd_size);
 		printf("Loaded initrd from FIT image\n");
 		goto done1;
@@ -439,8 +449,13 @@
 	data->oftree_res = request_sdram_region("oftree", load_address,
 			fdt_size);
-	if (!data->oftree_res)
+	if (!data->oftree_res) {
+		printf("unable to request SDRAM region for device tree at "
+		       "0x%08llx-0x%08llx\n",
+		       (unsigned long long)load_address,
+		       (unsigned long long)load_address + fdt_size - 1);
 		return -ENOMEM;
+	}
 	memcpy((void *)data->oftree_res->start, fdt, fdt_size);