diff --git a/cmsis/TARGET_CORTEX_M/cmsis_compiler.h b/cmsis/TARGET_CORTEX_M/cmsis_compiler.h index 582ada4..9e87cfd 100644 --- a/cmsis/TARGET_CORTEX_M/cmsis_compiler.h +++ b/cmsis/TARGET_CORTEX_M/cmsis_compiler.h @@ -78,6 +78,14 @@ #ifndef __ARM_ARCH_7EM__ #define __ARM_ARCH_7EM__ 1 #endif + #elif (__CORE__ == __ARM8M_BASELINE__) + #ifndef __ARM_ARCH_8M_BASE__ + #define __ARM_ARCH_8M_BASE__ 1 + #endif + #elif (__CORE__ == __ARM8M_MAINLINE__) + #ifndef __ARM_ARCH_8M_MAIN__ + #define __ARM_ARCH_8M_MAIN__ 1 + #endif #endif // IAR version 7.8.1 and earlier do not include __ALIGNED diff --git a/platform/CThunk.h b/platform/CThunk.h index a1db2cf..90e150b 100644 --- a/platform/CThunk.h +++ b/platform/CThunk.h @@ -37,7 +37,8 @@ #define CTHUNK_ADDRESS 1 #define CTHUNK_VARIABLES volatile uint32_t code[2] -#if (defined(__CORTEX_M3) || defined(__CORTEX_M4) || defined(__CORTEX_M7) || defined(__CORTEX_A9)) +#if (defined(__CORTEX_M3) || defined(__CORTEX_M4) || defined(__CORTEX_M7) || defined(__CORTEX_A9) \ + || defined(__CORTEX_M23) || defined(__CORTEX_M33)) /** * CTHUNK disassembly for Cortex-M3/M4/M7/A9 (thumb2): * * adr r0, #4 diff --git a/rtos/TARGET_CORTEX/rtx5/TARGET_M23/TOOLCHAIN_GCC/irq_armv8mbl.S b/rtos/TARGET_CORTEX/rtx5/TARGET_M23/TOOLCHAIN_GCC/irq_armv8mbl.S new file mode 100644 index 0000000..1b6b119 --- /dev/null +++ b/rtos/TARGET_CORTEX/rtx5/TARGET_M23/TOOLCHAIN_GCC/irq_armv8mbl.S @@ -0,0 +1,302 @@ +;/* +; * Copyright (c) 2016-2017 ARM Limited. All rights reserved. +; * +; * SPDX-License-Identifier: Apache-2.0 +; * +; * Licensed under the Apache License, Version 2.0 (the License); you may +; * not use this file except in compliance with the License. +; * You may obtain a copy of the License at +; * +; * www.apache.org/licenses/LICENSE-2.0 +; * +; * Unless required by applicable law or agreed to in writing, software +; * distributed under the License is distributed on an AS IS BASIS, WITHOUT +; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +; * See the License for the specific language governing permissions and +; * limitations under the License. 
+; * +; * ----------------------------------------------------------------------------- +; * +; * Project: CMSIS-RTOS RTX +; * Title: ARMv8M Baseline Exception handlers +; * +; * ----------------------------------------------------------------------------- +; */ + + + .file "irq_armv8mbl.S" + .syntax unified + + .equ I_T_RUN_OFS, 28 // osRtxInfo.thread.run offset + .equ TCB_SM_OFS, 48 // TCB.stack_mem offset + .equ TCB_SP_OFS, 56 // TCB.SP offset + .equ TCB_SF_OFS, 34 // TCB.stack_frame offset + .equ TCB_TZM_OFS, 64 // TCB.tz_memory offset + + .section ".rodata" + .global irqRtxLib // Non weak library reference +irqRtxLib: + .byte 0 + + + .thumb + .section ".text" + .align 2 + + + .thumb_func + .type SVC_Handler, %function + .global SVC_Handler + .fnstart + .cantunwind +SVC_Handler: + MRS R0,PSP // Get PSP + LDR R1,[R0,#24] // Load saved PC from stack + SUBS R1,R1,#2 // Point to SVC instruction + LDRB R1,[R1] // Load SVC number + CMP R1,#0 + BNE SVC_User // Branch if not SVC 0 + + PUSH {R0,LR} // Save PSP and EXC_RETURN + LDM R0,{R0-R3} // Load function parameters from stack + BLX R7 // Call service function + POP {R2,R3} // Restore PSP and EXC_RETURN + STMIA R2!,{R0-R1} // Store function return values + MOV LR,R3 // Set EXC_RETURN + +SVC_Context: + LDR R3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run + LDMIA R3!,{R1,R2} // Load osRtxInfo.thread.run: curr & next + CMP R1,R2 // Check if thread switch is required + BEQ SVC_Exit // Branch when threads are the same + + CBZ R1,SVC_ContextSwitch // Branch if running thread is deleted + +SVC_ContextSave: +#ifdef __DOMAIN_NS + LDR R0,[R1,#TCB_TZM_OFS] // Load TrustZone memory identifier + CBZ R0,SVC_ContextSave1 // Branch if there is no secure context + PUSH {R1,R2,R3,R7} // Save registers + MOV R7,LR // Get EXC_RETURN + BL TZ_StoreContext_S // Store secure context + MOV LR,R7 // Set EXC_RETURN + POP {R1,R2,R3,R7} // Restore registers +#endif + +SVC_ContextSave1: + MRS R0,PSP // Get PSP + SUBS R0,R0,#32 // Adjust PSP + STR R0,[R1,#TCB_SP_OFS] // Store SP + STMIA R0!,{R4-R7} // Save R4..R7 + MOV R4,R8 + MOV R5,R9 + MOV R6,R10 + MOV R7,R11 + STMIA R0!,{R4-R7} // Save R8..R11 + +SVC_ContextSave2: + MOV R0,LR // Get EXC_RETURN + ADDS R1,R1,#TCB_SF_OFS // Adjust address + STRB R0,[R1] // Store stack frame information + +SVC_ContextSwitch: + SUBS R3,R3,#8 // Adjust address + STR R2,[R3] // osRtxInfo.thread.run: curr = next + +SVC_ContextRestore: +#ifdef __DOMAIN_NS + LDR R0,[R2,#TCB_TZM_OFS] // Load TrustZone memory identifier + CBZ R0,SVC_ContextRestore1 // Branch if there is no secure context + PUSH {R2,R3} // Save registers + BL TZ_LoadContext_S // Load secure context + POP {R2,R3} // Restore registers +#endif + +SVC_ContextRestore1: + MOV R1,R2 + ADDS R1,R1,#TCB_SF_OFS // Adjust address + LDRB R0,[R1] // Load stack frame information + MOVS R1,#0xFF + MVNS R1,R1 // R1=0xFFFFFF00 + ORRS R0,R1 + MOV LR,R0 // Set EXC_RETURN + +#ifdef __DOMAIN_NS + LSLS R0,R0,#25 // Check domain of interrupted thread + BPL SVC_ContextRestore2 // Branch if non-secure + LDR R0,[R2,#TCB_SP_OFS] // Load SP + MSR PSP,R0 // Set PSP + BX LR // Exit from handler +#else + LDR R0,[R2,#TCB_SM_OFS] // Load stack memory base + MSR PSPLIM,R0 // Set PSPLIM +#endif + +SVC_ContextRestore2: + LDR R0,[R2,#TCB_SP_OFS] // Load SP + ADDS R0,R0,#16 // Adjust address + LDMIA R0!,{R4-R7} // Restore R8..R11 + MOV R8,R4 + MOV R9,R5 + MOV R10,R6 + MOV R11,R7 + MSR PSP,R0 // Set PSP + SUBS R0,R0,#32 // Adjust address + LDMIA R0!,{R4-R7} // Restore R4..R7 + +SVC_Exit: + BX LR // Exit from 
handler + +SVC_User: + PUSH {R4,LR} // Save registers + LDR R2,=osRtxUserSVC // Load address of SVC table + LDR R3,[R2] // Load SVC maximum number + CMP R1,R3 // Check SVC number range + BHI SVC_Done // Branch if out of range + + LSLS R1,R1,#2 + LDR R4,[R2,R1] // Load address of SVC function + + LDM R0,{R0-R3} // Load function parameters from stack + BLX R4 // Call service function + MRS R4,PSP // Get PSP + STR R0,[R4] // Store function return value + +SVC_Done: + POP {R4,PC} // Return from handler + + .fnend + .size SVC_Handler, .-SVC_Handler + + + .thumb_func + .type PendSV_Handler, %function + .global PendSV_Handler + .fnstart + .cantunwind +PendSV_Handler: + + PUSH {R0,LR} // Save EXC_RETURN + BL osRtxPendSV_Handler // Call osRtxPendSV_Handler + POP {R0,R1} // Restore EXC_RETURN + MOV LR,R1 // Set EXC_RETURN + B Sys_Context + + .fnend + .size PendSV_Handler, .-PendSV_Handler + + + .thumb_func + .type SysTick_Handler, %function + .global SysTick_Handler + .fnstart + .cantunwind +SysTick_Handler: + + PUSH {R0,LR} // Save EXC_RETURN + BL osRtxTick_Handler // Call osRtxTick_Handler + POP {R0,R1} // Restore EXC_RETURN + MOV LR,R1 // Set EXC_RETURN + B Sys_Context + + .fnend + .size SysTick_Handler, .-SysTick_Handler + + + .thumb_func + .type Sys_Context, %function + .global Sys_Context + .fnstart + .cantunwind +Sys_Context: + + LDR R3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run + LDM R3!,{R1,R2} // Load osRtxInfo.thread.run: curr & next + CMP R1,R2 // Check if thread switch is required + BEQ Sys_ContextExit // Branch when threads are the same + +Sys_ContextSave: +#ifdef __DOMAIN_NS + LDR R0,[R1,#TCB_TZM_OFS] // Load TrustZone memory identifier + CBZ R0,Sys_ContextSave1 // Branch if there is no secure context + PUSH {R1,R2,R3,R7} // Save registers + MOV R7,LR // Get EXC_RETURN + BL TZ_StoreContext_S // Store secure context + MOV LR,R7 // Set EXC_RETURN + POP {R1,R2,R3,R7} // Restore registers + LSLS R7,R7,#25 // Check domain of interrupted thread + BMI Sys_ContextSave1 // Branch if secure + MRS R0,PSP // Get PSP + STR R0,[R1,#TCB_SP_OFS] // Store SP + B Sys_ContextSave2 +#endif + +Sys_ContextSave1: + MRS R0,PSP // Get PSP + SUBS R0,R0,#32 // Adjust address + STR R0,[R1,#TCB_SP_OFS] // Store SP + STMIA R0!,{R4-R7} // Save R4..R7 + MOV R4,R8 + MOV R5,R9 + MOV R6,R10 + MOV R7,R11 + STMIA R0!,{R4-R7} // Save R8..R11 + +Sys_ContextSave2: + MOV R0,LR // Get EXC_RETURN + ADDS R1,R1,#TCB_SF_OFS // Adjust address + STRB R0,[R1] // Store stack frame information + +Sys_ContextSwitch: + SUBS R3,R3,#8 // Adjust address + STR R2,[R3] // osRtxInfo.run: curr = next + +Sys_ContextRestore: +#ifdef __DOMAIN_NS + LDR R0,[R2,#TCB_TZM_OFS] // Load TrustZone memory identifier + CBZ R0,Sys_ContextRestore1 // Branch if there is no secure context + PUSH {R2,R3} // Save registers + BL TZ_LoadContext_S // Load secure context + POP {R2,R3} // Restore registers +#endif + +Sys_ContextRestore1: + MOV R1,R2 + ADDS R1,R1,#TCB_SF_OFS // Adjust offset + LDRB R0,[R1] // Load stack frame information + MOVS R1,#0xFF + MVNS R1,R1 // R1=0xFFFFFF00 + ORRS R0,R1 + MOV LR,R0 // Set EXC_RETURN + +#ifdef __DOMAIN_NS + LSLS R0,R0,#25 // Check domain of interrupted thread + BPL Sys_ContextRestore2 // Branch if non-secure + LDR R0,[R2,#TCB_SP_OFS] // Load SP + MSR PSP,R0 // Set PSP + BX LR // Exit from handler +#else + LDR R0,[R2,#TCB_SM_OFS] // Load stack memory base + MSR PSPLIM,R0 // Set PSPLIM +#endif + +Sys_ContextRestore2: + LDR R0,[R2,#TCB_SP_OFS] // Load SP + ADDS R0,R0,#16 // Adjust address + LDMIA R0!,{R4-R7} // 
Restore R8..R11 + MOV R8,R4 + MOV R9,R5 + MOV R10,R6 + MOV R11,R7 + MSR PSP,R0 // Set PSP + SUBS R0,R0,#32 // Adjust address + LDMIA R0!,{R4-R7} // Restore R4..R7 + +Sys_ContextExit: + BX LR // Exit from handler + + .fnend + .size Sys_Context, .-Sys_Context + + + .end diff --git a/rtos/TARGET_CORTEX/rtx5/TARGET_M23/TOOLCHAIN_IAR/irq_armv8mbl.S b/rtos/TARGET_CORTEX/rtx5/TARGET_M23/TOOLCHAIN_IAR/irq_armv8mbl.S new file mode 100644 index 0000000..080814f --- /dev/null +++ b/rtos/TARGET_CORTEX/rtx5/TARGET_M23/TOOLCHAIN_IAR/irq_armv8mbl.S @@ -0,0 +1,288 @@ +;/* +; * Copyright (c) 2016-2017 ARM Limited. All rights reserved. +; * +; * SPDX-License-Identifier: Apache-2.0 +; * +; * Licensed under the Apache License, Version 2.0 (the License); you may +; * not use this file except in compliance with the License. +; * You may obtain a copy of the License at +; * +; * www.apache.org/licenses/LICENSE-2.0 +; * +; * Unless required by applicable law or agreed to in writing, software +; * distributed under the License is distributed on an AS IS BASIS, WITHOUT +; * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +; * See the License for the specific language governing permissions and +; * limitations under the License. +; * +; * ----------------------------------------------------------------------------- +; * +; * Project: CMSIS-RTOS RTX +; * Title: ARMv8M Baseline Exception handlers +; * +; * ----------------------------------------------------------------------------- +; */ + + + NAME irq_armv8mbl.s + +I_T_RUN_OFS EQU 28 ; osRtxInfo.thread.run offset +TCB_SM_OFS EQU 48 ; TCB.stack_mem offset +TCB_SP_OFS EQU 56 ; TCB.SP offset +TCB_SF_OFS EQU 34 ; TCB.stack_frame offset +TCB_TZM_OFS EQU 64 ; TCB.tz_memory offset + + + PRESERVE8 + SECTION .rodata:DATA:NOROOT(2) + + + EXPORT irqRtxLib +irqRtxLib DCB 0 ; Non weak library reference + + THUMB + SECTION .text:CODE:NOROOT(2) + + +SVC_Handler + EXPORT SVC_Handler + IMPORT osRtxUserSVC + IMPORT osRtxInfo +#ifdef __DOMAIN_NS + IMPORT TZ_LoadContext_S + IMPORT TZ_StoreContext_S +#endif + + MRS R0,PSP ; Get PSP + LDR R1,[R0,#24] ; Load saved PC from stack + SUBS R1,R1,#2 ; Point to SVC instruction + LDRB R1,[R1] ; Load SVC number + CMP R1,#0 + BNE SVC_User ; Branch if not SVC 0 + + PUSH {R0,LR} ; Save PSP and EXC_RETURN + LDM R0,{R0-R3} ; Load function parameters from stack + BLX R7 ; Call service function + POP {R2,R3} ; Restore PSP and EXC_RETURN + STMIA R2!,{R0-R1} ; Store function return values + MOV LR,R3 ; Set EXC_RETURN + +SVC_Context + LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run + LDMIA R3!,{R1,R2} ; Load osRtxInfo.thread.run: curr & next + CMP R1,R2 ; Check if thread switch is required + BEQ SVC_Exit ; Branch when threads are the same + + CBZ R1,SVC_ContextSwitch ; Branch if running thread is deleted + +SVC_ContextSave +#ifdef __DOMAIN_NS + LDR R0,[R1,#TCB_TZM_OFS] ; Load TrustZone memory identifier + CBZ R0,SVC_ContextSave1 ; Branch if there is no secure context + PUSH {R1,R2,R3,R7} ; Save registers + MOV R7,LR ; Get EXC_RETURN + BL TZ_StoreContext_S ; Store secure context + MOV LR,R7 ; Set EXC_RETURN + POP {R1,R2,R3,R7} ; Restore registers +#endif + +SVC_ContextSave1 + MRS R0,PSP ; Get PSP + SUBS R0,R0,#32 ; Adjust PSP + STR R0,[R1,#TCB_SP_OFS] ; Store SP + STMIA R0!,{R4-R7} ; Save R4..R7 + MOV R4,R8 + MOV R5,R9 + MOV R6,R10 + MOV R7,R11 + STMIA R0!,{R4-R7} ; Save R8..R11 + +SVC_ContextSave2 + MOV R0,LR ; Get EXC_RETURN + ADDS R1,R1,#TCB_SF_OFS ; Adjust address + STRB R0,[R1] ; Store stack frame information + 
+SVC_ContextSwitch + SUBS R3,R3,#8 ; Adjust address + STR R2,[R3] ; osRtxInfo.thread.run: curr = next + +SVC_ContextRestore +#ifdef __DOMAIN_NS + LDR R0,[R2,#TCB_TZM_OFS] ; Load TrustZone memory identifier + CBZ R0,SVC_ContextRestore1 ; Branch if there is no secure context + PUSH {R2,R3} ; Save registers + BL TZ_LoadContext_S ; Load secure context + POP {R2,R3} ; Restore registers +#endif + +SVC_ContextRestore1 + MOV R1,R2 + ADDS R1,R1,#TCB_SF_OFS ; Adjust address + LDRB R0,[R1] ; Load stack frame information + MOVS R1,#0xFF + MVNS R1,R1 ; R1=0xFFFFFF00 + ORRS R0,R1 + MOV LR,R0 ; Set EXC_RETURN + +#ifdef __DOMAIN_NS + LSLS R0,R0,#25 ; Check domain of interrupted thread + BPL SVC_ContextRestore2 ; Branch if non-secure + LDR R0,[R2,#TCB_SP_OFS] ; Load SP + MSR PSP,R0 ; Set PSP + BX LR ; Exit from handler +#else + LDR R0,[R2,#TCB_SM_OFS] ; Load stack memory base + MSR PSPLIM,R0 ; Set PSPLIM +#endif + +SVC_ContextRestore2 + LDR R0,[R2,#TCB_SP_OFS] ; Load SP + ADDS R0,R0,#16 ; Adjust address + LDMIA R0!,{R4-R7} ; Restore R8..R11 + MOV R8,R4 + MOV R9,R5 + MOV R10,R6 + MOV R11,R7 + MSR PSP,R0 ; Set PSP + SUBS R0,R0,#32 ; Adjust address + LDMIA R0!,{R4-R7} ; Restore R4..R7 + +SVC_Exit + BX LR ; Exit from handler + +SVC_User + PUSH {R4,LR} ; Save registers + LDR R2,=osRtxUserSVC ; Load address of SVC table + LDR R3,[R2] ; Load SVC maximum number + CMP R1,R3 ; Check SVC number range + BHI SVC_Done ; Branch if out of range + + LSLS R1,R1,#2 + LDR R4,[R2,R1] ; Load address of SVC function + + LDM R0,{R0-R3} ; Load function parameters from stack + BLX R4 ; Call service function + MRS R4,PSP ; Get PSP + STR R0,[R4] ; Store function return value + +SVC_Done + POP {R4,PC} ; Return from handler + + +PendSV_Handler + EXPORT PendSV_Handler + IMPORT osRtxPendSV_Handler + + PUSH {R0,LR} ; Save EXC_RETURN + BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler + POP {R0,R1} ; Restore EXC_RETURN + MOV LR,R1 ; Set EXC_RETURN + B Sys_Context + + +SysTick_Handler + EXPORT SysTick_Handler + IMPORT osRtxTick_Handler + + PUSH {R0,LR} ; Save EXC_RETURN + BL osRtxTick_Handler ; Call osRtxTick_Handler + POP {R0,R1} ; Restore EXC_RETURN + MOV LR,R1 ; Set EXC_RETURN + B Sys_Context + + +Sys_Context + EXPORT Sys_Context + IMPORT osRtxInfo +#ifdef __DOMAIN_NS + IMPORT TZ_LoadContext_S + IMPORT TZ_StoreContext_S +#endif + + LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run + LDM R3!,{R1,R2} ; Load osRtxInfo.thread.run: curr & next + CMP R1,R2 ; Check if thread switch is required + BEQ Sys_ContextExit ; Branch when threads are the same + + +Sys_ContextSave +#ifdef __DOMAIN_NS + LDR R0,[R1,#TCB_TZM_OFS] ; Load TrustZone memory identifier + CBZ R0,Sys_ContextSave1 ; Branch if there is no secure context + PUSH {R1,R2,R3,R7} ; Save registers + MOV R7,LR ; Get EXC_RETURN + BL TZ_StoreContext_S ; Store secure context + MOV LR,R7 ; Set EXC_RETURN + POP {R1,R2,R3,R7} ; Restore registers + LSLS R7,R7,#25 ; Check domain of interrupted thread + BMI Sys_ContextSave1 ; Branch if secure + MRS R0,PSP ; Get PSP + STR R0,[R1,#TCB_SP_OFS] ; Store SP + B Sys_ContextSave2 +#endif + +Sys_ContextSave1 + MRS R0,PSP ; Get PSP + SUBS R0,R0,#32 ; Adjust address + STR R0,[R1,#TCB_SP_OFS] ; Store SP + STMIA R0!,{R4-R7} ; Save R4..R7 + MOV R4,R8 + MOV R5,R9 + MOV R6,R10 + MOV R7,R11 + STMIA R0!,{R4-R7} ; Save R8..R11 + +Sys_ContextSave2 + MOV R0,LR ; Get EXC_RETURN + ADDS R1,R1,#TCB_SF_OFS ; Adjust address + STRB R0,[R1] ; Store stack frame information + +Sys_ContextSwitch + SUBS R3,R3,#8 ; Adjust address + STR R2,[R3] ; osRtxInfo.run: curr = next 
+ +Sys_ContextRestore +#ifdef __DOMAIN_NS + LDR R0,[R2,#TCB_TZM_OFS] ; Load TrustZone memory identifier + CBZ R0,Sys_ContextRestore1 ; Branch if there is no secure context + PUSH {R2,R3} ; Save registers + BL TZ_LoadContext_S ; Load secure context + POP {R2,R3} ; Restore registers +#endif + +Sys_ContextRestore1 + MOV R1,R2 + ADDS R1,R1,#TCB_SF_OFS ; Adjust offset + LDRB R0,[R1] ; Load stack frame information + MOVS R1,#0xFF + MVNS R1,R1 ; R1=0xFFFFFF00 + ORRS R0,R1 + MOV LR,R0 ; Set EXC_RETURN + +#ifdef __DOMAIN_NS + LSLS R0,R0,#25 ; Check domain of interrupted thread + BPL Sys_ContextRestore2 ; Branch if non-secure + LDR R0,[R2,#TCB_SP_OFS] ; Load SP + MSR PSP,R0 ; Set PSP + BX LR ; Exit from handler +#else + LDR R0,[R2,#TCB_SM_OFS] ; Load stack memory base + MSR PSPLIM,R0 ; Set PSPLIM +#endif + +Sys_ContextRestore2 + LDR R0,[R2,#TCB_SP_OFS] ; Load SP + ADDS R0,R0,#16 ; Adjust address + LDMIA R0!,{R4-R7} ; Restore R8..R11 + MOV R8,R4 + MOV R9,R5 + MOV R10,R6 + MOV R11,R7 + MSR PSP,R0 ; Set PSP + SUBS R0,R0,#32 ; Adjust address + LDMIA R0!,{R4-R7} ; Restore R4..R7 + +Sys_ContextExit + BX LR ; Exit from handler + + END diff --git a/rtos/TARGET_CORTEX/rtx5/TARGET_M33/TOOLCHAIN_GCC/irq_armv8mml.S b/rtos/TARGET_CORTEX/rtx5/TARGET_M33/TOOLCHAIN_GCC/irq_armv8mml.S new file mode 100644 index 0000000..c01721b --- /dev/null +++ b/rtos/TARGET_CORTEX/rtx5/TARGET_M33/TOOLCHAIN_GCC/irq_armv8mml.S @@ -0,0 +1,286 @@ +/* + * Copyright (c) 2016-2017 ARM Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * ----------------------------------------------------------------------------- + * + * Project: CMSIS-RTOS RTX + * Title: ARMv8M Mainline Exception handlers + * + * ----------------------------------------------------------------------------- + */ + + + .file "irq_armv8mml.S" + .syntax unified + + .ifndef __DOMAIN_NS + .equ __DOMAIN_NS, 0 + .endif + + .ifndef __FPU_USED + .equ __FPU_USED, 0 + .endif + + .equ I_T_RUN_OFS, 28 // osRtxInfo.thread.run offset + .equ TCB_SM_OFS, 48 // TCB.stack_mem offset + .equ TCB_SP_OFS, 56 // TCB.SP offset + .equ TCB_SF_OFS, 34 // TCB.stack_frame offset + .equ TCB_TZM_OFS, 64 // TCB.tz_memory offset + + .section ".rodata" + .global irqRtxLib // Non weak library reference +irqRtxLib: + .byte 0 + + + .thumb + .section ".text" + .align 2 + + + .thumb_func + .type SVC_Handler, %function + .global SVC_Handler + .fnstart + .cantunwind +SVC_Handler: + + MRS R0,PSP // Get PSP + LDR R1,[R0,#24] // Load saved PC from stack + LDRB R1,[R1,#-2] // Load SVC number + CMP R1,#0 + BNE SVC_User // Branch if not SVC 0 + + PUSH {R0,LR} // Save PSP and EXC_RETURN + LDM R0,{R0-R3,R12} // Load function parameters and address from stack + BLX R12 // Call service function + POP {R12,LR} // Restore PSP and EXC_RETURN + STM R12,{R0-R1} // Store function return values + +SVC_Context: + LDR R3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run + LDM R3,{R1,R2} // Load osRtxInfo.thread.run: curr & next + CMP R1,R2 // Check if thread switch is required + IT EQ + BXEQ LR // Exit when threads are the same + + .if __FPU_USED == 1 + CBNZ R1,SVC_ContextSave // Branch if running thread is not deleted + TST LR,#0x10 // Check if extended stack frame + BNE SVC_ContextSwitch + LDR R1,=0xE000EF34 // FPCCR Address + LDR R0,[R1] // Load FPCCR + BIC R0,#1 // Clear LSPACT (Lazy state) + STR R0,[R1] // Store FPCCR + B SVC_ContextSwitch + .else + CBZ R1,SVC_ContextSwitch // Branch if running thread is deleted + .endif + +SVC_ContextSave: + .if __DOMAIN_NS == 1 + LDR R0,[R1,#TCB_TZM_OFS] // Load TrustZone memory identifier + CBZ R0,SVC_ContextSave1 // Branch if there is no secure context + PUSH {R1,R2,R3,LR} // Save registers and EXC_RETURN + BL TZ_StoreContext_S // Store secure context + POP {R1,R2,R3,LR} // Restore registers and EXC_RETURN + .endif + +SVC_ContextSave1: + MRS R0,PSP // Get PSP + STMDB R0!,{R4-R11} // Save R4..R11 + .if __FPU_USED == 1 + TST LR,#0x10 // Check if extended stack frame + IT EQ + VSTMDBEQ R0!,{S16-S31} // Save VFP S16.S31 + .endif + +SVC_ContextSave2: + STR R0,[R1,#TCB_SP_OFS] // Store SP + STRB LR,[R1,#TCB_SF_OFS] // Store stack frame information + +SVC_ContextSwitch: + STR R2,[R3] // osRtxInfo.thread.run: curr = next + +SVC_ContextRestore: + .if __DOMAIN_NS == 1 + LDR R0,[R2,#TCB_TZM_OFS] // Load TrustZone memory identifier + CBZ R0,SVC_ContextRestore1 // Branch if there is no secure context + PUSH {R2,R3} // Save registers + BL TZ_LoadContext_S // Load secure context + POP {R2,R3} // Restore registers + .endif + +SVC_ContextRestore1: + LDR R0,[R2,#TCB_SM_OFS] // Load stack memory base + LDRB R1,[R2,#TCB_SF_OFS] // Load stack frame information + MSR PSPLIM,R0 // Set PSPLIM + LDR R0,[R2,#TCB_SP_OFS] // Load SP + ORR LR,R1,#0xFFFFFF00 // Set EXC_RETURN + + .if __DOMAIN_NS == 1 + TST LR,#0x40 // Check domain of interrupted thread + BNE SVC_ContextRestore2 // Branch if secure + .endif + + .if __FPU_USED == 1 + TST LR,#0x10 // Check if extended stack frame + IT EQ + VLDMIAEQ R0!,{S16-S31} // Restore VFP S16..S31 + .endif + LDMIA R0!,{R4-R11} // Restore R4..R11 + 
+SVC_ContextRestore2: + MSR PSP,R0 // Set PSP + +SVC_Exit: + BX LR // Exit from handler + +SVC_User: + PUSH {R4,LR} // Save registers + LDR R2,=osRtxUserSVC // Load address of SVC table + LDR R3,[R2] // Load SVC maximum number + CMP R1,R3 // Check SVC number range + BHI SVC_Done // Branch if out of range + + LDR R4,[R2,R1,LSL #2] // Load address of SVC function + + LDM R0,{R0-R3} // Load function parameters from stack + BLX R4 // Call service function + MRS R4,PSP // Get PSP + STR R0,[R4] // Store function return value + +SVC_Done: + POP {R4,PC} // Return from handler + + .fnend + .size SVC_Handler, .-SVC_Handler + + + .thumb_func + .type PendSV_Handler, %function + .global PendSV_Handler + .fnstart + .cantunwind +PendSV_Handler: + + PUSH {R4,LR} // Save EXC_RETURN + BL osRtxPendSV_Handler // Call osRtxPendSV_Handler + POP {R4,LR} // Restore EXC_RETURN + B Sys_Context + + .fnend + .size PendSV_Handler, .-PendSV_Handler + + + .thumb_func + .type SysTick_Handler, %function + .global SysTick_Handler + .fnstart + .cantunwind +SysTick_Handler: + + PUSH {R4,LR} // Save EXC_RETURN + BL osRtxTick_Handler // Call osRtxTick_Handler + POP {R4,LR} // Restore EXC_RETURN + B Sys_Context + + .fnend + .size SysTick_Handler, .-SysTick_Handler + + + .thumb_func + .type Sys_Context, %function + .global Sys_Context + .fnstart + .cantunwind +Sys_Context: + + LDR R3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run + LDM R3,{R1,R2} // Load osRtxInfo.thread.run: curr & next + CMP R1,R2 // Check if thread switch is required + IT EQ + BXEQ LR // Exit when threads are the same + +Sys_ContextSave: + .if __DOMAIN_NS == 1 + LDR R0,[R1,#TCB_TZM_OFS] // Load TrustZone memory identifier + CBZ R0,Sys_ContextSave1 // Branch if there is no secure context + PUSH {R1,R2,R3,LR} // Save registers and EXC_RETURN + BL TZ_StoreContext_S // Store secure context + POP {R1,R2,R3,LR} // Restore registers and EXC_RETURN + TST LR,#0x40 // Check domain of interrupted thread + IT NE + MRSNE R0,PSP // Get PSP + BNE Sys_ContextSave2 // Branch if secure + .endif + +Sys_ContextSave1: + MRS R0,PSP // Get PSP + STMDB R0!,{R4-R11} // Save R4..R11 + .if __FPU_USED == 1 + TST LR,#0x10 // Check if extended stack frame + IT EQ + VSTMDBEQ R0!,{S16-S31} // Save VFP S16.S31 + .endif + +Sys_ContextSave2: + STR R0,[R1,#TCB_SP_OFS] // Store SP + STRB LR,[R1,#TCB_SF_OFS] // Store stack frame information + +Sys_ContextSwitch: + STR R2,[R3] // osRtxInfo.run: curr = next + +Sys_ContextRestore: + .if __DOMAIN_NS == 1 + LDR R0,[R2,#TCB_TZM_OFS] // Load TrustZone memory identifier + CBZ R0,Sys_ContextRestore1 // Branch if there is no secure context + PUSH {R2,R3} // Save registers + BL TZ_LoadContext_S // Load secure context + POP {R2,R3} // Restore registers + .endif + +Sys_ContextRestore1: + LDR R0,[R2,#TCB_SM_OFS] // Load stack memory base + LDRB R1,[R2,#TCB_SF_OFS] // Load stack frame information + MSR PSPLIM,R0 // Set PSPLIM + LDR R0,[R2,#TCB_SP_OFS] // Load SP + ORR LR,R1,#0xFFFFFF00 // Set EXC_RETURN + + .if __DOMAIN_NS == 1 + TST LR,#0x40 // Check domain of interrupted thread + BNE Sys_ContextRestore2 // Branch if secure + .endif + + .if __FPU_USED == 1 + TST LR,#0x10 // Check if extended stack frame + IT EQ + VLDMIAEQ R0!,{S16-S31} // Restore VFP S16..S31 + .endif + LDMIA R0!,{R4-R11} // Restore R4..R11 + +Sys_ContextRestore2: + MSR PSP,R0 // Set PSP + +Sys_ContextExit: + BX LR // Exit from handler + + .fnend + .size Sys_Context, .-Sys_Context + + + .end diff --git a/tools/targets/__init__.py b/tools/targets/__init__.py index f4ff1cd..ee4e9ba 
100644 --- a/tools/targets/__init__.py +++ b/tools/targets/__init__.py @@ -41,7 +41,11 @@ "Cortex-M7" : ["M7", "CORTEX_M", "RTOS_M4_M7", "LIKE_CORTEX_M7", "CORTEX"], "Cortex-M7F" : ["M7", "CORTEX_M", "RTOS_M4_M7", "LIKE_CORTEX_M7", "CORTEX"], "Cortex-M7FD" : ["M7", "CORTEX_M", "RTOS_M4_M7", "LIKE_CORTEX_M7", "CORTEX"], - "Cortex-A9" : ["A9", "CORTEX_A", "LIKE_CORTEX_A9", "CORTEX"] + "Cortex-A9" : ["A9", "CORTEX_A", "LIKE_CORTEX_A9", "CORTEX"], + "Cortex-M23": ["M23", "CORTEX_M", "LIKE_CORTEX_M23", "CORTEX"], + "Cortex-M23-NS": ["M23", "CORTEX_M", "LIKE_CORTEX_M23", "CORTEX"], + "Cortex-M33": ["M33", "CORTEX_M", "LIKE_CORTEX_M33", "CORTEX"], + "Cortex-M33-NS": ["M33", "CORTEX_M", "LIKE_CORTEX_M33", "CORTEX"] } ################################################################################ diff --git a/tools/toolchains/__init__.py b/tools/toolchains/__init__.py index fc98c19..4f168fd 100644 --- a/tools/toolchains/__init__.py +++ b/tools/toolchains/__init__.py @@ -332,6 +332,10 @@ "Cortex-M7F" : ["__CORTEX_M7", "ARM_MATH_CM7", "__FPU_PRESENT=1", "__CMSIS_RTOS", "__MBED_CMSIS_RTOS_CM"], "Cortex-M7FD" : ["__CORTEX_M7", "ARM_MATH_CM7", "__FPU_PRESENT=1", "__CMSIS_RTOS", "__MBED_CMSIS_RTOS_CM"], "Cortex-A9" : ["__CORTEX_A9", "ARM_MATH_CA9", "__FPU_PRESENT", "__CMSIS_RTOS", "__EVAL", "__MBED_CMSIS_RTOS_CA9"], + "Cortex-M23-NS": ["__CORTEX_M23", "ARM_MATH_ARMV8MBL", "__DOMAIN_NS=1", "__CMSIS_RTOS", "__MBED_CMSIS_RTOS_CM"], + "Cortex-M23": ["__CORTEX_M23", "ARM_MATH_ARMV8MBL", "__CMSIS_RTOS", "__MBED_CMSIS_RTOS_CM"], + "Cortex-M33-NS": ["__CORTEX_M33", "ARM_MATH_ARMV8MML", "__DOMAIN_NS=1", "__FPU_PRESENT", "__CMSIS_RTOS", "__MBED_CMSIS_RTOS_CM"], + "Cortex-M33": ["__CORTEX_M33", "ARM_MATH_ARMV8MML", "__FPU_PRESENT", "__CMSIS_RTOS", "__MBED_CMSIS_RTOS_CM"], } MBED_CONFIG_FILE_NAME="mbed_config.h" diff --git a/tools/toolchains/gcc.py b/tools/toolchains/gcc.py index 2eb65dc..df4a242 100644 --- a/tools/toolchains/gcc.py +++ b/tools/toolchains/gcc.py @@ -56,6 +56,10 @@ cpu = "cortex-m7" elif target.core == "Cortex-M7FD": cpu = "cortex-m7" + elif target.core == "Cortex-M23-NS": + cpu = "cortex-m23" + elif target.core == "Cortex-M33-NS": + cpu = "cortex-m33" else: cpu = target.core.lower() @@ -82,6 +86,11 @@ self.cpu.append("-mfloat-abi=hard") self.cpu.append("-mno-unaligned-access") + if target.core == "Cortex-M23" or target.core == "Cortex-M23-NS": + self.cpu.append("-march=armv8-m.base") + elif target.core == "Cortex-M33" or target.core == "Cortex-M33-NS": + self.cpu.append("-march=armv8-m.main") + self.flags["common"] += self.cpu main_cc = join(tool_path, "arm-none-eabi-gcc") diff --git a/tools/toolchains/iar.py b/tools/toolchains/iar.py index 4e0d8e4..2a8e73f 100644 --- a/tools/toolchains/iar.py +++ b/tools/toolchains/iar.py @@ -45,29 +45,19 @@ build_profile=build_profile) if target.core == "Cortex-M7F" or target.core == "Cortex-M7FD": cpuchoice = "Cortex-M7" + elif target.core == "Cortex-M23-NS" or target.core == "Cortex-M23": + cpuchoice = "8-M.baseline" + elif target.core == "Cortex-M33-NS" or target.core == "Cortex-M33": + cpuchoice = "8-M.mainline" else: cpuchoice = target.core # flags_cmd are used only by our scripts, the project files have them already defined, # using this flags results in the errors (duplication) # asm accepts --cpu Core or --fpu FPU, not like c/c++ --cpu=Core - if target.core == "Cortex-M4F": - asm_flags_cmd = [ - "--cpu", "Cortex-M4F" - ] - else: - asm_flags_cmd = [ - "--cpu", cpuchoice - ] + asm_flags_cmd = ["--cpu", cpuchoice] # custom c flags - if target.core == 
"Cortex-M4F": - c_flags_cmd = [ - "--cpu", "Cortex-M4F" - ] - else: - c_flags_cmd = [ - "--cpu", cpuchoice - ] + c_flags_cmd = ["--cpu", cpuchoice] c_flags_cmd.extend([ "--thumb", "--dlib_config", "DLib_Config_Full.h" @@ -82,6 +72,8 @@ elif target.core == "Cortex-M7F": asm_flags_cmd += ["--fpu", "VFPv5_sp"] c_flags_cmd.append("--fpu=VFPv5_sp") + elif target.core == "Cortex-M23" or target.core == "Cortex-M33": + self.flags["asm"] += ["--cmse"] IAR_BIN = join(TOOLCHAIN_PATHS['IAR'], "bin") main_cc = join(IAR_BIN, "iccarm")