diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h
index 9d4ad3b..10fe926 100644
--- a/include/arch/aarch64/arch.h
+++ b/include/arch/aarch64/arch.h
@@ -211,6 +211,17 @@
 #define PARANGE_0101	U(48)
 #define PARANGE_0110	U(52)
 
+#define ID_AA64MMFR0_EL1_ECV_SHIFT		U(60)
+#define ID_AA64MMFR0_EL1_ECV_MASK		ULL(0xf)
+#define ID_AA64MMFR0_EL1_ECV_NOT_SUPPORTED	ULL(0x0)
+#define ID_AA64MMFR0_EL1_ECV_SUPPORTED		ULL(0x1)
+#define ID_AA64MMFR0_EL1_ECV_SELF_SYNCH		ULL(0x2)
+
+#define ID_AA64MMFR0_EL1_FGT_SHIFT		U(56)
+#define ID_AA64MMFR0_EL1_FGT_MASK		ULL(0xf)
+#define ID_AA64MMFR0_EL1_FGT_SUPPORTED		ULL(0x1)
+#define ID_AA64MMFR0_EL1_FGT_NOT_SUPPORTED	ULL(0x0)
+
 #define ID_AA64MMFR0_EL1_TGRAN4_SHIFT		U(28)
 #define ID_AA64MMFR0_EL1_TGRAN4_MASK		ULL(0xf)
 #define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED	ULL(0x0)
@@ -324,6 +335,8 @@
 #define SCR_TWEDEL_SHIFT	U(30)
 #define SCR_TWEDEL_MASK		ULL(0xf)
 #define SCR_TWEDEn_BIT		(UL(1) << 29)
+#define SCR_ECVEN_BIT		(U(1) << 28)
+#define SCR_FGTEN_BIT		(U(1) << 27)
 #define SCR_ATA_BIT		(U(1) << 26)
 #define SCR_FIEN_BIT		(U(1) << 21)
 #define SCR_EEL2_BIT		(U(1) << 18)
diff --git a/include/arch/aarch64/arch_features.h b/include/arch/aarch64/arch_features.h
index 321485a..6b5d326 100644
--- a/include/arch/aarch64/arch_features.h
+++ b/include/arch/aarch64/arch_features.h
@@ -64,6 +64,18 @@
 		ID_AA64MMFR1_EL1_TWED_MASK) == ID_AA64MMFR1_EL1_TWED_SUPPORTED);
 }
 
+static inline bool is_armv8_6_fgt_present(void)
+{
+	return ((read_id_aa64mmfr0_el1() >> ID_AA64MMFR0_EL1_FGT_SHIFT) &
+		ID_AA64MMFR0_EL1_FGT_MASK) == ID_AA64MMFR0_EL1_FGT_SUPPORTED;
+}
+
+static inline unsigned long int get_armv8_6_ecv_support(void)
+{
+	return ((read_id_aa64mmfr0_el1() >> ID_AA64MMFR0_EL1_ECV_SHIFT) &
+		ID_AA64MMFR0_EL1_ECV_MASK);
+}
+
 /*
  * Return MPAM version:
  *
diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c
index 64a2d7b..53b4ea3 100644
--- a/lib/el3_runtime/aarch64/context_mgmt.c
+++ b/lib/el3_runtime/aarch64/context_mgmt.c
@@ -173,11 +173,26 @@
 	 * SCR_EL3.HCE: Enable HVC instructions if next execution state is
 	 * AArch64 and next EL is EL2, or if next execution state is AArch32 and
 	 * next mode is Hyp.
+	 * SCR_EL3.FGTEn: Enable Fine Grained Virtualization Traps under the
+	 * same conditions as HVC instructions and when the processor supports
+	 * ARMv8.6-FGT.
+	 * SCR_EL3.ECVEn: Enable Enhanced Counter Virtualization (ECV)
+	 * CNTPOFF_EL2 register under the same conditions as HVC instructions
+	 * and when the processor supports ECV.
 	 */
 	if (((GET_RW(ep->spsr) == MODE_RW_64) && (GET_EL(ep->spsr) == MODE_EL2))
 	    || ((GET_RW(ep->spsr) != MODE_RW_64)
		&& (GET_M32(ep->spsr) == MODE32_hyp))) {
 		scr_el3 |= SCR_HCE_BIT;
+
+		if (is_armv8_6_fgt_present()) {
+			scr_el3 |= SCR_FGTEN_BIT;
+		}
+
+		if (get_armv8_6_ecv_support()
+		    == ID_AA64MMFR0_EL1_ECV_SELF_SYNCH) {
+			scr_el3 |= SCR_ECVEN_BIT;
+		}
 	}
 
 	/* Enable S-EL2 if the next EL is EL2 and security state is secure */
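
For illustration, here is a minimal host-side sketch of the detection and
enablement logic the patch adds. It stubs read_id_aa64mmfr0_el1() with a
hypothetical ID register value instead of the real MRS read, and redefines
the shift/mask constants locally so it compiles standalone; the actual code
uses the U()/ULL() macros and system register accessors from the TF-A tree.

#include <stdint.h>
#include <stdio.h>

/* ID_AA64MMFR0_EL1 field positions, mirroring the arch.h hunk above. */
#define ECV_SHIFT        60
#define ECV_MASK         0xfULL
#define ECV_SELF_SYNCH   0x2ULL
#define FGT_SHIFT        56
#define FGT_MASK         0xfULL
#define FGT_SUPPORTED    0x1ULL

/* SCR_EL3 bit positions, mirroring the second arch.h hunk. */
#define SCR_ECVEN_BIT    (1ULL << 28)
#define SCR_FGTEN_BIT    (1ULL << 27)

/* Stub: on real hardware this is an MRS of ID_AA64MMFR0_EL1. */
static uint64_t read_id_aa64mmfr0_el1(void)
{
	/* Hypothetical value: FGT = 0b0001, ECV = 0b0010 (self-synch). */
	return (ECV_SELF_SYNCH << ECV_SHIFT) | (FGT_SUPPORTED << FGT_SHIFT);
}

int main(void)
{
	uint64_t mmfr0 = read_id_aa64mmfr0_el1();
	uint64_t scr_el3 = 0;

	/* FGTEn is set whenever the FGT field reports support... */
	if (((mmfr0 >> FGT_SHIFT) & FGT_MASK) == FGT_SUPPORTED)
		scr_el3 |= SCR_FGTEN_BIT;

	/* ...but ECVEn requires the self-synchronized ECV variant. */
	if (((mmfr0 >> ECV_SHIFT) & ECV_MASK) == ECV_SELF_SYNCH)
		scr_el3 |= SCR_ECVEN_BIT;

	printf("scr_el3 = 0x%llx\n", (unsigned long long)scr_el3);
	return 0;
}

Note the asymmetry between the two checks: FGTEn only needs FGT to be
reported as supported, whereas ECVEn is gated on ID_AA64MMFR0_EL1.ECV ==
0b0010, because the CNTPOFF_EL2 register that ECVEn exposes to EL2 is only
implemented from the self-synchronized variant of FEAT_ECV onwards.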