diff --git a/bl31/ehf.c b/bl31/ehf.c
index 8673564..3d6d674 100644
--- a/bl31/ehf.c
+++ b/bl31/ehf.c
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include

 /* Output EHF logs as verbose */
 #define EHF_LOG(...)	VERBOSE("EHF: " __VA_ARGS__)
@@ -26,43 +27,44 @@

 /* For a valid handler, return the actual function pointer; otherwise, 0. */
 #define RAW_HANDLER(h) \
-	((ehf_handler_t) ((h & _EHF_PRI_VALID) ? (h & ~_EHF_PRI_VALID) : 0))
+	((ehf_handler_t) ((((h) & EHF_PRI_VALID_) != 0U) ? \
+		((h) & ~EHF_PRI_VALID_) : 0U))

-#define PRI_BIT(idx)	(((ehf_pri_bits_t) 1) << idx)
+#define PRI_BIT(idx)	(((ehf_pri_bits_t) 1u) << (idx))

 /*
  * Convert index into secure priority using the platform-defined priority bits
  * field.
  */
 #define IDX_TO_PRI(idx) \
-	((idx << (7 - exception_data.pri_bits)) & 0x7f)
+	((((unsigned) idx) << (7u - exception_data.pri_bits)) & 0x7fU)

 /* Check whether a given index is valid */
 #define IS_IDX_VALID(idx) \
-	((exception_data.ehf_priorities[idx].ehf_handler & _EHF_PRI_VALID) != 0)
+	((exception_data.ehf_priorities[idx].ehf_handler & EHF_PRI_VALID_) != 0U)

 /* Returns whether given priority is in secure priority range */
-#define IS_PRI_SECURE(pri)	((pri & 0x80) == 0)
+#define IS_PRI_SECURE(pri)	(((pri) & 0x80U) == 0U)

 /* To be defined by the platform */
 extern const ehf_priorities_t exception_data;

 /* Translate priority to the index in the priority array */
-static int pri_to_idx(unsigned int priority)
+static unsigned int pri_to_idx(unsigned int priority)
 {
-	int idx;
+	unsigned int idx;

 	idx = EHF_PRI_TO_IDX(priority, exception_data.pri_bits);
-	assert((idx >= 0) && (idx < exception_data.num_priorities));
+	assert(idx < exception_data.num_priorities);
 	assert(IS_IDX_VALID(idx));

 	return idx;
 }

 /* Return whether there are outstanding priority activation */
-static int has_valid_pri_activations(pe_exc_data_t *pe_data)
+static bool has_valid_pri_activations(pe_exc_data_t *pe_data)
 {
-	return pe_data->active_pri_bits != 0;
+	return pe_data->active_pri_bits != 0U;
 }

 static pe_exc_data_t *this_cpu_data(void)
@@ -80,7 +82,7 @@
 		return EHF_INVALID_IDX;

 	/* Current priority is the right-most bit */
-	return __builtin_ctz(pe_data->active_pri_bits);
+	return (int) __builtin_ctz(pe_data->active_pri_bits);
 }

 /*
@@ -95,8 +97,8 @@
  */
 void ehf_activate_priority(unsigned int priority)
 {
-	int idx, cur_pri_idx;
-	unsigned int old_mask, run_pri;
+	int cur_pri_idx;
+	unsigned int old_mask, run_pri, idx;
 	pe_exc_data_t *pe_data = this_cpu_data();

 	/*
@@ -118,7 +120,8 @@
 	 */
 	cur_pri_idx = get_pe_highest_active_idx(pe_data);
 	idx = pri_to_idx(priority);
-	if ((cur_pri_idx != EHF_INVALID_IDX) && (idx >= cur_pri_idx)) {
+	if ((cur_pri_idx != EHF_INVALID_IDX) &&
+			(idx >= ((unsigned int) cur_pri_idx))) {
 		ERROR("Activation priority mismatch: req=0x%x current=0x%x\n",
 				priority, IDX_TO_PRI(cur_pri_idx));
 		panic();
@@ -144,7 +147,7 @@
 	 * restored after the last deactivation.
 	 */
 	if (cur_pri_idx == EHF_INVALID_IDX)
-		pe_data->init_pri_mask = old_mask;
+		pe_data->init_pri_mask = (uint8_t) old_mask;

 	EHF_LOG("activate prio=%d\n", get_pe_highest_active_idx(pe_data));
 }
@@ -161,9 +164,9 @@
  */
 void ehf_deactivate_priority(unsigned int priority)
 {
-	int idx, cur_pri_idx;
+	int cur_pri_idx;
 	pe_exc_data_t *pe_data = this_cpu_data();
-	unsigned int old_mask, run_pri;
+	unsigned int old_mask, run_pri, idx;

 	/*
 	 * Query interrupt controller for the running priority, or idle priority
@@ -184,21 +187,22 @@
 	 */
 	cur_pri_idx = get_pe_highest_active_idx(pe_data);
 	idx = pri_to_idx(priority);
-	if ((cur_pri_idx == EHF_INVALID_IDX) || (idx != cur_pri_idx)) {
+	if ((cur_pri_idx == EHF_INVALID_IDX) ||
+			(idx != ((unsigned int) cur_pri_idx))) {
 		ERROR("Deactivation priority mismatch: req=0x%x current=0x%x\n",
 				priority, IDX_TO_PRI(cur_pri_idx));
 		panic();
 	}

 	/* Clear bit corresponding to highest priority */
-	pe_data->active_pri_bits &= (pe_data->active_pri_bits - 1);
+	pe_data->active_pri_bits &= (pe_data->active_pri_bits - 1u);

 	/*
 	 * Restore priority mask corresponding to the next priority, or the
 	 * one stashed earlier if there are no more to deactivate.
 	 */
-	idx = get_pe_highest_active_idx(pe_data);
-	if (idx == EHF_INVALID_IDX)
+	cur_pri_idx = get_pe_highest_active_idx(pe_data);
+	if (cur_pri_idx == EHF_INVALID_IDX)
 		old_mask = plat_ic_set_priority_mask(pe_data->init_pri_mask);
 	else
 		old_mask = plat_ic_set_priority_mask(priority);
@@ -231,16 +235,16 @@
 	/* If the running priority is in the secure range, do nothing */
 	run_pri = plat_ic_get_running_priority();
 	if (IS_PRI_SECURE(run_pri))
-		return 0;
+		return NULL;

 	/* Do nothing if there are explicit activations */
 	if (has_valid_pri_activations(pe_data))
-		return 0;
+		return NULL;

-	assert(pe_data->ns_pri_mask == 0);
+	assert(pe_data->ns_pri_mask == 0u);

 	pe_data->ns_pri_mask =
-		plat_ic_set_priority_mask(GIC_HIGHEST_NS_PRIORITY);
+		(uint8_t) plat_ic_set_priority_mask(GIC_HIGHEST_NS_PRIORITY);

 	/* The previous Priority Mask is not expected to be in secure range */
 	if (IS_PRI_SECURE(pe_data->ns_pri_mask)) {
@@ -252,7 +256,7 @@
 	EHF_LOG("Priority Mask: 0x%x => 0x%x\n", pe_data->ns_pri_mask,
 			GIC_HIGHEST_NS_PRIORITY);

-	return 0;
+	return NULL;
 }

 /*
@@ -274,18 +278,18 @@
 	/* If the running priority is in the secure range, do nothing */
 	run_pri = plat_ic_get_running_priority();
 	if (IS_PRI_SECURE(run_pri))
-		return 0;
+		return NULL;

 	/*
 	 * If there are explicit activations, do nothing. The Priority Mask will
 	 * be restored upon the last deactivation.
 	 */
 	if (has_valid_pri_activations(pe_data))
-		return 0;
+		return NULL;

 	/* Do nothing if we don't have a valid Priority Mask to restore */
-	if (pe_data->ns_pri_mask == 0)
-		return 0;
+	if (pe_data->ns_pri_mask == 0U)
+		return NULL;

 	old_pmr = plat_ic_set_priority_mask(pe_data->ns_pri_mask);
@@ -304,7 +308,7 @@

 	pe_data->ns_pri_mask = 0;

-	return 0;
+	return NULL;
 }

 /*
@@ -328,7 +332,7 @@
 	 * We should have been notified earlier of entering secure world, and
 	 * therefore have stashed the Non-secure priority mask.
 	 */
-	assert(pe_data->ns_pri_mask != 0);
+	assert(pe_data->ns_pri_mask != 0U);

 	/* Make sure no priority levels are active when requesting this */
 	if (has_valid_pri_activations(pe_data)) {
@@ -343,7 +347,7 @@
 	 * to populate it, the caller would find the correct return value.
 	 */
 	ns_ctx = cm_get_context(NON_SECURE);
-	assert(ns_ctx);
+	assert(ns_ctx != NULL);
 	write_ctx_reg(get_gpregs_ctx(ns_ctx), CTX_GPREG_X0, preempt_ret_code);

 	old_pmr = plat_ic_set_priority_mask(pe_data->ns_pri_mask);
@@ -376,7 +380,7 @@
 	 */
 	if (has_valid_pri_activations(pe_data))
 		return 0;
-	if (pe_data->ns_pri_mask != 0)
+	if (pe_data->ns_pri_mask != 0U)
 		return 0;

 	return 1;
@@ -388,7 +392,9 @@
 static uint64_t ehf_el3_interrupt_handler(uint32_t id, uint32_t flags,
 		void *handle, void *cookie)
 {
-	int pri, idx, intr, intr_raw, ret = 0;
+	int ret = 0;
+	uint32_t intr_raw;
+	unsigned int intr, pri, idx;
 	ehf_handler_t handler;

 	/*
@@ -425,8 +431,9 @@
 	/* Validate priority */
 	assert(pri == IDX_TO_PRI(idx));

-	handler = RAW_HANDLER(exception_data.ehf_priorities[idx].ehf_handler);
-	if (!handler) {
+	handler = (ehf_handler_t) RAW_HANDLER(
+			exception_data.ehf_priorities[idx].ehf_handler);
+	if (handler == NULL) {
 		ERROR("No EL3 exception handler for priority 0x%x\n",
 				IDX_TO_PRI(idx));
 		panic();
@@ -438,7 +445,7 @@
 	 */
 	ret = handler(intr_raw, flags, handle, cookie);

-	return ret;
+	return (uint64_t) ret;
 }

 /*
@@ -450,21 +457,22 @@
 	int ret __unused;

 	/* Ensure EL3 interrupts are supported */
-	assert(plat_ic_has_interrupt_type(INTR_TYPE_EL3));
+	assert(plat_ic_has_interrupt_type(INTR_TYPE_EL3) != 0);

 	/*
 	 * Make sure that priority water mark has enough bits to represent the
 	 * whole priority array.
 	 */
-	assert(exception_data.num_priorities <= (sizeof(ehf_pri_bits_t) * 8));
+	assert(exception_data.num_priorities <= (sizeof(ehf_pri_bits_t) * 8U));

-	assert(exception_data.ehf_priorities);
+	assert(exception_data.ehf_priorities != NULL);

 	/*
 	 * Bit 7 of GIC priority must be 0 for secure interrupts. This means
 	 * platforms must use at least 1 of the remaining 7 bits.
 	 */
-	assert((exception_data.pri_bits >= 1) || (exception_data.pri_bits < 8));
+	assert((exception_data.pri_bits >= 1U) ||
+			(exception_data.pri_bits < 8U));

 	/* Route EL3 interrupts when in Secure and Non-secure. */
 	set_interrupt_rm_flag(flags, NON_SECURE);
@@ -484,13 +492,13 @@
  */
 void ehf_register_priority_handler(unsigned int pri, ehf_handler_t handler)
 {
-	int idx;
+	unsigned int idx;

 	/* Sanity check for handler */
 	assert(handler != NULL);

 	/* Handler ought to be 4-byte aligned */
-	assert((((uintptr_t) handler) & 3) == 0);
+	assert((((uintptr_t) handler) & 3U) == 0U);

 	/* Ensure we register for valid priority */
 	idx = pri_to_idx(pri);
@@ -498,7 +506,7 @@
 	assert(IDX_TO_PRI(idx) == pri);

 	/* Return failure if a handler was already registered */
-	if (exception_data.ehf_priorities[idx].ehf_handler != _EHF_NO_HANDLER) {
+	if (exception_data.ehf_priorities[idx].ehf_handler != EHF_NO_HANDLER_) {
 		ERROR("Handler already registered for priority 0x%x\n", pri);
 		panic();
 	}
@@ -508,7 +516,7 @@
 	 * is 4-byte aligned, which is usually the case.
 	 */
 	exception_data.ehf_priorities[idx].ehf_handler =
-		(((uintptr_t) handler) | _EHF_PRI_VALID);
+		(((uintptr_t) handler) | EHF_PRI_VALID_);

 	EHF_LOG("register pri=0x%x handler=%p\n", pri, handler);
 }
diff --git a/include/bl31/ehf.h b/include/bl31/ehf.h
index f963f8d..1446279 100644
--- a/include/bl31/ehf.h
+++ b/include/bl31/ehf.h
@@ -4,8 +4,8 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */

-#ifndef __EHF_H__
-#define __EHF_H__
+#ifndef EHF_H
+#define EHF_H

 #ifndef __ASSEMBLY__

@@ -13,27 +13,27 @@
 #include

 /* Valid priorities set bit 0 of the priority handler. */
-#define _EHF_PRI_VALID	(((uintptr_t) 1) << 0)
+#define EHF_PRI_VALID_	(((uintptr_t) 1) << 0)

 /* Marker for no handler registered for a valid priority */
-#define _EHF_NO_HANDLER	(0 | _EHF_PRI_VALID)
+#define EHF_NO_HANDLER_	(0U | EHF_PRI_VALID_)

 /* Extract the specified number of top bits from 7 lower bits of priority */
 #define EHF_PRI_TO_IDX(pri, plat_bits) \
-	((pri & 0x7f) >> (7 - plat_bits))
+	((((unsigned) (pri)) & 0x7fu) >> (7u - (plat_bits)))

 /* Install exception priority descriptor at a suitable index */
 #define EHF_PRI_DESC(plat_bits, priority) \
 	[EHF_PRI_TO_IDX(priority, plat_bits)] = { \
-		.ehf_handler = _EHF_NO_HANDLER, \
+		.ehf_handler = EHF_NO_HANDLER_, \
 	}

 /* Macro for platforms to regiter its exception priorities */
 #define EHF_REGISTER_PRIORITIES(priorities, num, bits) \
 	const ehf_priorities_t exception_data = { \
-		.num_priorities = num, \
-		.ehf_priorities = priorities, \
-		.pri_bits = bits, \
+		.num_priorities = (num), \
+		.ehf_priorities = (priorities), \
+		.pri_bits = (bits), \
 	}

 /*
@@ -72,10 +72,10 @@
 	uintptr_t ehf_handler;
 } ehf_pri_desc_t;

-typedef struct ehf_priorities {
+typedef struct ehf_priority_type {
 	ehf_pri_desc_t *ehf_priorities;
 	unsigned int num_priorities;
-	int pri_bits;
+	unsigned int pri_bits;
 } ehf_priorities_t;

 void ehf_init(void);
@@ -87,4 +87,4 @@

 #endif /* __ASSEMBLY__ */

-#endif /* __EHF_H__ */
+#endif /* EHF_H */
diff --git a/include/bl31/interrupt_mgmt.h b/include/bl31/interrupt_mgmt.h
index 905dcd6..49ba9f7 100644
--- a/include/bl31/interrupt_mgmt.h
+++ b/include/bl31/interrupt_mgmt.h
@@ -61,10 +61,10 @@
 #define INTR_RM_FROM_SEC_SHIFT		SECURE		/* BIT[0] */
 #define INTR_RM_FROM_NS_SHIFT		NON_SECURE	/* BIT[1] */
 #define INTR_RM_FROM_FLAG_MASK		U(1)
-#define get_interrupt_rm_flag(flag, ss)	(((flag >> INTR_RM_FLAGS_SHIFT) >> ss) \
-					& INTR_RM_FROM_FLAG_MASK)
-#define set_interrupt_rm_flag(flag, ss)	(flag |= U(1) << ss)
-#define clr_interrupt_rm_flag(flag, ss)	(flag &= ~(U(1) << ss))
+#define get_interrupt_rm_flag(flag, ss) \
+	((((flag) >> INTR_RM_FLAGS_SHIFT) >> (ss)) & INTR_RM_FROM_FLAG_MASK)
+#define set_interrupt_rm_flag(flag, ss)	((flag) |= U(1) << (ss))
+#define clr_interrupt_rm_flag(flag, ss)	((flag) &= ~(U(1) << (ss)))

 /*******************************************************************************
@@ -101,9 +101,9 @@
 ******************************************************************************/
 #define INTR_SRC_SS_FLAG_SHIFT		U(0)		/* BIT[0] */
 #define INTR_SRC_SS_FLAG_MASK		U(1)
-#define set_interrupt_src_ss(flag, val)	(flag |= val << INTR_SRC_SS_FLAG_SHIFT)
-#define clr_interrupt_src_ss(flag)	(flag &= ~(U(1) << INTR_SRC_SS_FLAG_SHIFT))
-#define get_interrupt_src_ss(flag)	((flag >> INTR_SRC_SS_FLAG_SHIFT) & \
-					INTR_SRC_SS_FLAG_MASK)
+#define set_interrupt_src_ss(flag, val)	((flag) |= (val) << INTR_SRC_SS_FLAG_SHIFT)
+#define clr_interrupt_src_ss(flag)	((flag) &= ~(U(1) << INTR_SRC_SS_FLAG_SHIFT))
+#define get_interrupt_src_ss(flag)	(((flag) >> INTR_SRC_SS_FLAG_SHIFT) & \
+					INTR_SRC_SS_FLAG_MASK)

 #ifndef __ASSEMBLY__
diff --git a/include/common/param_header.h b/include/common/param_header.h
index c982fc9..4e61fad 100644
--- a/include/common/param_header.h
+++ b/include/common/param_header.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -23,7 +23,7 @@
 #define SET_PARAM_HEAD(_p, _type, _ver, _attr) do { \
 	(_p)->h.type = (uint8_t)(_type); \
 	(_p)->h.version = (uint8_t)(_ver); \
-	(_p)->h.size = (uint16_t)sizeof(*_p); \
+	(_p)->h.size = (uint16_t)sizeof(*(_p)); \
 	(_p)->h.attr = (uint32_t)(_attr) ; \
 } while (0)
diff --git a/include/drivers/arm/gic_common.h b/include/drivers/arm/gic_common.h
index 6e953a0..00cbd1d 100644
--- a/include/drivers/arm/gic_common.h
+++ b/include/drivers/arm/gic_common.h
@@ -7,6 +7,8 @@
 #ifndef __GIC_COMMON_H__
 #define __GIC_COMMON_H__

+#include
+
 /*******************************************************************************
  * GIC Distributor interface general definitions
  ******************************************************************************/
@@ -34,10 +36,10 @@
 #define GIC_INTR_CFG_EDGE	(1 << 1)

 /* Constants to categorise priorities */
-#define GIC_HIGHEST_SEC_PRIORITY	0x0
-#define GIC_LOWEST_SEC_PRIORITY	0x7f
-#define GIC_HIGHEST_NS_PRIORITY	0x80
-#define GIC_LOWEST_NS_PRIORITY	0xfe	/* 0xff would disable all interrupts */
+#define GIC_HIGHEST_SEC_PRIORITY	U(0x00)
+#define GIC_LOWEST_SEC_PRIORITY	U(0x7f)
+#define GIC_HIGHEST_NS_PRIORITY	U(0x80)
+#define GIC_LOWEST_NS_PRIORITY	U(0xfe)	/* 0xff would disable all interrupts */

 /*******************************************************************************
  * GIC Distributor interface register offsets that are common to GICv3 & GICv2
diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h
index a2ce9f8..b990674 100644
--- a/include/lib/el3_runtime/aarch64/context.h
+++ b/include/lib/el3_runtime/aarch64/context.h
@@ -241,9 +241,9 @@
  * Macros to access members of any of the above structures using their
  * offsets
  */
-#define read_ctx_reg(ctx, offset)	((ctx)->_regs[offset >> DWORD_SHIFT])
-#define write_ctx_reg(ctx, offset, val)	(((ctx)->_regs[offset >> DWORD_SHIFT]) \
-					= val)
+#define read_ctx_reg(ctx, offset)	((ctx)->_regs[(offset) >> DWORD_SHIFT])
+#define write_ctx_reg(ctx, offset, val)	(((ctx)->_regs[(offset) >> DWORD_SHIFT]) \
+					= (uint64_t) (val))

 /*
  * Top-level context structure which is used by EL3 firmware to
diff --git a/include/lib/extensions/ras.h b/include/lib/extensions/ras.h
index f57fc3a..400de59 100644
--- a/include/lib/extensions/ras.h
+++ b/include/lib/extensions/ras.h
@@ -4,10 +4,10 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */

-#ifndef __RAS_COMMON__
-#define __RAS_COMMON__
+#ifndef RAS_COMMON
+#define RAS_COMMON

-#define ERR_HANDLER_VERSION	1
+#define ERR_HANDLER_VERSION	1U

 /* Error record access mechanism */
 #define ERR_ACCESS_SYSREG	0
@@ -20,18 +20,18 @@
  * are declared. Only then would ARRAY_SIZE() yield a meaningful value.
  */
 #define REGISTER_ERR_RECORD_INFO(_records) \
-	const struct err_record_mapping err_record_mapping = { \
-		.err_records = _records, \
+	const struct err_record_mapping err_record_mappings = { \
+		.err_records = (_records), \
 		.num_err_records = ARRAY_SIZE(_records), \
 	}

 /* Error record info iterator */
 #define for_each_err_record_info(_i, _info) \
-	for (_i = 0, _info = err_record_mapping.err_records; \
-		_i < err_record_mapping.num_err_records; \
-		_i++, _info++)
+	for ((_i) = 0, (_info) = err_record_mappings.err_records; \
+		(_i) < err_record_mappings.num_err_records; \
+		(_i)++, (_info)++)

-#define _ERR_RECORD_COMMON(_probe, _handler, _aux) \
+#define ERR_RECORD_COMMON_(_probe, _handler, _aux) \
 	.probe = _probe, \
 	.handler = _handler, \
 	.aux_data = _aux,
@@ -42,7 +42,7 @@
 		.sysreg.idx_start = _idx_start, \
 		.sysreg.num_idx = _num_idx, \
 		.access = ERR_ACCESS_SYSREG, \
-		_ERR_RECORD_COMMON(_probe, _handler, _aux) \
+		ERR_RECORD_COMMON_(_probe, _handler, _aux) \
 	}

 #define ERR_RECORD_MEMMAP_V1(_base_addr, _size_num_k, _probe, _handler, _aux) \
@@ -51,7 +51,7 @@
 		.memmap.base_addr = _base_addr, \
 		.memmap.size_num_k = _size_num_k, \
 		.access = ERR_ACCESS_MEMMAP, \
-		_ERR_RECORD_COMMON(_probe, _handler, _aux) \
+		ERR_RECORD_COMMON_(_probe, _handler, _aux) \
 	}

 /*
@@ -63,8 +63,8 @@
  * array is expected to be sorted in the increasing order of interrupt number.
  */
 #define REGISTER_RAS_INTERRUPTS(_array) \
-	const struct ras_interrupt_mapping ras_interrupt_mapping = { \
-		.intrs = _array, \
+	const struct ras_interrupt_mapping ras_interrupt_mappings = { \
+		.intrs = (_array), \
 		.num_intrs = ARRAY_SIZE(_array), \
 	}

@@ -165,8 +165,8 @@
 	size_t num_intrs;
 };

-extern const struct err_record_mapping err_record_mapping;
-extern const struct ras_interrupt_mapping ras_interrupt_mapping;
+extern const struct err_record_mapping err_record_mappings;
+extern const struct ras_interrupt_mapping ras_interrupt_mappings;

 /*
@@ -196,4 +196,4 @@
 void ras_init(void);

 #endif /* __ASSEMBLY__ */

-#endif /* __RAS_COMMON__ */
+#endif /* RAS_COMMON */
diff --git a/include/lib/extensions/ras_arch.h b/include/lib/extensions/ras_arch.h
index 6ec4da8..e6cd736 100644
--- a/include/lib/extensions/ras_arch.h
+++ b/include/lib/extensions/ras_arch.h
@@ -11,28 +11,28 @@
  * Size of nodes implementing Standard Error Records - currently only 4k is
  * supported.
  */
-#define STD_ERR_NODE_SIZE_NUM_K	4
+#define STD_ERR_NODE_SIZE_NUM_K	4U

 /*
  * Individual register offsets within an error record in Standard Error Record
  * format when error records are accessed through memory-mapped registers.
  */
-#define ERR_FR(n)	(0x0 + (64 * (n)))
-#define ERR_CTLR(n)	(0x8 + (64 * (n)))
-#define ERR_STATUS(n)	(0x10 + (64 * (n)))
-#define ERR_ADDR(n)	(0x18 + (64 * (n)))
-#define ERR_MISC0(n)	(0x20 + (64 * (n)))
-#define ERR_MISC1(n)	(0x28 + (64 * (n)))
+#define ERR_FR(n)	(0x0ULL + (64ULL * (n)))
+#define ERR_CTLR(n)	(0x8ULL + (64ULL * (n)))
+#define ERR_STATUS(n)	(0x10ULL + (64ULL * (n)))
+#define ERR_ADDR(n)	(0x18ULL + (64ULL * (n)))
+#define ERR_MISC0(n)	(0x20ULL + (64ULL * (n)))
+#define ERR_MISC1(n)	(0x28ULL + (64ULL * (n)))

 /* Group Status Register (ERR_STATUS) offset */
 #define ERR_GSR(base, size_num_k, n) \
-	((base) + (0x380 * (size_num_k)) + (8 * (n)))
+	((base) + (0x380ULL * (size_num_k)) + (8ULL * (n)))

 /* Management register offsets */
 #define ERR_DEVID(base, size_num_k) \
-	((base) + ((0x400 * (size_num_k)) - 0x100) + 0xc8)
+	((base) + ((0x400ULL * (size_num_k)) - 0x100ULL) + 0xc8ULL)

-#define ERR_DEVID_MASK	0xffff
+#define ERR_DEVID_MASK	0xffffUL

 /* Standard Error Record status register fields */
 #define ERR_STATUS_AV_SHIFT	31
@@ -244,7 +244,8 @@
  */
 static inline void ser_sys_select_record(unsigned int idx)
 {
-	unsigned int max_idx __unused = read_erridr_el1() & ERRIDR_MASK;
+	unsigned int max_idx __unused =
+		(unsigned int) read_erridr_el1() & ERRIDR_MASK;

 	assert(idx < max_idx);
diff --git a/include/services/sdei.h b/include/services/sdei.h
index 79d1d06..4d0fd3f 100644
--- a/include/services/sdei.h
+++ b/include/services/sdei.h
@@ -4,61 +4,56 @@
  * SPDX-License-Identifier: BSD-3-Clause
  */

-#ifndef __SDEI_H__
-#define __SDEI_H__
+#ifndef SDEI_H
+#define SDEI_H

 #include
 #include

 /* Range 0xC4000020 - 0xC400003F reserved for SDE 64bit smc calls */
-#define SDEI_VERSION				0xC4000020
-#define SDEI_EVENT_REGISTER			0xC4000021
-#define SDEI_EVENT_ENABLE			0xC4000022
-#define SDEI_EVENT_DISABLE			0xC4000023
-#define SDEI_EVENT_CONTEXT			0xC4000024
-#define SDEI_EVENT_COMPLETE			0xC4000025
-#define SDEI_EVENT_COMPLETE_AND_RESUME		0xC4000026
+#define SDEI_VERSION				0xC4000020U
+#define SDEI_EVENT_REGISTER			0xC4000021U
+#define SDEI_EVENT_ENABLE			0xC4000022U
+#define SDEI_EVENT_DISABLE			0xC4000023U
+#define SDEI_EVENT_CONTEXT			0xC4000024U
+#define SDEI_EVENT_COMPLETE			0xC4000025U
+#define SDEI_EVENT_COMPLETE_AND_RESUME		0xC4000026U

-#define SDEI_EVENT_UNREGISTER			0xC4000027
-#define SDEI_EVENT_STATUS			0xC4000028
-#define SDEI_EVENT_GET_INFO			0xC4000029
-#define SDEI_EVENT_ROUTING_SET			0xC400002A
-#define SDEI_PE_MASK				0xC400002B
-#define SDEI_PE_UNMASK				0xC400002C
+#define SDEI_EVENT_UNREGISTER			0xC4000027U
+#define SDEI_EVENT_STATUS			0xC4000028U
+#define SDEI_EVENT_GET_INFO			0xC4000029U
+#define SDEI_EVENT_ROUTING_SET			0xC400002AU
+#define SDEI_PE_MASK				0xC400002BU
+#define SDEI_PE_UNMASK				0xC400002CU

-#define SDEI_INTERRUPT_BIND			0xC400002D
-#define SDEI_INTERRUPT_RELEASE			0xC400002E
-#define SDEI_EVENT_SIGNAL			0xC400002F
-#define SDEI_FEATURES				0xC4000030
-#define SDEI_PRIVATE_RESET			0xC4000031
-#define SDEI_SHARED_RESET			0xC4000032
+#define SDEI_INTERRUPT_BIND			0xC400002DU
+#define SDEI_INTERRUPT_RELEASE			0xC400002EU
+#define SDEI_EVENT_SIGNAL			0xC400002FU
+#define SDEI_FEATURES				0xC4000030U
+#define SDEI_PRIVATE_RESET			0xC4000031U
+#define SDEI_SHARED_RESET			0xC4000032U

 /* SDEI_EVENT_REGISTER flags */
-#define SDEI_REGF_RM_ANY	0
-#define SDEI_REGF_RM_PE		1
+#define SDEI_REGF_RM_ANY	0ULL
+#define SDEI_REGF_RM_PE		1ULL

 /* SDEI_EVENT_COMPLETE status flags */
-#define SDEI_EV_HANDLED	0
-#define SDEI_EV_FAILED	1
-
-/* SDE event status values in bit position */
-#define SDEI_STATF_REGISTERED	0
-#define SDEI_STATF_ENABLED	1
-#define SDEI_STATF_RUNNING	2
+#define SDEI_EV_HANDLED	0U
+#define SDEI_EV_FAILED	1U

 /* Internal: SDEI flag bit positions */
-#define _SDEI_MAPF_DYNAMIC_SHIFT	1
-#define _SDEI_MAPF_BOUND_SHIFT		2
-#define _SDEI_MAPF_SIGNALABLE_SHIFT	3
-#define _SDEI_MAPF_PRIVATE_SHIFT	4
-#define _SDEI_MAPF_CRITICAL_SHIFT	5
-#define _SDEI_MAPF_EXPLICIT_SHIFT	6
+#define SDEI_MAPF_DYNAMIC_SHIFT_	1U
+#define SDEI_MAPF_BOUND_SHIFT_		2U
+#define SDEI_MAPF_SIGNALABLE_SHIFT_	3U
+#define SDEI_MAPF_PRIVATE_SHIFT_	4U
+#define SDEI_MAPF_CRITICAL_SHIFT_	5U
+#define SDEI_MAPF_EXPLICIT_SHIFT_	6U

 /* SDEI event 0 */
 #define SDEI_EVENT_0	0

 /* Placeholder interrupt for dynamic mapping */
-#define SDEI_DYN_IRQ	0
+#define SDEI_DYN_IRQ	0U

 /* SDEI flags */
@@ -80,20 +75,20 @@
  *
  * See also the is_map_bound() macro.
  */
-#define SDEI_MAPF_DYNAMIC	BIT(_SDEI_MAPF_DYNAMIC_SHIFT)
-#define SDEI_MAPF_BOUND		BIT(_SDEI_MAPF_BOUND_SHIFT)
-#define SDEI_MAPF_EXPLICIT	BIT(_SDEI_MAPF_EXPLICIT_SHIFT)
+#define SDEI_MAPF_DYNAMIC	BIT(SDEI_MAPF_DYNAMIC_SHIFT_)
+#define SDEI_MAPF_BOUND		BIT(SDEI_MAPF_BOUND_SHIFT_)
+#define SDEI_MAPF_EXPLICIT	BIT(SDEI_MAPF_EXPLICIT_SHIFT_)

-#define SDEI_MAPF_SIGNALABLE	BIT(_SDEI_MAPF_SIGNALABLE_SHIFT)
-#define SDEI_MAPF_PRIVATE	BIT(_SDEI_MAPF_PRIVATE_SHIFT)
+#define SDEI_MAPF_SIGNALABLE	BIT(SDEI_MAPF_SIGNALABLE_SHIFT_)
+#define SDEI_MAPF_PRIVATE	BIT(SDEI_MAPF_PRIVATE_SHIFT_)

 #define SDEI_MAPF_NORMAL	0
-#define SDEI_MAPF_CRITICAL	BIT(_SDEI_MAPF_CRITICAL_SHIFT)
+#define SDEI_MAPF_CRITICAL	BIT(SDEI_MAPF_CRITICAL_SHIFT_)

 /* Indices of private and shared mappings */
-#define _SDEI_MAP_IDX_PRIV	0
-#define _SDEI_MAP_IDX_SHRD	1
-#define _SDEI_MAP_IDX_MAX	2
+#define SDEI_MAP_IDX_PRIV_	0U
+#define SDEI_MAP_IDX_SHRD_	1U
+#define SDEI_MAP_IDX_MAX_	2U

 /* The macros below are used to identify SDEI calls from the SMC function ID */
 #define SDEI_FID_MASK	U(0xffe0)
@@ -104,22 +99,22 @@

 #define SDEI_EVENT_MAP(_event, _intr, _flags) \
 	{ \
-		.ev_num = _event, \
-		.intr = _intr, \
-		.map_flags = _flags \
+		.ev_num = (_event), \
+		.intr = (_intr), \
+		.map_flags = (_flags) \
 	}

 #define SDEI_SHARED_EVENT(_event, _intr, _flags) \
 	SDEI_EVENT_MAP(_event, _intr, _flags)

 #define SDEI_PRIVATE_EVENT(_event, _intr, _flags) \
-	SDEI_EVENT_MAP(_event, _intr, _flags | SDEI_MAPF_PRIVATE)
+	SDEI_EVENT_MAP(_event, _intr, (_flags) | SDEI_MAPF_PRIVATE)

 #define SDEI_DEFINE_EVENT_0(_intr) \
-	SDEI_PRIVATE_EVENT(SDEI_EVENT_0, _intr, SDEI_MAPF_SIGNALABLE)
+	SDEI_PRIVATE_EVENT(SDEI_EVENT_0, (_intr), SDEI_MAPF_SIGNALABLE)

 #define SDEI_EXPLICIT_EVENT(_event, _pri) \
-	SDEI_EVENT_MAP(_event, 0, _pri | SDEI_MAPF_EXPLICIT | SDEI_MAPF_PRIVATE)
+	SDEI_EVENT_MAP((_event), 0, (_pri) | SDEI_MAPF_EXPLICIT | SDEI_MAPF_PRIVATE)

 /*
  * Declare shared and private entries for each core. Also declare a global
  *
@@ -133,12 +128,12 @@
 		[PLATFORM_CORE_COUNT * ARRAY_SIZE(_private)]; \
 	sdei_entry_t sdei_shared_event_table[ARRAY_SIZE(_shared)]; \
 	const sdei_mapping_t sdei_global_mappings[] = { \
-		[_SDEI_MAP_IDX_PRIV] = { \
-			.map = _private, \
+		[SDEI_MAP_IDX_PRIV_] = { \
+			.map = (_private), \
 			.num_maps = ARRAY_SIZE(_private) \
 		}, \
-		[_SDEI_MAP_IDX_SHRD] = { \
-			.map = _shared, \
+		[SDEI_MAP_IDX_SHRD_] = { \
+			.map = (_shared), \
 			.num_maps = ARRAY_SIZE(_shared) \
 		}, \
 	}
@@ -185,4 +180,4 @@
 /* Public API to dispatch an event to Normal world */
 int sdei_dispatch_event(int ev_num);

-#endif /* __SDEI_H__ */
+#endif /* SDEI_H */
diff --git a/lib/extensions/ras/ras_common.c b/lib/extensions/ras/ras_common.c
index 5a2b43c..2e65eeb 100644
--- a/lib/extensions/ras/ras_common.c
+++ b/lib/extensions/ras/ras_common.c
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include

 #ifndef PLAT_RAS_PRI
 # error Platform must define RAS priority value
@@ -20,15 +21,15 @@
 int ras_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
 		void *handle, uint64_t flags)
 {
-	unsigned int i, n_handled = 0, ret;
-	int probe_data;
+	unsigned int i, n_handled = 0;
+	int probe_data, ret;
 	struct err_record_info *info;

 	const struct err_handler_data err_data = {
 		.version = ERR_HANDLER_VERSION,
 		.ea_reason = ea_reason,
 		.interrupt = 0,
-		.syndrome = syndrome,
+		.syndrome = (uint32_t) syndrome,
 		.flags = flags,
 		.cookie = cookie,
 		.handle = handle
@@ -39,7 +40,7 @@
 		assert(info->handler != NULL);

 		/* Continue probing until the record group signals no error */
-		while (1) {
+		while (true) {
 			if (info->probe(info, &probe_data) == 0)
 				break;
@@ -52,20 +53,20 @@
 		}
 	}

-	return (n_handled != 0);
+	return (n_handled != 0U) ? 1 : 0;
 }

 #if ENABLE_ASSERTIONS
 static void assert_interrupts_sorted(void)
 {
 	unsigned int i, last;
-	struct ras_interrupt *start = ras_interrupt_mapping.intrs;
+	struct ras_interrupt *start = ras_interrupt_mappings.intrs;

-	if (ras_interrupt_mapping.num_intrs == 0)
+	if (ras_interrupt_mappings.num_intrs == 0UL)
 		return;

 	last = start[0].intr_number;
-	for (i = 1; i < ras_interrupt_mapping.num_intrs; i++) {
+	for (i = 1; i < ras_interrupt_mappings.num_intrs; i++) {
 		assert(start[i].intr_number > last);
 		last = start[i].intr_number;
 	}
@@ -79,7 +80,7 @@
 static int ras_interrupt_handler(uint32_t intr_raw, uint32_t flags,
 		void *handle, void *cookie)
 {
-	struct ras_interrupt *ras_inrs = ras_interrupt_mapping.intrs;
+	struct ras_interrupt *ras_inrs = ras_interrupt_mappings.intrs;
 	struct ras_interrupt *selected = NULL;
 	int start, end, mid, probe_data, ret __unused;
@@ -91,10 +92,10 @@
 		.handle = handle
 	};

-	assert(ras_interrupt_mapping.num_intrs > 0);
+	assert(ras_interrupt_mappings.num_intrs > 0UL);

 	start = 0;
-	end = ras_interrupt_mapping.num_intrs;
+	end = (int) ras_interrupt_mappings.num_intrs;
 	while (start <= end) {
 		mid = ((end + start) / 2);
 		if (intr_raw == ras_inrs[mid].intr_number) {
@@ -114,14 +115,14 @@
 		panic();
 	}

-	if (selected->err_record->probe) {
+	if (selected->err_record->probe != NULL) {
 		ret = selected->err_record->probe(selected->err_record, &probe_data);
 		assert(ret != 0);
 	}

 	/* Call error handler for the record group */
 	assert(selected->err_record->handler != NULL);
-	selected->err_record->handler(selected->err_record, probe_data,
+	(void) selected->err_record->handler(selected->err_record, probe_data,
 			&err_data);

 	return 0;
diff --git a/lib/extensions/ras/std_err_record.c b/lib/extensions/ras/std_err_record.c
index 65c007f..209cb73 100644
--- a/lib/extensions/ras/std_err_record.c
+++ b/lib/extensions/ras/std_err_record.c
@@ -13,28 +13,29 @@
  */
 int ser_probe_memmap(uintptr_t base, unsigned int size_num_k, int *probe_data)
 {
-	int num_records, num_group_regs, i;
+	unsigned int num_records, num_group_regs, i;
 	uint64_t gsr;

-	assert(base != 0);
+	assert(base != 0UL);

 	/* Only 4K supported for now */
 	assert(size_num_k == STD_ERR_NODE_SIZE_NUM_K);

-	num_records = (mmio_read_32(ERR_DEVID(base, size_num_k)) & ERR_DEVID_MASK);
+	num_records = (unsigned int)
+		(mmio_read_32(ERR_DEVID(base, size_num_k)) & ERR_DEVID_MASK);

 	/* A group register shows error status for 2^6 error records */
-	num_group_regs = (num_records >> 6) + 1;
+	num_group_regs = (num_records >> 6U) + 1U;

 	/* Iterate through group registers to find a record in error */
 	for (i = 0; i < num_group_regs; i++) {
 		gsr = mmio_read_64(ERR_GSR(base, size_num_k, i));
-		if (gsr == 0)
+		if (gsr == 0ULL)
 			continue;

 		/* Return the index of the record in error */
 		if (probe_data != NULL)
-			*probe_data = ((i << 6) + __builtin_ctz(gsr));
+			*probe_data = (((int) (i << 6U)) + __builtin_ctzll(gsr));

 		return 1;
 	}
@@ -49,13 +50,14 @@
  */
 int ser_probe_sysreg(unsigned int idx_start, unsigned int num_idx, int *probe_data)
 {
-	int i;
+	unsigned int i;
 	uint64_t status;
-	unsigned int max_idx __unused = read_erridr_el1() & ERRIDR_MASK;
+	unsigned int max_idx __unused =
+		((unsigned int) read_erridr_el1()) & ERRIDR_MASK;

 	assert(idx_start < max_idx);
-	assert(check_u32_overflow(idx_start, num_idx) == 0);
-	assert((idx_start + num_idx - 1) < max_idx);
+	assert(check_u32_overflow(idx_start, num_idx));
+	assert((idx_start + num_idx - 1U) < max_idx);

 	for (i = 0; i < num_idx; i++) {
 		/* Select the error record */
@@ -65,9 +67,9 @@
 		status = read_erxstatus_el1();

 		/* Check for valid field in status */
-		if (ERR_STATUS_GET_FIELD(status, V)) {
+		if (ERR_STATUS_GET_FIELD(status, V) != 0U) {
 			if (probe_data != NULL)
-				*probe_data = i;
+				*probe_data = (int) i;

 			return 1;
 		}
 	}
diff --git a/plat/arm/common/arm_sip_svc.c b/plat/arm/common/arm_sip_svc.c
index bb5b5c6..e450c6f 100644
--- a/plat/arm/common/arm_sip_svc.c
+++ b/plat/arm/common/arm_sip_svc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -64,9 +64,9 @@
 		/*
 		 * Pointers used in execution state switch are all 32 bits wide
 		 */
-		return arm_execution_state_switch(smc_fid, (uint32_t) x1,
-				(uint32_t) x2, (uint32_t) x3, (uint32_t) x4,
-				handle);
+		return (uintptr_t) arm_execution_state_switch(smc_fid,
+				(uint32_t) x1, (uint32_t) x2, (uint32_t) x3,
+				(uint32_t) x4, handle);
 	}

 	case ARM_SIP_SVC_CALL_COUNT:
diff --git a/plat/arm/common/execution_state_switch.c b/plat/arm/common/execution_state_switch.c
index 22d552a..b12d82c 100644
--- a/plat/arm/common/execution_state_switch.c
+++ b/plat/arm/common/execution_state_switch.c
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -39,7 +40,8 @@
 {
 	/* Execution state can be switched only if EL3 is AArch64 */
 #ifdef AARCH64
-	int caller_64, from_el2, el, endianness, thumb = 0;
+	bool caller_64, thumb = false, from_el2;
+	unsigned int el, endianness;
 	u_register_t spsr, pc, scr, sctlr;
 	entry_point_info_t ep;
 	cpu_context_t *ctx = (cpu_context_t *) handle;
@@ -50,7 +52,7 @@
 	/*
 	 * Disallow state switch if any of the secondaries have been brought up.
 	 */
-	if (psci_secondaries_brought_up())
+	if (psci_secondaries_brought_up() != 0)
 		goto exec_denied;

 	spsr = read_ctx_reg(el3_ctx, CTX_SPSR_EL3);
@@ -61,20 +63,20 @@
 		 * If the call originated from AArch64, expect 32-bit pointers when
 		 * switching to AArch32.
 		 */
-		if ((pc_hi != 0) || (cookie_hi != 0))
+		if ((pc_hi != 0U) || (cookie_hi != 0U))
 			goto invalid_param;

 		pc = pc_lo;

 		/* Instruction state when entering AArch32 */
-		thumb = pc & 1;
+		thumb = (pc & 1U) != 0U;
 	} else {
 		/* Construct AArch64 PC */
 		pc = (((u_register_t) pc_hi) << 32) | pc_lo;
 	}

 	/* Make sure PC is 4-byte aligned, except for Thumb */
-	if ((pc & 0x3) && !thumb)
+	if (((pc & 0x3U) != 0U) && !thumb)
 		goto invalid_param;

 	/*
@@ -95,7 +97,7 @@
 		 * Disallow switching state if there's a Hypervisor in place;
 		 * this request must be taken up with the Hypervisor instead.
 		 */
-		if (scr & SCR_HCE_BIT)
+		if ((scr & SCR_HCE_BIT) != 0U)
 			goto exec_denied;
 	}
@@ -105,11 +107,11 @@
 	 * directly.
 	 */
 	sctlr = from_el2 ? read_sctlr_el2() : read_sctlr_el1();
-	endianness = !!(sctlr & SCTLR_EE_BIT);
+	endianness = ((sctlr & SCTLR_EE_BIT) != 0U) ? 1U : 0U;

 	/* Construct SPSR for the exception state we're about to switch to */
 	if (caller_64) {
-		int impl;
+		unsigned long long impl;

 		/*
 		 * Switching from AArch64 to AArch32. Ensure this CPU implements
@@ -121,7 +123,8 @@

 		/* Return to the equivalent AArch32 privilege level */
 		el = from_el2 ? MODE32_hyp : MODE32_svc;
-		spsr = SPSR_MODE32(el, thumb ? SPSR_T_THUMB : SPSR_T_ARM,
+		spsr = SPSR_MODE32((u_register_t) el,
+				thumb ? SPSR_T_THUMB : SPSR_T_ARM,
 				endianness, DISABLE_ALL_EXCEPTIONS);
 	} else {
 		/*
@@ -130,7 +133,8 @@
 		 * raised), it's safe to assume AArch64 is also implemented.
 		 */
 		el = from_el2 ? MODE_EL2 : MODE_EL1;
-		spsr = SPSR_64(el, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+		spsr = SPSR_64((u_register_t) el, MODE_SP_ELX,
+				DISABLE_ALL_EXCEPTIONS);
 	}

 	/*
@@ -143,10 +147,11 @@
 	 */
 	zeromem(&ep, sizeof(ep));
 	ep.pc = pc;
-	ep.spsr = spsr;
+	ep.spsr = (uint32_t) spsr;
 	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1,
-			((endianness ? EP_EE_BIG : EP_EE_LITTLE) | NON_SECURE |
-			EP_ST_DISABLE));
+			((unsigned int) ((endianness != 0U) ? EP_EE_BIG :
+				EP_EE_LITTLE)
+			| NON_SECURE | EP_ST_DISABLE));

 	/*
 	 * Re-initialize the system register context, and exit EL3 as if for the
diff --git a/services/std_svc/sdei/sdei_event.c b/services/std_svc/sdei/sdei_event.c
index bf0e779..ec69b9d 100644
--- a/services/std_svc/sdei/sdei_event.c
+++ b/services/std_svc/sdei/sdei_event.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
  *
  * SPDX-License-Identifier: BSD-3-Clause
  */
@@ -21,7 +21,8 @@
 {
 	const sdei_mapping_t *mapping;
 	sdei_entry_t *cpu_priv_base;
-	unsigned int idx, base_idx;
+	unsigned int base_idx;
+	long int idx;

 	if (is_event_private(map)) {
 		/*
@@ -32,7 +33,7 @@
 		idx = MAP_OFF(map, mapping);

 		/* Base of private mappings for this CPU */
-		base_idx = plat_my_core_pos() * mapping->num_maps;
+		base_idx = plat_my_core_pos() * ((unsigned int) mapping->num_maps);

 		cpu_priv_base = &sdei_private_event_table[base_idx];
 		/*
@@ -52,7 +53,7 @@
  * Find event mapping for a given interrupt number: On success, returns pointer
  * to the event mapping. On error, returns NULL.
  */
-sdei_ev_map_t *find_event_map_by_intr(int intr_num, int shared)
+sdei_ev_map_t *find_event_map_by_intr(unsigned int intr_num, bool shared)
 {
 	const sdei_mapping_t *mapping;
 	sdei_ev_map_t *map;
diff --git a/services/std_svc/sdei/sdei_intr_mgmt.c b/services/std_svc/sdei/sdei_intr_mgmt.c
index a37188a..e834a2b 100644
--- a/services/std_svc/sdei/sdei_intr_mgmt.c
+++ b/services/std_svc/sdei/sdei_intr_mgmt.c
@@ -16,17 +16,14 @@
 #include
 #include "sdei_private.h"

-#define PE_MASKED	1
-#define PE_NOT_MASKED	0
-
 /* x0-x17 GPREGS context */
-#define SDEI_SAVED_GPREGS	18
+#define SDEI_SAVED_GPREGS	18U

 /* Maximum preemption nesting levels: Critical priority and Normal priority */
-#define MAX_EVENT_NESTING	2
+#define MAX_EVENT_NESTING	2U

 /* Per-CPU SDEI state access macro */
-#define sdei_get_this_pe_state()	(&sdei_cpu_state[plat_my_core_pos()])
+#define sdei_get_this_pe_state()	(&cpu_state[plat_my_core_pos()])

 /* Structure to store information about an outstanding dispatch */
 typedef struct sdei_dispatch_context {
@@ -48,31 +45,33 @@
 typedef struct sdei_cpu_state {
 	sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
 	unsigned short stack_top; /* Empty ascending */
-	unsigned int pe_masked:1;
-	unsigned int pending_enables:1;
+	bool pe_masked;
+	bool pending_enables;
 } sdei_cpu_state_t;

 /* SDEI states for all cores in the system */
-static sdei_cpu_state_t sdei_cpu_state[PLATFORM_CORE_COUNT];
+static sdei_cpu_state_t cpu_state[PLATFORM_CORE_COUNT];

-unsigned int sdei_pe_mask(void)
+int64_t sdei_pe_mask(void)
 {
-	unsigned int ret;
+	int64_t ret = 0;
 	sdei_cpu_state_t *state = sdei_get_this_pe_state();

 	/*
 	 * Return value indicates whether this call had any effect in the mask
 	 * status of this PE.
 	 */
-	ret = (state->pe_masked ^ PE_MASKED);
-	state->pe_masked = PE_MASKED;
+	if (!state->pe_masked) {
+		state->pe_masked = true;
+		ret = 1;
+	}

 	return ret;
 }

 void sdei_pe_unmask(void)
 {
-	int i;
+	unsigned int i;
 	sdei_ev_map_t *map;
 	sdei_entry_t *se;
 	sdei_cpu_state_t *state = sdei_get_this_pe_state();
@@ -95,8 +94,7 @@
 			se = get_event_entry(map);

 			sdei_map_lock(map);
-			if (is_map_bound(map) &&
-					GET_EV_STATE(se, ENABLED) &&
+			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED) &&
 					(se->reg_flags == SDEI_REGF_RM_PE) &&
 					(se->affinity == my_mpidr)) {
 				plat_ic_enable_interrupt(map->intr);
@@ -105,8 +103,8 @@
 		}
 	}

-	state->pending_enables = 0;
-	state->pe_masked = PE_NOT_MASKED;
+	state->pending_enables = false;
+	state->pe_masked = false;
 }

 /* Push a dispatch context to the dispatch stack */
@@ -129,7 +127,7 @@
 {
 	sdei_cpu_state_t *state = sdei_get_this_pe_state();

-	if (state->stack_top == 0)
+	if (state->stack_top == 0U)
 		return NULL;

 	assert(state->stack_top <= MAX_EVENT_NESTING);
@@ -144,27 +142,27 @@
 {
 	sdei_cpu_state_t *state = sdei_get_this_pe_state();

-	if (state->stack_top == 0)
+	if (state->stack_top == 0U)
 		return NULL;

 	assert(state->stack_top <= MAX_EVENT_NESTING);

-	return &state->dispatch_stack[state->stack_top - 1];
+	return &state->dispatch_stack[state->stack_top - 1U];
 }

 static sdei_dispatch_context_t *save_event_ctx(sdei_ev_map_t *map,
 		void *tgt_ctx)
 {
 	sdei_dispatch_context_t *disp_ctx;
-	gp_regs_t *tgt_gpregs;
-	el3_state_t *tgt_el3;
+	const gp_regs_t *tgt_gpregs;
+	const el3_state_t *tgt_el3;

-	assert(tgt_ctx);
+	assert(tgt_ctx != NULL);
 	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
 	tgt_el3 = get_el3state_ctx(tgt_ctx);

 	disp_ctx = push_dispatch();
-	assert(disp_ctx);
+	assert(disp_ctx != NULL);
 	disp_ctx->map = map;

 	/* Save general purpose and exception registers */
@@ -175,12 +173,12 @@

 	return disp_ctx;
 }
-static void restore_event_ctx(sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
+static void restore_event_ctx(const sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
 {
 	gp_regs_t *tgt_gpregs;
 	el3_state_t *tgt_el3;

-	assert(tgt_ctx);
+	assert(tgt_ctx != NULL);
 	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
 	tgt_el3 = get_el3state_ctx(tgt_ctx);
@@ -226,7 +224,7 @@
 	cm_set_next_eret_context(NON_SECURE);

 	ns_ctx = cm_get_context(NON_SECURE);
-	assert(ns_ctx);
+	assert(ns_ctx != NULL);

 	return ns_ctx;
 }
@@ -251,7 +249,7 @@
 	 *   - x2: Interrupted PC
 	 *   - x3: Interrupted SPSR
 	 */
-	SMC_SET_GP(ctx, CTX_GPREG_X0, map->ev_num);
+	SMC_SET_GP(ctx, CTX_GPREG_X0, (uint64_t) map->ev_num);
 	SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
 	SMC_SET_GP(ctx, CTX_GPREG_X2, disp_ctx->elr_el3);
 	SMC_SET_GP(ctx, CTX_GPREG_X3, disp_ctx->spsr_el3);
@@ -286,7 +284,7 @@
 		sdei_cpu_state_t *state, unsigned int intr_raw)
 {
 	uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
-	int disable = 0;
+	bool disable = false;

 	/* Nothing to do for event 0 */
 	if (map->ev_num == SDEI_EVENT_0)
@@ -297,18 +295,17 @@
 	 * this CPU, we disable interrupt, leave the interrupt pending, and do
 	 * EOI.
 	 */
-	if (is_event_private(map)) {
-		disable = 1;
-	} else if (se->reg_flags == SDEI_REGF_RM_PE) {
+	if (is_event_private(map) || (se->reg_flags == SDEI_REGF_RM_PE))
+		disable = true;
+
+	if (se->reg_flags == SDEI_REGF_RM_PE)
 		assert(se->affinity == my_mpidr);
-		disable = 1;
-	}

 	if (disable) {
 		plat_ic_disable_interrupt(map->intr);
 		plat_ic_set_interrupt_pending(map->intr);
 		plat_ic_end_of_interrupt(intr_raw);
-		state->pending_enables = 1;
+		state->pending_enables = true;

 		return;
 	}
@@ -321,7 +318,7 @@
 	 * Therefore, we set the interrupt back pending so as to give other
 	 * suitable PEs a chance of handling it.
 	 */
-	assert(plat_ic_is_spi(map->intr));
+	assert(plat_ic_is_spi(map->intr) != 0);
 	plat_ic_set_interrupt_pending(map->intr);

 	/*
@@ -344,11 +341,12 @@
 	sdei_entry_t *se;
 	cpu_context_t *ctx;
 	sdei_ev_map_t *map;
-	sdei_dispatch_context_t *disp_ctx;
+	const sdei_dispatch_context_t *disp_ctx;
 	unsigned int sec_state;
 	sdei_cpu_state_t *state;
 	uint32_t intr;
 	struct jmpbuf dispatch_jmp;
+	const uint64_t mpidr = read_mpidr_el1();

 	/*
 	 * To handle an event, the following conditions must be true:
@@ -374,8 +372,8 @@
 	 * this interrupt
 	 */
 	intr = plat_ic_get_interrupt_id(intr_raw);
-	map = find_event_map_by_intr(intr, plat_ic_is_spi(intr));
-	if (!map) {
+	map = find_event_map_by_intr(intr, (plat_ic_is_spi(intr) != 0));
+	if (map == NULL) {
 		ERROR("No SDEI map for interrupt %u\n", intr);
 		panic();
 	}
@@ -389,13 +387,13 @@
 	se = get_event_entry(map);
 	state = sdei_get_this_pe_state();

-	if (state->pe_masked == PE_MASKED) {
+	if (state->pe_masked) {
 		/*
 		 * Interrupts received while this PE was masked can't be
 		 * dispatched.
 		 */
-		SDEI_LOG("interrupt %u on %lx while PE masked\n", map->intr,
-				read_mpidr_el1());
+		SDEI_LOG("interrupt %u on %llx while PE masked\n", map->intr,
+				mpidr);
 		if (is_event_shared(map))
 			sdei_map_lock(map);
@@ -416,8 +414,7 @@

 	/* Assert shared event routed to this PE had been configured so */
 	if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
-		assert(se->affinity ==
-				(read_mpidr_el1() & MPIDR_AFFINITY_MASK));
+		assert(se->affinity == (mpidr & MPIDR_AFFINITY_MASK));
 	}

 	if (!can_sdei_state_trans(se, DO_DISPATCH)) {
@@ -451,7 +448,7 @@
 		 * dispatch, assert the latter is a Normal dispatch. Critical
 		 * events can preempt an outstanding Normal event dispatch.
 		 */
-		if (disp_ctx)
+		if (disp_ctx != NULL)
 			assert(is_event_normal(disp_ctx->map));
 	} else {
 		/*
@@ -467,9 +464,8 @@
 	if (is_event_shared(map))
 		sdei_map_unlock(map);

-	SDEI_LOG("ACK %lx, ev:%d ss:%d spsr:%lx ELR:%lx\n", read_mpidr_el1(),
-			map->ev_num, sec_state, read_spsr_el3(),
-			read_elr_el3());
+	SDEI_LOG("ACK %llx, ev:%d ss:%d spsr:%lx ELR:%lx\n", mpidr, map->ev_num,
+			sec_state, read_spsr_el3(), read_elr_el3());

 	ctx = handle;
@@ -497,7 +493,7 @@
 	 * Non-secure context was fully saved before dispatch, and has been
 	 * returned to its pre-dispatch state.
 	 */
-	if ((sec_state == SECURE) && (ehf_is_ns_preemption_allowed() == 0))
+	if ((sec_state == SECURE) && (ehf_is_ns_preemption_allowed() == 0U))
 		restore_and_resume_secure_context();

 	/*
@@ -511,9 +507,6 @@
 	}

 	plat_ic_end_of_interrupt(intr_raw);

-	if (is_event_shared(map))
-		sdei_map_unlock(map);
-
 	return 0;
 }
@@ -539,7 +532,7 @@

 	/* Can't dispatch if events are masked on this PE */
 	state = sdei_get_this_pe_state();
-	if (state->pe_masked == PE_MASKED)
+	if (state->pe_masked)
 		return -1;

 	/* Event 0 can't be dispatched */
@@ -548,7 +541,7 @@

 	/* Locate mapping corresponding to this event */
 	map = find_event_map(ev_num);
-	if (!map)
+	if (map == NULL)
 		return -1;

 	/* Only explicit events can be dispatched */
@@ -557,7 +550,7 @@

 	/* Examine state of dispatch stack */
 	disp_ctx = get_outstanding_dispatch();
-	if (disp_ctx) {
+	if (disp_ctx != NULL) {
 		/*
 		 * There's an outstanding dispatch. If the outstanding dispatch
 		 * is critical, no more dispatches are possible.
@@ -606,7 +599,7 @@
 	longjmp(buffer);
 }

-int sdei_event_complete(int resume, uint64_t pc)
+int sdei_event_complete(bool resume, uint64_t pc)
 {
 	sdei_dispatch_context_t *disp_ctx;
 	sdei_entry_t *se;
@@ -617,7 +610,7 @@

 	/* Return error if called without an active event */
 	disp_ctx = get_outstanding_dispatch();
-	if (!disp_ctx)
+	if (disp_ctx == NULL)
 		return SDEI_EDENY;

 	/* Validate resumption point */
@@ -625,9 +618,12 @@
 		return SDEI_EDENY;

 	map = disp_ctx->map;
-	assert(map);
+	assert(map != NULL);
 	se = get_event_entry(map);

+	if (is_event_shared(map))
+		sdei_map_lock(map);
+
 	act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
 	if (!can_sdei_state_trans(se, act)) {
 		if (is_event_shared(map))
@@ -635,15 +631,15 @@
 		return SDEI_EDENY;
 	}

+	if (is_event_shared(map))
+		sdei_map_unlock(map);
+
 	/* Having done sanity checks, pop dispatch */
-	pop_dispatch();
+	(void) pop_dispatch();

 	SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
 			map->ev_num, read_spsr_el3(), read_elr_el3());

-	if (is_event_shared(map))
-		sdei_map_lock(map);
-
 	/*
 	 * Restore Non-secure to how it was originally interrupted. Once done,
 	 * it's up-to-date with the saved copy.
@@ -684,7 +680,7 @@
 	return 0;
 }

-int sdei_event_context(void *handle, unsigned int param)
+int64_t sdei_event_context(void *handle, unsigned int param)
 {
 	sdei_dispatch_context_t *disp_ctx;
@@ -693,10 +689,10 @@

 	/* Get outstanding dispatch on this CPU */
 	disp_ctx = get_outstanding_dispatch();
-	if (!disp_ctx)
+	if (disp_ctx == NULL)
 		return SDEI_EDENY;

-	assert(disp_ctx->map);
+	assert(disp_ctx->map != NULL);

 	if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT))
 		return SDEI_EDENY;
@@ -706,5 +702,5 @@
 	 * which can complete the event
 	 */

-	return disp_ctx->x[param];
+	return (int64_t) disp_ctx->x[param];
 }
diff --git a/services/std_svc/sdei/sdei_main.c b/services/std_svc/sdei/sdei_main.c
index 28afc1d..9b78d7f 100644
--- a/services/std_svc/sdei/sdei_main.c
+++ b/services/std_svc/sdei/sdei_main.c
@@ -22,14 +22,12 @@
 #include
 #include "sdei_private.h"

-#define MAJOR_VERSION	1
-#define MINOR_VERSION	0
-#define VENDOR_VERSION	0
+#define MAJOR_VERSION	1ULL
+#define MINOR_VERSION	0ULL
+#define VENDOR_VERSION	0ULL

 #define MAKE_SDEI_VERSION(_major, _minor, _vendor) \
-	((((unsigned long long)(_major)) << 48) | \
-	(((unsigned long long)(_minor)) << 32) | \
-	(_vendor))
+	((((_major)) << 48ULL) | (((_minor)) << 32ULL) | (_vendor))

 #define LOWEST_INTR_PRIORITY	0xff
@@ -47,7 +45,7 @@
 }

 /* Convert mapping to SDEI class */
-sdei_class_t map_to_class(sdei_ev_map_t *map)
+static sdei_class_t map_to_class(sdei_ev_map_t *map)
 {
 	return is_event_critical(map) ? SDEI_CRITICAL : SDEI_NORMAL;
 }
@@ -64,7 +62,7 @@
 /* Perform CPU-specific state initialisation */
 static void *sdei_cpu_on_init(const void *arg)
 {
-	int i;
+	unsigned int i;
 	sdei_ev_map_t *map;
 	sdei_entry_t *se;
@@ -78,15 +76,16 @@
 	SDEI_LOG("Private events initialized on %lx\n", read_mpidr_el1());

 	/* All PEs start with SDEI events masked */
-	sdei_pe_mask();
+	(void) sdei_pe_mask();

-	return 0;
+	return NULL;
 }

 /* Initialise an SDEI class */
-void sdei_class_init(sdei_class_t class)
+static void sdei_class_init(sdei_class_t class)
 {
-	unsigned int i, zero_found __unused = 0;
+	unsigned int i;
+	bool zero_found __unused = false;
 	int ev_num_so_far __unused;
 	sdei_ev_map_t *map;
@@ -126,7 +125,7 @@
 			num_dyn_shrd_slots++;
 		} else {
 			/* Shared mappings must be bound to shared interrupt */
-			assert(plat_ic_is_spi(map->intr));
+			assert(plat_ic_is_spi(map->intr) != 0);
 			set_map_bound(map);
 		}
@@ -143,7 +142,7 @@
 		ev_num_so_far = map->ev_num;

 		if (map->ev_num == SDEI_EVENT_0) {
-			zero_found = 1;
+			zero_found = true;

 			/* Event 0 must be a Secure SGI */
 			assert(is_secure_sgi(map->intr));
@@ -197,7 +196,7 @@
 				 * Private mappings must be bound to private
 				 * interrupt.
 				 */
-				assert(plat_ic_is_ppi(map->intr));
+				assert(plat_ic_is_ppi((unsigned) map->intr) != 0);
 				set_map_bound(map);
 			}
 		}
@@ -208,7 +207,7 @@
 	/* Ensure event 0 is in the mapping */
 	assert(zero_found);

-	sdei_cpu_on_init(NULL);
+	(void) sdei_cpu_on_init(NULL);
 }

 /* SDEI dispatcher initialisation */
@@ -236,7 +235,7 @@
 	se->reg_flags = flags;
 }

-static unsigned long long sdei_version(void)
+static uint64_t sdei_version(void)
 {
 	return MAKE_SDEI_VERSION(MAJOR_VERSION, MINOR_VERSION, VENDOR_VERSION);
 }
@@ -263,17 +262,18 @@
 /* Set routing of an SDEI event */
 static int sdei_event_routing_set(int ev_num, uint64_t flags, uint64_t mpidr)
 {
-	int ret, routing;
+	int ret;
+	unsigned int routing;
 	sdei_ev_map_t *map;
 	sdei_entry_t *se;

 	ret = validate_flags(flags, mpidr);
-	if (ret)
+	if (ret != 0)
 		return ret;

 	/* Check if valid event number */
 	map = find_event_map(ev_num);
-	if (!map)
+	if (map == NULL)
 		return SDEI_EINVAL;

 	/* The event must not be private */
@@ -295,11 +295,11 @@
 	}

 	/* Choose appropriate routing */
-	routing = (flags == SDEI_REGF_RM_ANY) ? INTR_ROUTING_MODE_ANY :
-		INTR_ROUTING_MODE_PE;
+	routing = (unsigned int) ((flags == SDEI_REGF_RM_ANY) ?
+		INTR_ROUTING_MODE_ANY : INTR_ROUTING_MODE_PE);

 	/* Update event registration flag */
-	se->reg_flags = flags;
+	se->reg_flags = (unsigned int) flags;

 	/*
 	 * ROUTING_SET is permissible only when event composite state is
@@ -315,24 +315,27 @@
 }

 /* Register handler and argument for an SDEI event */
-static int sdei_event_register(int ev_num, uint64_t ep, uint64_t arg,
+static int64_t sdei_event_register(int ev_num, uint64_t ep, uint64_t arg,
 		uint64_t flags, uint64_t mpidr)
 {
 	int ret;
+	unsigned int routing;
 	sdei_entry_t *se;
 	sdei_ev_map_t *map;
 	sdei_state_t backup_state;

-	if (!ep || (plat_sdei_validate_entry_point(ep, sdei_client_el()) != 0))
+	if ((ep == 0U) || (plat_sdei_validate_entry_point(
+					ep, sdei_client_el()) != 0)) {
 		return SDEI_EINVAL;
+	}

 	ret = validate_flags(flags, mpidr);
-	if (ret)
+	if (ret != 0)
 		return ret;

 	/* Check if valid event number */
 	map = find_event_map(ev_num);
-	if (!map)
+	if (map == NULL)
 		return SDEI_EINVAL;

 	/* Private events always target the PE */
@@ -371,7 +374,7 @@

 	if (is_map_bound(map)) {
 		/* Meanwhile, did any PE ACK the interrupt? */
-		if (plat_ic_get_interrupt_active(map->intr))
+		if (plat_ic_get_interrupt_active(map->intr) != 0U)
 			goto fallback;

 		/* The interrupt must currently owned by Non-secure */
@@ -404,16 +407,15 @@
 		 * already ensure that shared events get bound to SPIs.
 		 */
 		if (is_event_shared(map)) {
-			plat_ic_set_spi_routing(map->intr,
-					((flags == SDEI_REGF_RM_ANY) ?
-					INTR_ROUTING_MODE_ANY :
-					INTR_ROUTING_MODE_PE),
+			routing = (unsigned int) ((flags == SDEI_REGF_RM_ANY) ?
+				INTR_ROUTING_MODE_ANY : INTR_ROUTING_MODE_PE);
+			plat_ic_set_spi_routing(map->intr, routing,
 					(u_register_t) mpidr);
 		}
 	}

 	/* Populate event entries */
-	set_sdei_entry(se, ep, arg, flags, mpidr);
+	set_sdei_entry(se, ep, arg, (unsigned int) flags, mpidr);

 	/* Increment register count */
 	map->reg_count++;
@@ -432,15 +434,16 @@
 }

 /* Enable SDEI event */
-static int sdei_event_enable(int ev_num)
+static int64_t sdei_event_enable(int ev_num)
 {
 	sdei_ev_map_t *map;
 	sdei_entry_t *se;
-	int ret, before, after;
+	int ret;
+	bool before, after;

 	/* Check if valid event number */
 	map = find_event_map(ev_num);
-	if (!map)
+	if (map == NULL)
 		return SDEI_EINVAL;

 	se = get_event_entry(map);
@@ -475,11 +478,12 @@
 {
 	sdei_ev_map_t *map;
 	sdei_entry_t *se;
-	int ret, before, after;
+	int ret;
+	bool before, after;

 	/* Check if valid event number */
 	map = find_event_map(ev_num);
-	if (!map)
+	if (map == NULL)
 		return SDEI_EINVAL;

 	se = get_event_entry(map);
@@ -510,17 +514,18 @@
 }

 /* Query SDEI event information */
-static uint64_t sdei_event_get_info(int ev_num, int info)
+static int64_t sdei_event_get_info(int ev_num, int info)
 {
 	sdei_entry_t *se;
 	sdei_ev_map_t *map;

-	unsigned int flags, registered;
+	uint64_t flags;
+	bool registered;
 	uint64_t affinity;

 	/* Check if valid event number */
 	map = find_event_map(ev_num);
-	if (!map)
+	if (map == NULL)
 		return SDEI_EINVAL;

 	se = get_event_entry(map);
@@ -576,7 +581,7 @@

 	/* Check if valid event number */
 	map = find_event_map(ev_num);
-	if (!map)
+	if (map == NULL)
 		return SDEI_EINVAL;

 	se = get_event_entry(map);
@@ -648,7 +653,7 @@

 	/* Check if valid event number */
 	map = find_event_map(ev_num);
-	if (!map)
+	if (map == NULL)
 		return SDEI_EINVAL;

 	se = get_event_entry(map);
@@ -662,27 +667,27 @@
 	if (is_event_shared(map))
 		sdei_map_unlock(map);

-	return state;
+	return (int) state;
 }

 /* Bind an SDEI event to an interrupt */
-static int sdei_interrupt_bind(int intr_num)
+static int sdei_interrupt_bind(unsigned int intr_num)
 {
 	sdei_ev_map_t *map;
-	int retry = 1, shared_mapping;
+	bool retry = true, shared_mapping;

 	/* SGIs are not allowed to be bound */
-	if (plat_ic_is_sgi(intr_num))
+	if (plat_ic_is_sgi(intr_num) != 0)
 		return SDEI_EINVAL;

-	shared_mapping = plat_ic_is_spi(intr_num);
+	shared_mapping = (plat_ic_is_spi(intr_num) != 0);
 	do {
 		/*
 		 * Bail out if there is already an event for this interrupt,
 		 * either platform-defined or dynamic.
 		 */
 		map = find_event_map_by_intr(intr_num, shared_mapping);
-		if (map) {
+		if (map != NULL) {
 			if (is_map_dynamic(map)) {
 				if (is_map_bound(map)) {
 					/*
@@ -703,7 +708,7 @@
 		 * SDEI_DYN_IRQ.
 		 */
 		map = find_event_map_by_intr(SDEI_DYN_IRQ, shared_mapping);
-		if (!map)
+		if (map == NULL)
 			return SDEI_ENOMEM;

 		/* The returned mapping must be dynamic */
@@ -727,7 +732,7 @@
 		if (!is_map_bound(map)) {
 			map->intr = intr_num;
 			set_map_bound(map);
-			retry = 0;
+			retry = false;
 		}
 		sdei_map_unlock(map);
 	} while (retry);
@@ -744,7 +749,7 @@

 	/* Check if valid event number */
 	map = find_event_map(ev_num);
-	if (!map)
+	if (map == NULL)
 		return SDEI_EINVAL;

 	if (!is_map_dynamic(map))
@@ -774,7 +779,7 @@
 		 * Deny release if the interrupt is active, which means it's
 		 * probably being acknowledged and handled elsewhere.
 		 */
-		if (plat_ic_get_interrupt_active(map->intr)) {
+		if (plat_ic_get_interrupt_active(map->intr) != 0U) {
 			ret = SDEI_EDENY;
 			goto finish;
 		}
@@ -802,7 +807,8 @@
 static int sdei_private_reset(void)
 {
 	sdei_ev_map_t *map;
-	int ret = 0, final_ret = 0, i;
+	int ret = 0, final_ret = 0;
+	unsigned int i;

 	/* Unregister all private events */
 	for_each_private_map(i, map) {
@@ -824,7 +830,8 @@
 {
 	const sdei_mapping_t *mapping;
 	sdei_ev_map_t *map;
-	int ret = 0, final_ret = 0, i, j;
+	int ret = 0, final_ret = 0;
+	unsigned int i, j;

 	/* Unregister all shared events */
 	for_each_shared_map(i, map) {
@@ -867,17 +874,17 @@
 }

 /* Send a signal to another SDEI client PE */
-int sdei_signal(int event, uint64_t target_pe)
+static int sdei_signal(int ev_num, uint64_t target_pe)
 {
 	sdei_ev_map_t *map;

 	/* Only event 0 can be signalled */
-	if (event != SDEI_EVENT_0)
+	if (ev_num != SDEI_EVENT_0)
 		return SDEI_EINVAL;

 	/* Find mapping for event 0 */
 	map = find_event_map(SDEI_EVENT_0);
-	if (!map)
+	if (map == NULL)
 		return SDEI_EINVAL;

 	/* The event must be signalable */
@@ -889,20 +896,20 @@
 		return SDEI_EINVAL;

 	/* Raise SGI. Platform will validate target_pe */
-	plat_ic_raise_el3_sgi(map->intr, (u_register_t) target_pe);
+	plat_ic_raise_el3_sgi((int) map->intr, (u_register_t) target_pe);

 	return 0;
 }

 /* Query SDEI dispatcher features */
-uint64_t sdei_features(unsigned int feature)
+static uint64_t sdei_features(unsigned int feature)
 {
 	if (feature == SDEI_FEATURE_BIND_SLOTS) {
 		return FEATURE_BIND_SLOTS(num_dyn_priv_slots,
 				num_dyn_shrd_slots);
 	}

-	return SDEI_EINVAL;
+	return (uint64_t) SDEI_EINVAL;
 }

 /* SDEI top level handler for servicing SMCs */
@@ -917,56 +924,59 @@
 {
 	uint64_t x5;
-	int ss = get_interrupt_src_ss(flags);
+	unsigned int ss = (unsigned int) get_interrupt_src_ss(flags);
 	int64_t ret;
-	unsigned int resume = 0;
+	bool resume = false;
+	cpu_context_t *ctx = handle;
+	int ev_num = (int) x1;

 	if (ss != NON_SECURE)
-		SMC_RET1(handle, SMC_UNK);
+		SMC_RET1(ctx, SMC_UNK);

 	/* Verify the caller EL */
 	if (GET_EL(read_spsr_el3()) != sdei_client_el())
-		SMC_RET1(handle, SMC_UNK);
+		SMC_RET1(ctx, SMC_UNK);

 	switch (smc_fid) {
 	case SDEI_VERSION:
 		SDEI_LOG("> VER\n");
-		ret = sdei_version();
+		ret = (int64_t) sdei_version();
 		SDEI_LOG("< VER:%llx\n", ret);
-		SMC_RET1(handle, ret);
+		SMC_RET1(ctx, ret);

 	case SDEI_EVENT_REGISTER:
-		x5 = SMC_GET_GP(handle, CTX_GPREG_X5);
-		SDEI_LOG("> REG(n:%d e:%llx a:%llx f:%x m:%llx)\n", (int) x1,
+		x5 = SMC_GET_GP(ctx, CTX_GPREG_X5);
+		SDEI_LOG("> REG(n:%d e:%llx a:%llx f:%x m:%llx)\n", ev_num,
 				x2, x3, (int) x4, x5);
-		ret = sdei_event_register(x1, x2, x3, x4, x5);
+		ret = sdei_event_register(ev_num, x2, x3, x4, x5);
 		SDEI_LOG("< REG:%lld\n", ret);
-		SMC_RET1(handle, ret);
+		SMC_RET1(ctx, ret);

 	case SDEI_EVENT_ENABLE:
 		SDEI_LOG("> ENABLE(n:%d)\n", (int) x1);
-		ret = sdei_event_enable(x1);
+		ret = sdei_event_enable(ev_num);
 		SDEI_LOG("< ENABLE:%lld\n", ret);
-		SMC_RET1(handle, ret);
+		SMC_RET1(ctx, ret);

 	case SDEI_EVENT_DISABLE:
-		SDEI_LOG("> DISABLE(n:%d)\n", (int) x1);
-		ret = sdei_event_disable(x1);
+		SDEI_LOG("> DISABLE(n:%d)\n", ev_num);
+		ret = sdei_event_disable(ev_num);
 		SDEI_LOG("< DISABLE:%lld\n", ret);
-		SMC_RET1(handle, ret);
+		SMC_RET1(ctx, ret);

 	case SDEI_EVENT_CONTEXT:
 		SDEI_LOG("> CTX(p:%d):%lx\n", (int) x1, read_mpidr_el1());
-		ret = sdei_event_context(handle, x1);
+		ret = sdei_event_context(ctx, (unsigned int) x1);
 		SDEI_LOG("< CTX:%lld\n", ret);
-		SMC_RET1(handle, ret);
+		SMC_RET1(ctx, ret);

 	case SDEI_EVENT_COMPLETE_AND_RESUME:
-		resume = 1;
+		resume = true;
+		/* Fallthrough */
 	case SDEI_EVENT_COMPLETE:
-		SDEI_LOG("> COMPLETE(r:%d sta/ep:%llx):%lx\n", resume, x1,
-				read_mpidr_el1());
+		SDEI_LOG("> COMPLETE(r:%u sta/ep:%llx):%lx\n",
+				(unsigned int) resume, x1, read_mpidr_el1());
 		ret = sdei_event_complete(resume, x1);
 		SDEI_LOG("< COMPLETE:%llx\n", ret);
@@ -977,82 +987,82 @@
 		 * shouldn't be modified. We don't return to the caller in this
 		 * case anyway.
 		 */
-		if (ret)
-			SMC_RET1(handle, ret);
+		if (ret != 0)
+			SMC_RET1(ctx, ret);

-		SMC_RET0(handle);
+		SMC_RET0(ctx);

 	case SDEI_EVENT_STATUS:
-		SDEI_LOG("> STAT(n:%d)\n", (int) x1);
-		ret = sdei_event_status(x1);
+		SDEI_LOG("> STAT(n:%d)\n", ev_num);
+		ret = sdei_event_status(ev_num);
 		SDEI_LOG("< STAT:%lld\n", ret);
-		SMC_RET1(handle, ret);
+		SMC_RET1(ctx, ret);

 	case SDEI_EVENT_GET_INFO:
-		SDEI_LOG("> INFO(n:%d, %d)\n", (int) x1, (int) x2);
-		ret = sdei_event_get_info(x1, x2);
+		SDEI_LOG("> INFO(n:%d, %d)\n", ev_num, (int) x2);
+		ret = sdei_event_get_info(ev_num, (int) x2);
 		SDEI_LOG("< INFO:%lld\n", ret);
-		SMC_RET1(handle, ret);
+		SMC_RET1(ctx, ret);

 	case SDEI_EVENT_UNREGISTER:
-		SDEI_LOG("> UNREG(n:%d)\n", (int) x1);
-		ret = sdei_event_unregister(x1);
+		SDEI_LOG("> UNREG(n:%d)\n", ev_num);
+		ret = sdei_event_unregister(ev_num);
 		SDEI_LOG("< UNREG:%lld\n", ret);
-		SMC_RET1(handle, ret);
+		SMC_RET1(ctx, ret);

 	case SDEI_PE_UNMASK:
 		SDEI_LOG("> UNMASK:%lx\n", read_mpidr_el1());
 		sdei_pe_unmask();
 		SDEI_LOG("< UNMASK:%d\n", 0);
-		SMC_RET1(handle, 0);
+		SMC_RET1(ctx, 0);

 	case SDEI_PE_MASK:
 		SDEI_LOG("> MASK:%lx\n", read_mpidr_el1());
 		ret = sdei_pe_mask();
 		SDEI_LOG("< MASK:%lld\n", ret);
-		SMC_RET1(handle, ret);
+		SMC_RET1(ctx, ret);

 	case SDEI_INTERRUPT_BIND:
 		SDEI_LOG("> BIND(%d)\n", (int) x1);
-		ret = sdei_interrupt_bind(x1);
+		ret = sdei_interrupt_bind((unsigned int) x1);
 		SDEI_LOG("< BIND:%lld\n", ret);
-		SMC_RET1(handle, ret);
+		SMC_RET1(ctx, ret);

 	case SDEI_INTERRUPT_RELEASE:
-		SDEI_LOG("> REL(%d)\n", (int) x1);
-		ret = sdei_interrupt_release(x1);
+		SDEI_LOG("> REL(%d)\n", ev_num);
+		ret = sdei_interrupt_release(ev_num);
 		SDEI_LOG("< REL:%lld\n", ret);
-		SMC_RET1(handle, ret);
+		SMC_RET1(ctx, ret);

 	case SDEI_SHARED_RESET:
 		SDEI_LOG("> S_RESET():%lx\n", read_mpidr_el1());
 		ret = sdei_shared_reset();
 		SDEI_LOG("< S_RESET:%lld\n", ret);
-		SMC_RET1(handle, ret);
+		SMC_RET1(ctx, ret);

 	case SDEI_PRIVATE_RESET:
 		SDEI_LOG("> P_RESET():%lx\n", read_mpidr_el1());
 		ret = sdei_private_reset();
 		SDEI_LOG("< P_RESET:%lld\n", ret);
-		SMC_RET1(handle, ret);
+		SMC_RET1(ctx, ret);

 	case SDEI_EVENT_ROUTING_SET:
-		SDEI_LOG("> ROUTE_SET(n:%d f:%llx aff:%llx)\n", (int) x1, x2, x3);
-		ret = sdei_event_routing_set(x1, x2, x3);
+		SDEI_LOG("> ROUTE_SET(n:%d f:%llx aff:%llx)\n", ev_num, x2, x3);
+		ret = sdei_event_routing_set(ev_num, x2, x3);
 		SDEI_LOG("< ROUTE_SET:%lld\n", ret);
-		SMC_RET1(handle, ret);
+		SMC_RET1(ctx, ret);

 	case SDEI_FEATURES:
 		SDEI_LOG("> FTRS(f:%llx)\n", x1);
-		ret = sdei_features(x1);
+		ret = (int64_t) sdei_features((unsigned int) x1);
 		SDEI_LOG("< FTRS:%llx\n", ret);
-		SMC_RET1(handle, ret);
+		SMC_RET1(ctx, ret);

 	case SDEI_EVENT_SIGNAL:
-		SDEI_LOG("> SIGNAL(e:%llx t:%llx)\n", x1, x2);
-		ret = sdei_signal(x1, x2);
+		SDEI_LOG("> SIGNAL(e:%d t:%llx)\n", ev_num, x2);
+		ret = sdei_signal(ev_num, x2);
 		SDEI_LOG("< SIGNAL:%lld\n", ret);
-		SMC_RET1(handle, ret);
+		SMC_RET1(ctx, ret);

 	default:
 		/* Do nothing in default case */
@@ -1060,7 +1070,7 @@
 	}

 	WARN("Unimplemented SDEI Call: 0x%x\n", smc_fid);
-	SMC_RET1(handle, SMC_UNK);
+	SMC_RET1(ctx, SMC_UNK);
 }

 /* Subscribe to PSCI CPU on to initialize per-CPU SDEI configuration */
a/services/std_svc/sdei/sdei_private.h b/services/std_svc/sdei/sdei_private.h index 874fc22..d99acea 100644 --- a/services/std_svc/sdei/sdei_private.h +++ b/services/std_svc/sdei/sdei_private.h @@ -4,8 +4,8 @@ * SPDX-License-Identifier: BSD-3-Clause */ -#ifndef __SDEI_PRIVATE_H__ -#define __SDEI_PRIVATE_H__ +#ifndef SDEI_PRIVATE_H +#define SDEI_PRIVATE_H #include #include @@ -36,12 +36,12 @@ #define SDEI_LOG(...) VERBOSE("SDEI: " __VA_ARGS__) /* SDEI handler unregistered state. This is the default state. */ -#define SDEI_STATE_UNREGISTERED 0 +#define SDEI_STATE_UNREGISTERED 0U /* SDE event status values in bit position */ -#define SDEI_STATF_REGISTERED 0 -#define SDEI_STATF_ENABLED 1 -#define SDEI_STATF_RUNNING 2 +#define SDEI_STATF_REGISTERED 0U +#define SDEI_STATF_ENABLED 1U +#define SDEI_STATF_RUNNING 2U /* SDEI SMC error codes */ #define SDEI_EINVAL (-2) @@ -62,18 +62,18 @@ #define SDEI_INFO_EV_ROUTING_MODE 3 #define SDEI_INFO_EV_ROUTING_AFF 4 -#define SDEI_PRIVATE_MAPPING() (&sdei_global_mappings[_SDEI_MAP_IDX_PRIV]) -#define SDEI_SHARED_MAPPING() (&sdei_global_mappings[_SDEI_MAP_IDX_SHRD]) +#define SDEI_PRIVATE_MAPPING() (&sdei_global_mappings[SDEI_MAP_IDX_PRIV_]) +#define SDEI_SHARED_MAPPING() (&sdei_global_mappings[SDEI_MAP_IDX_SHRD_]) #define for_each_mapping_type(_i, _mapping) \ - for (_i = 0, _mapping = &sdei_global_mappings[i]; \ - _i < _SDEI_MAP_IDX_MAX; \ - _i++, _mapping = &sdei_global_mappings[i]) + for ((_i) = 0, (_mapping) = &sdei_global_mappings[(_i)]; \ + (_i) < SDEI_MAP_IDX_MAX_; \ + (_i)++, (_mapping) = &sdei_global_mappings[(_i)]) #define iterate_mapping(_mapping, _i, _map) \ - for (_map = (_mapping)->map, _i = 0; \ - _i < (_mapping)->num_maps; \ - _i++, _map++) + for ((_map) = (_mapping)->map, (_i) = 0; \ + (_i) < (_mapping)->num_maps; \ + (_i)++, (_map)++) #define for_each_private_map(_i, _map) \ iterate_mapping(SDEI_PRIVATE_MAPPING(), _i, _map) @@ -82,45 +82,45 @@ iterate_mapping(SDEI_SHARED_MAPPING(), _i, _map) /* SDEI_FEATURES */ -#define SDEI_FEATURE_BIND_SLOTS 0 -#define BIND_SLOTS_MASK 0xffff -#define FEATURES_SHARED_SLOTS_SHIFT 16 -#define FEATURES_PRIVATE_SLOTS_SHIFT 0 +#define SDEI_FEATURE_BIND_SLOTS 0U +#define BIND_SLOTS_MASK 0xffffU +#define FEATURES_SHARED_SLOTS_SHIFT 16U +#define FEATURES_PRIVATE_SLOTS_SHIFT 0U #define FEATURE_BIND_SLOTS(_priv, _shrd) \ - ((((_priv) & BIND_SLOTS_MASK) << FEATURES_PRIVATE_SLOTS_SHIFT) | \ - (((_shrd) & BIND_SLOTS_MASK) << FEATURES_SHARED_SLOTS_SHIFT)) + (((((uint64_t) (_priv)) & BIND_SLOTS_MASK) << FEATURES_PRIVATE_SLOTS_SHIFT) | \ + ((((uint64_t) (_shrd)) & BIND_SLOTS_MASK) << FEATURES_SHARED_SLOTS_SHIFT)) #define GET_EV_STATE(_e, _s) get_ev_state_bit(_e, SDEI_STATF_##_s) #define SET_EV_STATE(_e, _s) clr_ev_state_bit(_e->state, SDEI_STATF_##_s) -static inline int is_event_private(sdei_ev_map_t *map) +static inline bool is_event_private(sdei_ev_map_t *map) { - return ((map->map_flags & BIT(_SDEI_MAPF_PRIVATE_SHIFT)) != 0); + return ((map->map_flags & BIT_32(SDEI_MAPF_PRIVATE_SHIFT_)) != 0U); } -static inline int is_event_shared(sdei_ev_map_t *map) +static inline bool is_event_shared(sdei_ev_map_t *map) { return !is_event_private(map); } -static inline int is_event_critical(sdei_ev_map_t *map) +static inline bool is_event_critical(sdei_ev_map_t *map) { - return ((map->map_flags & BIT(_SDEI_MAPF_CRITICAL_SHIFT)) != 0); + return ((map->map_flags & BIT_32(SDEI_MAPF_CRITICAL_SHIFT_)) != 0U); } -static inline int is_event_normal(sdei_ev_map_t *map) +static inline bool is_event_normal(sdei_ev_map_t *map) { return 
!is_event_critical(map); } -static inline int is_event_signalable(sdei_ev_map_t *map) +static inline bool is_event_signalable(sdei_ev_map_t *map) { - return ((map->map_flags & BIT(_SDEI_MAPF_SIGNALABLE_SHIFT)) != 0); + return ((map->map_flags & BIT_32(SDEI_MAPF_SIGNALABLE_SHIFT_)) != 0U); } -static inline int is_map_dynamic(sdei_ev_map_t *map) +static inline bool is_map_dynamic(sdei_ev_map_t *map) { - return ((map->map_flags & BIT(_SDEI_MAPF_DYNAMIC_SHIFT)) != 0); + return ((map->map_flags & BIT_32(SDEI_MAPF_DYNAMIC_SHIFT_)) != 0U); } /* @@ -129,29 +129,29 @@ * called on them. This can be used on both static or dynamic events to check * for an associated interrupt. */ -static inline int is_map_bound(sdei_ev_map_t *map) +static inline bool is_map_bound(sdei_ev_map_t *map) { - return ((map->map_flags & BIT(_SDEI_MAPF_BOUND_SHIFT)) != 0); + return ((map->map_flags & BIT_32(SDEI_MAPF_BOUND_SHIFT_)) != 0U); } static inline void set_map_bound(sdei_ev_map_t *map) { - map->map_flags |= BIT(_SDEI_MAPF_BOUND_SHIFT); + map->map_flags |= BIT_32(SDEI_MAPF_BOUND_SHIFT_); } -static inline int is_map_explicit(sdei_ev_map_t *map) +static inline bool is_map_explicit(sdei_ev_map_t *map) { - return ((map->map_flags & BIT(_SDEI_MAPF_EXPLICIT_SHIFT)) != 0); + return ((map->map_flags & BIT_32(SDEI_MAPF_EXPLICIT_SHIFT_)) != 0U); } static inline void clr_map_bound(sdei_ev_map_t *map) { - map->map_flags &= ~(BIT(_SDEI_MAPF_BOUND_SHIFT)); + map->map_flags &= ~BIT_32(SDEI_MAPF_BOUND_SHIFT_); } -static inline int is_secure_sgi(unsigned int intr) +static inline bool is_secure_sgi(unsigned int intr) { - return (plat_ic_is_sgi(intr) && + return ((plat_ic_is_sgi(intr) != 0) && (plat_ic_get_interrupt_type(intr) == INTR_TYPE_EL3)); } @@ -164,24 +164,24 @@ cpu_context_t *ns_ctx = cm_get_context(NON_SECURE); el3_state_t *el3_ctx = get_el3state_ctx(ns_ctx); - return read_ctx_reg(el3_ctx, CTX_SCR_EL3) & SCR_HCE_BIT ? MODE_EL2 : - MODE_EL1; + return ((read_ctx_reg(el3_ctx, CTX_SCR_EL3) & SCR_HCE_BIT) != 0U) ? + MODE_EL2 : MODE_EL1; } static inline unsigned int sdei_event_priority(sdei_ev_map_t *map) { - return is_event_critical(map) ? PLAT_SDEI_CRITICAL_PRI : - PLAT_SDEI_NORMAL_PRI; + return (unsigned int) (is_event_critical(map) ? 
PLAT_SDEI_CRITICAL_PRI : + PLAT_SDEI_NORMAL_PRI); } -static inline int get_ev_state_bit(sdei_entry_t *se, unsigned int bit_no) +static inline bool get_ev_state_bit(sdei_entry_t *se, unsigned int bit_no) { - return ((se->state & BIT(bit_no)) != 0); + return ((se->state & BIT_32(bit_no)) != 0U); } static inline void clr_ev_state_bit(sdei_entry_t *se, unsigned int bit_no) { - se->state &= ~BIT(bit_no); + se->state &= ~BIT_32(bit_no); } /* SDEI actions for state transition */ @@ -228,19 +228,19 @@ void init_sdei_state(void); -sdei_ev_map_t *find_event_map_by_intr(int intr_num, int shared); +sdei_ev_map_t *find_event_map_by_intr(unsigned int intr_num, bool shared); sdei_ev_map_t *find_event_map(int ev_num); sdei_entry_t *get_event_entry(sdei_ev_map_t *map); -int sdei_event_context(void *handle, unsigned int param); -int sdei_event_complete(int resume, uint64_t arg); +int64_t sdei_event_context(void *handle, unsigned int param); +int sdei_event_complete(bool resume, uint64_t pc); void sdei_pe_unmask(void); -unsigned int sdei_pe_mask(void); +int64_t sdei_pe_mask(void); -int sdei_intr_handler(uint32_t intr, uint32_t flags, void *handle, +int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle, void *cookie); bool can_sdei_state_trans(sdei_entry_t *se, sdei_action_t act); void begin_sdei_synchronous_dispatch(struct jmpbuf *buffer); -#endif /* __SDEI_PRIVATE_H__ */ +#endif /* SDEI_PRIVATE_H */ diff --git a/services/std_svc/sdei/sdei_state.c b/services/std_svc/sdei/sdei_state.c index c1f099f..6665786 100644 --- a/services/std_svc/sdei/sdei_state.c +++ b/services/std_svc/sdei/sdei_state.c @@ -10,13 +10,13 @@ #include "sdei_private.h" /* Aliases for SDEI handler states: 'R'unning, 'E'nabled, and re'G'istered */ -#define r_ 0 +#define r_ 0U #define R_ (1u << SDEI_STATF_RUNNING) -#define e_ 0 +#define e_ 0U #define E_ (1u << SDEI_STATF_ENABLED) -#define g_ 0 +#define g_ 0U #define G_ (1u << SDEI_STATF_REGISTERED) /* All possible composite handler states */ @@ -29,7 +29,7 @@ #define REg_ (R_ | E_ | g_) #define REG_ (R_ | E_ | G_) -#define MAX_STATES (REG_ + 1) +#define MAX_STATES (REG_ + 1u) /* Invalid state */ #define SDEI_STATE_INVALID ((sdei_state_t) (-1))