diff --git a/bl31/runtime_svc.c b/bl31/runtime_svc.c
index b2ba685..08cd2d8 100644
--- a/bl31/runtime_svc.c
+++ b/bl31/runtime_svc.c
@@ -109,26 +109,35 @@
 			goto error;
 		}
 
-		/* Call the initialisation routine for this runtime service */
-		rc = rt_svc_descs[index].init();
-		if (rc) {
-			ERROR("Error initializing runtime service %s\n",
-					rt_svc_descs[index].name);
-		} else {
-			/*
-			 * Fill the indices corresponding to the start and end
-			 * owning entity numbers with the index of the
-			 * descriptor which will handle the SMCs for this owning
-			 * entity range.
-			 */
-			start_idx = get_unique_oen(rt_svc_descs[index].start_oen,
-					rt_svc_descs[index].call_type);
-			end_idx = get_unique_oen(rt_svc_descs[index].end_oen,
-					rt_svc_descs[index].call_type);
-
-			for (; start_idx <= end_idx; start_idx++)
-				rt_svc_descs_indices[start_idx] = index;
+		/*
+		 * The runtime service may have separate rt_svc_desc_t
+		 * entries for its fast smc and standard smc. Since the
+		 * service itself needs to be initialized only once, only
+		 * one of them will have an initialisation routine defined.
+		 * Call the initialisation routine for this runtime service,
+		 * if it is defined.
+		 */
+		if (rt_svc_descs[index].init) {
+			rc = rt_svc_descs[index].init();
+			if (rc) {
+				ERROR("Error initializing runtime service %s\n",
+						rt_svc_descs[index].name);
+				continue;
+			}
 		}
+
+		/*
+		 * Fill the indices corresponding to the start and end
+		 * owning entity numbers with the index of the
+		 * descriptor which will handle the SMCs for this owning
+		 * entity range.
+		 */
+		start_idx = get_unique_oen(rt_svc_descs[index].start_oen,
+				rt_svc_descs[index].call_type);
+		end_idx = get_unique_oen(rt_svc_descs[index].end_oen,
+				rt_svc_descs[index].call_type);
+
+		for (; start_idx <= end_idx; start_idx++)
+			rt_svc_descs_indices[start_idx] = index;
 	}
 
 	return;
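With init now optional, a service can register two descriptors — one per SMC type — that share a handler, exactly as the TSPD does at the end of this patch. A minimal sketch of that pattern, using a hypothetical "foo" service (the DECLARE_RT_SVC parameter order mirrors the tspd_fast/tspd_std usage below):

```c
#include <runtime_svc.h>

/* Sketch only: hypothetical service. Only the fast descriptor carries a
 * setup routine; the std descriptor passes NULL, and the registration
 * loop above now simply skips initialisation for it. */
static int32_t foo_setup(void)
{
	return 0;	/* one-time service initialisation */
}

static uint64_t foo_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
				uint64_t x3, uint64_t x4, void *cookie,
				void *handle, uint64_t flags)
{
	SMC_RET1(handle, SMC_UNK);
}

DECLARE_RT_SVC(foo_fast, OEN_TOS_START, OEN_TOS_END,
		SMC_TYPE_FAST, foo_setup, foo_smc_handler);

DECLARE_RT_SVC(foo_std, OEN_TOS_START, OEN_TOS_END,
		SMC_TYPE_STD, NULL, foo_smc_handler);
```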
diff --git a/bl32/tsp/aarch64/tsp_entrypoint.S b/bl32/tsp/aarch64/tsp_entrypoint.S
index 54276f2..9999c43 100644
--- a/bl32/tsp/aarch64/tsp_entrypoint.S
+++ b/bl32/tsp/aarch64/tsp_entrypoint.S
@@ -39,8 +39,11 @@
 	.globl	tsp_cpu_suspend_entry
 	.globl	tsp_cpu_resume_entry
 	.globl	tsp_fast_smc_entry
+	.globl	tsp_std_smc_entry
 	.globl	tsp_fiq_entry
 
+
+
 	/* ---------------------------------------------
 	 * Populate the params in x0-x7 from the pointer
 	 * to the smc args structure in x0.
@@ -317,8 +320,22 @@
 	 * ---------------------------------------------
 	 */
 func tsp_fast_smc_entry
-	bl	tsp_fast_smc_handler
+	bl	tsp_smc_handler
 	restore_args_call_smc
tsp_fast_smc_entry_panic:
 	b	tsp_fast_smc_entry_panic
 
+	/* ---------------------------------------------
+	 * This entrypoint is used by the TSPD to ask
+	 * the TSP to service a std smc request.
+	 * We will enable preemption during execution
+	 * of tsp_smc_handler.
+	 * ---------------------------------------------
+	 */
+func tsp_std_smc_entry
+	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
+	bl	tsp_smc_handler
+	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
+	restore_args_call_smc
+tsp_std_smc_entry_panic:
+	b	tsp_std_smc_entry_panic
diff --git a/bl32/tsp/aarch64/tsp_exceptions.S b/bl32/tsp/aarch64/tsp_exceptions.S
index ccb4cdd..f84b5e0 100644
--- a/bl32/tsp/aarch64/tsp_exceptions.S
+++ b/bl32/tsp/aarch64/tsp_exceptions.S
@@ -120,7 +120,14 @@
 	.align	7
 irq_sp_elx:
-	b	irq_sp_elx
+	save_caller_regs_and_lr
+	/* We just update some statistics in the handler */
+	bl	tsp_irq_received
+	/* Hand over control to the normal world to handle the IRQ */
+	smc	#0
+	/* A resumed std smc continues execution from here */
+	restore_caller_regs_and_lr
+	eret
 	check_vector_size irq_sp_elx
 
 	.align	7
diff --git a/bl32/tsp/tsp_interrupt.c b/bl32/tsp/tsp_interrupt.c
index d5d02c3..5719c06 100644
--- a/bl32/tsp/tsp_interrupt.c
+++ b/bl32/tsp/tsp_interrupt.c
@@ -107,3 +107,18 @@
 
 	return 0;
 }
+
+int32_t tsp_irq_received()
+{
+	uint64_t mpidr = read_mpidr();
+	uint32_t linear_id = platform_get_core_pos(mpidr);
+
+	tsp_stats[linear_id].irq_count++;
+	spin_lock(&console_lock);
+	printf("TSP: cpu 0x%x received irq\n", mpidr);
+	INFO("cpu 0x%x: %d irq requests\n",
+		mpidr, tsp_stats[linear_id].irq_count);
+	spin_unlock(&console_lock);
+
+	return TSP_PREEMPTED;
+}
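One step worth spelling out: tsp_irq_received() returns TSP_PREEMPTED, which the vector leaves in x0 before executing `smc #0`, so the handler's return value itself becomes the SMC function ID — the TSP_PREEMPTED case that this patch adds to tspd_smc_handler further down. A C rendering of the vector's control flow; the helpers here are illustrative stand-ins for the assembly macros and instructions, not real functions:

```c
#include <stdint.h>

#define TSP_PREEMPTED	0xf2000005

/* Illustrative stubs for what are assembly macros/instructions in the TSP */
static void save_caller_regs_and_lr(void) {}
static void restore_caller_regs_and_lr(void) {}
static void smc(uint64_t x0) { (void)x0; }
static int32_t tsp_irq_received_stub(void) { return TSP_PREEMPTED; }

/* What the irq_sp_elx vector does, expressed as C */
void irq_sp_elx_flow(void)
{
	save_caller_regs_and_lr();
	uint64_t fid = (uint32_t)tsp_irq_received_stub(); /* x0 = TSP_PREEMPTED */
	smc(fid);	/* trap to EL3; the TSPD hands SMC_PREEMPTED to the NS caller */
	/* a later TSP_FID_RESUME eventually ERETs back to this point */
	restore_caller_regs_and_lr();
	/* eret: the preempted std smc resumes */
}
```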
diff --git a/bl32/tsp/tsp_main.c b/bl32/tsp/tsp_main.c
index ec74db4..1c3f3b9 100644
--- a/bl32/tsp/tsp_main.c
+++ b/bl32/tsp/tsp_main.c
@@ -66,6 +66,7 @@
 * to change.
 ******************************************************************************/
 static const entry_info_t tsp_entry_info = {
+	tsp_std_smc_entry,
 	tsp_fast_smc_entry,
 	tsp_cpu_on_entry,
 	tsp_cpu_off_entry,
@@ -309,9 +310,9 @@
 * TSP fast smc handler. The secure monitor jumps to this function by
 * doing the ERET after populating X0-X7 registers. The arguments are received
 * in the function arguments in order. Once the service is rendered, this
- * function returns to Secure Monitor by raising SMC
+ * function returns to Secure Monitor by raising SMC.
 ******************************************************************************/
-tsp_args_t *tsp_fast_smc_handler(uint64_t func,
+tsp_args_t *tsp_smc_handler(uint64_t func,
			  uint64_t arg1,
			  uint64_t arg2,
			  uint64_t arg3,
@@ -324,18 +325,20 @@
 	uint64_t service_args[2];
 	uint64_t mpidr = read_mpidr();
 	uint32_t linear_id = platform_get_core_pos(mpidr);
+	const char *smc_type;
 
 	/* Update this cpu's statistics */
 	tsp_stats[linear_id].smc_count++;
 	tsp_stats[linear_id].eret_count++;
 
-	printf("SP: cpu 0x%x received fast smc 0x%x\n", read_mpidr(), func);
+	smc_type = ((func >> 31) & 1) == 1 ? "fast" : "standard";
+
+	printf("SP: cpu 0x%x received %s smc 0x%x\n", read_mpidr(), smc_type, func);
 	INFO("cpu 0x%x: %d smcs, %d erets\n", mpidr,
		tsp_stats[linear_id].smc_count,
		tsp_stats[linear_id].eret_count);
 
 	/* Render secure services and obtain results here */
-
 	results[0] = arg1;
 	results[1] = arg2;
 
@@ -346,20 +349,20 @@
 	tsp_get_magic(service_args);
 
 	/* Determine the function to perform based on the function ID */
-	switch (func) {
-	case TSP_FID_ADD:
+	switch (TSP_BARE_FID(func)) {
+	case TSP_ADD:
 		results[0] += service_args[0];
 		results[1] += service_args[1];
 		break;
-	case TSP_FID_SUB:
+	case TSP_SUB:
 		results[0] -= service_args[0];
 		results[1] -= service_args[1];
 		break;
-	case TSP_FID_MUL:
+	case TSP_MUL:
 		results[0] *= service_args[0];
 		results[1] *= service_args[1];
 		break;
-	case TSP_FID_DIV:
+	case TSP_DIV:
 		results[0] /= service_args[0] ? service_args[0] : 1;
 		results[1] /= service_args[1] ? service_args[1] : 1;
 		break;
@@ -367,9 +370,9 @@
 		break;
 	}
 
-	return set_smc_args(func,
+	return set_smc_args(func, 0,
			    results[0],
			    results[1],
-			    0, 0, 0, 0, 0);
+			    0, 0, 0, 0);
 }
diff --git a/include/bl31/runtime_svc.h b/include/bl31/runtime_svc.h
index 0f510f7..66562e1 100644
--- a/include/bl31/runtime_svc.h
+++ b/include/bl31/runtime_svc.h
@@ -51,13 +51,15 @@
 #define GET_SMC_CC(id)			((id >> FUNCID_CC_SHIFT) & \
					 FUNCID_CC_MASK)
+#define GET_SMC_TYPE(id)		((id >> FUNCID_TYPE_SHIFT) & \
+					 FUNCID_TYPE_MASK)
 
 #define SMC_64				1
 #define SMC_32				0
 #define SMC_UNK				0xffffffff
 #define SMC_TYPE_FAST			1
 #define SMC_TYPE_STD			0
-
+#define SMC_PREEMPTED			0xfffffffe
 /*******************************************************************************
 * Owning entity number definitions inside the function id as per the SMC
 * calling convention
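The new GET_SMC_TYPE() macro formalises the ad-hoc `(func >> 31) & 1` test used in tsp_smc_handler above: per the SMC calling convention, bit 31 of the function ID distinguishes fast from standard calls. A self-contained check, assuming FUNCID_TYPE_SHIFT is 31 and FUNCID_TYPE_MASK is 0x1 (their conventional definitions; they are not shown in this hunk):

```c
#include <assert.h>
#include <stdint.h>

#define FUNCID_TYPE_SHIFT	31
#define FUNCID_TYPE_MASK	0x1
#define GET_SMC_TYPE(id)	(((id) >> FUNCID_TYPE_SHIFT) & FUNCID_TYPE_MASK)
#define SMC_TYPE_FAST		1
#define SMC_TYPE_STD		0

int main(void)
{
	uint32_t fast_fid = 0xf2002000;	/* bit 31 set: fast */
	uint32_t std_fid = 0x72002000;	/* bit 31 clear: standard */

	assert(GET_SMC_TYPE(fast_fid) == SMC_TYPE_FAST);
	assert(GET_SMC_TYPE(std_fid) == SMC_TYPE_STD);
	return 0;
}
```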
diff --git a/include/bl32/payloads/tsp.h b/include/bl32/payloads/tsp.h
index 3aa3e8c..2e32c77 100644
--- a/include/bl32/payloads/tsp.h
+++ b/include/bl32/payloads/tsp.h
@@ -40,7 +40,7 @@
 #define TSP_OFF_DONE		0xf2000002
 #define TSP_SUSPEND_DONE	0xf2000003
 #define TSP_RESUME_DONE		0xf2000004
-#define TSP_WORK_DONE		0xf2000005
+#define TSP_PREEMPTED		0xf2000005
 
 /*
 * Function identifiers to handle FIQs through the synchronous handling model.
@@ -49,16 +49,35 @@
 */
 #define TSP_HANDLED_S_EL1_FIQ		0xf2000006
 #define TSP_EL3_FIQ			0xf2000007
-#define TSP_HANDLE_FIQ_AND_RETURN	0x2004
 
 /* SMC function ID that TSP uses to request service from secure monitor */
 #define TSP_GET_ARGS		0xf2001000
 
-/* Function IDs for various TSP services */
-#define TSP_FID_ADD		0xf2002000
-#define TSP_FID_SUB		0xf2002001
-#define TSP_FID_MUL		0xf2002002
-#define TSP_FID_DIV		0xf2002003
+/*
+ * Identifiers for various TSP services. Corresponding function IDs (whether
+ * fast or standard) are generated by the macros defined below.
+ */
+#define TSP_ADD			0x2000
+#define TSP_SUB			0x2001
+#define TSP_MUL			0x2002
+#define TSP_DIV			0x2003
+#define TSP_HANDLE_FIQ_AND_RETURN	0x2004
+
+/*
+ * Generate function IDs for TSP services to be used in SMC calls, by
+ * appropriately setting bit 31 to differentiate standard and fast SMC calls
+ */
+#define TSP_STD_FID(fid)	((fid) | 0x72000000 | (0 << 31))
+#define TSP_FAST_FID(fid)	((fid) | 0x72000000 | (1 << 31))
+
+/* SMC function ID to resume a previously preempted std smc */
+#define TSP_FID_RESUME		TSP_STD_FID(0x3000)
+
+/*
+ * Identify a TSP service from a function ID by extracting the lower 16 bits
+ * of the SMC function ID
+ */
+#define TSP_BARE_FID(fid)	((fid) & 0xffff)
 
 /*
 * Total number of function IDs implemented for services offered to NS clients.
@@ -108,6 +127,7 @@
			     uint64_t arg7);
 
 typedef struct entry_info {
+	tsp_generic_fptr_t std_smc_entry;
 	tsp_generic_fptr_t fast_smc_entry;
 	tsp_generic_fptr_t cpu_on_entry;
 	tsp_generic_fptr_t cpu_off_entry;
@@ -118,6 +138,7 @@
 
 typedef struct work_statistics {
 	uint32_t fiq_count;		/* Number of FIQs on this cpu */
+	uint32_t irq_count;		/* Number of IRQs on this cpu */
 	uint32_t sync_fiq_count;	/* Number of sync. fiqs on this cpu */
 	uint32_t sync_fiq_ret_count;	/* Number of fiq returns on this cpu */
 	uint32_t smc_count;		/* Number of returns on this cpu */
@@ -153,6 +174,14 @@
			  uint64_t arg5,
			  uint64_t arg6,
			  uint64_t arg7);
+extern void tsp_std_smc_entry(uint64_t arg0,
+			      uint64_t arg1,
+			      uint64_t arg2,
+			      uint64_t arg3,
+			      uint64_t arg4,
+			      uint64_t arg5,
+			      uint64_t arg6,
+			      uint64_t arg7);
 extern void tsp_fast_smc_entry(uint64_t arg0,
			       uint64_t arg1,
			       uint64_t arg2,
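Expanding the new macros shows the encoding is backward compatible: the fast variants reproduce the TSP_FID_* values this patch deletes (TSP_FAST_FID(TSP_ADD) is 0xf2002000, the old TSP_FID_ADD), the std variants only differ in bit 31, and TSP_BARE_FID() recovers the service identifier. A quick standalone check — `1u` is used here to avoid the signed-shift issue that the header's `(1 << 31)` technically has:

```c
#include <assert.h>
#include <stdint.h>

#define TSP_ADD			0x2000
#define TSP_STD_FID(fid)	((fid) | 0x72000000 | (0u << 31))
#define TSP_FAST_FID(fid)	((fid) | 0x72000000 | (1u << 31))
#define TSP_FID_RESUME		TSP_STD_FID(0x3000)
#define TSP_BARE_FID(fid)	((fid) & 0xffff)

int main(void)
{
	assert(TSP_FAST_FID(TSP_ADD) == 0xf2002000u);	/* old TSP_FID_ADD */
	assert(TSP_STD_FID(TSP_ADD) == 0x72002000u);
	assert(TSP_FID_RESUME == 0x72003000u);
	assert(TSP_BARE_FID(TSP_FAST_FID(TSP_ADD)) == TSP_ADD);
	return 0;
}
```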
diff --git a/services/spd/tspd/tspd_main.c b/services/spd/tspd/tspd_main.c
index 74e2af0..ec2d334 100644
--- a/services/spd/tspd/tspd_main.c
+++ b/services/spd/tspd/tspd_main.c
@@ -257,7 +257,6 @@
			 uint64_t flags)
 {
 	cpu_context_t *ns_cpu_context;
-	gp_regs_t *ns_gp_regs;
 	unsigned long mpidr = read_mpidr();
 	uint32_t linear_id = platform_get_core_pos(mpidr), ns;
 	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
@@ -268,6 +267,31 @@
 	switch (smc_fid) {
 
 	/*
+	 * This function ID is used by the TSP to indicate that it was
+	 * preempted by a normal world IRQ.
+	 */
+	case TSP_PREEMPTED:
+		if (ns)
+			SMC_RET1(handle, SMC_UNK);
+
+		assert(handle == cm_get_context(mpidr, SECURE));
+		cm_el1_sysregs_context_save(SECURE);
+
+		/* Get a reference to the non-secure context */
+		ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
+		assert(ns_cpu_context);
+
+		/*
+		 * Restore non-secure state. There is no need to save the
+		 * secure system register context since the TSP was supposed
+		 * to preserve it during S-EL1 interrupt handling.
+		 */
+		cm_el1_sysregs_context_restore(NON_SECURE);
+		cm_set_next_eret_context(NON_SECURE);
+
+		SMC_RET1(ns_cpu_context, SMC_PREEMPTED);
+
+	/*
 	 * This function ID is used only by the TSP to indicate that it has
 	 * finished handling a S-EL1 FIQ interrupt. Execution should resume
 	 * in the normal world.
@@ -357,9 +381,6 @@
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);
 
-		/* Should never reach here */
-		assert(0);
-
 	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
@@ -392,18 +413,20 @@
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);
 
-		/* Should never reach here */
-		assert(0);
-
 	/*
	 * Request from non-secure client to perform an
	 * arithmetic operation or response from secure
	 * payload to an earlier request.
	 */
-	case TSP_FID_ADD:
-	case TSP_FID_SUB:
-	case TSP_FID_MUL:
-	case TSP_FID_DIV:
+	case TSP_FAST_FID(TSP_ADD):
+	case TSP_FAST_FID(TSP_SUB):
+	case TSP_FAST_FID(TSP_MUL):
+	case TSP_FAST_FID(TSP_DIV):
+
+	case TSP_STD_FID(TSP_ADD):
+	case TSP_STD_FID(TSP_SUB):
+	case TSP_STD_FID(TSP_MUL):
+	case TSP_STD_FID(TSP_DIV):
 		if (ns) {
			/*
			 * This is a fresh request from the non-secure client.
@@ -412,11 +435,15 @@
			 * state and send the request to the secure payload.
			 */
			assert(handle == cm_get_context(mpidr, NON_SECURE));
+
+			/* Check if we are already preempted */
+			if (get_std_smc_active_flag(tsp_ctx->state))
+				SMC_RET1(handle, SMC_UNK);
+
			cm_el1_sysregs_context_save(NON_SECURE);
 
			/* Save x1 and x2 for use by TSP_GET_ARGS call below */
-			SMC_SET_GP(handle, CTX_GPREG_X1, x1);
-			SMC_SET_GP(handle, CTX_GPREG_X2, x2);
+			store_tsp_args(tsp_ctx, x1, x2);
 
			/*
			 * We are done stashing the non-secure context. Ask the
@@ -431,17 +458,27 @@
			 * from this function.
			 */
			assert(&tsp_ctx->cpu_ctx == cm_get_context(mpidr, SECURE));
-			set_aapcs_args7(&tsp_ctx->cpu_ctx, smc_fid, x1, x2, 0, 0,
-					0, 0, 0);
-			cm_set_elr_el3(SECURE, (uint64_t) tsp_entry_info->fast_smc_entry);
+
+			/*
+			 * Set the appropriate entry point for the SMC.
+			 * We expect the TSP to manage the PSTATE.I and PSTATE.F
+			 * flags as appropriate.
+			 */
+			if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
+				cm_set_elr_el3(SECURE, (uint64_t)
+						tsp_entry_info->fast_smc_entry);
+			} else {
+				set_std_smc_active_flag(tsp_ctx->state);
+				cm_set_elr_el3(SECURE, (uint64_t)
+						tsp_entry_info->std_smc_entry);
+			}
+
			cm_el1_sysregs_context_restore(SECURE);
			cm_set_next_eret_context(SECURE);
-
-			return smc_fid;
+			SMC_RET3(&tsp_ctx->cpu_ctx, smc_fid, x1, x2);
		} else {
			/*
			 * This is the result from the secure client of an
-			 * earlier request. The results are in x1-x2. Copy it
+			 * earlier request. The results are in x1-x3. Copy it
			 * into the non-secure context, save the secure state
			 * and return to the non-secure state.
			 */
@@ -451,18 +488,53 @@
			/* Get a reference to the non-secure context */
			ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
			assert(ns_cpu_context);
-			ns_gp_regs = get_gpregs_ctx(ns_cpu_context);
 
			/* Restore non-secure state */
			cm_el1_sysregs_context_restore(NON_SECURE);
			cm_set_next_eret_context(NON_SECURE);
-
-			SMC_RET2(ns_gp_regs, x1, x2);
+			if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_STD)
+				clr_std_smc_active_flag(tsp_ctx->state);
+			SMC_RET3(ns_cpu_context, x1, x2, x3);
		}
 
		break;
 
	/*
+	 * Request from the non-secure world to resume the preempted
+	 * Standard SMC call.
+	 */
+	case TSP_FID_RESUME:
+		/* RESUME should be invoked only by the normal world */
+		if (!ns) {
+			assert(0);
+			break;
+		}
+
+		/*
+		 * This is a resume request from the non-secure client.
+		 * Save the non-secure state and send the request to
+		 * the secure payload.
+		 */
+		assert(handle == cm_get_context(mpidr, NON_SECURE));
+
+		/* A std smc must have been preempted before it can be resumed */
+		if (!get_std_smc_active_flag(tsp_ctx->state))
+			SMC_RET1(handle, SMC_UNK);
+
+		cm_el1_sysregs_context_save(NON_SECURE);
+
+		/*
+		 * We are done stashing the non-secure context. Ask the
+		 * secure payload to do the work now.
+		 */
+
+		/*
+		 * We just need to return to the preempted point in
+		 * the TSP and the execution will resume as normal.
+		 */
+		cm_el1_sysregs_context_restore(SECURE);
+		cm_set_next_eret_context(SECURE);
+		SMC_RET0(&tsp_ctx->cpu_ctx);
+
+	/*
	 * This is a request from the secure payload for more arguments
	 * for an ongoing arithmetic operation requested by the
	 * non-secure world. Simply return the arguments from the non-
@@ -475,10 +547,9 @@
		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
		assert(ns_cpu_context);
-		ns_gp_regs = get_gpregs_ctx(ns_cpu_context);
 
-		SMC_RET2(handle, read_ctx_reg(ns_gp_regs, CTX_GPREG_X1),
-				read_ctx_reg(ns_gp_regs, CTX_GPREG_X2));
+		get_tsp_args(tsp_ctx, x1, x2);
+		SMC_RET2(handle, x1, x2);
 
	case TOS_CALL_COUNT:
		/*
@@ -502,9 +573,9 @@
 	SMC_RET1(handle, SMC_UNK);
 }
 
-/* Define a SPD runtime service descriptor */
+/* Define a SPD runtime service descriptor for fast SMC calls */
 DECLARE_RT_SVC(
-	spd,
+	tspd_fast,
 
 	OEN_TOS_START,
 	OEN_TOS_END,
@@ -512,3 +583,14 @@
 	tspd_setup,
 	tspd_smc_handler
 );
+
+/* Define a SPD runtime service descriptor for standard SMC calls */
+DECLARE_RT_SVC(
+	tspd_std,
+
+	OEN_TOS_START,
+	OEN_TOS_END,
+	SMC_TYPE_STD,
+	NULL,
+	tspd_smc_handler
+);
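Note that the TSP_FID_RESUME case above now ends with SMC_RET0(&tsp_ctx->cpu_ctx); without that return, execution would fall through into the TSP_GET_ARGS handling. The handler also relies on get/set/clr_std_smc_active_flag() to serialise standard SMCs per cpu, but their definitions are not in the hunks shown here (they belong with tsp_context_t in tspd_private.h). A plausible sketch, assuming the flag occupies a single bit of tsp_ctx->state — the bit position here is an assumption for illustration:

```c
/* Hypothetical layout: bit 0 of the per-cpu 'state' word tracks whether
 * a standard SMC is in progress (and may therefore be preempted). */
#define STD_SMC_ACTIVE_FLAG_SHIFT	0
#define STD_SMC_ACTIVE_FLAG_MASK	0x1

#define get_std_smc_active_flag(state) \
	(((state) >> STD_SMC_ACTIVE_FLAG_SHIFT) & STD_SMC_ACTIVE_FLAG_MASK)
#define set_std_smc_active_flag(state) \
	((state) |= (STD_SMC_ACTIVE_FLAG_MASK << STD_SMC_ACTIVE_FLAG_SHIFT))
#define clr_std_smc_active_flag(state) \
	((state) &= ~(STD_SMC_ACTIVE_FLAG_MASK << STD_SMC_ACTIVE_FLAG_SHIFT))
```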
diff --git a/services/spd/tspd/tspd_private.h b/services/spd/tspd/tspd_private.h
index b9cf496..7395bb9 100644
--- a/services/spd/tspd/tspd_private.h
+++ b/services/spd/tspd/tspd_private.h
@@ -125,6 +125,12 @@
 #include
 #include
 
+/*
+ * The number of arguments to save during an SMC call for the TSP.
+ * Currently only x1 and x2 are used by the TSP.
+ */
+#define TSP_NUM_ARGS	0x2
+
 /* AArch64 callee saved general purpose register context structure. */
 DEFINE_REG_STRUCT(c_rt_regs, TSPD_C_RT_CTX_ENTRIES);
 
@@ -147,6 +153,8 @@
 * 'c_rt_ctx'       - stack address to restore C runtime context from after
 *                    returning from a synchronous entry into the SP.
 * 'cpu_ctx'        - space to maintain SP architectural state
+ * 'saved_tsp_args' - space to store arguments for TSP arithmetic operations
+ *                    which will be queried by the TSP using the TSP_GET_ARGS SMC
 ******************************************************************************/
 typedef struct tsp_context {
 	uint64_t saved_elr_el3;
@@ -155,8 +163,20 @@
 	uint64_t mpidr;
 	uint64_t c_rt_ctx;
 	cpu_context_t cpu_ctx;
+	uint64_t saved_tsp_args[TSP_NUM_ARGS];
 } tsp_context_t;
 
+/* Helper macros to store and retrieve tsp args from tsp_context */
+#define store_tsp_args(tsp_ctx, x1, x2)	do {\
+		tsp_ctx->saved_tsp_args[0] = x1;\
+		tsp_ctx->saved_tsp_args[1] = x2;\
+	} while (0)
+
+#define get_tsp_args(tsp_ctx, x1, x2)	do {\
+		x1 = tsp_ctx->saved_tsp_args[0];\
+		x2 = tsp_ctx->saved_tsp_args[1];\
+	} while (0)
+
 /* TSPD power management handlers */
 extern const spd_pm_ops_t tspd_pm;
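Taken together, the patch defines this protocol for a normal-world caller: a standard SMC that gets preempted comes back with SMC_PREEMPTED in x0, and the caller re-enters the TSP with TSP_FID_RESUME until the call completes, at which point x0 carries 0 (the second argument of set_smc_args) and the results sit in x1 and x2. A sketch of a hypothetical client loop — smc_call() and smc_result_t are illustrative stand-ins for a normal-world SMC primitive, not part of this patch:

```c
#include <stdint.h>

#define SMC_PREEMPTED		0xfffffffe
#define TSP_ADD			0x2000
#define TSP_STD_FID(fid)	((fid) | 0x72000000 | (0u << 31))
#define TSP_FID_RESUME		TSP_STD_FID(0x3000)

typedef struct { uint64_t x0, x1, x2, x3; } smc_result_t;

/* Illustrative: issues an SMC from the normal world and returns x0-x3 */
extern smc_result_t smc_call(uint64_t fid, uint64_t arg1, uint64_t arg2);

/* Drive a standard TSP_ADD to completion, resuming across preemptions */
int tsp_add(uint64_t a, uint64_t b, uint64_t *res0, uint64_t *res1)
{
	smc_result_t r = smc_call(TSP_STD_FID(TSP_ADD), a, b);

	while (r.x0 == SMC_PREEMPTED)
		r = smc_call(TSP_FID_RESUME, 0, 0);

	*res0 = r.x1;
	*res1 = r.x2;
	return (int)r.x0;	/* 0 on success */
}
```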