--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -96,7 +96,12 @@ struct arm64_cpu_capabilities {
u16 capability;
int def_scope; /* default scope */
bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
- int (*enable)(void *); /* Called on all active CPUs */
+ /*
+ * Take the appropriate actions to enable this capability for this CPU.
+ * For each successfully booted CPU, this method is called for each
+ * globally detected capability.
+ */
+ void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
union {
struct { /* To be used for erratum handling only */
u32 midr_model;
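For illustration, a hook written against the new prototype takes the capability entry directly and returns nothing. A minimal sketch follows; the capability name, predicate, and table entry below are hypothetical, not something this patch adds:

/* Hypothetical example, not part of this patch. */
static bool has_widget(const struct arm64_cpu_capabilities *cap, int scope)
{
	return false;	/* placeholder detection predicate */
}

static void cpu_enable_widget(const struct arm64_cpu_capabilities *cap)
{
	/* Per-CPU action; called once on each successfully booted CPU. */
}

static const struct arm64_cpu_capabilities widget_cap = {
	.desc		= "Widget support",
	.capability	= ARM64_HAS_WIDGET,	/* hypothetical cap number */
	.def_scope	= SCOPE_SYSTEM,
	.matches	= has_widget,
	.cpu_enable	= cpu_enable_widget,
};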
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -37,6 +37,7 @@
#include <linux/string.h>
#include <asm/alternative.h>
+#include <asm/cpufeature.h>
#include <asm/fpsimd.h>
#include <asm/hw_breakpoint.h>
#include <asm/lse.h>
@@ -222,8 +223,8 @@ static inline void spin_lock_prefetch(const void *ptr)
#endif
-int cpu_enable_pan(void *__unused);
-int cpu_enable_cache_maint_trap(void *__unused);
+void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused);
+void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused);
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -61,11 +61,11 @@ has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
(arm64_ftr_reg_ctrel0.sys_val & mask);
}
-static int cpu_enable_trap_ctr_access(void *__unused)
+static void
+cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
/* Clear SCTLR_EL1.UCT */
config_sctlr_el1(SCTLR_EL1_UCT, 0);
- return 0;
}
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
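The UCT trap above, like the UCI and PAN hooks later in this patch, is armed via config_sctlr_el1(). For context, that helper is a read-modify-write of SCTLR_EL1, roughly as sketched below (an asm/sysreg.h helper of this era, reproduced as a sketch; it is not changed by this patch):

static inline void config_sctlr_el1(u32 clear, u32 set)
{
	u32 val;

	val = read_sysreg(sctlr_el1);	/* current SCTLR_EL1 value */
	val &= ~clear;			/* e.g. SCTLR_EL1_UCT above */
	val |= set;
	write_sysreg(val, sctlr_el1);
}

Clearing SCTLR_EL1.UCT makes EL0 reads of CTR_EL0 trap to EL1, allowing the kernel to present a sanitised cache-type value on mismatched systems.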
@@ -169,25 +169,25 @@ static void call_hvc_arch_workaround_1(void)
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
-static int enable_smccc_arch_workaround_1(void *data)
+static void
+enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
{
- const struct arm64_cpu_capabilities *entry = data;
bp_hardening_cb_t cb;
void *smccc_start, *smccc_end;
struct arm_smccc_res res;
if (!entry->matches(entry, SCOPE_LOCAL_CPU))
- return 0;
+ return;
if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
- return 0;
+ return;
switch (psci_ops.conduit) {
case PSCI_CONDUIT_HVC:
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
if ((int)res.a0 < 0)
- return 0;
+ return;
cb = call_hvc_arch_workaround_1;
smccc_start = __smccc_workaround_1_hvc_start;
smccc_end = __smccc_workaround_1_hvc_end;
@@ -197,19 +197,19 @@ static int enable_smccc_arch_workaround_1(void *data)
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
if ((int)res.a0 < 0)
- return 0;
+ return;
cb = call_smc_arch_workaround_1;
smccc_start = __smccc_workaround_1_smc_start;
smccc_end = __smccc_workaround_1_smc_end;
break;
default:
- return 0;
+ return;
}
install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
- return 0;
+ return;
}
static void qcom_link_stack_sanitization(void)
@@ -224,15 +224,12 @@ static void qcom_link_stack_sanitization(void)
: "=&r" (tmp));
}
-static int qcom_enable_link_stack_sanitization(void *data)
+static void
+qcom_enable_link_stack_sanitization(const struct arm64_cpu_capabilities *entry)
{
- const struct arm64_cpu_capabilities *entry = data;
-
install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
__qcom_hyp_sanitize_link_stack_start,
__qcom_hyp_sanitize_link_stack_end);
-
- return 0;
}
#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
@@ -431,7 +428,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.desc = "ARM errata 826319, 827319, 824069",
.capability = ARM64_WORKAROUND_CLEAN_CACHE,
MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
- .enable = cpu_enable_cache_maint_trap,
+ .cpu_enable = cpu_enable_cache_maint_trap,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
@@ -440,7 +437,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.desc = "ARM errata 819472",
.capability = ARM64_WORKAROUND_CLEAN_CACHE,
MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
- .enable = cpu_enable_cache_maint_trap,
+ .cpu_enable = cpu_enable_cache_maint_trap,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
@@ -521,14 +518,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
.matches = has_mismatched_cache_type,
.def_scope = SCOPE_LOCAL_CPU,
- .enable = cpu_enable_trap_ctr_access,
+ .cpu_enable = cpu_enable_trap_ctr_access,
},
{
.desc = "Mismatched cache type",
.capability = ARM64_MISMATCHED_CACHE_TYPE,
.matches = has_mismatched_cache_type,
.def_scope = SCOPE_LOCAL_CPU,
- .enable = cpu_enable_trap_ctr_access,
+ .cpu_enable = cpu_enable_trap_ctr_access,
},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
{
@@ -567,27 +564,27 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
- .enable = enable_smccc_arch_workaround_1,
+ .cpu_enable = enable_smccc_arch_workaround_1,
},
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
- .enable = enable_smccc_arch_workaround_1,
+ .cpu_enable = enable_smccc_arch_workaround_1,
},
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
- .enable = enable_smccc_arch_workaround_1,
+ .cpu_enable = enable_smccc_arch_workaround_1,
},
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
- .enable = enable_smccc_arch_workaround_1,
+ .cpu_enable = enable_smccc_arch_workaround_1,
},
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
- .enable = qcom_enable_link_stack_sanitization,
+ .cpu_enable = qcom_enable_link_stack_sanitization,
},
{
.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
@@ -596,7 +593,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
- .enable = qcom_enable_link_stack_sanitization,
+ .cpu_enable = qcom_enable_link_stack_sanitization,
},
{
.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
@@ -605,12 +602,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
- .enable = enable_smccc_arch_workaround_1,
+ .cpu_enable = enable_smccc_arch_workaround_1,
},
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
- .enable = enable_smccc_arch_workaround_1,
+ .cpu_enable = enable_smccc_arch_workaround_1,
},
#endif
#ifdef CONFIG_ARM64_SSBD
@@ -636,8 +633,8 @@ void verify_local_cpu_errata_workarounds(void)
for (; caps->matches; caps++) {
if (cpus_have_cap(caps->capability)) {
- if (caps->enable)
- caps->enable((void *)caps);
+ if (caps->cpu_enable)
+ caps->cpu_enable(caps);
} else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
pr_crit("CPU%d: Requires work around for %s, not detected"
" at boot time\n",
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -859,7 +859,8 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
ID_AA64PFR0_CSV3_SHIFT);
}
-static int kpti_install_ng_mappings(void *__unused)
+static void
+kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
{
typedef void (kpti_remap_fn)(int, int, phys_addr_t);
extern kpti_remap_fn idmap_kpti_install_ng_mappings;
@@ -869,7 +870,7 @@ static int kpti_install_ng_mappings(void *__unused)
int cpu = smp_processor_id();
if (kpti_applied)
- return 0;
+ return;
remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
@@ -880,7 +881,7 @@ static int kpti_install_ng_mappings(void *__unused)
if (!cpu)
kpti_applied = true;
- return 0;
+ return;
}
static int __init parse_kpti(char *str)
@@ -897,7 +898,7 @@ static int __init parse_kpti(char *str)
early_param("kpti", parse_kpti);
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
-static int cpu_copy_el2regs(void *__unused)
+static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
{
/*
* Copy register values that aren't redirected by hardware.
@@ -909,8 +910,6 @@ static int cpu_copy_el2regs(void *__unused)
*/
if (!alternatives_applied)
write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
-
- return 0;
}
static const struct arm64_cpu_capabilities arm64_features[] = {
@@ -934,7 +933,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.field_pos = ID_AA64MMFR1_PAN_SHIFT,
.sign = FTR_UNSIGNED,
.min_field_value = 1,
- .enable = cpu_enable_pan,
+ .cpu_enable = cpu_enable_pan,
},
#endif /* CONFIG_ARM64_PAN */
#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
@@ -982,7 +981,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_VIRT_HOST_EXTN,
.def_scope = SCOPE_SYSTEM,
.matches = runs_at_el2,
- .enable = cpu_copy_el2regs,
+ .cpu_enable = cpu_copy_el2regs,
},
{
.desc = "32-bit EL0 Support",
@@ -1006,7 +1005,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_UNMAP_KERNEL_AT_EL0,
.def_scope = SCOPE_SYSTEM,
.matches = unmap_kernel_at_el0,
- .enable = kpti_install_ng_mappings,
+ .cpu_enable = kpti_install_ng_mappings,
},
#endif
{
@@ -1169,6 +1168,14 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
}
}
+static int __enable_cpu_capability(void *arg)
+{
+ const struct arm64_cpu_capabilities *cap = arg;
+
+ cap->cpu_enable(cap);
+ return 0;
+}
+
/*
* Run through the enabled capabilities and enable() it on all active
* CPUs
@@ -1184,14 +1191,15 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
/* Ensure cpus_have_const_cap(num) works */
static_branch_enable(&cpu_hwcap_keys[num]);
- if (caps->enable) {
+ if (caps->cpu_enable) {
/*
* Use stop_machine() as it schedules the work allowing
* us to modify PSTATE, instead of on_each_cpu() which
* uses an IPI, giving us a PSTATE that disappears when
* we return.
*/
- stop_machine(caps->enable, (void *)caps, cpu_online_mask);
+ stop_machine(__enable_cpu_capability, (void *)caps,
+ cpu_online_mask);
}
}
}
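stop_machine() still takes the old int (*)(void *) shape, which is the sole reason the __enable_cpu_capability() shim above exists: the typed pointer is passed through the void * argument and recovered inside the shim. The bridging pattern itself is plain C; a self-contained userspace analogue (all names here invented for illustration) would be:

#include <stdio.h>

struct capability {
	const char *desc;
	void (*cpu_enable)(const struct capability *cap);
};

/* Typed hook: no cast and no meaningless return value. */
static void enable_example(const struct capability *cap)
{
	printf("enabling %s\n", cap->desc);
}

/* Shim matching the int (*)(void *) shape stop_machine() expects. */
static int enable_trampoline(void *arg)
{
	const struct capability *cap = arg;

	cap->cpu_enable(cap);
	return 0;
}

/* Stand-in for stop_machine(fn, data, cpus). */
static int run_stopped(int (*fn)(void *), void *data)
{
	return fn(data);
}

int main(void)
{
	struct capability cap = {
		.desc		= "example",
		.cpu_enable	= enable_example,
	};

	return run_stopped(enable_trampoline, &cap);
}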
@@ -1249,8 +1257,8 @@ verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list)
smp_processor_id(), caps->desc);
cpu_die_early();
}
- if (caps->enable)
- caps->enable((void *)caps);
+ if (caps->cpu_enable)
+ caps->cpu_enable(caps);
}
}
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -28,6 +28,7 @@
#include <linux/signal.h>
#include <asm/fpsimd.h>
+#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/simd.h>
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -38,6 +38,7 @@
#include <asm/atomic.h>
#include <asm/bug.h>
+#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/insn.h>
@@ -436,10 +437,9 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
}
-int cpu_enable_cache_maint_trap(void *__unused)
+void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
config_sctlr_el1(SCTLR_EL1_UCI, 0);
- return 0;
}
#define __user_cache_maint(insn, address, res) \
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -875,7 +875,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
NOKPROBE_SYMBOL(do_debug_exception);
#ifdef CONFIG_ARM64_PAN
-int cpu_enable_pan(void *__unused)
+void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
{
/*
* We modify PSTATE. This won't work from irq context as the PSTATE
@@ -885,6 +885,5 @@ int cpu_enable_pan(void *__unused)
config_sctlr_el1(SCTLR_EL1_SPAN, 0);
asm(SET_PSTATE_PAN(1));
- return 0;
}
#endif /* CONFIG_ARM64_PAN */
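Taken together, each conversion in this patch is the same mechanical rewrite. For a hypothetical hook (names invented for illustration) the before and after shapes are:

/* Before: stop_machine()-shaped; argument cast back from void *. */
static int old_hook(void *data)
{
	const struct arm64_cpu_capabilities *cap = data;

	/* ... act on cap ... */
	return 0;	/* return value was never checked */
}

/* After: typed argument, no dead return code. */
static void new_hook(const struct arm64_cpu_capabilities *cap)
{
	/* ... act on cap ... */
}

Callers that must still run on every online CPU go through the __enable_cpu_capability() trampoline added in cpufeature.c above.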