@@ -430,6 +430,11 @@ static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
return kvm_phys_to_vttbr(baddr) | vmid_field;
}
+static inline void kvm_workaround_1542418_vmid_rollover(void)
+{
+ /* 32-bit hosts are not affected by erratum 1542418 */
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* __ARM_KVM_MMU_H__ */
@@ -9,6 +9,7 @@
#include <asm/page.h>
#include <asm/memory.h>
+#include <asm/mmu_context.h>
#include <asm/cpufeature.h>
/*
@@ -603,5 +604,19 @@ static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}
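+/*
+ * Cortex-A77 erratum 1542418: run the local CPU's ASID rollover workaround
+ * before a guest ASID can be reused by another vcpu or under a new VMID.
+ */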
+static inline void kvm_workaround_1542418_vmid_rollover(void)
+{
+ unsigned long flags;
+
+ if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1542418) ||
+ !cpus_have_const_cap(ARM64_WORKAROUND_1542418))
+ return;
+
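+ /* Run the workaround on this CPU with interrupts masked. */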
+ local_irq_save(flags);
+ arm64_workaround_1542418_asid_rollover();
+ local_irq_restore(flags);
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
@@ -368,6 +368,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
*/
if (*last_ran != vcpu->vcpu_id) {
kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
+
+ /*
+ * 'last_ran' and this vcpu may share an ASID and hit the
+ * conditions for Cortex-A77 erratum 1542418.
+ */
+ kvm_workaround_1542418_vmid_rollover();
+
*last_ran = vcpu->vcpu_id;
}
@@ -458,15 +465,16 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
return vcpu_mode_priv(vcpu);
}
-/* Just ensure a guest exit from a particular CPU */
-static void exit_vm_noop(void *info)
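+/* Ensure a guest exit and apply the erratum 1542418 workaround on this CPU */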
+static void exit_vmid_rollover(void *info)
{
+ kvm_workaround_1542418_vmid_rollover();
}
-void force_vm_exit(const cpumask_t *mask)
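+/*
+ * Force every CPU in @mask out of the guest and run the VMID rollover
+ * workaround on each of them, including the calling CPU.
+ */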
+static void force_vmid_rollover_exit(const cpumask_t *mask)
{
preempt_disable();
- smp_call_function_many(mask, exit_vm_noop, NULL, true);
+ smp_call_function_many(mask, exit_vmid_rollover, NULL, true);
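+ /* smp_call_function_many() skips the calling CPU, so cover it here too. */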
+ kvm_workaround_1542418_vmid_rollover();
preempt_enable();
}
@@ -518,10 +526,10 @@ static void update_vmid(struct kvm_vmid *vmid)
/*
* On SMP we know no other CPUs can use this CPU's or each
- * other's VMID after force_vm_exit returns since the
+ * other's VMID after force_vmid_rollover_exit returns since the
* kvm_vmid_lock blocks them from reentry to the guest.
*/
- force_vm_exit(cpu_all_mask);
+ force_vmid_rollover_exit(cpu_all_mask);
/*
* Now broadcast TLB + ICACHE invalidation over the inner
* shareable domain to make sure all data structures are