@@ -85,6 +85,7 @@ struct kvm_vcpu;
extern char __kvm_hyp_init[];
extern char __kvm_hyp_init_end[];
+extern char __kvm_hyp_reset[];
extern char __kvm_hyp_exit[];
extern char __kvm_hyp_exit_end[];
@@ -44,6 +44,7 @@
u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
+void kvm_reset_cpu(void);
void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
struct kvm_arch {
@@ -211,6 +212,12 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
}
+static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr,
+ phys_addr_t phys_idmap_start,
+ unsigned long reset_func)
+{
+}
+
static inline int kvm_arch_dev_ioctl_check_extension(long ext)
{
return 0;
@@ -66,6 +66,8 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
+phys_addr_t kvm_get_idmap_start(void);
+extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
@@ -269,6 +271,11 @@ static inline void __kvm_flush_dcache_pud(pud_t pud)
void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
+#define kvm_virt_to_trampoline(x) \
+ (TRAMPOLINE_VA \
+ + ((unsigned long)(x) \
+ - ((unsigned long)__hyp_idmap_text_start & PAGE_MASK)))
+
static inline bool __kvm_cpu_uses_extended_idmap(void)
{
return false;
@@ -937,6 +937,24 @@ static void cpu_init_hyp_mode(void *dummy)
kvm_arm_init_debug();
}
+void kvm_reset_cpu(void)
+{
+ phys_addr_t boot_pgd_ptr = kvm_mmu_get_boot_httbr();
+ phys_addr_t phys_idmap_start = kvm_get_idmap_start();
+
+	/* Is KVM initialised? (an unset pgd/idmap reads as virt_to_phys(NULL)) */
+ if (boot_pgd_ptr == virt_to_phys(NULL) ||
+ phys_idmap_start == virt_to_phys(NULL))
+ return;
+
+	/* Nothing to do if the vectors are already hyp_default_vectors */
+ if (__hyp_get_vectors() == hyp_default_vectors)
+ return;
+
+ __cpu_reset_hyp_mode(boot_pgd_ptr, phys_idmap_start,
+ kvm_virt_to_trampoline(__kvm_hyp_reset));
+}
+
static int hyp_init_cpu_notify(struct notifier_block *self,
unsigned long action, void *cpu)
{
@@ -151,6 +151,11 @@ target: @ We're now in the trampoline code, switch page tables
eret
+ .globl __kvm_hyp_reset
+__kvm_hyp_reset:
+ /* not yet implemented */
+ ret lr
+
.ltorg
.globl __kvm_hyp_init_end
@@ -31,8 +31,6 @@
#include "trace.h"
-extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
-
static pgd_t *boot_hyp_pgd;
static pgd_t *hyp_pgd;
static pgd_t *merged_hyp_pgd;
@@ -1644,6 +1642,11 @@ phys_addr_t kvm_get_idmap_vector(void)
return hyp_idmap_vector;
}
+phys_addr_t kvm_get_idmap_start(void)
+{
+ return hyp_idmap_start;
+}
+
int kvm_mmu_init(void)
{
int err;
@@ -108,6 +108,7 @@ struct kvm_vcpu;
extern char __kvm_hyp_init[];
extern char __kvm_hyp_init_end[];
+extern char __kvm_hyp_reset[];
extern char __kvm_hyp_vector[];
@@ -44,6 +44,7 @@
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
+void kvm_reset_cpu(void);
int kvm_arch_dev_ioctl_check_extension(long ext);
struct kvm_arch {
@@ -244,6 +245,13 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
hyp_stack_ptr, vector_ptr);
}
+static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr,
+ phys_addr_t phys_idmap_start,
+ unsigned long reset_func)
+{
+ kvm_call_hyp((void *)reset_func, boot_pgd_ptr, phys_idmap_start);
+}
+
static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
@@ -98,6 +98,8 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
+phys_addr_t kvm_get_idmap_start(void);
+extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
@@ -271,6 +273,11 @@ static inline void __kvm_flush_dcache_pud(pud_t pud)
void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
+#define kvm_virt_to_trampoline(x) \
+ (TRAMPOLINE_VA \
+ + ((unsigned long)(x) \
+ - ((unsigned long)__hyp_idmap_text_start & PAGE_MASK)))
+
static inline bool __kvm_cpu_uses_extended_idmap(void)
{
return __cpu_uses_extended_idmap();
@@ -140,6 +140,43 @@ merged:
eret
ENDPROC(__kvm_hyp_init)
+ /*
+ * x0: HYP boot pgd
+ * x1: HYP phys_idmap_start
+ */
+ENTRY(__kvm_hyp_reset)
+ /*
+ * Restore el1's lr so we can eret from here. The stack is inaccessible
+ * after we turn the mmu off. This value was pushed in el1_sync().
+ */
+ pop lr, xzr
+
+ /* We're in trampoline code in VA, switch back to boot page tables */
+ msr ttbr0_el2, x0
+ isb
+
+ /* Invalidate the old TLBs */
+ tlbi alle2
+ dsb sy
+
+ /* Branch into PA space */
+ adr x0, 1f
+ bfi x1, x0, #0, #PAGE_SHIFT
+ br x1
+
+ /* We're now in idmap, disable MMU */
+1: mrs x0, sctlr_el2
+ bic x0, x0, #SCTLR_EL2_M
+ msr sctlr_el2, x0
+ isb
+
+ /* Install stub vectors */
+ adr_l x2, __hyp_stub_vectors
+ msr vbar_el2, x2
+
+ eret
+ENDPROC(__kvm_hyp_reset)
+
.ltorg
.popsection