[RFC,v2,21/26] KVM: arm64: Refactor kvm_arm_setup_stage2()

Message ID 20210108121524.656872-22-qperret@google.com
State New
Series KVM/arm64: A stage 2 for the host

Commit Message

Quentin Perret Jan. 8, 2021, 12:15 p.m. UTC
In order to re-use some of the stage 2 setup at EL2, factor parts of
kvm_arm_setup_stage2() out into static inline functions.

No functional change intended.

Signed-off-by: Quentin Perret <qperret@google.com>
---
 arch/arm64/include/asm/kvm_mmu.h | 48 ++++++++++++++++++++++++++++++++
 arch/arm64/kvm/reset.c           | 42 +++-------------------------
 2 files changed, 52 insertions(+), 38 deletions(-)

Comments

Will Deacon Feb. 3, 2021, 3:53 p.m. UTC | #1
On Fri, Jan 08, 2021 at 12:15:19PM +0000, Quentin Perret wrote:
> In order to re-use some of the stage 2 setup at EL2, factor parts of
> kvm_arm_setup_stage2() out into static inline functions.
> 
> No functional change intended.
> 
> Signed-off-by: Quentin Perret <qperret@google.com>
> ---
>  arch/arm64/include/asm/kvm_mmu.h | 48 ++++++++++++++++++++++++++++++++
>  arch/arm64/kvm/reset.c           | 42 +++-------------------------
>  2 files changed, 52 insertions(+), 38 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
> index 662f0415344e..83b4c5cf4768 100644
> --- a/arch/arm64/include/asm/kvm_mmu.h
> +++ b/arch/arm64/include/asm/kvm_mmu.h
> @@ -280,6 +280,54 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
>  	return ret;
>  }
>  
> +static inline u64 kvm_get_parange(u64 mmfr0)
> +{
> +	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
> +				ID_AA64MMFR0_PARANGE_SHIFT);
> +	if (parange > ID_AA64MMFR0_PARANGE_MAX)
> +		parange = ID_AA64MMFR0_PARANGE_MAX;
> +
> +	return parange;
> +}
> +
> +/*
> + * The VTCR value is common across all the physical CPUs on the system.
> + * We use system wide sanitised values to fill in different fields,
> + * except for Hardware Management of Access Flags. HA Flag is set
> + * unconditionally on all CPUs, as it is safe to run with or without
> + * the feature and the bit is RES0 on CPUs that don't support it.
> + */
> +static inline u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
> +{
> +	u64 vtcr = VTCR_EL2_FLAGS;
> +	u8 lvls;
> +
> +	vtcr |= kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT;
> +	vtcr |= VTCR_EL2_T0SZ(phys_shift);
> +	/*
> +	 * Use a minimum 2 level page table to prevent splitting
> +	 * host PMD huge pages at stage2.
> +	 */
> +	lvls = stage2_pgtable_levels(phys_shift);
> +	if (lvls < 2)
> +		lvls = 2;
> +	vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);
> +
> +	/*
> +	 * Enable the Hardware Access Flag management, unconditionally
> +	 * on all CPUs. The features is RES0 on CPUs without the support
> +	 * and must be ignored by the CPUs.
> +	 */
> +	vtcr |= VTCR_EL2_HA;
> +
> +	/* Set the vmid bits */
> +	vtcr |= (get_vmid_bits(mmfr1) == 16) ?
> +		VTCR_EL2_VS_16BIT :
> +		VTCR_EL2_VS_8BIT;
> +
> +	return vtcr;
> +}

Although I think this is functionally fine, I think it's unusual to see
large "static inline" functions like this in shared header files. One
alternative approach would be to follow the example of
kernel/locking/qspinlock_paravirt.h, where the header is guarded in such a
way that it is only ever included by kernel/locking/qspinlock.c and therefore
doesn't need the "inline" at all. That separation really helps, I think.
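
A sketch of that guard pattern, modelled on kernel/locking/qspinlock_paravirt.h
(the file name and guard macro below are hypothetical, shown only to illustrate
the suggestion):

/* arch/arm64/kvm/stage2_setup.h -- hypothetical private header */
#ifndef __KVM_ARM64_STAGE2_SETUP
#error "do not include this file directly"
#endif

/*
 * Plain "static" is enough here: the single .c file that defines the
 * guard macro is the only translation unit that ever sees this code,
 * so no "inline" hint is needed in a shared header.
 */
static u64 kvm_get_parange(u64 mmfr0)
{
	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_PARANGE_SHIFT);

	if (parange > ID_AA64MMFR0_PARANGE_MAX)
		parange = ID_AA64MMFR0_PARANGE_MAX;

	return parange;
}

/* arch/arm64/kvm/reset.c -- the one intended user of the header */
#define __KVM_ARM64_STAGE2_SETUP
#include "stage2_setup.h"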

Will
Quentin Perret Feb. 4, 2021, 2:07 p.m. UTC | #2
On Wednesday 03 Feb 2021 at 15:53:54 (+0000), Will Deacon wrote:
> On Fri, Jan 08, 2021 at 12:15:19PM +0000, Quentin Perret wrote:
> > In order to re-use some of the stage 2 setup at EL2, factor parts of
> > kvm_arm_setup_stage2() out into static inline functions.
> > 
> > No functional change intended.
> > 
> > Signed-off-by: Quentin Perret <qperret@google.com>
> > ---
> >  arch/arm64/include/asm/kvm_mmu.h | 48 ++++++++++++++++++++++++++++++++
> >  arch/arm64/kvm/reset.c           | 42 +++-------------------------
> >  2 files changed, 52 insertions(+), 38 deletions(-)
> > 
> > diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
> > index 662f0415344e..83b4c5cf4768 100644
> > --- a/arch/arm64/include/asm/kvm_mmu.h
> > +++ b/arch/arm64/include/asm/kvm_mmu.h
> > @@ -280,6 +280,54 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
> >  	return ret;
> >  }
> >  
> > +static inline u64 kvm_get_parange(u64 mmfr0)
> > +{
> > +	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
> > +				ID_AA64MMFR0_PARANGE_SHIFT);
> > +	if (parange > ID_AA64MMFR0_PARANGE_MAX)
> > +		parange = ID_AA64MMFR0_PARANGE_MAX;
> > +
> > +	return parange;
> > +}
> > +
> > +/*
> > + * The VTCR value is common across all the physical CPUs on the system.
> > + * We use system wide sanitised values to fill in different fields,
> > + * except for Hardware Management of Access Flags. HA Flag is set
> > + * unconditionally on all CPUs, as it is safe to run with or without
> > + * the feature and the bit is RES0 on CPUs that don't support it.
> > + */
> > +static inline u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
> > +{
> > +	u64 vtcr = VTCR_EL2_FLAGS;
> > +	u8 lvls;
> > +
> > +	vtcr |= kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT;
> > +	vtcr |= VTCR_EL2_T0SZ(phys_shift);
> > +	/*
> > +	 * Use a minimum 2 level page table to prevent splitting
> > +	 * host PMD huge pages at stage2.
> > +	 */
> > +	lvls = stage2_pgtable_levels(phys_shift);
> > +	if (lvls < 2)
> > +		lvls = 2;
> > +	vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);
> > +
> > +	/*
> > +	 * Enable the Hardware Access Flag management, unconditionally
> > +	 * on all CPUs. The features is RES0 on CPUs without the support
> > +	 * and must be ignored by the CPUs.
> > +	 */
> > +	vtcr |= VTCR_EL2_HA;
> > +
> > +	/* Set the vmid bits */
> > +	vtcr |= (get_vmid_bits(mmfr1) == 16) ?
> > +		VTCR_EL2_VS_16BIT :
> > +		VTCR_EL2_VS_8BIT;
> > +
> > +	return vtcr;
> > +}
> 
> Although I think this is functionally fine, I think it's unusual to see
> large "static inline" functions like this in shared header files. One
> alternative approach would be to follow the example of
> kernel/locking/qspinlock_paravirt.h, where the header is guarded in such a
> way that it is only ever included by kernel/locking/qspinlock.c and therefore
> doesn't need the "inline" at all. That separation really helps, I think.

Alternatively, I might be able to have an mmu.c file in the hyp/ folder,
and to compile it for both the host kernel and the EL2 obj as we do for
a few things already. Or maybe I'll just stick it in pgtable.c. Either
way, it'll add a function call, but I can't really see that having any
measurable impact, so we should be fine.
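
One possible shape of that out-of-line alternative (a sketch only; whether the
definition belongs in a shared hyp/ mmu.c, in pgtable.c, or elsewhere is exactly
what is left open above):

/* Shared header: declarations replace the "static inline" bodies. */
u64 kvm_get_parange(u64 mmfr0);
u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);

/*
 * Shared .c file (illustrative location), built once for the host kernel
 * and once for the EL2 object. The body is the one from this patch, just
 * no longer inline, at the cost of a real function call at each call site.
 */
u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
{
	u64 vtcr = VTCR_EL2_FLAGS;
	u8 lvls;

	vtcr |= kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT;
	vtcr |= VTCR_EL2_T0SZ(phys_shift);

	/* Minimum 2 levels so host PMD huge pages aren't split at stage 2. */
	lvls = stage2_pgtable_levels(phys_shift);
	if (lvls < 2)
		lvls = 2;
	vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);

	/* HA is RES0 on CPUs without the feature, so set it unconditionally. */
	vtcr |= VTCR_EL2_HA;

	/* VMID width from the sanitised ID_AA64MMFR1_EL1 value. */
	vtcr |= (get_vmid_bits(mmfr1) == 16) ? VTCR_EL2_VS_16BIT : VTCR_EL2_VS_8BIT;

	return vtcr;
}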

Cheers,
Quentin

Patch

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 662f0415344e..83b4c5cf4768 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -280,6 +280,54 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
 	return ret;
 }
 
+static inline u64 kvm_get_parange(u64 mmfr0)
+{
+	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
+				ID_AA64MMFR0_PARANGE_SHIFT);
+	if (parange > ID_AA64MMFR0_PARANGE_MAX)
+		parange = ID_AA64MMFR0_PARANGE_MAX;
+
+	return parange;
+}
+
+/*
+ * The VTCR value is common across all the physical CPUs on the system.
+ * We use system wide sanitised values to fill in different fields,
+ * except for Hardware Management of Access Flags. HA Flag is set
+ * unconditionally on all CPUs, as it is safe to run with or without
+ * the feature and the bit is RES0 on CPUs that don't support it.
+ */
+static inline u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
+{
+	u64 vtcr = VTCR_EL2_FLAGS;
+	u8 lvls;
+
+	vtcr |= kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT;
+	vtcr |= VTCR_EL2_T0SZ(phys_shift);
+	/*
+	 * Use a minimum 2 level page table to prevent splitting
+	 * host PMD huge pages at stage2.
+	 */
+	lvls = stage2_pgtable_levels(phys_shift);
+	if (lvls < 2)
+		lvls = 2;
+	vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);
+
+	/*
+	 * Enable the Hardware Access Flag management, unconditionally
+	 * on all CPUs. The features is RES0 on CPUs without the support
+	 * and must be ignored by the CPUs.
+	 */
+	vtcr |= VTCR_EL2_HA;
+
+	/* Set the vmid bits */
+	vtcr |= (get_vmid_bits(mmfr1) == 16) ?
+		VTCR_EL2_VS_16BIT :
+		VTCR_EL2_VS_8BIT;
+
+	return vtcr;
+}
+
 #define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)
 
 static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 47f3f035f3ea..6aae118c960a 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -332,19 +332,10 @@ int kvm_set_ipa_limit(void)
 	return 0;
 }
 
-/*
- * Configure the VTCR_EL2 for this VM. The VTCR value is common
- * across all the physical CPUs on the system. We use system wide
- * sanitised values to fill in different fields, except for Hardware
- * Management of Access Flags. HA Flag is set unconditionally on
- * all CPUs, as it is safe to run with or without the feature and
- * the bit is RES0 on CPUs that don't support it.
- */
 int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
 {
-	u64 vtcr = VTCR_EL2_FLAGS, mmfr0;
-	u32 parange, phys_shift;
-	u8 lvls;
+	u64 mmfr0, mmfr1;
+	u32 phys_shift;
 
 	if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
 		return -EINVAL;
@@ -359,33 +350,8 @@ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
 	}
 
 	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
-	parange = cpuid_feature_extract_unsigned_field(mmfr0,
-				ID_AA64MMFR0_PARANGE_SHIFT);
-	if (parange > ID_AA64MMFR0_PARANGE_MAX)
-		parange = ID_AA64MMFR0_PARANGE_MAX;
-	vtcr |= parange << VTCR_EL2_PS_SHIFT;
-
-	vtcr |= VTCR_EL2_T0SZ(phys_shift);
-	/*
-	 * Use a minimum 2 level page table to prevent splitting
-	 * host PMD huge pages at stage2.
-	 */
-	lvls = stage2_pgtable_levels(phys_shift);
-	if (lvls < 2)
-		lvls = 2;
-	vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);
-
-	/*
-	 * Enable the Hardware Access Flag management, unconditionally
-	 * on all CPUs. The features is RES0 on CPUs without the support
-	 * and must be ignored by the CPUs.
-	 */
-	vtcr |= VTCR_EL2_HA;
+	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+	kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);
 
-	/* Set the vmid bits */
-	vtcr |= (kvm_get_vmid_bits() == 16) ?
-		VTCR_EL2_VS_16BIT :
-		VTCR_EL2_VS_8BIT;
-	kvm->arch.vtcr = vtcr;
 	return 0;
 }
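
For context on the stated goal of reusing this setup from EL2, a rough sketch of
a hypothetical hyp-side caller follows. Everything except kvm_get_vtcr() itself
is made up for illustration: how the sanitised mmfr values and the IPA shift
reach the EL2 object is settled elsewhere in the series, not in this patch.

/* Hypothetical nVHE hyp code: compute the host stage-2 VTCR at EL2. */
static u64 host_stage2_vtcr;

void hyp_setup_host_stage2(u64 mmfr0, u64 mmfr1, u32 phys_shift)
{
	/* Same computation as kvm_arm_setup_stage2(), now shared with EL2. */
	host_stage2_vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);
}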