diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -74,7 +74,7 @@ static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; };
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
-extern unsigned int __ro_after_init kvm_sve_max_vl;
+extern unsigned int __ro_after_init kvm_vec_max_vl[ARM64_VEC_MAX];
int __init kvm_arm_init_sve(void);
u32 __attribute_const__ kvm_target_cpu(void);
@@ -515,7 +515,7 @@ struct kvm_vcpu_arch {
*/
void *sve_state;
enum fp_type fp_type;
- unsigned int sve_max_vl;
+ unsigned int max_vl[ARM64_VEC_MAX];
u64 svcr;
/* Stage 2 paging state used by the hardware on next switch */
@@ -802,15 +802,17 @@ struct kvm_vcpu_arch {
/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) + \
- sve_ffr_offset((vcpu)->arch.sve_max_vl))
+ sve_ffr_offset((vcpu)->arch.max_vl[ARM64_VEC_SVE]))
-#define vcpu_sve_max_vq(vcpu) sve_vq_from_vl((vcpu)->arch.sve_max_vl)
+#define vcpu_vec_max_vq(type, vcpu) sve_vq_from_vl((vcpu)->arch.max_vl[type])
+
+#define vcpu_sve_max_vq(vcpu) vcpu_vec_max_vq(ARM64_VEC_SVE, vcpu)
#define vcpu_sve_state_size(vcpu) ({ \
size_t __size_ret; \
unsigned int __vcpu_vq; \
\
- if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) { \
+ if (WARN_ON(!sve_vl_valid((vcpu)->arch.max_vl[ARM64_VEC_SVE]))) { \
__size_ret = 0; \
} else { \
__vcpu_vq = vcpu_sve_max_vq(vcpu); \
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -150,7 +150,7 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
*/
fp_state.st = &vcpu->arch.ctxt.fp_regs;
fp_state.sve_state = vcpu->arch.sve_state;
- fp_state.sve_vl = vcpu->arch.sve_max_vl;
+ fp_state.sve_vl = vcpu->arch.max_vl[ARM64_VEC_SVE];
fp_state.sme_state = NULL;
fp_state.svcr = &vcpu->arch.svcr;
fp_state.fp_type = &vcpu->arch.fp_type;
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -317,7 +317,7 @@ static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if (!vcpu_has_sve(vcpu))
return -ENOENT;
- if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
+ if (WARN_ON(!sve_vl_valid(vcpu->arch.max_vl[ARM64_VEC_SVE])))
return -EINVAL;
memset(vqs, 0, sizeof(vqs));
@@ -355,7 +355,7 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if (vq_present(vqs, vq))
max_vq = vq;
- if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
+ if (max_vq > sve_vq_from_vl(kvm_vec_max_vl[ARM64_VEC_SVE]))
return -EINVAL;
/*
@@ -374,7 +374,7 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
return -EINVAL;
/* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
- vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);
+ vcpu->arch.max_vl[ARM64_VEC_SVE] = sve_vl_from_vq(max_vq);
return 0;
}
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -26,11 +26,14 @@ void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
{
struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
+ int i;
hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt;
+ for (i = 0; i < ARRAY_SIZE(hyp_vcpu->vcpu.arch.max_vl); i++)
+ hyp_vcpu->vcpu.arch.max_vl[i] = host_vcpu->arch.max_vl[i];
+
hyp_vcpu->vcpu.arch.sve_state = kern_hyp_va(host_vcpu->arch.sve_state);
- hyp_vcpu->vcpu.arch.sve_max_vl = host_vcpu->arch.sve_max_vl;
hyp_vcpu->vcpu.arch.hw_mmu = host_vcpu->arch.hw_mmu;
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -45,12 +45,12 @@ static u32 __ro_after_init kvm_ipa_limit;
#define VCPU_RESET_PSTATE_SVC (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \
PSR_AA32_I_BIT | PSR_AA32_F_BIT)
-unsigned int __ro_after_init kvm_sve_max_vl;
+unsigned int __ro_after_init kvm_vec_max_vl[ARM64_VEC_MAX];
int __init kvm_arm_init_sve(void)
{
if (system_supports_sve()) {
- kvm_sve_max_vl = sve_max_virtualisable_vl();
+ kvm_vec_max_vl[ARM64_VEC_SVE] = sve_max_virtualisable_vl();
/*
* The get_sve_reg()/set_sve_reg() ioctl interface will need
@@ -58,16 +58,16 @@ int __init kvm_arm_init_sve(void)
* order to support vector lengths greater than
* VL_ARCH_MAX:
*/
- if (WARN_ON(kvm_sve_max_vl > VL_ARCH_MAX))
- kvm_sve_max_vl = VL_ARCH_MAX;
+ if (WARN_ON(kvm_vec_max_vl[ARM64_VEC_SVE] > VL_ARCH_MAX))
+ kvm_vec_max_vl[ARM64_VEC_SVE] = VL_ARCH_MAX;
/*
* Don't even try to make use of vector lengths that
* aren't available on all CPUs, for now:
*/
- if (kvm_sve_max_vl < sve_max_vl())
+ if (kvm_vec_max_vl[ARM64_VEC_SVE] < sve_max_vl())
pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
- kvm_sve_max_vl);
+ kvm_vec_max_vl[ARM64_VEC_SVE]);
}
return 0;
@@ -75,7 +75,7 @@ int __init kvm_arm_init_sve(void)
static void kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
{
- vcpu->arch.sve_max_vl = kvm_sve_max_vl;
+ vcpu->arch.max_vl[ARM64_VEC_SVE] = kvm_vec_max_vl[ARM64_VEC_SVE];
/*
* Userspace can still customize the vector lengths by writing
@@ -96,7 +96,7 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
size_t reg_sz;
int ret;
- vl = vcpu->arch.sve_max_vl;
+ vl = vcpu->arch.max_vl[ARM64_VEC_SVE];
/*
* Responsibility for these properties is shared between
SME introduces a second vector length, enumerated and configured in
the same manner as the SVE vector length. As with the host kernel,
refactor to store an array of vector lengths in order to facilitate
sharing code between the two.

We do not fully handle vcpu_sve_pffr() since we have not yet
introduced support for streaming mode; this will be updated as part
of implementing streaming mode.

Signed-off-by: Mark Brown <broonie@kernel.org>
---
 arch/arm64/include/asm/kvm_host.h  | 12 +++++++-----
 arch/arm64/kvm/fpsimd.c            |  2 +-
 arch/arm64/kvm/guest.c             |  6 +++---
 arch/arm64/kvm/hyp/nvhe/hyp-main.c |  5 ++++-
 arch/arm64/kvm/reset.c             | 16 ++++++++--------
 5 files changed, 23 insertions(+), 18 deletions(-)
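For illustration only, not part of the patch: a minimal userspace C
sketch of the array-indexed vector length pattern this refactor moves
to. The enum mirrors the host kernel's enum vec_type from
arch/arm64/include/asm/fpsimd.h; max_vl[] and vq_from_vl() here are
hypothetical stand-ins for vcpu->arch.max_vl[] and the kernel's
sve_vq_from_vl() helper.

#include <stdio.h>

/* Mirrors the host kernel's enum vec_type (asm/fpsimd.h). */
enum vec_type {
	ARM64_VEC_SVE = 0,
	ARM64_VEC_SME,
	ARM64_VEC_MAX,
};

/* Hypothetical stand-in for vcpu->arch.max_vl[] after this refactor. */
static unsigned int max_vl[ARM64_VEC_MAX];

/* Vector quadwords from a vector length in bytes, as sve_vq_from_vl(). */
static unsigned int vq_from_vl(unsigned int vl)
{
	return vl / 16;
}

int main(void)
{
	/* One code path can now serve both vector extensions. */
	max_vl[ARM64_VEC_SVE] = 256;	/* e.g. a 2048-bit SVE VL */
	max_vl[ARM64_VEC_SME] = 64;	/* e.g. a 512-bit SME VL */

	for (int i = 0; i < ARM64_VEC_MAX; i++)
		printf("type %d: max VL %u bytes (%u quadwords)\n",
		       i, max_vl[i], vq_from_vl(max_vl[i]));

	return 0;
}

Indexing by enum rather than keeping parallel sve_*/sme_* fields means
loops like the one added to flush_hyp_vcpu() can copy or validate all
vector lengths uniformly, so later SME support does not have to
duplicate each SVE code path.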