@@ -1132,7 +1132,16 @@ void aarch64_sync_64_to_32(CPUARMState *env);

int fp_exception_el(CPUARMState *env, int cur_el);
int sve_exception_el(CPUARMState *env, int cur_el);
-uint32_t sve_zcr_len_for_el(CPUARMState *env, int el);
+
+/**
+ * sve_vqm1_for_el:
+ * @env: CPUARMState
+ * @el: exception level
+ *
+ * Compute the current SVE vector length for @el, in units of
+ * Quadwords Minus 1 -- the same scale used for ZCR_ELx.LEN.
+ */
+uint32_t sve_vqm1_for_el(CPUARMState *env, int el);

static inline bool is_a64(CPUARMState *env)
{
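
The "Quadwords Minus 1" encoding mirrors the ZCR_ELx.LEN field: a return
value of N means an effective vector length of (N + 1) * 128 bits. As a
minimal sketch of the conversion a caller performs (sve_vl_bytes is a
hypothetical helper, not part of this patch):

    /* Hypothetical helper: decode the VQ-1 encoding into a byte length.
     * vqm1 == 0 -> 16 bytes (128 bits); vqm1 == 15 -> 256 bytes (2048 bits).
     */
    static uint32_t sve_vl_bytes(CPUARMState *env, int el)
    {
        return (sve_vqm1_for_el(env, el) + 1) * 16;
    }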
@@ -166,7 +166,7 @@ static off_t sve_fpcr_offset(uint32_t vq)

static uint32_t sve_current_vq(CPUARMState *env)
{
-    return sve_zcr_len_for_el(env, arm_current_el(env)) + 1;
+    return sve_vqm1_for_el(env, arm_current_el(env)) + 1;
}

static size_t sve_size_vq(uint32_t vq)
@@ -925,7 +925,7 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
                 vfp_get_fpcr(env), vfp_get_fpsr(env));

    if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) {
-        int j, zcr_len = sve_zcr_len_for_el(env, el);
+        int j, zcr_len = sve_vqm1_for_el(env, el);

        for (i = 0; i <= FFR_PRED_NUM; i++) {
            bool eol;
@@ -152,7 +152,7 @@ int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg)
         * We report in Vector Granules (VG) which is 64bit in a Z reg
         * while the ZCR works in Vector Quads (VQ) which is 128bit chunks.
         */
-        int vq = sve_zcr_len_for_el(env, arm_current_el(env)) + 1;
+        int vq = sve_vqm1_for_el(env, arm_current_el(env)) + 1;
        return gdb_get_reg64(buf, vq * 2);
    }
    default:
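
A Z register of vq quadwords contains 2 * vq 64-bit granules, so the VG
pseudo-register reported to gdb is simply twice the VQ value. An
illustrative check, assuming a 256-bit vector length (not part of the
patch):

    /* 256-bit VL: vqm1 == 1, so vq == 2 quads and vg == 4 granules;
     * both describe the same 32-byte register.
     */
    uint32_t vq = sve_vqm1_for_el(env, arm_current_el(env)) + 1;
    uint32_t vg = vq * 2;
    assert(vg * 8 == vq * 16);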
@@ -6225,7 +6225,7 @@ int sve_exception_el(CPUARMState *env, int el)
/*
 * Given that SVE is enabled, return the vector length for EL.
 */
-uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
+uint32_t sve_vqm1_for_el(CPUARMState *env, int el)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t len = cpu->sve_max_vq - 1;
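
The rest of the function (outside this hunk) reduces len further:
architecturally, the effective length is bounded by the ZCR_ELx.LEN
setting of the current and each higher exception level, and by the set
of lengths the CPU actually implements. A simplified sketch of that
shape, assuming the per-EL register values live in env->vfp.zcr_el[]
(not the verbatim implementation):

    /* Sketch: clamp the VQ-1 value by every applicable ZCR_ELx.LEN. */
    if (el <= 1) {
        len = MIN(len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
    }
    if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        len = MIN(len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        len = MIN(len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
    }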
@@ -6248,7 +6248,7 @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    int cur_el = arm_current_el(env);
-    int old_len = sve_zcr_len_for_el(env, cur_el);
+    int old_len = sve_vqm1_for_el(env, cur_el);
    int new_len;

    /* Bits other than [3:0] are RAZ/WI. */
@@ -6259,7 +6259,7 @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
     * Because we arrived here, we know both FP and SVE are enabled;
     * otherwise we would have trapped access to the ZCR_ELn register.
     */
-    new_len = sve_zcr_len_for_el(env, cur_el);
+    new_len = sve_vqm1_for_el(env, cur_el);
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
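
Because bits outside [3:0] are discarded and the effective length also
depends on higher-EL settings and on the lengths the CPU supports, the
code re-derives the length after the write rather than trusting the
written value. An illustrative trace, assuming sve_max_vq == 4 (512-bit
maximum) and an old effective vqm1 of 3:

    /* Guest writes 0xff to ZCR_EL1: only LEN == 0xf is stored. */
    raw_write(env, ri, value & 0xf);
    /* sve_vqm1_for_el() then returns MIN(0xf, 4 - 1) == 3, so
     * new_len == old_len and no narrowing takes place.
     */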
@@ -13683,7 +13683,7 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
                sve_el = 0;
            }
        } else if (sve_el == 0) {
-            DP_TBFLAG_A64(flags, VL, sve_zcr_len_for_el(env, el));
+            DP_TBFLAG_A64(flags, VL, sve_vqm1_for_el(env, el));
        }
        DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
    }
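
On the consuming side, the translator decodes the cached field back into
a byte length when it starts a translation block, roughly as follows
(the field names here are from memory and may differ):

    /* Sketch: recover the vector length in bytes from the TB flags. */
    dc->vl = (EX_TBFLAG_A64(tb_flags, VL) + 1) * 16;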
@@ -14049,10 +14049,10 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
     */
    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    old_len = (old_a64 && !sve_exception_el(env, old_el)
-               ? sve_zcr_len_for_el(env, old_el) : 0);
+               ? sve_vqm1_for_el(env, old_el) : 0);
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
    new_len = (new_a64 && !sve_exception_el(env, new_el)
-               ? sve_zcr_len_for_el(env, new_el) : 0);
+               ? sve_vqm1_for_el(env, new_el) : 0);

    /* When changing vector length, clear inaccessible state. */
    if (new_len < old_len) {
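
A worked example under assumed values (illustrative only): suppose EL0
ran with vqm1 == 3 (512-bit VL) and the target EL1 constrains the length
to vqm1 == 0 (128-bit VL). Then old_len == 3 and new_len == 0, so the
state beyond the first 128 bits of each Z register, and beyond the first
16 bits of each P register, is cleared before execution continues:

    /* old_len == 3, new_len == 0: narrow to VQ == 1, zeroing the
     * now-inaccessible high portions of the Z and P registers.
     */
    aarch64_sve_narrow_vq(env, new_len + 1);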