@@ -29,6 +29,7 @@
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "accel/tcg/probe.h"
+#include "accel/tcg/getpc.h"
#include "semihosting/common-semi.h"
#endif
#include "cpregs.h"
@@ -6565,9 +6566,7 @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
*/
new_len = sve_vqm1_for_el(env, cur_el);
if (new_len < old_len) {
-#ifdef TARGET_AARCH64
aarch64_sve_narrow_vq(env, new_len + 1);
-#endif
}
}
 
@@ -10625,9 +10624,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
* Note that new_el can never be 0. If cur_el is 0, then
* el0_a64 is is_a64(), else el0_a64 is ignored.
*/
-#ifdef TARGET_AARCH64
aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
-#endif
}
 
if (cur_el < new_el) {
@@ -11418,7 +11415,6 @@ ARMMMUIdx arm_mmu_idx(CPUARMState *env)
return arm_mmu_idx_el(env, arm_current_el(env));
}
 
-#ifdef TARGET_AARCH64
/*
* The manual says that when SVE is enabled and VQ is widened the
* implementation is allowed to zero the previously inaccessible
@@ -11530,12 +11526,9 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
 
/* When changing vector length, clear inaccessible state. */
if (new_len < old_len) {
-#ifdef TARGET_AARCH64
aarch64_sve_narrow_vq(env, new_len + 1);
-#endif
}
}
-#endif
 
#ifndef CONFIG_USER_ONLY
ARMSecuritySpace arm_security_space(CPUARMState *env)