@@ -192,6 +192,9 @@ extern const struct VMStateDescription vmstate_arm_cpu;
void register_cp_regs_for_features(ARMCPU *cpu);
void init_cpreg_list(ARMCPU *cpu);
+bool arm_cpu_do_hvc(CPUState *cs);
+bool arm_cpu_do_smc(CPUState *cs);
+
void arm_cpu_do_interrupt(CPUState *cpu);
void arm_v7m_cpu_do_interrupt(CPUState *cpu);
@@ -51,6 +51,8 @@
#define EXCP_EXCEPTION_EXIT 8 /* Return from v7M exception. */
#define EXCP_KERNEL_TRAP 9 /* Jumped to kernel code page. */
#define EXCP_STREX 10
+#define EXCP_HVC 11
+#define EXCP_SMC 12
#define ARMV7M_EXCP_RESET 1
#define ARMV7M_EXCP_NMI 2
@@ -485,6 +485,22 @@ void aarch64_cpu_do_interrupt(CPUState *cs)
case EXCP_FIQ:
addr += 0x100;
break;
+ case EXCP_HVC:
+ if (arm_cpu_do_hvc(cs)) {
+ return;
+ }
+ /* Treat as unallocated encoding */
+ qemu_log_mask(LOG_GUEST_ERROR, "HVC not implemented on this CPU\n");
+ env->exception.syndrome = syn_uncategorized();
+ break;
+ case EXCP_SMC:
+ if (arm_cpu_do_smc(cs)) {
+ return;
+ }
+ /* Treat as unallocated encoding */
+ qemu_log_mask(LOG_GUEST_ERROR, "SMC not implemented on this CPU\n");
+ env->exception.syndrome = syn_uncategorized();
+ break;
default:
cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
}
@@ -3492,6 +3492,24 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
env->thumb = addr & 1;
}
+/* Stub handler for an EXCP_HVC exception.  Returns true if the
+ * hypervisor call was handled; false makes the caller treat the HVC
+ * instruction as an unallocated encoding (UNDEF).
+ */
+bool arm_cpu_do_hvc(CPUState *cs)
+{
+    return false;
+}
+
+/* Stub handler for an EXCP_SMC exception.  Returns true if the secure
+ * monitor call was handled; false makes the caller treat the SMC
+ * instruction as an unallocated encoding (UNDEF).
+ */
+bool arm_cpu_do_smc(CPUState *cs)
+{
+    return false;
+}
+
/* Handle a CPU exception. */
void arm_cpu_do_interrupt(CPUState *cs)
{
@@ -3508,6 +3518,19 @@ void arm_cpu_do_interrupt(CPUState *cs)
/* TODO: Vectored interrupt controller. */
switch (cs->exception_index) {
+ case EXCP_HVC:
+ if (arm_cpu_do_hvc(cs)) {
+ return;
+ }
+ qemu_log_mask(LOG_GUEST_ERROR, "HVC not implemented on this CPU\n");
+ goto hvc_unallocated;
+ case EXCP_SMC:
+ if (arm_cpu_do_smc(cs)) {
+ return;
+ }
+ qemu_log_mask(LOG_GUEST_ERROR, "SMC not implemented on this CPU\n");
+ hvc_unallocated:
+ /* Fall through -- treat as unallocated encoding */
case EXCP_UDEF:
new_mode = ARM_CPU_MODE_UND;
addr = 0x04;
@@ -210,6 +210,30 @@ static inline uint32_t syn_aa32_svc(uint32_t imm16, bool is_thumb)
| (is_thumb ? 0 : ARM_EL_IL);
}
+/* Syndrome for an AArch64 HVC: EC plus the instruction's 16-bit immediate. */
+static inline uint32_t syn_aa64_hvc(uint32_t imm16)
+{
+    return (EC_AA64_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
+}
+
+/* Syndrome for an AArch32 HVC: EC plus the instruction's 16-bit immediate. */
+static inline uint32_t syn_aa32_hvc(uint32_t imm16)
+{
+    return (EC_AA32_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
+}
+
+/* Syndrome for an AArch64 SMC: EC plus the instruction's 16-bit immediate. */
+static inline uint32_t syn_aa64_smc(uint32_t imm16)
+{
+    return (EC_AA64_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
+}
+
+/* Syndrome for an AArch32 SMC: no immediate is reported in the syndrome. */
+static inline uint32_t syn_aa32_smc(void)
+{
+    return (EC_AA32_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL;
+}
+
static inline uint32_t syn_aa64_bkpt(uint32_t imm16)
{
return (EC_AA64_BKPT << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
@@ -1473,20 +1473,37 @@ static void disas_exc(DisasContext *s, uint32_t insn)
switch (opc) {
case 0:
- /* SVC, HVC, SMC; since we don't support the Virtualization
- * or TrustZone extensions these all UNDEF except SVC.
- */
- if (op2_ll != 1) {
- unallocated_encoding(s);
- break;
- }
/* For SVC, HVC and SMC we advance the single-step state
* machine before taking the exception. This is architecturally
* mandated, to ensure that single-stepping a system call
* instruction works properly.
*/
- gen_ss_advance(s);
- gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16));
+ switch (op2_ll) {
+ case 1:
+ gen_ss_advance(s);
+ gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16));
+ break;
+ case 2:
+ if (s->current_pl != 0
+ && arm_dc_feature(s, ARM_FEATURE_EL2)) {
+ gen_ss_advance(s);
+ gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16));
+ break;
+ }
+ unallocated_encoding(s);
+ break;
+ case 3:
+ if (s->current_pl != 0
+ && arm_dc_feature(s, ARM_FEATURE_EL3)) {
+ gen_ss_advance(s);
+ gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16));
+ break;
+ }
+ /* fall through */
+ default:
+ unallocated_encoding(s);
+ break;
+ }
break;
case 1:
if (op2_ll != 0) {
@@ -7871,15 +7871,35 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
case 7:
{
int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
-            /* SMC instruction (op1 == 3)
-               and undefined instructions (op1 == 0 || op1 == 2)
-               will trap */
-            if (op1 != 1) {
+            switch (op1) {
+            case 1:
+                /* bkpt */
+                ARCH(5);
+                gen_exception_insn(s, 4, EXCP_BKPT,
+                                   syn_aa32_bkpt(imm16, false));
+                break;
+            case 2:
+                /* Hypervisor call (v7) */
+                ARCH(7);
+                if (IS_USER(s)) {
+                    goto illegal_op;
+                }
+                s->svc_imm = imm16;
+                gen_set_pc_im(s, s->pc);
+                s->is_jmp = DISAS_HVC;
+                break;
+            case 3:
+                /* Secure monitor call (v6+) */
+                ARCH(6K);
+                if (IS_USER(s)) {
+                    goto illegal_op;
+                }
+                gen_set_pc_im(s, s->pc);
+                s->is_jmp = DISAS_SMC;
+                break;
+            default:
goto illegal_op;
}
-            /* bkpt */
-            ARCH(5);
-            gen_exception_insn(s, 4, EXCP_BKPT, syn_aa32_bkpt(imm16, false));
break;
}
case 0x8: /* signed multiply */
@@ -9709,10 +9727,25 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
goto illegal_op;
if (insn & (1 << 26)) {
-            /* Secure monitor call (v6Z) */
-            qemu_log_mask(LOG_UNIMP,
-                          "arm: unimplemented secure monitor call\n");
-            goto illegal_op; /* not implemented. */
+            if (!(insn & (1 << 20))) {
+                /* Hypervisor call (v7) */
+                ARCH(7);
+                if (IS_USER(s)) {
+                    goto illegal_op;
+                }
+                s->svc_imm = extract32(insn, 16, 4) << 12 |
+                             extract32(insn, 0, 12);
+                gen_set_pc_im(s, s->pc);
+                s->is_jmp = DISAS_HVC;
+            } else {
+                /* Secure monitor call (v6+) */
+                ARCH(6K);
+                if (IS_USER(s)) {
+                    goto illegal_op;
+                }
+                gen_set_pc_im(s, s->pc);
+                s->is_jmp = DISAS_SMC;
+            }
} else {
op = (insn >> 20) & 7;
switch (op) {
@@ -11200,6 +11231,12 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
case DISAS_SWI:
gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
break;
+ case DISAS_HVC:
+ gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
+ break;
+ case DISAS_SMC:
+ gen_exception(EXCP_SMC, syn_aa32_smc());
+ break;
}
if (dc->condjmp) {
gen_set_label(dc->condlabel);
@@ -82,6 +82,8 @@ static inline int get_mem_index(DisasContext *s)
#define DISAS_EXC 6
/* WFE */
#define DISAS_WFE 7
+#define DISAS_HVC 8
+#define DISAS_SMC 9
#ifdef TARGET_AARCH64
void a64_translate_init(void);