@@ -90,7 +90,7 @@ void __cpuinit init_traps(void)

/* Setup hypervisor traps */
WRITE_SYSREG(HCR_PTW|HCR_BSU_INNER|HCR_AMO|HCR_IMO|HCR_FMO|HCR_VM|
- HCR_TWI|HCR_TSC|HCR_TAC|HCR_SWIO|HCR_TIDCP, HCR_EL2);
+ HCR_TWE|HCR_TWI|HCR_TSC|HCR_TAC|HCR_SWIO|HCR_TIDCP, HCR_EL2);
isb();
}

@@ -1803,16 +1803,21 @@ asmlinkage void do_trap_hypervisor(struct cpu_user_regs *regs)
advance_pc(regs, hsr);
return;
}
- /* at the moment we only trap WFI */
- vcpu_block();
- /* The ARM spec declares that even if local irqs are masked in
- * the CPSR register, an irq should wake up a cpu from WFI anyway.
- * For this reason we need to check for irqs that need delivery,
- * ignoring the CPSR register, *after* calling SCHEDOP_block to
- * avoid races with vgic_vcpu_inject_irq.
- */
- if ( local_events_need_delivery_nomask() )
- vcpu_unblock(current);
+ if ( hsr.wfi_wfe.ti ) {
+ /* Yield the VCPU for WFE */
+ vcpu_force_reschedule(current);
+ } else {
+ /* Block the VCPU for WFI */
+ vcpu_block();
+ /* The ARM spec declares that even if local irqs are masked in
+ * the CPSR register, an irq should wake up a cpu from WFI anyway.
+ * For this reason we need to check for irqs that need delivery,
+ * ignoring the CPSR register, *after* calling SCHEDOP_block to
+ * avoid races with vgic_vcpu_inject_irq.
+ */
+ if ( local_events_need_delivery_nomask() )
+ vcpu_unblock(current);
+ }
advance_pc(regs, hsr);
break;
case HSR_EC_CP15_32:
@@ -276,6 +276,15 @@ union hsr {
unsigned long ec:6; /* Exception Class */
} cond;

+ struct hsr_wfi_wfe {
+ unsigned long ti:1; /* Trapped instruction */
+ unsigned long sbzp:19;
+ unsigned long cc:4; /* Condition Code */
+ unsigned long ccvalid:1;/* CC Valid */
+ unsigned long len:1; /* Instruction length */
+ unsigned long ec:6; /* Exception Class */
+ } wfi_wfe;
+
/* reg, reg0, reg1 are 4 bits on AArch32, the fifth bit is sbzp. */
struct hsr_cp32 {
unsigned long read:1; /* Direction */