@@ -263,20 +263,10 @@ void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
v_target = d->arch.vgic.handler->get_target_vcpu(v, irq);
p = irq_to_pending(v_target, irq);
set_bit(GIC_IRQ_GUEST_ENABLED, &p->status);
- /* We need to force the first injection of evtchn_irq because
- * evtchn_upcall_pending is already set by common code on vcpu
- * creation. */
- if ( irq == v_target->domain->arch.evtchn_irq &&
- vcpu_info(current, evtchn_upcall_pending) &&
- list_empty(&p->inflight) )
- vgic_vcpu_inject_irq(v_target, irq);
- else {
- unsigned long flags;
- spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
- if ( !list_empty(&p->inflight) && !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
- gic_raise_guest_irq(v_target, irq, p->priority);
- spin_unlock_irqrestore(&v_target->arch.vgic.lock, flags);
- }
+ spin_lock_irqsave(&v_target->arch.vgic.lock, flags);
+ if ( !list_empty(&p->inflight) && !test_bit(GIC_IRQ_GUEST_VISIBLE, &p->status) )
+ gic_raise_guest_irq(v_target, irq, p->priority);
+ spin_unlock_irqrestore(&v_target->arch.vgic.lock, flags);
if ( p->desc != NULL )
{
irq_set_affinity(p->desc, cpumask_of(v_target->processor));
@@ -432,6 +422,11 @@ void vgic_vcpu_inject_spi(struct domain *d, unsigned int irq)
vgic_vcpu_inject_irq(v, irq);
}
+void arch_evtchn_inject(struct vcpu *v)
+{
+ vgic_vcpu_inject_irq(v, v->domain->arch.evtchn_irq);
+}
+
/*
* Local variables:
* mode: C
@@ -468,6 +468,12 @@ int hvm_local_events_need_delivery(struct vcpu *v)
return !hvm_interrupt_blocked(v, intack);
}
+void arch_evtchn_inject(struct vcpu *v)
+{
+ if ( has_hvm_container_vcpu(v) )
+ hvm_assert_evtchn_irq(v);
+}
+
static void irq_dump(struct domain *d)
{
struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
@@ -1058,6 +1058,7 @@ int map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset)
vcpu_info(v, evtchn_upcall_pending) = 1;
for ( i = 0; i < BITS_PER_EVTCHN_WORD(d); i++ )
set_bit(i, &vcpu_info(v, evtchn_pending_sel));
+ arch_evtchn_inject(v);
return 0;
}
@@ -69,6 +69,9 @@ int guest_enabled_event(struct vcpu *v, uint32_t virq);
/* Notify remote end of a Xen-attached event channel.*/
void notify_via_xen_event_channel(struct domain *ld, int lport);
+/* Inject an event channel notification into the guest. */
+void arch_evtchn_inject(struct vcpu *v);
+
/*
* Internal event channel object storage.
*
evtchn_upcall_pending is already set by common code at vcpu creation, therefore on ARM we also need to call vgic_vcpu_inject_irq for it. Currently we do that from vgic_enable_irqs as a workaround. Do this properly by introducing an appropriate arch-specific hook: arch_evtchn_inject. arch_evtchn_inject is called by map_vcpu_info to inject the evtchn irq into the guest. On ARM it is implemented by calling vgic_vcpu_inject_irq. On x86, guests typically don't call VCPUOP_register_vcpu_info on vcpu0, thereby avoiding the issue. However, theoretically they could call VCPUOP_register_vcpu_info on vcpu0, and in that case Xen would need to inject the event channel notification into the guest if the guest is HVM. So implement arch_evtchn_inject on x86 by calling hvm_assert_evtchn_irq. Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> --- Changes in v10: - provide an implementation for x86. Changes in v9: - use an arch hook. Changes in v2: - coding style fix; - add comment; - return an error if arch_set_info_guest is called without VGCF_online. --- xen/arch/arm/vgic.c | 23 +++++++++-------------- xen/arch/x86/hvm/irq.c | 6 ++++++ xen/common/domain.c | 1 + xen/include/xen/event.h | 3 +++ 4 files changed, 19 insertions(+), 14 deletions(-)