--- a/xen/arch/arm/gic-v2.c
+++ b/xen/arch/arm/gic-v2.c
@@ -569,9 +569,21 @@ static void gicv2_guest_irq_end(struct irq_desc *desc)
     /* Deactivation happens in maintenance interrupt / via GICV */
 }
 
-static void gicv2_irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
+static void gicv2_irq_set_affinity(struct irq_desc *desc, const cpumask_t *cpu_mask)
 {
-    BUG();
+    unsigned int mask;
+
+    ASSERT(!cpumask_empty(cpu_mask));
+
+    spin_lock(&gicv2.lock);
+
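+    /* Reduce the cpumask to a mask of GIC CPU interface IDs */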
+    mask = gicv2_cpu_mask(cpu_mask);
+
+    /* Set target CPU mask (RAZ/WI on uniprocessor) */
+    writeb_gicd(mask, GICD_ITARGETSR + desc->irq);
+
+    spin_unlock(&gicv2.lock);
 }
 
 /* XXX different for level vs edge */
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -382,7 +382,15 @@ static void gic_update_one_lr(struct vcpu *v, int i)
             gic_raise_guest_irq(v, irq, p->priority);
         else {
             list_del_init(&p->inflight);
-            clear_bit(GIC_IRQ_GUEST_MIGRATING, &p->status);
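+            /*
+             * The LR is now free: if the vIRQ was being migrated, the
+             * physical IRQ can safely follow it to the new target vCPU.
+             */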
+            if ( test_and_clear_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
+            {
+                struct vcpu *v_target = vgic_get_target_vcpu(v, irq);
+                irq_set_affinity(p->desc, cpumask_of(v_target->processor));
+            }
         }
     }
 }
--- a/xen/arch/arm/irq.c
+++ b/xen/arch/arm/irq.c
@@ -134,6 +134,13 @@ static inline struct domain *irq_get_domain(struct irq_desc *desc)
     return desc->action->dev_id;
 }
 
+void irq_set_affinity(struct irq_desc *desc, const cpumask_t *cpu_mask)
+{
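+    /* desc is NULL for purely virtual interrupts: nothing to do */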
+    if ( desc != NULL )
+        desc->handler->set_affinity(desc, cpu_mask);
+}
+
 int request_irq(unsigned int irq, unsigned int irqflags,
                 void (*handler)(int, void *, struct cpu_user_regs *),
                 const char *devname, void *dev_id)
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -182,6 +182,8 @@ void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
 
     if ( list_empty(&p->inflight) )
     {
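+        /* Not in flight: the physical IRQ can be retargeted right away */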
+        irq_set_affinity(p->desc, cpumask_of(new->processor));
         spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
         return;
     }
@@ -190,6 +192,8 @@ void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
     {
         list_del_init(&p->lr_queue);
         list_del_init(&p->inflight);
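+        /* Still lr_pending: retarget the physical IRQ, then re-inject */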
+        irq_set_affinity(p->desc, cpumask_of(new->processor));
         spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
         vgic_vcpu_inject_irq(new, irq);
         return;
@@ -202,6 +206,27 @@ void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
     spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
 }
 
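+/* The vCPU has moved to a new pCPU: retarget the SPIs routed to it */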
+void arch_move_irqs(struct vcpu *v)
+{
+    const cpumask_t *cpu_mask = cpumask_of(v->processor);
+    struct domain *d = v->domain;
+    struct pending_irq *p;
+    struct vcpu *v_target;
+    int i;
+
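+    /* Only SPIs (irq >= 32) can be retargeted; SGIs/PPIs are per-CPU */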
+    for ( i = 32; i < d->arch.vgic.nr_lines; i++ )
+    {
+        v_target = vgic_get_target_vcpu(v, i);
+        p = irq_to_pending(v_target, i);
+
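+        /* IRQs mid-migration are retargeted later by gic_update_one_lr */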
+        if ( v_target == v && !test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
+            irq_set_affinity(p->desc, cpu_mask);
+    }
+}
+
 void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n)
 {
     struct domain *d = v->domain;
@@ -259,6 +284,8 @@ void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
         }
         if ( p->desc != NULL )
         {
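+            /* The target vCPU may have moved while the IRQ was disabled */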
+            irq_set_affinity(p->desc, cpumask_of(v_target->processor));
             spin_lock_irqsave(&p->desc->lock, flags);
             p->desc->handler->enable(p->desc);
             spin_unlock_irqrestore(&p->desc->lock, flags);
--- a/xen/include/asm-arm/irq.h
+++ b/xen/include/asm-arm/irq.h
@@ -42,12 +42,15 @@ void init_secondary_IRQ(void);
 
 int route_irq_to_guest(struct domain *d, unsigned int irq,
                        const char *devname);
+void arch_move_irqs(struct vcpu *v);
 
 /* Set IRQ type for an SPI */
 int irq_set_spi_type(unsigned int spi, unsigned int type);
 
 int platform_get_irq(const struct dt_device_node *device, int index);
 
+void irq_set_affinity(struct irq_desc *desc, const cpumask_t *cpu_mask);
+
 #endif /* _ASM_HW_IRQ_H */
 /*
  * Local variables:
--- a/xen/include/asm-x86/irq.h
+++ b/xen/include/asm-x86/irq.h
@@ -197,4 +197,7 @@ void cleanup_domain_irq_mapping(struct domain *);
 
 bool_t cpu_has_pending_apic_eoi(void);
 
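+/* arch_move_irqs() only has work to do on ARM; provide a no-op on x86 */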
+static inline void arch_move_irqs(struct vcpu *v) { }
+
 #endif /* _ASM_HW_IRQ_H */