@@ -572,9 +572,20 @@ static void gicv2_guest_irq_end(struct irq_desc *desc)
    /* Deactivation happens in maintenance interrupt / via GICV */
}

-static void gicv2_irq_set_affinity(struct irq_desc *desc, const cpumask_t *mask)
+static void gicv2_irq_set_affinity(struct irq_desc *desc, const cpumask_t *cpu_mask)
{
-    BUG();
+    unsigned int mask;
+
+    ASSERT(!cpumask_empty(cpu_mask));
+
+    spin_lock(&gicv2.lock);
+
+    mask = gicv2_cpu_mask(cpu_mask);
+
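+    /* Each interrupt has a one-byte target field in ITARGETSR, so a byte
+     * write at offset 'irq' only updates this IRQ's CPU targets */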
+    /* Set target CPU mask (RAZ/WI on uniprocessor) */
+    writeb_gicd(mask, GICD_ITARGETSR + desc->irq);
+
+    spin_unlock(&gicv2.lock);
}

/* XXX different for level vs edge */
@@ -400,6 +400,7 @@ static void gic_update_one_lr(struct vcpu *v, int i)
            /* vgic_get_target_vcpu takes the rank lock, ensuring
             * consistency with other itarget changes. */
            v_target = vgic_get_target_vcpu(v, irq);
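+            /* Route the hardware IRQ to the pCPU of the vCPU that will now
+             * receive this vIRQ */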
+            irq_set_affinity(p->desc, cpumask_of(v_target->processor));
            vgic_vcpu_inject_irq(v_target, irq);
            spin_lock(&v->arch.vgic.lock);
        }
@@ -134,6 +134,12 @@ static inline struct domain *irq_get_domain(struct irq_desc *desc)
    return desc->action->dev_id;
}

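+/* Change which physical CPU(s) a hardware interrupt is delivered to */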
+void irq_set_affinity(struct irq_desc *desc, const cpumask_t *cpu_mask)
+{
+    if ( desc != NULL )
+        desc->handler->set_affinity(desc, cpu_mask);
+}
+
int request_irq(unsigned int irq, unsigned int irqflags,
                void (*handler)(int, void *, struct cpu_user_regs *),
                const char *devname, void *dev_id)
@@ -199,6 +199,7 @@ void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
    if ( list_empty(&p->inflight) )
    {
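+        /* Not inflight on the old vCPU: updating the hardware routing is
+         * all that is needed */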
+        irq_set_affinity(p->desc, cpumask_of(new->processor));
        spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
        return;
    }
@@ -207,6 +208,7 @@ void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
    {
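+        /* The vIRQ is queued but not yet in an LR: requeue it on the new
+         * vCPU and make the hardware IRQ follow it */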
        list_del_init(&p->lr_queue);
        list_del_init(&p->inflight);
+        irq_set_affinity(p->desc, cpumask_of(new->processor));
        spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
        vgic_vcpu_inject_irq(new, irq);
        return;
@@ -222,6 +224,24 @@ void vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)
    spin_unlock_irqrestore(&old->arch.vgic.lock, flags);
}

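+/* Called when vCPU v moves to a different physical CPU: make the hardware
+ * SPIs currently targeting v follow it to the new pCPU */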
+void arch_move_irqs(struct vcpu *v)
+{
+    const cpumask_t *cpu_mask = cpumask_of(v->processor);
+    struct domain *d = v->domain;
+    struct pending_irq *p;
+    struct vcpu *v_target;
+    int i;
+
+    for ( i = 32; i < d->arch.vgic.nr_lines; i++ )
+    {
+        v_target = vgic_get_target_vcpu(v, i);
+        p = irq_to_pending(v_target, i);
+
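+        /* IRQs targeting another vCPU are left alone; IRQs already being
+         * migrated get their affinity fixed up by gic_update_one_lr */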
+        if ( v_target == v && !test_bit(GIC_IRQ_GUEST_MIGRATING, &p->status) )
+            irq_set_affinity(p->desc, cpu_mask);
+    }
+}
+
void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n)
{
    const unsigned long mask = r;
@@ -277,6 +297,7 @@ void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
        }
        if ( p->desc != NULL )
        {
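+            /* Route the hardware IRQ to the target vCPU's pCPU before
+             * enabling it */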
+            irq_set_affinity(p->desc, cpumask_of(v_target->processor));
            spin_lock_irqsave(&p->desc->lock, flags);
            p->desc->handler->enable(p->desc);
            spin_unlock_irqrestore(&p->desc->lock, flags);
@@ -321,6 +321,7 @@ struct gic_hw_operations {
void register_gic_ops(const struct gic_hw_operations *ops);
struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int irq);
+void arch_move_irqs(struct vcpu *v);
#endif /* __ASSEMBLY__ */
#endif
@@ -48,6 +48,8 @@ int irq_set_spi_type(unsigned int spi, unsigned int type);
int platform_get_irq(const struct dt_device_node *device, int index);
+void irq_set_affinity(struct irq_desc *desc, const cpumask_t *cpu_mask);
+
#endif /* _ASM_HW_IRQ_H */
/*
* Local variables:
@@ -197,4 +197,6 @@ void cleanup_domain_irq_mapping(struct domain *);
bool_t cpu_has_pending_apic_eoi(void);
+static inline void arch_move_irqs(struct vcpu *v) { }
+
#endif /* _ASM_HW_IRQ_H */