@@ -89,10 +89,10 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = {
vgic_mmio_read_rao, vgic_mmio_write_wi, 1,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISENABLER,
- vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
+ vgic_mmio_read_enable, vgic_mmio_write_senable, 1,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ICENABLER,
- vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
+ vgic_mmio_read_enable, vgic_mmio_write_cenable, 1,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GICD_ISPENDR,
vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
@@ -39,6 +39,123 @@ void vgic_mmio_write_wi(struct vcpu *vcpu, paddr_t addr,
/* Ignore */
}
+/*
+ * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
+ * of the enabled bit, so there is only one function for both here.
+ */
+unsigned long vgic_mmio_read_enable(struct vcpu *vcpu,
+ paddr_t addr, unsigned int len)
+{
+ uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1);
+ uint32_t value = 0;
+ unsigned int i;
+
+ /* Loop over all IRQs affected by this read */
+ for ( i = 0; i < len * 8; i++ )
+ {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
+
+ if ( irq->enabled )
+ value |= (1U << i);
+
+ vgic_put_irq(vcpu->domain, irq);
+ }
+
+ return value;
+}
+
+void vgic_mmio_write_senable(struct vcpu *vcpu,
+ paddr_t addr, unsigned int len,
+ unsigned long val)
+{
+ uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1);
+ unsigned int i;
+
+ for_each_set_bit( i, &val, len * 8 )
+ {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
+ unsigned long flags;
+ irq_desc_t *desc;
+
+ spin_lock_irqsave(&irq->irq_lock, flags);
+
+ if ( irq->enabled ) /* skip already enabled IRQs */
+ {
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
+ vgic_put_irq(vcpu->domain, irq);
+ continue;
+ }
+
+ irq->enabled = true;
+ if ( irq->hw )
+ {
+ /*
+ * The irq cannot be a PPI; we only support delivery
+ * of SPIs to guests.
+ */
+ ASSERT(irq->hwintid >= VGIC_NR_PRIVATE_IRQS);
+
+ desc = irq_to_desc(irq->hwintid);
+ }
+ else
+ desc = NULL;
+
+ vgic_queue_irq_unlock(vcpu->domain, irq, flags);
+
+ if ( desc )
+ vgic_sync_hardware_irq(vcpu->domain, desc, irq);
+
+ vgic_put_irq(vcpu->domain, irq);
+ }
+}
+
+void vgic_mmio_write_cenable(struct vcpu *vcpu,
+ paddr_t addr, unsigned int len,
+ unsigned long val)
+{
+ uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1);
+ unsigned int i;
+
+ for_each_set_bit( i, &val, len * 8 )
+ {
+ struct vgic_irq *irq;
+ unsigned long flags;
+ irq_desc_t *desc;
+
+ irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
+ spin_lock_irqsave(&irq->irq_lock, flags);
+
+ if ( !irq->enabled ) /* skip already disabled IRQs */
+ {
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
+ vgic_put_irq(vcpu->domain, irq);
+ continue;
+ }
+
+ irq->enabled = false;
+
+ if ( irq->hw )
+ {
+ /*
+ * The irq cannot be a PPI; we only support delivery
+ * of SPIs to guests.
+ */
+ ASSERT(irq->hwintid >= VGIC_NR_PRIVATE_IRQS);
+
+ desc = irq_to_desc(irq->hwintid);
+ }
+ else
+ desc = NULL;
+
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
+
+ if ( desc )
+ vgic_sync_hardware_irq(vcpu->domain, desc, irq);
+
+ vgic_put_irq(vcpu->domain, irq);
+ }
+}
+
static int match_region(const void *key, const void *elt)
{
const unsigned int offset = (unsigned long)key;
@@ -86,6 +86,17 @@ unsigned long vgic_mmio_read_rao(struct vcpu *vcpu,
void vgic_mmio_write_wi(struct vcpu *vcpu, paddr_t addr,
unsigned int len, unsigned long val);
+unsigned long vgic_mmio_read_enable(struct vcpu *vcpu,
+ paddr_t addr, unsigned int len);
+
+void vgic_mmio_write_senable(struct vcpu *vcpu,
+ paddr_t addr, unsigned int len,
+ unsigned long val);
+
+void vgic_mmio_write_cenable(struct vcpu *vcpu,
+ paddr_t addr, unsigned int len,
+ unsigned long val);
+
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);
#endif
@@ -689,6 +689,46 @@ void vgic_kick_vcpus(struct domain *d)
}
}
+static unsigned int translate_irq_type(bool is_level)
+{
+ return is_level ? IRQ_TYPE_LEVEL_HIGH : IRQ_TYPE_EDGE_RISING;
+}
+
+void vgic_sync_hardware_irq(struct domain *d,
+ irq_desc_t *desc, struct vgic_irq *irq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ spin_lock(&irq->irq_lock);
+
+ /*
+ * We now forbid tinkering with the hardware IRQ association
+ * during a domain's lifetime.
+ */
+ ASSERT(irq->hw && desc->irq == irq->hwintid);
+
+ if ( irq->enabled )
+ {
+ /*
+ * We might end up here from various callers, so check that the
+ * interrupt is disabled before trying to change the config.
+ */
+ if ( irq_type_set_by_domain(d) &&
+ test_bit(_IRQ_DISABLED, &desc->status) )
+ gic_set_irq_type(desc, translate_irq_type(irq->config));
+
+ if ( irq->target_vcpu )
+ irq_set_affinity(desc, cpumask_of(irq->target_vcpu->processor));
+ desc->handler->enable(desc);
+ }
+ else
+ desc->handler->disable(desc);
+
+ spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
+
/*
* Local variables:
* mode: C
@@ -51,6 +51,9 @@ static inline void vgic_get_irq_kref(struct vgic_irq *irq)
atomic_inc(&irq->refcount);
}
+void vgic_sync_hardware_irq(struct domain *d,
+ irq_desc_t *desc, struct vgic_irq *irq);
+
void vgic_v2_fold_lr_state(struct vcpu *vcpu);
void vgic_v2_populate_lr(struct vcpu *vcpu, struct vgic_irq *irq, int lr);
void vgic_v2_set_underflow(struct vcpu *vcpu);
As the enable register handlers are shared between the v2 and v3 emulation, their implementation goes into vgic-mmio.c, to be easily referenced from the v3 emulation as well later. This introduces a vgic_sync_hardware_irq() function, which updates the physical side of a hardware mapped virtual IRQ. Because the existing locking order between vgic_irq->irq_lock and irq_desc->lock dictates so, we drop the irq_lock and retake them in the proper order. Signed-off-by: Andre Przywara <andre.przywara@linaro.org> --- Changelog v1 ... v2: - ASSERT on h/w IRQ and vIRQ staying in sync xen/arch/arm/vgic/vgic-mmio-v2.c | 4 +- xen/arch/arm/vgic/vgic-mmio.c | 117 +++++++++++++++++++++++++++++++++++++++ xen/arch/arm/vgic/vgic-mmio.h | 11 ++++ xen/arch/arm/vgic/vgic.c | 40 +++++++++++++ xen/arch/arm/vgic/vgic.h | 3 + 5 files changed, 173 insertions(+), 2 deletions(-)